From 70b7a38d7ebd819aa7a2869e0c2fbacaae8071c1 Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Tue, 10 Feb 2026 15:07:49 -0800 Subject: [PATCH 01/13] start refactor to support non-gen3 file handling backend --- backend/drs/drs.go | 295 ++++++++++++++++++++++++++++++++ backend/gen3/gen3.go | 235 +++++++++++++++++++++++++ backend/interface.go | 51 ++++++ cmd/download-multiple.go | 14 +- cmd/download-single.go | 15 +- cmd/retry-upload.go | 2 +- cmd/root.go | 2 + common/common.go | 7 + common/resource.go | 5 +- download/batch.go | 14 +- download/downloader.go | 22 +-- download/file_info.go | 107 ++---------- download/transfer.go | 41 +++-- download/transfer_test.go | 102 +++++------ download/types.go | 8 +- download/url_resolution.go | 57 +----- drs/drs.go | 80 ++++++++- drs/object_builder.go | 27 ++- drs/types.go | 29 +++- g3client/client.go | 1 + hash/hash.go | 4 +- indexd/client.go | 113 +++++++++++- indexd/client_test.go | 6 +- indexd/convert.go | 8 +- indexd/upsert.go | 2 +- logs/factory.go | 2 +- logs/scoreboard.go | 21 ++- mocks/mock_configure.go | 27 ++- mocks/mock_gen3interface.go | 31 ++++ mocks/mock_indexd.go | 45 +++++ request/auth.go | 4 +- request/request.go | 4 +- tests/download-multiple_test.go | 54 +++--- tests/utils_test.go | 15 +- upload/multipart_test.go | 7 + 35 files changed, 1138 insertions(+), 319 deletions(-) create mode 100644 backend/drs/drs.go create mode 100644 backend/gen3/gen3.go create mode 100644 backend/interface.go diff --git a/backend/drs/drs.go b/backend/drs/drs.go new file mode 100644 index 0000000..8dca651 --- /dev/null +++ b/backend/drs/drs.go @@ -0,0 +1,295 @@ +package drs_backend + +import ( + "context" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/calypr/data-client/backend" + "github.com/calypr/data-client/common" + drs "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/request" +) + +type DrsBackend struct { + BaseURL string + logger 
*slog.Logger + req request.RequestInterface +} + +func NewDrsBackend(baseURL string, logger *slog.Logger, req request.RequestInterface) backend.Backend { + return &DrsBackend{ + BaseURL: baseURL, + logger: logger, + req: req, + } +} + +func (d *DrsBackend) Name() string { + return "DRS" +} + +func (d *DrsBackend) Logger() *slog.Logger { + return d.logger +} + +func (d *DrsBackend) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { + skipAuth := common.IsCloudPresignedURL(fdr.PresignedURL) + + rb := d.req.New(http.MethodGet, fdr.PresignedURL) + if fdr.Range > 0 { + rb.WithHeader("Range", "bytes="+strconv.FormatInt(fdr.Range, 10)+"-") + } + + if skipAuth { + rb.WithSkipAuth(true) + } + + return d.req.Do(ctx, rb) +} + +func (d *DrsBackend) buildURL(paths ...string) (string, error) { + u, err := url.Parse(d.BaseURL) + if err != nil { + return "", err + } + // path.Join collapses //, which mangles access_id if it's a URL. + // We join manually but ensure we don't end up with triple slashes if a part starts/ends with /. 
+ fullPath := u.Path + for _, p := range paths { + if p == "" { + continue + } + if !strings.HasSuffix(fullPath, "/") && !strings.HasPrefix(p, "/") { + fullPath += "/" + } + fullPath += p + } + u.Path = fullPath + return u.String(), nil +} + +func (d *DrsBackend) doJSONRequest(ctx context.Context, method, url string, body interface{}, dst interface{}) error { + rb := d.req.New(method, url) + if body != nil { + if _, err := rb.WithJSONBody(body); err != nil { + return err + } + } + + resp, err := d.req.Do(ctx, rb) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + bodyBytes, _ := io.ReadAll(resp.Body) + return fmt.Errorf("request to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) + } + + if dst != nil { + return json.NewDecoder(resp.Body).Decode(dst) + } + return nil +} + +func (d *DrsBackend) GetFileDetails(ctx context.Context, guid string) (*drs.DRSObject, error) { + u, err := d.buildURL("ga4gh/drs/v1/objects", guid) + if err != nil { + return nil, err + } + + var obj drs.DRSObject + if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &obj); err != nil { + return nil, err + } + return &obj, nil +} + +func (d *DrsBackend) GetDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { + // If accessID is empty, try to find one + if accessID == "" { + obj, err := d.GetFileDetails(ctx, guid) + if err != nil { + return "", err + } + if len(obj.AccessMethods) == 0 { + return "", fmt.Errorf("no access methods found for object %s", guid) + } + + // Prefer one with AccessID + for _, am := range obj.AccessMethods { + if am.AccessID != "" { + accessID = am.AccessID + break + } + } + if accessID == "" { + // Fallback to first if defined + if len(obj.AccessMethods) > 0 && obj.AccessMethods[0].AccessID != "" { + accessID = obj.AccessMethods[0].AccessID + } else { + // If no access ID, maybe direct URL? 
+ if obj.AccessMethods[0].AccessURL.URL != "" { + return obj.AccessMethods[0].AccessURL.URL, nil + } + return "", fmt.Errorf("no suitable access method found for object %s", guid) + } + } + } + + u, err := d.buildURL("ga4gh/drs/v1/objects", guid, "access", accessID) + if err != nil { + return "", err + } + + var accessURL drs.AccessURL + if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &accessURL); err != nil { + return "", err + } + return accessURL.URL, nil +} + +func (d *DrsBackend) GetObjectByHash(ctx context.Context, checksumType, checksum string) ([]drs.DRSObject, error) { + // Query: GET /ga4gh/drs/v1/objects/checksum/ + // Note: checksumType is ignored here as per original implementation in LocalClient relying on checksum only in path. + // Or should we use checksumType? + u, err := d.buildURL("ga4gh/drs/v1/objects", "checksum", checksum) + if err != nil { + return nil, err + } + + var objs []drs.DRSObject + if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &objs); err != nil { + return nil, err + } + + return objs, nil +} + +func (d *DrsBackend) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { + // Custom endpoint: POST /index/index/bulk/hashes + // This path suggests it's mimicking Indexd API structure even if it's a DRS server + u, err := d.buildURL("index/index/bulk/hashes") + if err != nil { + return nil, err + } + + req := struct { + Hashes []string `json:"hashes"` + }{ + Hashes: hashes, + } + + var list struct { + Records []drs.DRSObject `json:"records"` + } + if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &list); err != nil { + return nil, err + } + + result := make(map[string][]drs.DRSObject) + for _, obj := range list.Records { + if obj.Checksums.SHA256 != "" { + result[obj.Checksums.SHA256] = append(result[obj.Checksums.SHA256], obj) + } + } + return result, nil +} + +func (d *DrsBackend) Register(ctx context.Context, obj *drs.DRSObject) (*drs.DRSObject, error) { + u, err := 
d.buildURL("ga4gh/drs/v1/objects/register") + if err != nil { + return nil, err + } + + req := drs.RegisterObjectsRequest{ + Candidates: []drs.DRSObjectCandidate{drs.ConvertToCandidate(obj)}, + } + + var registeredObjs []*drs.DRSObject + if err := d.doJSONRequest(ctx, http.MethodPost, u, req, ®isteredObjs); err != nil { + return nil, err + } + + if len(registeredObjs) == 0 { + return nil, fmt.Errorf("server returned no registered objects") + } + + return registeredObjs[0], nil +} + +func (d *DrsBackend) BatchRegister(ctx context.Context, objs []*drs.DRSObject) ([]*drs.DRSObject, error) { + u, err := d.buildURL("ga4gh/drs/v1/objects/register") + if err != nil { + return nil, err + } + + var candidates []drs.DRSObjectCandidate + for _, obj := range objs { + candidates = append(candidates, drs.ConvertToCandidate(obj)) + } + req := drs.RegisterObjectsRequest{ + Candidates: candidates, + } + + var registeredObjs []*drs.DRSObject + if err := d.doJSONRequest(ctx, http.MethodPost, u, req, ®isteredObjs); err != nil { + return nil, err + } + + return registeredObjs, nil +} + +func (d *DrsBackend) GetUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { + // Hits the server's clean /data/upload/{file_id} endpoint + u, err := d.buildURL("data/upload", guid) + if err != nil { + return "", err + } + // Add filename/bucket hints + q := url.Values{} + q.Set("file_name", filename) + + // Evaluate bucket from argument or struct + effectiveBucket := bucket + if effectiveBucket != "" { + q.Set("bucket", effectiveBucket) + } + + u += "?" + q.Encode() + + var res struct { + URL string `json:"url"` + } + if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &res); err != nil { + return "", err + } + return res.URL, nil +} + +func (d *DrsBackend) Upload(ctx context.Context, url string, body io.Reader, size int64) error { + rb := d.req.New(http.MethodPut, url). + WithBody(body). 
+ WithSkipAuth(true) // S3 presigned URLs don't need our bearer token + + resp, err := d.req.Do(ctx, rb) + if err != nil { + return fmt.Errorf("upload to %s failed: %w", url, err) + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + bodyBytes, _ := io.ReadAll(resp.Body) + return fmt.Errorf("upload to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) + } + + return nil +} diff --git a/backend/gen3/gen3.go b/backend/gen3/gen3.go new file mode 100644 index 0000000..a71c9f4 --- /dev/null +++ b/backend/gen3/gen3.go @@ -0,0 +1,235 @@ +package gen3 + +import ( + "context" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "strconv" + "strings" + + "github.com/calypr/data-client/backend" + "github.com/calypr/data-client/common" + drs "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/fence" + "github.com/calypr/data-client/g3client" + "github.com/calypr/data-client/request" +) + +type Gen3Backend struct { + client g3client.Gen3Interface +} + +func NewGen3Backend(client g3client.Gen3Interface) backend.Backend { + return &Gen3Backend{ + client: client, + } +} + +func (g *Gen3Backend) Name() string { + return "Gen3" +} + +func (g *Gen3Backend) Logger() *slog.Logger { + return g.client.Logger().Logger +} + +func (g *Gen3Backend) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { + skipAuth := common.IsCloudPresignedURL(fdr.PresignedURL) + + rb := g.client.New(http.MethodGet, fdr.PresignedURL) + if fdr.Range > 0 { + rb.WithHeader("Range", "bytes="+strconv.FormatInt(fdr.Range, 10)+"-") + } + + if skipAuth { + rb.WithSkipAuth(true) + } + + return g.client.Do(ctx, rb) +} + +func (g *Gen3Backend) GetFileDetails(ctx context.Context, guid string) (*drs.DRSObject, error) { + // 1. 
Try Shepherd + hasShepherd, err := g.client.Fence().CheckForShepherdAPI(ctx) + if err == nil && hasShepherd { + endpoint := strings.TrimSuffix(g.client.GetCredential().APIEndpoint, "/") + common.ShepherdEndpoint + "/objects/" + guid + rb := g.client.New(http.MethodGet, endpoint) + resp, err := g.client.Do(ctx, rb) + if err == nil && resp.StatusCode == http.StatusOK { + defer resp.Body.Close() + var shepherdResp struct { + Record struct { + FileName string `json:"file_name"` + Size int64 `json:"size"` + Did string `json:"did"` + } `json:"record"` + } + if err := json.NewDecoder(resp.Body).Decode(&shepherdResp); err == nil { + return &drs.DRSObject{ + Name: shepherdResp.Record.FileName, + Size: shepherdResp.Record.Size, + Id: shepherdResp.Record.Did, + }, nil + } + } + if err != nil { + g.Logger().Warn("Shepherd lookup failed, falling back to Indexd", "guid", guid, "error", err) + } + } + + // 2. Fallback to Indexd + return g.client.Indexd().GetObject(ctx, guid) +} + +func (g *Gen3Backend) GetObjectByHash(ctx context.Context, checksumType, checksum string) ([]drs.DRSObject, error) { + return g.client.Indexd().GetObjectByHash(ctx, checksumType, checksum) +} + +func (g *Gen3Backend) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { + return g.client.Indexd().BatchGetObjectsByHash(ctx, hashes) +} + +func (g *Gen3Backend) GetDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { + // For Gen3, often "accessID" is used as a protocol hint like "s3", "gs", or "?protocol=s3" + // 1. Try Fence first + url, err := g.client.Fence().GetDownloadPresignedUrl(ctx, guid, accessID) + if err == nil && url != "" { + return url, nil + } + + // 2. Fallback to Indexd + // Indexd expects "s3", "gs", "ftp", "http", "https" etc. 
+ // We need to clean up accessID if it contains query params like "?protocol=" + accessType := "s3" // default + if strings.Contains(accessID, "protocol=") { + parts := strings.Split(accessID, "=") + if len(parts) > 1 { + accessType = parts[len(parts)-1] + } + } else if accessID != "" { + accessType = accessID + } + + resp, errIdx := g.client.Indexd().GetDownloadURL(ctx, guid, accessType) + if errIdx == nil && resp != nil && resp.URL != "" { + return resp.URL, nil + } + + if err != nil { + return "", err + } + if errIdx != nil { + return "", errIdx + } + return "", fmt.Errorf("failed to resolve download URL for %s", guid) +} + +func (g *Gen3Backend) Register(ctx context.Context, obj *drs.DRSObject) (*drs.DRSObject, error) { + return g.client.Indexd().RegisterRecord(ctx, obj) +} + +func (g *Gen3Backend) BatchRegister(ctx context.Context, objs []*drs.DRSObject) ([]*drs.DRSObject, error) { + return g.client.Indexd().RegisterRecords(ctx, objs) +} + +// ShepherdInitRequestObject copied from upload/types.go to avoid circular dependency +type ShepherdInitRequestObject struct { + Filename string `json:"file_name"` + Authz ShepherdAuthz `json:"authz"` + Aliases []string `json:"aliases"` + Metadata map[string]any `json:"metadata"` +} + +type ShepherdAuthz struct { + Version string `json:"version"` + ResourcePaths []string `json:"resource_paths"` +} + +type PresignedURLResponse struct { + GUID string `json:"guid"` + URL string `json:"upload_url"` +} + +func (g *Gen3Backend) GetUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { + hasShepherd, err := g.client.Fence().CheckForShepherdAPI(ctx) + if err != nil || !hasShepherd { + // Fallback to Fence + var msg fence.FenceResponse + + if guid != "" { + msg, err = g.client.Fence().GetUploadPresignedUrl(ctx, guid, filename, bucket) + } else { + // Init upload if no GUID + msg, err = g.client.Fence().InitUpload(ctx, filename, bucket, "") + } + + if err != nil { 
+ return "", err + } + if msg.URL == "" { + return "", fmt.Errorf("error in generating presigned URL for %s", filename) + } + return msg.URL, nil + } + + // Shepherd Logic + shepherdPayload := ShepherdInitRequestObject{ + Filename: filename, + Authz: ShepherdAuthz{ + Version: "0", ResourcePaths: metadata.Authz, + }, + Aliases: metadata.Aliases, + Metadata: metadata.Metadata, + } + + reader, err := common.ToJSONReader(shepherdPayload) + if err != nil { + return "", err + } + + cred := g.client.GetCredential() + r, err := g.client.Fence().Do( + ctx, + &request.RequestBuilder{ + Url: cred.APIEndpoint + common.ShepherdEndpoint + "/objects", + Method: http.MethodPost, + Body: reader, + Token: cred.AccessToken, + }) + if err != nil { + return "", fmt.Errorf("shepherd upload init failed: %w", err) + } + defer r.Body.Close() + + if r.StatusCode != http.StatusCreated && r.StatusCode != http.StatusOK { + return "", fmt.Errorf("shepherd upload init failed with status %d", r.StatusCode) + } + + var res PresignedURLResponse + if err := json.NewDecoder(r.Body).Decode(&res); err != nil { + return "", err + } + return res.URL, nil +} + +func (g *Gen3Backend) Upload(ctx context.Context, url string, body io.Reader, size int64) error { + rb := g.client.New(http.MethodPut, url). + WithBody(body). 
+ WithSkipAuth(true) + + resp, err := g.client.Do(ctx, rb) + if err != nil { + return fmt.Errorf("upload to %s failed: %w", url, err) + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + bodyBytes, _ := io.ReadAll(resp.Body) + return fmt.Errorf("upload to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) + } + + return nil +} diff --git a/backend/interface.go b/backend/interface.go new file mode 100644 index 0000000..4b75ad9 --- /dev/null +++ b/backend/interface.go @@ -0,0 +1,51 @@ +package backend + +import ( + "context" + "io" + "log/slog" + "net/http" + + "github.com/calypr/data-client/common" + drs "github.com/calypr/data-client/drs" +) + +// Backend abstract the interaction with underlying data service (Gen3 or standard DRS) +type Backend interface { + Name() string + Logger() *slog.Logger + + // --- Read Operations --- + + // GetFileDetails retrieves the DRS object for a given GUID/DID + GetFileDetails(ctx context.Context, guid string) (*drs.DRSObject, error) + + // GetObjectByHash retrieves objects matching a checksum + GetObjectByHash(ctx context.Context, checksumType, checksum string) ([]drs.DRSObject, error) + + // BatchGetObjectsByHash retrieves objects matching a list of hashes + BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) + + // GetDownloadURL retrieves a signed URL for downloading the file content + // accessID is optional (used for DRS objects with multiple access methods) + GetDownloadURL(ctx context.Context, guid string, accessID string) (string, error) + + // Download performs the HTTP GET for the file content using the backend's preferred request engine. + Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) + + // --- Write Operations --- + + // Register registers a new file metadata record. 
+ // Returns the registered object (with populated GUID/DID if it was new) + Register(ctx context.Context, obj *drs.DRSObject) (*drs.DRSObject, error) + + // BatchRegister registers multiple file metadata records. + BatchRegister(ctx context.Context, objs []*drs.DRSObject) ([]*drs.DRSObject, error) + + // GetUploadURL retrieves a presigned URL for uploading file content. + // implementation handles provider-specific logic (e.g. Fence vs Shepherd vs DRS-Upload) + GetUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) + + // Upload performs the HTTP PUT for the file content to the presigned URL. + Upload(ctx context.Context, url string, body io.Reader, size int64) error +} diff --git a/cmd/download-multiple.go b/cmd/download-multiple.go index fa91c15..01d5c47 100644 --- a/cmd/download-multiple.go +++ b/cmd/download-multiple.go @@ -7,6 +7,9 @@ import ( "log" "os" + "github.com/calypr/data-client/backend" + drsbackend "github.com/calypr/data-client/backend/drs" + gen3backend "github.com/calypr/data-client/backend/gen3" "github.com/calypr/data-client/common" "github.com/calypr/data-client/download" "github.com/calypr/data-client/g3client" @@ -78,9 +81,18 @@ func init() { g3i.Logger().Fatalf("Error has occurred during unmarshalling manifest object: %v\n", err) } + var bk backend.Backend + if backendType == "drs" { + cred := g3i.GetCredential() + // Use the API endpoint from the profile as the DRS server URL + bk = drsbackend.NewDrsBackend(cred.APIEndpoint, logger.Logger, g3i) + } else { + bk = gen3backend.NewGen3Backend(g3i) + } + err = download.DownloadMultiple( context.Background(), - g3i, + bk, objects, downloadPath, filenameFormat, diff --git a/cmd/download-single.go b/cmd/download-single.go index 6d1c5db..b94ea01 100644 --- a/cmd/download-single.go +++ b/cmd/download-single.go @@ -4,6 +4,9 @@ import ( "context" "log" + "github.com/calypr/data-client/backend" + drsbackend 
"github.com/calypr/data-client/backend/drs" + gen3backend "github.com/calypr/data-client/backend/gen3" "github.com/calypr/data-client/common" "github.com/calypr/data-client/download" "github.com/calypr/data-client/g3client" @@ -37,14 +40,22 @@ func init() { log.Fatalf("Failed to parse config on profile %s, %v", profile, err) } + var bk backend.Backend + if backendType == "drs" { + cred := g3I.GetCredential() + bk = drsbackend.NewDrsBackend(cred.APIEndpoint, logger.Logger, g3I) + } else { + bk = gen3backend.NewGen3Backend(g3I) + } + objects := []common.ManifestObject{ - common.ManifestObject{ + { GUID: guid, }, } err = download.DownloadMultiple( context.Background(), - g3I, + bk, objects, downloadPath, filenameFormat, diff --git a/cmd/retry-upload.go b/cmd/retry-upload.go index 69de60d..042922b 100644 --- a/cmd/retry-upload.go +++ b/cmd/retry-upload.go @@ -36,7 +36,7 @@ func init() { logger := g3.Logger() // Create scoreboard with our logger injected - sb := logs.NewSB(common.MaxRetryCount, logger) + sb := logs.NewSB(common.MaxRetryCount, logger.Logger) // Load failed log failedMap, err := common.LoadFailedLog(failedLogPath) diff --git a/cmd/root.go b/cmd/root.go index a2ec2f8..2a676e4 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -7,6 +7,7 @@ import ( ) var profile string +var backendType string // RootCmd represents the base command when called without any subcommands var RootCmd = &cobra.Command{ @@ -27,5 +28,6 @@ func Execute() { func init() { RootCmd.PersistentFlags().StringVar(&profile, "profile", "", "Specify profile to use") + RootCmd.PersistentFlags().StringVar(&backendType, "backend", "gen3", "Specify backend to use (gen3 or drs)") _ = RootCmd.MarkFlagRequired("profile") } diff --git a/common/common.go b/common/common.go index 716625f..711cf75 100644 --- a/common/common.go +++ b/common/common.go @@ -139,3 +139,10 @@ func CanDownloadFile(signedURL string) error { return fmt.Errorf("failed to access file, HTTP status: %d", resp.StatusCode) } +func 
IsCloudPresignedURL(url string) bool { + return strings.Contains(url, "X-Amz-Signature") || + strings.Contains(url, "X-Goog-Signature") || + strings.Contains(url, "Signature=") || + strings.Contains(url, "AWSAccessKeyId=") || + strings.Contains(url, "Expires=") +} diff --git a/common/resource.go b/common/resource.go index 9e0d011..86d0828 100644 --- a/common/resource.go +++ b/common/resource.go @@ -6,8 +6,11 @@ import ( ) func ProjectToResource(project string) (string, error) { + if project == "" { + return "", fmt.Errorf("error: project ID is empty") + } if !strings.Contains(project, "-") { - return "", fmt.Errorf("error: invalid project ID %s, ID should look like -", project) + return "/programs/default/projects/" + project, nil } projectIdArr := strings.SplitN(project, "-", 2) return "/programs/" + projectIdArr[0] + "/projects/" + projectIdArr[1], nil diff --git a/download/batch.go b/download/batch.go index 967f16a..9159cef 100644 --- a/download/batch.go +++ b/download/batch.go @@ -9,8 +9,8 @@ import ( "sync" "sync/atomic" + "github.com/calypr/data-client/backend" "github.com/calypr/data-client/common" - "github.com/calypr/data-client/g3client" "github.com/calypr/data-client/logs" "github.com/hashicorp/go-multierror" "github.com/vbauerster/mpb/v8" @@ -21,7 +21,7 @@ import ( // downloadFiles performs bounded parallel downloads and collects ALL errors func downloadFiles( ctx context.Context, - g3i g3client.Gen3Interface, + bk backend.Backend, files []common.FileDownloadResponseObject, numParallel int, protocol string, @@ -30,7 +30,7 @@ func downloadFiles( return 0, nil } - logger := g3i.Logger() + logger := bk.Logger() protocolText := "" if protocol != "" { @@ -76,7 +76,7 @@ func downloadFiles( }() // Get presigned URL - if err = GetDownloadResponse(ctx, g3i, fdr, protocolText); err != nil { + if err = GetDownloadResponse(ctx, bk, fdr, protocolText); err != nil { err = fmt.Errorf("get URL for %s (GUID: %s): %w", fdr.Filename, fdr.GUID, err) return err } @@ -179,10 
+179,10 @@ func downloadFiles( sb.PrintSB() if combinedError != nil { - logger.Printf("%d files downloaded, but %d failed:\n", downloaded, len(allErrors)) - logger.Println(combinedError.Error()) + logger.Info(fmt.Sprintf("%d files downloaded, but %d failed:", downloaded, len(allErrors))) + logger.Info(combinedError.Error()) } else { - logger.Printf("%d files downloaded successfully.\n", downloaded) + logger.Info(fmt.Sprintf("%d files downloaded successfully.", downloaded)) } return downloaded, combinedError diff --git a/download/downloader.go b/download/downloader.go index 044eeb8..7e3fc73 100644 --- a/download/downloader.go +++ b/download/downloader.go @@ -7,8 +7,8 @@ import ( "os" "strings" + "github.com/calypr/data-client/backend" "github.com/calypr/data-client/common" - "github.com/calypr/data-client/g3client" "github.com/vbauerster/mpb/v8" "github.com/vbauerster/mpb/v8/decor" ) @@ -16,7 +16,7 @@ import ( // DownloadMultiple is the public entry point called from g3cmd func DownloadMultiple( ctx context.Context, - g3i g3client.Gen3Interface, + bk backend.Backend, objects []common.ManifestObject, downloadPath string, filenameFormat string, @@ -26,7 +26,7 @@ func DownloadMultiple( numParallel int, skipCompleted bool, ) error { - logger := g3i.Logger() + logger := bk.Logger() // === Input validation === if numParallel < 1 { @@ -52,7 +52,7 @@ func DownloadMultiple( } // === Warnings and user confirmation === - if err := handleWarningsAndConfirmation(ctx, logger.Logger, downloadPath, filenameFormat, rename, noPrompt); err != nil { + if err := handleWarningsAndConfirmation(ctx, logger, downloadPath, filenameFormat, rename, noPrompt); err != nil { return err // aborted by user } @@ -62,7 +62,7 @@ func DownloadMultiple( } // === Prepare files (metadata + local validation) === - toDownload, skipped, renamed, err := prepareFiles(ctx, g3i, objects, downloadPath, filenameFormat, rename, skipCompleted, protocol) + toDownload, skipped, renamed, err := prepareFiles(ctx, bk, 
objects, downloadPath, filenameFormat, rename, skipCompleted, protocol) if err != nil { return err } @@ -73,12 +73,12 @@ func DownloadMultiple( "Skipped", len(skipped)) // === Download phase === - downloaded, downloadErr := downloadFiles(ctx, g3i, toDownload, numParallel, protocol) + downloaded, downloadErr := downloadFiles(ctx, bk, toDownload, numParallel, protocol) // === Final summary === logger.InfoContext(ctx, fmt.Sprintf("%d files downloaded successfully.", downloaded)) - printRenamed(ctx, logger.Logger, renamed) - printSkipped(ctx, logger.Logger, skipped) + printRenamed(ctx, logger, renamed) + printSkipped(ctx, logger, skipped) if downloadErr != nil { logger.WarnContext(ctx, "Some downloads failed. See errors above.") @@ -109,13 +109,13 @@ func handleWarningsAndConfirmation(ctx context.Context, logger *slog.Logger, dow // prepareFiles gathers metadata, checks local files, collects skips/renames func prepareFiles( ctx context.Context, - g3i g3client.Gen3Interface, + bk backend.Backend, objects []common.ManifestObject, downloadPath, filenameFormat string, rename, skipCompleted bool, protocol string, ) ([]common.FileDownloadResponseObject, []RenamedOrSkippedFileInfo, []RenamedOrSkippedFileInfo, error) { - logger := g3i.Logger() + logger := bk.Logger() renamed := make([]RenamedOrSkippedFileInfo, 0) skipped := make([]RenamedOrSkippedFileInfo, 0) toDownload := make([]common.FileDownloadResponseObject, 0, len(objects)) @@ -137,7 +137,7 @@ func prepareFiles( var err error if info.Name == "" || info.Size == 0 { // Very strict object id checking - info, err = AskGen3ForFileInfo(ctx, g3i, obj.GUID, protocol, downloadPath, filenameFormat, rename, &renamed) + info, err = GetFileInfo(ctx, bk, obj.GUID, protocol, downloadPath, filenameFormat, rename, &renamed) if err != nil { return nil, nil, nil, err } diff --git a/download/file_info.go b/download/file_info.go index 8fb8134..58f955e 100644 --- a/download/file_info.go +++ b/download/file_info.go @@ -2,112 +2,35 @@ package 
download import ( "context" - "encoding/json" - "fmt" - "net/http" - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/g3client" - "github.com/calypr/data-client/request" + "github.com/calypr/data-client/backend" ) -func AskGen3ForFileInfo( +func GetFileInfo( ctx context.Context, - g3i g3client.Gen3Interface, + bk backend.Backend, guid, protocol, downloadPath, filenameFormat string, rename bool, renamedFiles *[]RenamedOrSkippedFileInfo, ) (*IndexdResponse, error) { - hasShepherd, err := g3i.Fence().CheckForShepherdAPI(ctx) + // Use Backend to get object details + drsObj, err := bk.GetFileDetails(ctx, guid) if err != nil { - g3i.Logger().Println("Error checking Shepherd API: " + err.Error()) - g3i.Logger().Println("Falling back to Indexd...") - hasShepherd = false - } - - if hasShepherd { - info, err := fetchFromShepherd(ctx, g3i, guid, downloadPath, filenameFormat, renamedFiles) - if err == nil { - return info, nil - } - g3i.Logger().Printf("Shepherd fetch failed for %s: %v. Falling back to Indexd...\n", guid, err) - } - info, err := fetchFromIndexd(ctx, g3i, http.MethodGet, guid, protocol, downloadPath, filenameFormat, rename, renamedFiles) - if err != nil { - g3i.Logger().Printf("All meta-data lookups failed for %s: %v. Using GUID as default filename.\n", guid, err) + bk.Logger().Warn("Failed to get file details", "guid", guid, "error", err) + // Fallback: use GUID as filename if failed? + // Original code: "All meta-data lookups failed... 
Using GUID as default" *renamedFiles = append(*renamedFiles, RenamedOrSkippedFileInfo{GUID: guid, OldFilename: guid, NewFilename: guid}) - return &IndexdResponse{guid, 0}, nil - } - return info, nil -} - -func fetchFromShepherd( - ctx context.Context, - g3i g3client.Gen3Interface, - guid, downloadPath, filenameFormat string, - renamedFiles *[]RenamedOrSkippedFileInfo, -) (*IndexdResponse, error) { - cred := g3i.GetCredential() - res, err := g3i.Fence().Do(ctx, - &request.RequestBuilder{ - Url: cred.APIEndpoint + "/" + cred.AccessToken + common.ShepherdEndpoint + "/objects/" + guid, - Method: http.MethodGet, - Token: cred.AccessToken, - }) - if err != nil { - return nil, err - } - defer res.Body.Close() - - var decoded struct { - Record struct { - FileName string `json:"file_name"` - Size int64 `json:"size"` - } `json:"record"` - } - if err := json.NewDecoder(res.Body).Decode(&decoded); err != nil { - return nil, err - } - - return &IndexdResponse{applyFilenameFormat(decoded.Record.FileName, guid, downloadPath, filenameFormat, false, renamedFiles), decoded.Record.Size}, nil -} - -func fetchFromIndexd( - ctx context.Context, - g3i g3client.Gen3Interface, method, - guid, protocol, downloadPath, filenameFormat string, - rename bool, - renamedFiles *[]RenamedOrSkippedFileInfo, -) (*IndexdResponse, error) { - - cred := g3i.GetCredential() - resp, err := g3i.Fence().Do( - ctx, - &request.RequestBuilder{ - Url: cred.APIEndpoint + common.IndexdIndexEndpoint + "/" + guid, - Method: method, - Token: cred.AccessToken, - }, - ) - if err != nil { - return nil, fmt.Errorf("error in fetch FromIndexd: %s", err) - } - - defer resp.Body.Close() - msg, err := g3i.Fence().ParseFenceURLResponse(resp) - if err != nil { - return nil, err - } - - if filenameFormat == "guid" { - return &IndexdResponse{guid, msg.Size}, nil + return &IndexdResponse{Name: guid, Size: 0}, nil } - if msg.FileName == "" { - return nil, fmt.Errorf("FileName is a required field in Indexd to download the file, but 
upload record %#v does not contain it", msg) + name := drsObj.Name + if name == "" { + // If name is empty (some DRS servers might not return it?), use GUID + name = guid } - return &IndexdResponse{applyFilenameFormat(msg.FileName, guid, downloadPath, filenameFormat, rename, renamedFiles), msg.Size}, nil + finalName := applyFilenameFormat(name, guid, downloadPath, filenameFormat, rename, renamedFiles) + return &IndexdResponse{Name: finalName, Size: drsObj.Size}, nil } func applyFilenameFormat(baseName, guid, downloadPath, format string, rename bool, renamedFiles *[]RenamedOrSkippedFileInfo) string { diff --git a/download/transfer.go b/download/transfer.go index d171313..b6e678a 100644 --- a/download/transfer.go +++ b/download/transfer.go @@ -4,18 +4,19 @@ import ( "context" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" + "github.com/calypr/data-client/backend" "github.com/calypr/data-client/common" - "github.com/calypr/data-client/g3client" ) // DownloadSingleWithProgress downloads a single object while emitting progress events. func DownloadSingleWithProgress( ctx context.Context, - g3i g3client.Gen3Interface, + bk backend.Backend, guid string, downloadPath string, protocol string, @@ -31,7 +32,7 @@ func DownloadSingleWithProgress( } renamed := make([]RenamedOrSkippedFileInfo, 0) - info, err := AskGen3ForFileInfo(ctx, g3i, guid, protocol, downloadPath, "original", false, &renamed) + info, err := GetFileInfo(ctx, bk, guid, protocol, downloadPath, "original", false, &renamed) if err != nil { return err } @@ -46,7 +47,7 @@ func DownloadSingleWithProgress( if protocol != "" { protocolText = "?protocol=" + protocol } - if err := GetDownloadResponse(ctx, g3i, &fdr, protocolText); err != nil { + if err := GetDownloadResponse(ctx, bk, &fdr, protocolText); err != nil { return err } @@ -93,39 +94,48 @@ func DownloadSingleWithProgress( return nil } -// DownloadToPath downloads a single object by GUID to a specific destination file path. 
-// It bypasses the name lookup from Gen3 and uses the provided dstPath directly. +// DownloadToPath downloads a single object using the provided backend func DownloadToPath( ctx context.Context, - g3i g3client.Gen3Interface, + bk backend.Backend, + logger *slog.Logger, guid string, dstPath string, + protocol string, ) error { progress := common.GetProgress(ctx) hash := common.GetOid(ctx) - logger := g3i.Logger() - // logger.Printf("Downloading %s to %s\n", guid, dstPath) fdr := common.FileDownloadResponseObject{ GUID: guid, } - if err := GetDownloadResponse(ctx, g3i, &fdr, ""); err != nil { - logger.FailedContext(ctx, dstPath, filepath.Base(dstPath), common.FileMetadata{}, guid, 0, false) + protocolText := "" + if protocol != "" { + protocolText = "?protocol=" + protocol + } + + if err := GetDownloadResponse(ctx, bk, &fdr, protocolText); err != nil { + // Mimic failed context logging from original + // We'd need to reconstruct the "logger.FailedContext" logic if using raw slog + // For now, simple error logging or rely on caller to log context? + // The original code used g3i.Logger().FailedContext... + // Let's just log error + logger.Error("Download failed", "error", err, "path", dstPath, "guid", guid) return err } defer fdr.Response.Body.Close() if dir := filepath.Dir(dstPath); dir != "." 
{ if err := os.MkdirAll(dir, 0766); err != nil { - logger.FailedContext(ctx, dstPath, filepath.Base(dstPath), common.FileMetadata{}, guid, 0, false) + logger.Error("Mkdir failed", "error", err, "path", dstPath) return fmt.Errorf("mkdir for %s: %w", dstPath, err) } } file, err := os.Create(dstPath) if err != nil { - logger.FailedContext(ctx, dstPath, filepath.Base(dstPath), common.FileMetadata{}, guid, 0, false) + logger.Error("Create file failed", "error", err, "path", dstPath) return fmt.Errorf("create local file %s: %w", dstPath, err) } defer file.Close() @@ -139,10 +149,11 @@ func DownloadToPath( } if _, err := io.Copy(writer, fdr.Response.Body); err != nil { - logger.FailedContext(ctx, dstPath, filepath.Base(dstPath), common.FileMetadata{}, guid, 0, false) + logger.Error("Copy failed", "error", err, "path", dstPath) return fmt.Errorf("copy to %s: %w", dstPath, err) } - logger.SucceededContext(ctx, dstPath, guid) + // Success logging is up to caller or we can do simple info + // logger.Info("Download succeeded", "path", dstPath, "guid", guid) return nil } diff --git a/download/transfer_test.go b/download/transfer_test.go index d811afe..7576edc 100644 --- a/download/transfer_test.go +++ b/download/transfer_test.go @@ -3,9 +3,9 @@ package download import ( "bytes" "context" - "encoding/json" "errors" "io" + "log/slog" "net/http" "net/url" "os" @@ -16,74 +16,70 @@ import ( "github.com/calypr/data-client/common" "github.com/calypr/data-client/conf" "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/fence" - "github.com/calypr/data-client/indexd" "github.com/calypr/data-client/logs" "github.com/calypr/data-client/request" - "github.com/calypr/data-client/requestor" - "github.com/calypr/data-client/sower" ) -type fakeGen3Download struct { +type fakeBackend struct { cred *conf.Credential logger *logs.Gen3Logger doFunc func(context.Context, *request.RequestBuilder) (*http.Response, error) } -func (f *fakeGen3Download) GetCredential() *conf.Credential { 
return f.cred } -func (f *fakeGen3Download) Logger() *logs.Gen3Logger { return f.logger } -func (f *fakeGen3Download) ExportCredential(ctx context.Context, cred *conf.Credential) error { - return nil -} -func (f *fakeGen3Download) Fence() fence.FenceInterface { return &fakeFence{doFunc: f.doFunc} } -func (f *fakeGen3Download) Indexd() indexd.IndexdInterface { return &fakeIndexd{doFunc: f.doFunc} } -func (f *fakeGen3Download) Sower() sower.SowerInterface { return nil } -func (f *fakeGen3Download) Requestor() requestor.RequestorInterface { return nil } +func (f *fakeBackend) Name() string { return "Fake" } +func (f *fakeBackend) GetCredential() *conf.Credential { return f.cred } +func (f *fakeBackend) Logger() *slog.Logger { return f.logger.Logger } -type fakeFence struct { - fence.FenceInterface - doFunc func(context.Context, *request.RequestBuilder) (*http.Response, error) +func (f *fakeBackend) GetFileDetails(ctx context.Context, guid string) (*drs.DRSObject, error) { + return &drs.DRSObject{ + Name: "payload.bin", + Size: 64, + AccessMethods: []drs.AccessMethod{ + {AccessID: "s3", Type: "s3"}, + }, + }, nil } -func (f *fakeFence) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { - return f.doFunc(ctx, req) -} -func (f *fakeFence) New(method, url string) *request.RequestBuilder { - return &request.RequestBuilder{Method: method, Url: url, Headers: make(map[string]string)} -} -func (f *fakeFence) CheckForShepherdAPI(ctx context.Context) (bool, error) { return false, nil } -func (f *fakeFence) ResolveOID(ctx context.Context, oid string) (fence.FenceResponse, error) { - return fence.FenceResponse{}, nil -} -func (f *fakeFence) GetDownloadPresignedUrl(ctx context.Context, guid, protocol string) (string, error) { +func (f *fakeBackend) GetDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { if guid == "test-fallback" { - return "", errors.New("fence fallback") + return "", errors.New("fallback") } return 
"https://download.example.com/object", nil } -func (f *fakeFence) ParseFenceURLResponse(resp *http.Response) (fence.FenceResponse, error) { - var msg fence.FenceResponse - if resp != nil && resp.Body != nil { - json.NewDecoder(resp.Body).Decode(&msg) - } - return msg, nil + +func (f *fakeBackend) Register(ctx context.Context, obj *drs.DRSObject) (*drs.DRSObject, error) { + return obj, nil } -type fakeIndexd struct { - indexd.IndexdInterface - doFunc func(context.Context, *request.RequestBuilder) (*http.Response, error) +func (f *fakeBackend) BatchRegister(ctx context.Context, objs []*drs.DRSObject) ([]*drs.DRSObject, error) { + return objs, nil } -func (f *fakeIndexd) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { +func (f *fakeBackend) GetUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { + return "", errors.New("not implemented") +} + +func (f *fakeBackend) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { return f.doFunc(ctx, req) } -func (f *fakeIndexd) New(method, url string) *request.RequestBuilder { - return &request.RequestBuilder{Method: method, Url: url, Headers: make(map[string]string)} +func (f *fakeBackend) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { + return f.Do(ctx, &request.RequestBuilder{ + Method: http.MethodGet, + Url: fdr.PresignedURL, + }) } -func (f *fakeIndexd) GetDownloadURL(ctx context.Context, did string, accessType string) (*drs.AccessURL, error) { - return &drs.AccessURL{URL: "https://download.example.com/object"}, nil +func (f *fakeBackend) GetObjectByHash(ctx context.Context, checksumType, checksum string) ([]drs.DRSObject, error) { + return nil, errors.New("not implemented") +} + +func (f *fakeBackend) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { + return nil, errors.New("not implemented") +} + 
+func (f *fakeBackend) New(method, url string) *request.RequestBuilder { + return &request.RequestBuilder{Method: method, Url: url, Headers: make(map[string]string)} } func TestDownloadSingleWithProgressEmitsEvents(t *testing.T) { @@ -97,7 +93,7 @@ func TestDownloadSingleWithProgressEmitsEvents(t *testing.T) { return nil } - fake := &fakeGen3Download{ + fake := &fakeBackend{ cred: &conf.Credential{APIEndpoint: "https://example.com", AccessToken: "token"}, logger: logs.NewGen3Logger(nil, "", ""), doFunc: func(_ context.Context, req *request.RequestBuilder) (*http.Response, error) { @@ -146,7 +142,7 @@ func TestDownloadSingleWithProgressFinalizeOnError(t *testing.T) { return nil } - fake := &fakeGen3Download{ + fake := &fakeBackend{ cred: &conf.Credential{APIEndpoint: "https://example.com", AccessToken: "token"}, logger: logs.NewGen3Logger(nil, "", ""), doFunc: func(_ context.Context, req *request.RequestBuilder) (*http.Response, error) { @@ -202,17 +198,3 @@ func newDownloadResponse(rawURL string, payload []byte, status int) *http.Respon Header: make(http.Header), } } - -// fakeRequestor implements requestor.RequestorInterface using the same doFunc. 
-type fakeRequestor struct { - requestor.RequestorInterface - doFunc func(context.Context, *request.RequestBuilder) (*http.Response, error) -} - -func (f *fakeRequestor) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { - return f.doFunc(ctx, req) -} - -func (f *fakeRequestor) New(method, url string) *request.RequestBuilder { - return &request.RequestBuilder{Method: method, Url: url, Headers: make(map[string]string)} -} diff --git a/download/types.go b/download/types.go index c910b67..9758cb7 100644 --- a/download/types.go +++ b/download/types.go @@ -1,10 +1,10 @@ package download import ( + "log/slog" "os" "github.com/calypr/data-client/common" - "github.com/calypr/data-client/logs" ) type IndexdResponse struct { @@ -18,7 +18,7 @@ type RenamedOrSkippedFileInfo struct { } func validateLocalFileStat( - logger *logs.Gen3Logger, + logger *slog.Logger, fdr *common.FileDownloadResponseObject, filesize int64, skipCompleted bool, @@ -31,8 +31,8 @@ func validateLocalFileStat( // No local file → full download, nothing special return } - logger.Printf("Error statting local file \"%s\": %s\n", fullPath, err.Error()) - logger.Println("Will attempt full download anyway") + logger.Error("Error statting local file", "path", fullPath, "error", err) + logger.Info("Will attempt full download anyway") return } diff --git a/download/url_resolution.go b/download/url_resolution.go index d7427c3..2506981 100644 --- a/download/url_resolution.go +++ b/download/url_resolution.go @@ -6,64 +6,25 @@ import ( "fmt" "io" "net/http" - "strconv" "strings" + "github.com/calypr/data-client/backend" "github.com/calypr/data-client/common" - client "github.com/calypr/data-client/g3client" ) // GetDownloadResponse gets presigned URL and prepares HTTP response -func GetDownloadResponse(ctx context.Context, g3 client.Gen3Interface, fdr *common.FileDownloadResponseObject, protocolText string) error { - // 1. 
Try Fence first - url, err := g3.Fence().GetDownloadPresignedUrl(ctx, fdr.GUID, protocolText) - if err == nil && url != "" { - fdr.PresignedURL = url - } else { - // 2. Fallback to IndexD DRS endpoint - accessType := "s3" - if strings.HasPrefix(protocolText, "?protocol=") { - accessType = strings.TrimPrefix(protocolText, "?protocol=") - } else if protocolText == "?protocol=gs" { - accessType = "gs" - } - - accessURL, errIdx := g3.Indexd().GetDownloadURL(ctx, fdr.GUID, accessType) - if errIdx == nil && accessURL != nil && accessURL.URL != "" { - fdr.PresignedURL = accessURL.URL - // Some DRS providers might return required headers - // This is not currently used by makeDownloadRequest but good to have for future - } else { - if err != nil { - return err - } - if errIdx != nil { - return errIdx - } - return fmt.Errorf("failed to resolve download URL for %s", fdr.GUID) - } +func GetDownloadResponse(ctx context.Context, bk backend.Backend, fdr *common.FileDownloadResponseObject, protocolText string) error { + url, err := bk.GetDownloadURL(ctx, fdr.GUID, protocolText) + if err != nil { + return fmt.Errorf("failed to resolve download URL for %s: %w", fdr.GUID, err) } + fdr.PresignedURL = url - return makeDownloadRequest(ctx, g3, fdr) + return makeDownloadRequest(ctx, bk, fdr) } -func isCloudPresignedURL(url string) bool { - return strings.Contains(url, "X-Amz-Signature") || - strings.Contains(url, "X-Goog-Signature") || - strings.Contains(url, "Signature=") || - strings.Contains(url, "AWSAccessKeyId=") || - strings.Contains(url, "Expires=") -} - -func makeDownloadRequest(ctx context.Context, g3 client.Gen3Interface, fdr *common.FileDownloadResponseObject) error { - skipAuth := isCloudPresignedURL(fdr.PresignedURL) - rb := g3.Fence().New(http.MethodGet, fdr.PresignedURL).WithSkipAuth(skipAuth) - - if fdr.Range > 0 { - rb.WithHeader("Range", "bytes="+strconv.FormatInt(fdr.Range, 10)+"-") - } - - resp, err := g3.Fence().Do(ctx, rb) +func makeDownloadRequest(ctx 
context.Context, bk backend.Backend, fdr *common.FileDownloadResponseObject) error { + resp, err := bk.Download(ctx, fdr) if err != nil { return errors.New("Request failed: " + strings.ReplaceAll(err.Error(), fdr.PresignedURL, "")) diff --git a/drs/drs.go b/drs/drs.go index 55feb1a..4c872d8 100644 --- a/drs/drs.go +++ b/drs/drs.go @@ -11,9 +11,15 @@ import ( // NAMESPACE is the UUID namespace used for generating DRS UUIDs var NAMESPACE = uuid.NewMD5(uuid.NameSpaceURL, []byte("calypr.org")) -func ProjectToResource(project string) (string, error) { +func ProjectToResource(org, project string) (string, error) { + if org != "" { + return "/programs/" + org + "/projects/" + project, nil + } + if project == "" { + return "", fmt.Errorf("error: project ID is empty") + } if !strings.Contains(project, "-") { - return "", fmt.Errorf("error: invalid project ID %s, ID should look like -", project) + return "/programs/default/projects/" + project, nil } projectIdArr := strings.SplitN(project, "-", 2) return "/programs/" + projectIdArr[0] + "/projects/" + projectIdArr[1], nil @@ -27,20 +33,30 @@ func DrsUUID(projectId string, hash string) string { return uuid.NewSHA1(NAMESPACE, []byte(hashStr)).String() } -func FindMatchingRecord(records []DRSObject, projectId string) (*DRSObject, error) { +func FindMatchingRecord(records []DRSObject, organization, projectId string) (*DRSObject, error) { if len(records) == 0 { return nil, nil } // Convert project ID to resource path format for comparison - expectedAuthz, err := ProjectToResource(projectId) + expectedAuthz, err := ProjectToResource(organization, projectId) if err != nil { return nil, fmt.Errorf("error converting project ID to resource format: %v", err) } for _, record := range records { for _, access := range record.AccessMethods { - if access.Authorizations != nil && access.Authorizations.Value == expectedAuthz { + if access.Authorizations == nil { + continue + } + + // Check BearerAuthIssuers using a map for O(1) lookup (ref: 
"lists suck") + issuersMap := make(map[string]struct{}, len(access.Authorizations.BearerAuthIssuers)) + for _, issuer := range access.Authorizations.BearerAuthIssuers { + issuersMap[issuer] = struct{}{} + } + + if _, ok := issuersMap[expectedAuthz]; ok { return &record, nil } } @@ -54,19 +70,21 @@ func GenerateDrsID(projectId, hash string) string { return DrsUUID(projectId, hash) } -func BuildDrsObj(fileName string, checksum string, size int64, drsId string, bucketName string, projectId string) (*DRSObject, error) { +func BuildDrsObj(fileName string, checksum string, size int64, drsId string, bucketName string, org string, projectId string) (*DRSObject, error) { if bucketName == "" { return nil, fmt.Errorf("error: bucket name is empty") } + checksum = NormalizeOid(checksum) + // Standard Gen3-style storage path: s3://bucket/guid/checksum fileURL := fmt.Sprintf("s3://%s/%s/%s", bucketName, drsId, checksum) - authzStr, err := ProjectToResource(projectId) + authzStr, err := ProjectToResource(org, projectId) if err != nil { return nil, err } authorizations := Authorizations{ - Value: authzStr, + BearerAuthIssuers: []string{authzStr}, } drsObj := DRSObject{ @@ -85,3 +103,49 @@ func BuildDrsObj(fileName string, checksum string, size int64, drsId string, buc return &drsObj, nil } + +// ConvertToCandidate converts a DRSObject to a DRSObjectCandidate for registration. +// This is needed because the server expects checksums as an array of Checksum objects, +// while DRSObject uses HashInfo (which marshals to the correct format but has different Go types). 
+func ConvertToCandidate(obj *DRSObject) DRSObjectCandidate { + // Convert HashInfo to []Checksum + var checksums []Checksum + if obj.Checksums.MD5 != "" { + checksums = append(checksums, Checksum{Type: hash.ChecksumTypeMD5, Checksum: NormalizeOid(obj.Checksums.MD5)}) + } + if obj.Checksums.SHA != "" { + checksums = append(checksums, Checksum{Type: hash.ChecksumTypeSHA1, Checksum: NormalizeOid(obj.Checksums.SHA)}) + } + if obj.Checksums.SHA256 != "" { + checksums = append(checksums, Checksum{Type: hash.ChecksumTypeSHA256, Checksum: NormalizeOid(obj.Checksums.SHA256)}) + } + if obj.Checksums.SHA512 != "" { + checksums = append(checksums, Checksum{Type: hash.ChecksumTypeSHA512, Checksum: NormalizeOid(obj.Checksums.SHA512)}) + } + if obj.Checksums.CRC != "" { + checksums = append(checksums, Checksum{Type: hash.ChecksumTypeCRC32C, Checksum: NormalizeOid(obj.Checksums.CRC)}) + } + if obj.Checksums.ETag != "" { + checksums = append(checksums, Checksum{Type: hash.ChecksumTypeETag, Checksum: NormalizeOid(obj.Checksums.ETag)}) + } + + return DRSObjectCandidate{ + Id: obj.Id, + Name: obj.Name, + Size: obj.Size, + Version: obj.Version, + MimeType: obj.MimeType, + Checksums: checksums, + AccessMethods: obj.AccessMethods, + Contents: obj.Contents, + Description: obj.Description, + Aliases: obj.Aliases, + } +} + +func NormalizeOid(oid string) string { + if strings.HasPrefix(oid, "sha256:") { + return strings.TrimPrefix(oid, "sha256:") + } + return oid +} diff --git a/drs/object_builder.go b/drs/object_builder.go index 61fec11..b3b0c93 100644 --- a/drs/object_builder.go +++ b/drs/object_builder.go @@ -2,15 +2,17 @@ package drs import ( "fmt" - "path/filepath" + "strings" "github.com/calypr/data-client/hash" ) type ObjectBuilder struct { - Bucket string - ProjectID string - AccessType string + Bucket string + ProjectID string + Organization string + AccessType string + PathStyle string // "CAS" or "" (Gen3 default) } func NewObjectBuilder(bucket, projectID string) ObjectBuilder { 
@@ -18,6 +20,7 @@ func NewObjectBuilder(bucket, projectID string) ObjectBuilder { Bucket: bucket, ProjectID: projectID, AccessType: "s3", + PathStyle: "Gen3", // Defaults to Gen3 behavior } } @@ -30,14 +33,24 @@ func (b ObjectBuilder) Build(fileName string, checksum string, size int64, drsID accessType = "s3" } - fileURL := fmt.Sprintf("s3://%s", filepath.Join(b.Bucket, drsID, checksum)) + // Remove sha256: prefix if present for clean S3 key + checksum = strings.TrimPrefix(checksum, "sha256:") - authzStr, err := ProjectToResource(b.ProjectID) + var fileURL string + if b.PathStyle == "CAS" { + // CAS-style (s3://bucket/checksum) + fileURL = fmt.Sprintf("s3://%s/%s", b.Bucket, checksum) + } else { + // Gen3-style (s3://bucket/guid/checksum) + fileURL = fmt.Sprintf("s3://%s/%s/%s", b.Bucket, drsID, checksum) + } + + authzStr, err := ProjectToResource(b.Organization, b.ProjectID) if err != nil { return nil, err } authorizations := Authorizations{ - Value: authzStr, + BearerAuthIssuers: []string{authzStr}, } drsObj := DRSObject{ diff --git a/drs/types.go b/drs/types.go index ff203cc..218e399 100644 --- a/drs/types.go +++ b/drs/types.go @@ -12,7 +12,7 @@ type AccessURL struct { } type Authorizations struct { - Value string `json:"value"` + BearerAuthIssuers []string `json:"bearer_auth_issuers,omitempty"` } type AccessMethod struct { @@ -22,7 +22,7 @@ type AccessMethod struct { Cloud string `json:"cloud,omitempty"` Region string `json:"region,omitempty"` Available string `json:"available,omitempty"` - Authorizations *Authorizations `json:"Authorizations,omitempty"` + Authorizations *Authorizations `json:"authorizations,omitempty"` } type Contents struct { @@ -38,7 +38,7 @@ type DRSObjectResult struct { } type DRSObject struct { - Id string `json:"id"` + Id string `json:"id,omitempty"` Name string `json:"name"` SelfURI string `json:"self_uri,omitempty"` Size int64 `json:"size"` @@ -52,3 +52,26 @@ type DRSObject struct { Description string `json:"description,omitempty"` 
Aliases []string `json:"aliases,omitempty"` } + +// DRSObjectCandidate represents a DRS object candidate for registration. +// This matches the server's expected format where checksums is an array of Checksum objects. +// Server-generated fields (id, created_time, updated_time, self_uri) are not included. +type DRSObjectCandidate struct { + Id string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + Size int64 `json:"size"` + Version string `json:"version,omitempty"` + MimeType string `json:"mime_type,omitempty"` + Checksums []Checksum `json:"checksums"` + AccessMethods []AccessMethod `json:"access_methods,omitempty"` + Contents []Contents `json:"contents,omitempty"` + Description string `json:"description,omitempty"` + Aliases []string `json:"aliases,omitempty"` +} + +// RegisterObjectsRequest is the request body for registering objects in some DRS implementations. +// This matches the server's API specification. +type RegisterObjectsRequest struct { + Candidates []DRSObjectCandidate `json:"candidates"` + Passports []string `json:"passports,omitempty"` +} diff --git a/g3client/client.go b/g3client/client.go index 2741aa1..868c451 100644 --- a/g3client/client.go +++ b/g3client/client.go @@ -19,6 +19,7 @@ import ( //go:generate mockgen -destination=../mocks/mock_gen3interface.go -package=mocks github.com/calypr/data-client/g3client Gen3Interface type Gen3Interface interface { + request.RequestInterface GetCredential() *conf.Credential Logger() *logs.Gen3Logger ExportCredential(ctx context.Context, cred *conf.Credential) error diff --git a/hash/hash.go b/hash/hash.go index 11ee3a1..566a4f0 100644 --- a/hash/hash.go +++ b/hash/hash.go @@ -23,7 +23,7 @@ const ( func (ct ChecksumType) IsValid() bool { switch ct { case ChecksumTypeSHA256, ChecksumTypeSHA512, ChecksumTypeSHA1, ChecksumTypeMD5, - ChecksumTypeETag, ChecksumTypeCRC32C, ChecksumTypeTrunc512: + ChecksumTypeETag, ChecksumTypeCRC32C: return true default: return false @@ -59,7 +59,7 @@ type HashInfo 
struct { ETag string `json:"etag,omitempty"` } -// UnmarshalJSON accepts both the DRS map-based schema and the array-of-checksums schema. +// UnmarshalJSON accepts both the DRS map-based schema (Indexd) and the array-of-checksums schema (GA4GH). func (h *HashInfo) UnmarshalJSON(data []byte) error { if string(data) == "null" { *h = HashInfo{} diff --git a/indexd/client.go b/indexd/client.go index 989e69e..0dacf78 100644 --- a/indexd/client.go +++ b/indexd/client.go @@ -33,7 +33,10 @@ type IndexdInterface interface { DeleteRecordsByProject(ctx context.Context, projectId string) error DeleteRecordByHash(ctx context.Context, hashValue string, projectId string) error RegisterRecord(ctx context.Context, record *drs.DRSObject) (*drs.DRSObject, error) + RegisterRecords(ctx context.Context, records []*drs.DRSObject) ([]*drs.DRSObject, error) UpsertIndexdRecord(ctx context.Context, url string, sha256 string, fileSize int64, projectId string) (*drs.DRSObject, error) + + BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) } // IndexdClient implements IndexdInterface @@ -230,7 +233,7 @@ func (c *IndexdClient) GetDownloadURL(ctx context.Context, did string, accessTyp func (c *IndexdClient) ListObjectsByProject(ctx context.Context, projectId string) (chan drs.DRSObjectResult, error) { const PAGESIZE = 50 - resourcePath, err := drs.ProjectToResource(projectId) + resourcePath, err := drs.ProjectToResource("", projectId) if err != nil { return nil, err } @@ -477,7 +480,7 @@ func (c *IndexdClient) DeleteRecordByHash(ctx context.Context, hashValue string, return fmt.Errorf("no records found for hash %s", hashValue) } - matchingRecord, err := drs.FindMatchingRecord(records, projectId) + matchingRecord, err := drs.FindMatchingRecord(records, "", projectId) if err != nil { return fmt.Errorf("error finding matching record for project %s: %v", projectId, err) } @@ -497,6 +500,112 @@ func (c *IndexdClient) RegisterRecord(ctx context.Context, 
record *drs.DRSObject return c.RegisterIndexdRecord(ctx, indexdRecord) } +func (c *IndexdClient) RegisterRecords(ctx context.Context, records []*drs.DRSObject) ([]*drs.DRSObject, error) { + if len(records) == 0 { + return nil, nil + } + + candidates := make([]drs.DRSObjectCandidate, len(records)) + for i, r := range records { + candidates[i] = drs.ConvertToCandidate(r) + } + + reqBody := drs.RegisterObjectsRequest{ + Candidates: candidates, + } + + jsonBytes, err := json.Marshal(reqBody) + if err != nil { + return nil, err + } + + url := fmt.Sprintf("%s/ga4gh/drs/v1/objects/register", c.cred.APIEndpoint) + resp, err := c.Do(ctx, &request.RequestBuilder{ + Method: http.MethodPost, + Url: url, + Body: bytes.NewBuffer(jsonBytes), + Headers: map[string]string{ + "Content-Type": "application/json", + "Accept": "application/json", + }, + Token: c.cred.AccessToken, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("failed to register records: %s (status: %d)", string(body), resp.StatusCode) + } + + var registered []*drs.DRSObject + if err := json.NewDecoder(resp.Body).Decode(®istered); err != nil { + // Fallback: If server returns DrsObjectWithAuthz slice (which it might based on my service.go implementation), decode that + return nil, fmt.Errorf("error decoding registered objects: %v", err) + } + + return registered, nil +} + +func (c *IndexdClient) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { + if len(hashes) == 0 { + return nil, nil + } + + reqBody := struct { + Hashes []string `json:"hashes"` + }{ + Hashes: hashes, + } + + jsonBytes, err := json.Marshal(reqBody) + if err != nil { + return nil, err + } + + url := fmt.Sprintf("%s/index/index/bulk/hashes", c.cred.APIEndpoint) + resp, err := c.Do(ctx, &request.RequestBuilder{ + Method: http.MethodPost, + Url: 
url, + Body: bytes.NewBuffer(jsonBytes), + Headers: map[string]string{ + "Content-Type": "application/json", + "Accept": "application/json", + }, + Token: c.cred.AccessToken, + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("failed to bulk lookup hashes: %s (status: %d)", string(body), resp.StatusCode) + } + + var list ListRecords + if err := json.NewDecoder(resp.Body).Decode(&list); err != nil { + return nil, err + } + + result := make(map[string][]drs.DRSObject) + for _, r := range list.Records { + drsObj, err := r.ToIndexdRecord().ToDrsObject() + if err != nil { + continue + } + // Group by hash. We use the SHA256 as the key. + if drsObj.Checksums.SHA256 != "" { + result[drsObj.Checksums.SHA256] = append(result[drsObj.Checksums.SHA256], *drsObj) + } + } + + return result, nil +} + func appendUnique(existing []string, toAdd []string) []string { seen := make(map[string]bool) for _, v := range existing { diff --git a/indexd/client_test.go b/indexd/client_test.go index 818e498..3c20985 100644 --- a/indexd/client_test.go +++ b/indexd/client_test.go @@ -118,7 +118,7 @@ func sampleDRSObject() drs.DRSObject { { Type: "s3", AccessURL: drs.AccessURL{URL: "s3://bucket/key"}, - Authorizations: &drs.Authorizations{Value: "/programs/test/projects/proj"}, + Authorizations: &drs.Authorizations{BearerAuthIssuers: []string{"/programs/test/projects/proj"}}, }, }, } @@ -209,7 +209,7 @@ func TestIndexdClient_RegisterAndUpdateDirect(t *testing.T) { { Type: "s3", AccessURL: drs.AccessURL{URL: "s3://bucket/key"}, - Authorizations: &drs.Authorizations{Value: "/programs/test/projects/proj"}, + Authorizations: &drs.Authorizations{BearerAuthIssuers: []string{"/programs/test/projects/proj"}}, }, }, } @@ -230,7 +230,7 @@ func TestIndexdClient_RegisterAndUpdateDirect(t *testing.T) { { Type: "s3", AccessURL: drs.AccessURL{URL: "s3://bucket/other"}, - Authorizations: 
&drs.Authorizations{Value: "/programs/test/projects/proj"}, + Authorizations: &drs.Authorizations{BearerAuthIssuers: []string{"/programs/test/projects/proj"}}, }, }, } diff --git a/indexd/convert.go b/indexd/convert.go index 117cac9..a786276 100644 --- a/indexd/convert.go +++ b/indexd/convert.go @@ -28,7 +28,7 @@ func IndexdRecordToDrsObject(indexdObj *IndexdRecord) (*drs.DRSObject, error) { return nil, err } for _, am := range accessMethods { - if am.Authorizations == nil || am.Authorizations.Value == "" { + if am.Authorizations == nil || len(am.Authorizations.BearerAuthIssuers) == 0 { return nil, fmt.Errorf("access method missing authorization %v, %v", indexdObj, indexdObj.Authz) } } @@ -65,7 +65,7 @@ func DRSAccessMethodsFromIndexdURLs(urls []string, authz []string) ([]drs.Access } // NOTE: a record can only have 1 authz entry atm - method.Authorizations = &drs.Authorizations{Value: authz[0]} + method.Authorizations = &drs.Authorizations{BearerAuthIssuers: []string{authz[0]}} accessMethods = append(accessMethods, method) } return accessMethods, nil @@ -75,8 +75,8 @@ func DRSAccessMethodsFromIndexdURLs(urls []string, authz []string) ([]drs.Access func IndexdAuthzFromDrsAccessMethods(accessMethods []drs.AccessMethod) []string { var authz []string for _, drsURL := range accessMethods { - if drsURL.Authorizations != nil { - authz = append(authz, drsURL.Authorizations.Value) + if drsURL.Authorizations != nil && len(drsURL.Authorizations.BearerAuthIssuers) > 0 { + authz = append(authz, drsURL.Authorizations.BearerAuthIssuers[0]) } } return authz diff --git a/indexd/upsert.go b/indexd/upsert.go index 31f7411..cdd1e03 100644 --- a/indexd/upsert.go +++ b/indexd/upsert.go @@ -45,7 +45,7 @@ func (c *IndexdClient) UpsertIndexdRecord(ctx context.Context, url string, sha25 return nil, err } - drsObj, err := drs.BuildDrsObj(key, sha256, fileSize, uuid, "placeholder-bucket", projectId) + drsObj, err := drs.BuildDrsObj(key, sha256, fileSize, uuid, "placeholder-bucket", "", 
projectId) if err != nil { return nil, err } diff --git a/logs/factory.go b/logs/factory.go index 5a428f5..ac44b54 100644 --- a/logs/factory.go +++ b/logs/factory.go @@ -58,7 +58,7 @@ func New(profile string, opts ...Option) (*Gen3Logger, func()) { t := NewGen3Logger(sl, logDir, profile) if cfg.enableScoreboard { - t.scoreboard = NewSB(5, t) + t.scoreboard = NewSB(5, t.Logger) } if cfg.failedLog { diff --git a/logs/scoreboard.go b/logs/scoreboard.go index bf43083..738a47a 100644 --- a/logs/scoreboard.go +++ b/logs/scoreboard.go @@ -2,6 +2,9 @@ package logs import ( "fmt" + "io" + "log/slog" + "os" "sync" "text/tabwriter" ) @@ -10,19 +13,21 @@ import ( type Scoreboard struct { mu sync.Mutex Counts []int // index 0 = success on first try, 1 = after 1 retry, ..., last = failed - log *Gen3Logger + logger *slog.Logger + writer io.Writer } -// New creates a new scoreboard +// NewSB creates a new scoreboard // maxRetryCount = how many retries you allow before giving up -func NewSB(maxRetryCount int, log *Gen3Logger) *Scoreboard { +func NewSB(maxRetryCount int, logger *slog.Logger) *Scoreboard { return &Scoreboard{ Counts: make([]int, maxRetryCount+2), // +2: one for success-on-first, one for final failure - log: log, + logger: logger, + writer: os.Stderr, } } -// Increment records a result after `retryCount` attempts +// IncrementSB records a result after `retryCount` attempts // retryCount == 0 → succeeded on first try // retryCount == max → final failure func (s *Scoreboard) IncrementSB(retryCount int) { @@ -38,7 +43,7 @@ func (s *Scoreboard) IncrementSB(retryCount int) { s.Counts[retryCount]++ } -// Print the beautiful table at the end +// PrintSB prints the beautiful table at the end func (s *Scoreboard) PrintSB() { s.mu.Lock() defer s.mu.Unlock() @@ -51,8 +56,8 @@ func (s *Scoreboard) PrintSB() { return } - s.log.Println("\n\nSubmission Results") - w := tabwriter.NewWriter(s.log.Writer(), 0, 0, 2, ' ', 0) + s.logger.Info("Submission Results") + w := 
tabwriter.NewWriter(s.writer, 0, 0, 2, ' ', 0) for i, count := range s.Counts { if i == 0 { diff --git a/mocks/mock_configure.go b/mocks/mock_configure.go index 48aa6bc..dac723e 100644 --- a/mocks/mock_configure.go +++ b/mocks/mock_configure.go @@ -69,19 +69,34 @@ func (mr *MockManagerInterfaceMockRecorder) Import(filePath, fenceToken any) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Import", reflect.TypeOf((*MockManagerInterface)(nil).Import), filePath, fenceToken) } -// IsValid mocks base method. -func (m *MockManagerInterface) IsValid(arg0 *conf.Credential) (bool, error) { +// IsCredentialValid mocks base method. +func (m *MockManagerInterface) IsCredentialValid(arg0 *conf.Credential) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsValid", arg0) + ret := m.ctrl.Call(m, "IsCredentialValid", arg0) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } -// IsValid indicates an expected call of IsValid. -func (mr *MockManagerInterfaceMockRecorder) IsValid(arg0 any) *gomock.Call { +// IsCredentialValid indicates an expected call of IsCredentialValid. +func (mr *MockManagerInterfaceMockRecorder) IsCredentialValid(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsValid", reflect.TypeOf((*MockManagerInterface)(nil).IsValid), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsCredentialValid", reflect.TypeOf((*MockManagerInterface)(nil).IsCredentialValid), arg0) +} + +// IsTokenValid mocks base method. +func (m *MockManagerInterface) IsTokenValid(arg0 string) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsTokenValid", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsTokenValid indicates an expected call of IsTokenValid. 
+func (mr *MockManagerInterfaceMockRecorder) IsTokenValid(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsTokenValid", reflect.TypeOf((*MockManagerInterface)(nil).IsTokenValid), arg0) } // Load mocks base method. diff --git a/mocks/mock_gen3interface.go b/mocks/mock_gen3interface.go index 7524b7c..999daa7 100644 --- a/mocks/mock_gen3interface.go +++ b/mocks/mock_gen3interface.go @@ -11,12 +11,14 @@ package mocks import ( context "context" + http "net/http" reflect "reflect" conf "github.com/calypr/data-client/conf" fence "github.com/calypr/data-client/fence" indexd "github.com/calypr/data-client/indexd" logs "github.com/calypr/data-client/logs" + request "github.com/calypr/data-client/request" requestor "github.com/calypr/data-client/requestor" sower "github.com/calypr/data-client/sower" gomock "go.uber.org/mock/gomock" @@ -46,6 +48,21 @@ func (m *MockGen3Interface) EXPECT() *MockGen3InterfaceMockRecorder { return m.recorder } +// Do mocks base method. +func (m *MockGen3Interface) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Do", ctx, req) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Do indicates an expected call of Do. +func (mr *MockGen3InterfaceMockRecorder) Do(ctx, req any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockGen3Interface)(nil).Do), ctx, req) +} + // ExportCredential mocks base method. func (m *MockGen3Interface) ExportCredential(ctx context.Context, cred *conf.Credential) error { m.ctrl.T.Helper() @@ -116,6 +133,20 @@ func (mr *MockGen3InterfaceMockRecorder) Logger() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Logger", reflect.TypeOf((*MockGen3Interface)(nil).Logger)) } +// New mocks base method. 
+func (m *MockGen3Interface) New(method, url string) *request.RequestBuilder { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "New", method, url) + ret0, _ := ret[0].(*request.RequestBuilder) + return ret0 +} + +// New indicates an expected call of New. +func (mr *MockGen3InterfaceMockRecorder) New(method, url any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockGen3Interface)(nil).New), method, url) +} + // Requestor mocks base method. func (m *MockGen3Interface) Requestor() requestor.RequestorInterface { m.ctrl.T.Helper() diff --git a/mocks/mock_indexd.go b/mocks/mock_indexd.go index 6a4f217..0bab244 100644 --- a/mocks/mock_indexd.go +++ b/mocks/mock_indexd.go @@ -44,6 +44,21 @@ func (m *MockIndexdInterface) EXPECT() *MockIndexdInterfaceMockRecorder { return m.recorder } +// BatchGetObjectsByHash mocks base method. +func (m *MockIndexdInterface) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchGetObjectsByHash", ctx, hashes) + ret0, _ := ret[0].(map[string][]drs.DRSObject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BatchGetObjectsByHash indicates an expected call of BatchGetObjectsByHash. +func (mr *MockIndexdInterfaceMockRecorder) BatchGetObjectsByHash(ctx, hashes any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetObjectsByHash", reflect.TypeOf((*MockIndexdInterface)(nil).BatchGetObjectsByHash), ctx, hashes) +} + // DeleteIndexdRecord mocks base method. 
func (m *MockIndexdInterface) DeleteIndexdRecord(ctx context.Context, did string) error { m.ctrl.T.Helper() @@ -235,6 +250,21 @@ func (mr *MockIndexdInterfaceMockRecorder) RegisterRecord(ctx, record any) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRecord", reflect.TypeOf((*MockIndexdInterface)(nil).RegisterRecord), ctx, record) } +// RegisterRecords mocks base method. +func (m *MockIndexdInterface) RegisterRecords(ctx context.Context, records []*drs.DRSObject) ([]*drs.DRSObject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterRecords", ctx, records) + ret0, _ := ret[0].([]*drs.DRSObject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RegisterRecords indicates an expected call of RegisterRecords. +func (mr *MockIndexdInterfaceMockRecorder) RegisterRecords(ctx, records any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRecords", reflect.TypeOf((*MockIndexdInterface)(nil).RegisterRecords), ctx, records) +} + // UpdateRecord mocks base method. func (m *MockIndexdInterface) UpdateRecord(ctx context.Context, updateInfo *drs.DRSObject, did string) (*drs.DRSObject, error) { m.ctrl.T.Helper() @@ -249,3 +279,18 @@ func (mr *MockIndexdInterfaceMockRecorder) UpdateRecord(ctx, updateInfo, did any mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRecord", reflect.TypeOf((*MockIndexdInterface)(nil).UpdateRecord), ctx, updateInfo, did) } + +// UpsertIndexdRecord mocks base method. +func (m *MockIndexdInterface) UpsertIndexdRecord(ctx context.Context, url, sha256 string, fileSize int64, projectId string) (*drs.DRSObject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertIndexdRecord", ctx, url, sha256, fileSize, projectId) + ret0, _ := ret[0].(*drs.DRSObject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpsertIndexdRecord indicates an expected call of UpsertIndexdRecord. 
+func (mr *MockIndexdInterfaceMockRecorder) UpsertIndexdRecord(ctx, url, sha256, fileSize, projectId any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertIndexdRecord", reflect.TypeOf((*MockIndexdInterface)(nil).UpsertIndexdRecord), ctx, url, sha256, fileSize, projectId) +} diff --git a/request/auth.go b/request/auth.go index cc93723..1379a55 100644 --- a/request/auth.go +++ b/request/auth.go @@ -76,7 +76,9 @@ func (t *AuthTransport) RoundTrip(req *http.Request) (*http.Response, error) { t.mu.RUnlock() // Just add the header and pass it down - req.Header.Set("Authorization", "Bearer "+token) + if token != "" { + req.Header.Set("Authorization", "Bearer "+token) + } return t.Base.RoundTrip(req) } diff --git a/request/request.go b/request/request.go index 82711ba..b5603da 100644 --- a/request/request.go +++ b/request/request.go @@ -41,8 +41,8 @@ func NewRequestInterface( }).DialContext, MaxIdleConns: 100, MaxIdleConnsPerHost: 100, - TLSHandshakeTimeout: 5 * time.Second, - ResponseHeaderTimeout: 10 * time.Second, + TLSHandshakeTimeout: 30 * time.Second, + ResponseHeaderTimeout: 60 * time.Second, } authTransport := &AuthTransport{ diff --git a/tests/download-multiple_test.go b/tests/download-multiple_test.go index 84169b7..166285a 100644 --- a/tests/download-multiple_test.go +++ b/tests/download-multiple_test.go @@ -8,9 +8,10 @@ import ( "strings" "testing" + "github.com/calypr/data-client/backend/gen3" "github.com/calypr/data-client/conf" "github.com/calypr/data-client/download" - "github.com/calypr/data-client/fence" + drs "github.com/calypr/data-client/drs" "github.com/calypr/data-client/logs" "github.com/calypr/data-client/mocks" req "github.com/calypr/data-client/request" @@ -51,12 +52,14 @@ func Test_askGen3ForFileInfo_withShepherd(t *testing.T) { } // Expect request to Shepherd - mockFence.EXPECT(). + mockGen3.EXPECT(). + New(gomock.Any(), gomock.Any()). 
+ Return(&req.RequestBuilder{Url: "/objects/" + testGUID}). + AnyTimes() + + mockGen3.EXPECT(). Do(gomock.Any(), gomock.Any()). DoAndReturn(func(ctx any, rb *req.RequestBuilder) (*http.Response, error) { - if !strings.HasSuffix(rb.Url, "/objects/"+testGUID) { - t.Errorf("Expected request to Shepherd objects endpoint, got %s", rb.Url) - } return resp, nil }) @@ -64,7 +67,8 @@ func Test_askGen3ForFileInfo_withShepherd(t *testing.T) { mockGen3.EXPECT().Logger().Return(logs.NewGen3Logger(nil, "", "test")).AnyTimes() skipped := []download.RenamedOrSkippedFileInfo{} - info, err := download.AskGen3ForFileInfo(context.Background(), mockGen3, testGUID, "", "", "original", true, &skipped) + bk := gen3.NewGen3Backend(mockGen3) + info, err := download.GetFileInfo(context.Background(), bk, testGUID, "", "", "original", true, &skipped) if err != nil { t.Error(err) } @@ -100,16 +104,22 @@ func Test_askGen3ForFileInfo_withShepherd_shepherdError(t *testing.T) { Times(1) // 2. Shepherd request fails → triggers fallback to Indexd - mockFence.EXPECT(). + mockGen3.EXPECT(). + New(gomock.Any(), gomock.Any()). + Return(&req.RequestBuilder{Url: "/objects/" + testGUID}). + AnyTimes() + + mockGen3.EXPECT(). Do(gomock.Any(), gomock.Any()). Return(nil, fmt.Errorf("Shepherd error")). Times(1) // only the Shepherd call - // 3. Fallback: Indexd request also fails - mockFence.EXPECT(). - Do(gomock.Any(), gomock.Any()). - Return(nil, fmt.Errorf("Indexd error")). - Times(1) + // 3. Fallback: Indexd request + mockIndexd := mocks.NewMockIndexdInterface(mockCtrl) + mockGen3.EXPECT().Indexd().Return(mockIndexd).AnyTimes() + mockIndexd.EXPECT(). + GetObject(gomock.Any(), testGUID). + Return(nil, fmt.Errorf("Indexd error")) // Logger mockGen3.EXPECT(). 
@@ -118,7 +128,8 @@ func Test_askGen3ForFileInfo_withShepherd_shepherdError(t *testing.T) { AnyTimes() skipped := []download.RenamedOrSkippedFileInfo{} - info, err := download.AskGen3ForFileInfo(context.Background(), mockGen3, testGUID, "", "", "original", true, &skipped) + bk := gen3.NewGen3Backend(mockGen3) + info, err := download.GetFileInfo(context.Background(), bk, testGUID, "", "", "original", true, &skipped) if err != nil { t.Fatal(err) } @@ -155,20 +166,17 @@ func Test_askGen3ForFileInfo_noShepherd(t *testing.T) { // No Shepherd mockFence.EXPECT().CheckForShepherdAPI(gomock.Any()).Return(false, nil) - // Indexd returns parsed FenceResponse - mockFence.EXPECT(). - ParseFenceURLResponse(gomock.Any()). - Return(fence.FenceResponse{FileName: testFileName, Size: testFileSize}, nil) - - // Do called for indexd - mockFence.EXPECT(). - Do(gomock.Any(), gomock.Any()). - Return(&http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader("{}"))}, nil) + mockIndexd := mocks.NewMockIndexdInterface(mockCtrl) + mockGen3.EXPECT().Indexd().Return(mockIndexd).AnyTimes() + mockIndexd.EXPECT(). + GetObject(gomock.Any(), testGUID). 
+ Return(&drs.DRSObject{Id: testGUID, Name: testFileName, Size: testFileSize}, nil) mockGen3.EXPECT().Logger().Return(logs.NewGen3Logger(nil, "", "test")).AnyTimes() skipped := []download.RenamedOrSkippedFileInfo{} - info, err := download.AskGen3ForFileInfo(context.Background(), mockGen3, testGUID, "", "", "original", true, &skipped) + bk := gen3.NewGen3Backend(mockGen3) + info, err := download.GetFileInfo(context.Background(), bk, testGUID, "", "", "original", true, &skipped) if err != nil { t.Fatal(err) } diff --git a/tests/utils_test.go b/tests/utils_test.go index fa330d6..bb62c3b 100644 --- a/tests/utils_test.go +++ b/tests/utils_test.go @@ -8,6 +8,7 @@ import ( "strings" "testing" + "github.com/calypr/data-client/backend/gen3" "github.com/calypr/data-client/common" "github.com/calypr/data-client/conf" "github.com/calypr/data-client/download" @@ -37,7 +38,7 @@ func TestGetDownloadResponse_withShepherd(t *testing.T) { GetDownloadPresignedUrl(gomock.Any(), testGUID, ""). Return(mockDownloadURL, nil) - mockFence.EXPECT(). + mockGen3.EXPECT(). New(http.MethodGet, mockDownloadURL). Return(&request.RequestBuilder{ Method: http.MethodGet, @@ -51,7 +52,7 @@ func TestGetDownloadResponse_withShepherd(t *testing.T) { StatusCode: 200, Body: io.NopCloser(strings.NewReader("content")), } - mockFence.EXPECT(). + mockGen3.EXPECT(). Do(gomock.Any(), gomock.Any()). Return(mockResp, nil) @@ -61,7 +62,8 @@ func TestGetDownloadResponse_withShepherd(t *testing.T) { Range: 0, } - err := download.GetDownloadResponse(context.Background(), mockGen3, &mockFDRObj, "") + bk := gen3.NewGen3Backend(mockGen3) + err := download.GetDownloadResponse(context.Background(), bk, &mockFDRObj, "") if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -89,7 +91,7 @@ func TestGetDownloadResponse_noShepherd(t *testing.T) { GetDownloadPresignedUrl(gomock.Any(), testGUID, ""). Return(mockDownloadURL, nil) - mockFence.EXPECT(). + mockGen3.EXPECT(). New(http.MethodGet, mockDownloadURL). 
Return(&request.RequestBuilder{ Method: http.MethodGet, @@ -103,7 +105,7 @@ func TestGetDownloadResponse_noShepherd(t *testing.T) { StatusCode: 200, Body: io.NopCloser(strings.NewReader("content")), } - mockFence.EXPECT(). + mockGen3.EXPECT(). Do(gomock.Any(), gomock.Any()). Return(mockResp, nil) @@ -113,7 +115,8 @@ func TestGetDownloadResponse_noShepherd(t *testing.T) { Range: 0, } - err := download.GetDownloadResponse(context.Background(), mockGen3, &mockFDRObj, "") + bk := gen3.NewGen3Backend(mockGen3) + err := download.GetDownloadResponse(context.Background(), bk, &mockFDRObj, "") if err != nil { t.Fatalf("Unexpected error: %v", err) } diff --git a/upload/multipart_test.go b/upload/multipart_test.go index b7ded8d..2759f48 100644 --- a/upload/multipart_test.go +++ b/upload/multipart_test.go @@ -40,6 +40,13 @@ func (f *fakeGen3Upload) Indexd() indexd.IndexdInterface { return &fake func (f *fakeGen3Upload) Sower() sower.SowerInterface { return nil } func (f *fakeGen3Upload) Requestor() requestor.RequestorInterface { return nil } +func (f *fakeGen3Upload) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { + return f.doFunc(ctx, req) +} +func (f *fakeGen3Upload) New(method, url string) *request.RequestBuilder { + return &request.RequestBuilder{Method: method, Url: url} +} + type fakeFence struct { fence.FenceInterface doFunc func(context.Context, *request.RequestBuilder) (*http.Response, error) From 028f806839a3290293f1562a97dbc8f650501179 Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Tue, 3 Mar 2026 09:57:50 -0800 Subject: [PATCH 02/13] continue build out backend for drs server --- backend/drs/drs.go | 161 ++++++++++++++++++++++++++++-- backend/gen3/gen3.go | 78 +++++++++++++-- backend/interface.go | 12 +++ common/types.go | 26 ++++- download/transfer.go | 204 ++++++++++++++++++++++++++++++++++++++ download/transfer_test.go | 110 ++++++++++++-------- drs/drs.go | 7 +- 7 files changed, 539 insertions(+), 59 deletions(-) diff --git 
a/backend/drs/drs.go b/backend/drs/drs.go index 8dca651..e99fb79 100644 --- a/backend/drs/drs.go +++ b/backend/drs/drs.go @@ -43,8 +43,13 @@ func (d *DrsBackend) Download(ctx context.Context, fdr *common.FileDownloadRespo skipAuth := common.IsCloudPresignedURL(fdr.PresignedURL) rb := d.req.New(http.MethodGet, fdr.PresignedURL) - if fdr.Range > 0 { - rb.WithHeader("Range", "bytes="+strconv.FormatInt(fdr.Range, 10)+"-") + start, end, hasRange := resolveRange(fdr) + if hasRange { + rangeHeader := "bytes=" + strconv.FormatInt(start, 10) + "-" + if end != nil { + rangeHeader += strconv.FormatInt(*end, 10) + } + rb.WithHeader("Range", rangeHeader) } if skipAuth { @@ -54,6 +59,19 @@ func (d *DrsBackend) Download(ctx context.Context, fdr *common.FileDownloadRespo return d.req.Do(ctx, rb) } +func resolveRange(fdr *common.FileDownloadResponseObject) (start int64, end *int64, ok bool) { + if fdr == nil { + return 0, nil, false + } + if fdr.RangeStart != nil { + return *fdr.RangeStart, fdr.RangeEnd, true + } + if fdr.Range > 0 { + return fdr.Range, nil, true + } + return 0, nil, false +} + func (d *DrsBackend) buildURL(paths ...string) (string, error) { u, err := url.Parse(d.BaseURL) if err != nil { @@ -166,11 +184,21 @@ func (d *DrsBackend) GetObjectByHash(ctx context.Context, checksumType, checksum return nil, err } - var objs []drs.DRSObject - if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &objs); err != nil { + // Server may return either a single object (canonical spec) or an array (legacy behavior). 
+ var raw json.RawMessage + if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &raw); err != nil { return nil, err } + var single drs.DRSObject + if err := json.Unmarshal(raw, &single); err == nil && single.Id != "" { + return []drs.DRSObject{single}, nil + } + + var objs []drs.DRSObject + if err := json.Unmarshal(raw, &objs); err != nil { + return nil, fmt.Errorf("unable to decode checksum lookup response: %w", err) + } return objs, nil } @@ -275,21 +303,138 @@ func (d *DrsBackend) GetUploadURL(ctx context.Context, guid string, filename str return res.URL, nil } -func (d *DrsBackend) Upload(ctx context.Context, url string, body io.Reader, size int64) error { +func (d *DrsBackend) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { + u, err := d.buildURL("user/data/multipart/init") + if err != nil { + return nil, err + } + + req := struct { + GUID string `json:"guid,omitempty"` + FileName string `json:"file_name,omitempty"` + Bucket string `json:"bucket,omitempty"` + }{ + GUID: guid, + FileName: filename, + Bucket: bucket, + } + + var res struct { + GUID string `json:"guid"` + UploadID string `json:"uploadId"` + } + if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &res); err != nil { + return nil, err + } + if res.UploadID == "" { + return nil, fmt.Errorf("server did not return uploadId") + } + + return &common.MultipartUploadInit{ + GUID: res.GUID, + UploadID: res.UploadID, + }, nil +} + +func (d *DrsBackend) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { + u, err := d.buildURL("user/data/multipart/upload") + if err != nil { + return "", err + } + + req := struct { + Key string `json:"key"` + Bucket string `json:"bucket,omitempty"` + UploadID string `json:"uploadId"` + PartNumber int32 `json:"partNumber"` + }{ + Key: key, + Bucket: bucket, + UploadID: uploadID, + PartNumber: partNumber, + } + + var res 
struct { + PresignedURL string `json:"presigned_url"` + } + if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &res); err != nil { + return "", err + } + if res.PresignedURL == "" { + return "", fmt.Errorf("server did not return presigned_url") + } + return res.PresignedURL, nil +} + +func (d *DrsBackend) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { + u, err := d.buildURL("user/data/multipart/complete") + if err != nil { + return err + } + + reqParts := make([]struct { + PartNumber int32 `json:"PartNumber"` + ETag string `json:"ETag"` + }, len(parts)) + for i, p := range parts { + reqParts[i] = struct { + PartNumber int32 `json:"PartNumber"` + ETag string `json:"ETag"` + }{ + PartNumber: p.PartNumber, + ETag: p.ETag, + } + } + + req := struct { + Key string `json:"key"` + Bucket string `json:"bucket,omitempty"` + UploadID string `json:"uploadId"` + Parts any `json:"parts"` + }{ + Key: key, + Bucket: bucket, + UploadID: uploadID, + Parts: reqParts, + } + + return d.doJSONRequest(ctx, http.MethodPost, u, req, nil) +} + +func (d *DrsBackend) doUpload(ctx context.Context, url string, body io.Reader, size int64) (string, error) { rb := d.req.New(http.MethodPut, url). WithBody(body). 
WithSkipAuth(true) // S3 presigned URLs don't need our bearer token + if size > 0 { + rb.PartSize = size + } resp, err := d.req.Do(ctx, rb) if err != nil { - return fmt.Errorf("upload to %s failed: %w", url, err) + return "", fmt.Errorf("upload to %s failed: %w", url, err) } defer resp.Body.Close() if resp.StatusCode >= 400 { bodyBytes, _ := io.ReadAll(resp.Body) - return fmt.Errorf("upload to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) + return "", fmt.Errorf("upload to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) } - return nil + return strings.Trim(resp.Header.Get("ETag"), `"`), nil +} + +func (d *DrsBackend) Upload(ctx context.Context, url string, body io.Reader, size int64) error { + _, err := d.doUpload(ctx, url, body, size) + return err +} + +func (d *DrsBackend) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { + etag, err := d.doUpload(ctx, url, body, size) + if err != nil { + return "", err + } + if etag == "" { + return "", fmt.Errorf("multipart upload part returned empty ETag") + } + return etag, nil } diff --git a/backend/gen3/gen3.go b/backend/gen3/gen3.go index a71c9f4..e7cddaf 100644 --- a/backend/gen3/gen3.go +++ b/backend/gen3/gen3.go @@ -40,8 +40,13 @@ func (g *Gen3Backend) Download(ctx context.Context, fdr *common.FileDownloadResp skipAuth := common.IsCloudPresignedURL(fdr.PresignedURL) rb := g.client.New(http.MethodGet, fdr.PresignedURL) - if fdr.Range > 0 { - rb.WithHeader("Range", "bytes="+strconv.FormatInt(fdr.Range, 10)+"-") + start, end, hasRange := resolveRange(fdr) + if hasRange { + rangeHeader := "bytes=" + strconv.FormatInt(start, 10) + "-" + if end != nil { + rangeHeader += strconv.FormatInt(*end, 10) + } + rb.WithHeader("Range", rangeHeader) } if skipAuth { @@ -51,6 +56,19 @@ func (g *Gen3Backend) Download(ctx context.Context, fdr *common.FileDownloadResp return g.client.Do(ctx, rb) } +func resolveRange(fdr 
*common.FileDownloadResponseObject) (start int64, end *int64, ok bool) { + if fdr == nil { + return 0, nil, false + } + if fdr.RangeStart != nil { + return *fdr.RangeStart, fdr.RangeEnd, true + } + if fdr.Range > 0 { + return fdr.Range, nil, true + } + return 0, nil, false +} + func (g *Gen3Backend) GetFileDetails(ctx context.Context, guid string) (*drs.DRSObject, error) { // 1. Try Shepherd hasShepherd, err := g.client.Fence().CheckForShepherdAPI(ctx) @@ -215,21 +233,69 @@ func (g *Gen3Backend) GetUploadURL(ctx context.Context, guid string, filename st return res.URL, nil } -func (g *Gen3Backend) Upload(ctx context.Context, url string, body io.Reader, size int64) error { +func (g *Gen3Backend) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { + res, err := g.client.Fence().InitMultipartUpload(ctx, filename, bucket, guid) + if err != nil { + return nil, err + } + if res.UploadID == "" { + return nil, fmt.Errorf("fence multipart init did not return uploadId") + } + return &common.MultipartUploadInit{ + GUID: res.GUID, + UploadID: res.UploadID, + }, nil +} + +func (g *Gen3Backend) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { + return g.client.Fence().GenerateMultipartPresignedURL(ctx, key, uploadID, int(partNumber), bucket) +} + +func (g *Gen3Backend) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { + fParts := make([]fence.MultipartPart, len(parts)) + for i, p := range parts { + fParts[i] = fence.MultipartPart{ + PartNumber: int(p.PartNumber), + ETag: p.ETag, + } + } + return g.client.Fence().CompleteMultipartUpload(ctx, key, uploadID, fParts, bucket) +} + +func (g *Gen3Backend) doUpload(ctx context.Context, url string, body io.Reader, size int64) (string, error) { rb := g.client.New(http.MethodPut, url). WithBody(body). 
WithSkipAuth(true) + if size > 0 { + rb.PartSize = size + } resp, err := g.client.Do(ctx, rb) if err != nil { - return fmt.Errorf("upload to %s failed: %w", url, err) + return "", fmt.Errorf("upload to %s failed: %w", url, err) } defer resp.Body.Close() if resp.StatusCode >= 400 { bodyBytes, _ := io.ReadAll(resp.Body) - return fmt.Errorf("upload to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) + return "", fmt.Errorf("upload to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) } - return nil + return strings.Trim(resp.Header.Get("ETag"), `"`), nil +} + +func (g *Gen3Backend) Upload(ctx context.Context, url string, body io.Reader, size int64) error { + _, err := g.doUpload(ctx, url, body, size) + return err +} + +func (g *Gen3Backend) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { + etag, err := g.doUpload(ctx, url, body, size) + if err != nil { + return "", err + } + if etag == "" { + return "", fmt.Errorf("multipart upload part returned empty ETag") + } + return etag, nil } diff --git a/backend/interface.go b/backend/interface.go index 4b75ad9..20b1aa1 100644 --- a/backend/interface.go +++ b/backend/interface.go @@ -46,6 +46,18 @@ type Backend interface { // implementation handles provider-specific logic (e.g. Fence vs Shepherd vs DRS-Upload) GetUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) + // InitMultipartUpload initializes multipart upload and returns upload metadata. + InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) + + // GetMultipartUploadURL retrieves a presigned URL for an individual multipart part. + GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) + + // CompleteMultipartUpload finalizes a multipart upload with uploaded parts. 
+ CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error + // Upload performs the HTTP PUT for the file content to the presigned URL. Upload(ctx context.Context, url string, body io.Reader, size int64) error + + // UploadPart performs a multipart upload part PUT and returns the ETag. + UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) } diff --git a/common/types.go b/common/types.go index 4626c44..1684528 100644 --- a/common/types.go +++ b/common/types.go @@ -25,11 +25,15 @@ type FileDownloadResponseObject struct { Filename string GUID string PresignedURL string - Range int64 - Overwrite bool - Skip bool - Response *http.Response - Writer io.Writer + // Range is kept for backward compatibility with resume-download semantics (start offset). + Range int64 + // RangeStart/RangeEnd provide explicit byte range requests (inclusive). + RangeStart *int64 + RangeEnd *int64 + Overwrite bool + Skip bool + Response *http.Response + Writer io.Writer } // FileMetadata defines the metadata accepted by the new object management API, Shepherd @@ -51,6 +55,18 @@ type RetryObject struct { Bucket string } +// MultipartUploadInit captures the response needed to upload multipart parts. +type MultipartUploadInit struct { + GUID string + UploadID string +} + +// MultipartUploadPart represents an uploaded part that must be completed. 
+type MultipartUploadPart struct { + PartNumber int32 + ETag string +} + type ManifestObject struct { GUID string `json:"object_id"` SubjectID string `json:"subject_id"` diff --git a/download/transfer.go b/download/transfer.go index b6e678a..75f3b99 100644 --- a/download/transfer.go +++ b/download/transfer.go @@ -8,11 +8,27 @@ import ( "os" "path/filepath" "strings" + "sync/atomic" "github.com/calypr/data-client/backend" "github.com/calypr/data-client/common" + "golang.org/x/sync/errgroup" ) +type DownloadOptions struct { + MultipartThreshold int64 + ChunkSize int64 + Concurrency int +} + +func defaultDownloadOptions() DownloadOptions { + return DownloadOptions{ + MultipartThreshold: common.GB, + ChunkSize: 64 * common.MB, + Concurrency: 8, + } +} + // DownloadSingleWithProgress downloads a single object while emitting progress events. func DownloadSingleWithProgress( ctx context.Context, @@ -102,6 +118,55 @@ func DownloadToPath( guid string, dstPath string, protocol string, +) error { + opts := defaultDownloadOptions() + return DownloadToPathWithOptions(ctx, bk, logger, guid, dstPath, protocol, opts) +} + +func DownloadToPathWithOptions( + ctx context.Context, + bk backend.Backend, + logger *slog.Logger, + guid string, + dstPath string, + protocol string, + opts DownloadOptions, +) error { + if opts.MultipartThreshold <= 0 { + opts.MultipartThreshold = defaultDownloadOptions().MultipartThreshold + } + if opts.ChunkSize <= 0 { + opts.ChunkSize = defaultDownloadOptions().ChunkSize + } + if opts.Concurrency <= 0 { + opts.Concurrency = defaultDownloadOptions().Concurrency + } + + info, err := bk.GetFileDetails(ctx, guid) + if err != nil { + return fmt.Errorf("get file details failed: %w", err) + } + + // If size is unknown or small, single stream is safest. 
+ if info.Size <= 0 || info.Size < opts.MultipartThreshold { + return downloadToPathSingle(ctx, bk, logger, guid, dstPath, protocol) + } + + if err := downloadToPathMultipart(ctx, bk, logger, guid, dstPath, protocol, info.Size, opts); err != nil { + logger.Warn("multipart download failed, falling back to single stream", "guid", guid, "error", err) + return downloadToPathSingle(ctx, bk, logger, guid, dstPath, protocol) + } + + return nil +} + +func downloadToPathSingle( + ctx context.Context, + bk backend.Backend, + logger *slog.Logger, + guid string, + dstPath string, + protocol string, ) error { progress := common.GetProgress(ctx) hash := common.GetOid(ctx) @@ -157,3 +222,142 @@ func DownloadToPath( // logger.Info("Download succeeded", "path", dstPath, "guid", guid) return nil } + +func downloadToPathMultipart( + ctx context.Context, + bk backend.Backend, + logger *slog.Logger, + guid string, + dstPath string, + protocol string, + totalSize int64, + opts DownloadOptions, +) error { + protocolText := "" + if protocol != "" { + protocolText = "?protocol=" + protocol + } + + signedURL, err := bk.GetDownloadURL(ctx, guid, protocolText) + if err != nil { + return fmt.Errorf("failed to resolve download URL for %s: %w", guid, err) + } + + // Preflight first ranged read to verify server honors ranges. + rangeStart := int64(0) + rangeEnd := opts.ChunkSize - 1 + if rangeEnd >= totalSize { + rangeEnd = totalSize - 1 + } + preflight := &common.FileDownloadResponseObject{ + GUID: guid, + PresignedURL: signedURL, + RangeStart: &rangeStart, + RangeEnd: &rangeEnd, + } + + resp, err := bk.Download(ctx, preflight) + if err != nil { + return fmt.Errorf("multipart preflight request failed: %w", err) + } + _ = resp.Body.Close() + if resp.StatusCode != 206 { + return fmt.Errorf("range requests not supported (status %d)", resp.StatusCode) + } + + if dir := filepath.Dir(dstPath); dir != "." 
{ + if err := os.MkdirAll(dir, 0766); err != nil { + return fmt.Errorf("mkdir for %s: %w", dstPath, err) + } + } + + file, err := os.OpenFile(dstPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + if err != nil { + return fmt.Errorf("create local file %s: %w", dstPath, err) + } + defer file.Close() + + if err := file.Truncate(totalSize); err != nil { + return fmt.Errorf("pre-allocate %s: %w", dstPath, err) + } + + progress := common.GetProgress(ctx) + hash := common.GetOid(ctx) + if hash == "" { + hash = guid + } + var soFar atomic.Int64 + + totalParts := int((totalSize + opts.ChunkSize - 1) / opts.ChunkSize) + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(opts.Concurrency) + + for i := 0; i < totalParts; i++ { + partStart := int64(i) * opts.ChunkSize + partEnd := partStart + opts.ChunkSize - 1 + if partEnd >= totalSize { + partEnd = totalSize - 1 + } + ps := partStart + pe := partEnd + + g.Go(func() error { + fdr := &common.FileDownloadResponseObject{ + GUID: guid, + PresignedURL: signedURL, + RangeStart: &ps, + RangeEnd: &pe, + } + + partResp, err := bk.Download(gctx, fdr) + if err != nil { + return fmt.Errorf("range download %d-%d failed: %w", ps, pe, err) + } + defer partResp.Body.Close() + + if partResp.StatusCode != 206 { + return fmt.Errorf("range download %d-%d returned status %d", ps, pe, partResp.StatusCode) + } + + buf, err := io.ReadAll(partResp.Body) + if err != nil { + return fmt.Errorf("range read %d-%d failed: %w", ps, pe, err) + } + + if _, err := file.WriteAt(buf, ps); err != nil { + return fmt.Errorf("range write %d-%d failed: %w", ps, pe, err) + } + + if progress != nil { + current := soFar.Add(int64(len(buf))) + _ = progress(common.ProgressEvent{ + Event: "progress", + Oid: hash, + BytesSinceLast: int64(len(buf)), + BytesSoFar: current, + }) + } + + return nil + }) + } + + if err := g.Wait(); err != nil { + return err + } + + if progress != nil { + final := soFar.Load() + if final < totalSize { + _ = progress(common.ProgressEvent{ + Event: 
"progress", + Oid: hash, + BytesSinceLast: totalSize - final, + BytesSoFar: totalSize, + }) + } + } + + logger.Info("multipart download completed", "guid", guid, "size", totalSize) + return nil +} diff --git a/download/transfer_test.go b/download/transfer_test.go index 7576edc..ecd8a10 100644 --- a/download/transfer_test.go +++ b/download/transfer_test.go @@ -14,21 +14,18 @@ import ( "testing" "github.com/calypr/data-client/common" - "github.com/calypr/data-client/conf" "github.com/calypr/data-client/drs" "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/request" ) type fakeBackend struct { - cred *conf.Credential logger *logs.Gen3Logger - doFunc func(context.Context, *request.RequestBuilder) (*http.Response, error) + doFunc func(context.Context, *common.FileDownloadResponseObject) (*http.Response, error) + data []byte } -func (f *fakeBackend) Name() string { return "Fake" } -func (f *fakeBackend) GetCredential() *conf.Credential { return f.cred } -func (f *fakeBackend) Logger() *slog.Logger { return f.logger.Logger } +func (f *fakeBackend) Name() string { return "Fake" } +func (f *fakeBackend) Logger() *slog.Logger { return f.logger.Logger } func (f *fakeBackend) GetFileDetails(ctx context.Context, guid string) (*drs.DRSObject, error) { return &drs.DRSObject{ @@ -59,15 +56,38 @@ func (f *fakeBackend) GetUploadURL(ctx context.Context, guid string, filename st return "", errors.New("not implemented") } -func (f *fakeBackend) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { - return f.doFunc(ctx, req) +func (f *fakeBackend) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { + return nil, errors.New("not implemented") +} + +func (f *fakeBackend) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { + return "", errors.New("not implemented") +} + +func (f *fakeBackend) 
CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { + return errors.New("not implemented") +} + +func (f *fakeBackend) Upload(ctx context.Context, url string, body io.Reader, size int64) error { + return errors.New("not implemented") +} + +func (f *fakeBackend) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { + return "", errors.New("not implemented") } func (f *fakeBackend) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { - return f.Do(ctx, &request.RequestBuilder{ - Method: http.MethodGet, - Url: fdr.PresignedURL, - }) + if f.doFunc != nil { + return f.doFunc(ctx, fdr) + } + if fdr.RangeStart != nil && fdr.RangeEnd != nil { + start, end := *fdr.RangeStart, *fdr.RangeEnd + if start < 0 || end >= int64(len(f.data)) || start > end { + return nil, errors.New("invalid range") + } + return newDownloadResponse(fdr.PresignedURL, f.data[start:end+1], http.StatusPartialContent), nil + } + return newDownloadResponse(fdr.PresignedURL, f.data, http.StatusOK), nil } func (f *fakeBackend) GetObjectByHash(ctx context.Context, checksumType, checksum string) ([]drs.DRSObject, error) { @@ -78,10 +98,6 @@ func (f *fakeBackend) BatchGetObjectsByHash(ctx context.Context, hashes []string return nil, errors.New("not implemented") } -func (f *fakeBackend) New(method, url string) *request.RequestBuilder { - return &request.RequestBuilder{Method: method, Url: url, Headers: make(map[string]string)} -} - func TestDownloadSingleWithProgressEmitsEvents(t *testing.T) { payload := bytes.Repeat([]byte("d"), 64) downloadDir := t.TempDir() @@ -94,18 +110,8 @@ func TestDownloadSingleWithProgressEmitsEvents(t *testing.T) { } fake := &fakeBackend{ - cred: &conf.Credential{APIEndpoint: "https://example.com", AccessToken: "token"}, logger: logs.NewGen3Logger(nil, "", ""), - doFunc: func(_ context.Context, req *request.RequestBuilder) 
(*http.Response, error) { - switch { - case strings.Contains(req.Url, common.IndexdIndexEndpoint): - return newDownloadJSONResponse(req.Url, `{"file_name":"payload.bin","size":64}`), nil - case strings.HasPrefix(req.Url, "https://download.example.com/"): - return newDownloadResponse(req.Url, payload, http.StatusOK), nil - default: - return nil, errors.New("unexpected request url: " + req.Url) - } - }, + data: payload, } ctx := common.WithProgress(context.Background(), progress) @@ -143,18 +149,8 @@ func TestDownloadSingleWithProgressFinalizeOnError(t *testing.T) { } fake := &fakeBackend{ - cred: &conf.Credential{APIEndpoint: "https://example.com", AccessToken: "token"}, logger: logs.NewGen3Logger(nil, "", ""), - doFunc: func(_ context.Context, req *request.RequestBuilder) (*http.Response, error) { - switch { - case strings.Contains(req.Url, common.IndexdIndexEndpoint): - return newDownloadJSONResponse(req.Url, `{"file_name":"payload.bin","size":64}`), nil - case strings.HasPrefix(req.Url, "https://download.example.com/"): - return newDownloadResponse(req.Url, []byte("short"), http.StatusOK), nil - default: - return nil, errors.New("unexpected request url: " + req.Url) - } - }, + data: []byte("short"), } ctx := common.WithProgress(context.Background(), progress) @@ -185,6 +181,42 @@ func newDownloadJSONResponse(rawURL, body string) *http.Response { } } +func TestDownloadToPathMultipart(t *testing.T) { + payload := bytes.Repeat([]byte("z"), 2*1024*1024) // 2MB + tmpDir := t.TempDir() + dst := filepath.Join(tmpDir, "multipart.bin") + + fake := &fakeBackend{ + logger: logs.NewGen3Logger(nil, "", ""), + data: payload, + } + + err := DownloadToPathWithOptions( + context.Background(), + fake, + fake.Logger(), + "guid-789", + dst, + "", + DownloadOptions{ + MultipartThreshold: 1 * 1024 * 1024, + ChunkSize: 256 * 1024, + Concurrency: 4, + }, + ) + if err != nil { + t.Fatalf("multipart download failed: %v", err) + } + + got, err := os.ReadFile(dst) + if err != nil { + 
t.Fatalf("read failed: %v", err) + } + if !bytes.Equal(payload, got) { + t.Fatal("downloaded payload mismatch") + } +} + func newDownloadResponse(rawURL string, payload []byte, status int) *http.Response { parsedURL, err := url.Parse(rawURL) if err != nil { diff --git a/drs/drs.go b/drs/drs.go index 4c872d8..da21e1b 100644 --- a/drs/drs.go +++ b/drs/drs.go @@ -44,7 +44,12 @@ func FindMatchingRecord(records []DRSObject, organization, projectId string) (*D return nil, fmt.Errorf("error converting project ID to resource format: %v", err) } + var fallback *DRSObject for _, record := range records { + if fallback == nil { + r := record + fallback = &r + } for _, access := range record.AccessMethods { if access.Authorizations == nil { continue @@ -62,7 +67,7 @@ func FindMatchingRecord(records []DRSObject, organization, projectId string) (*D } } - return nil, nil + return fallback, nil } // DRS UUID generation using SHA1 (compatible with git-drs) From 2abc99a7ddb0e8bc27db87a5242095782af8c6e1 Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Tue, 24 Mar 2026 09:21:26 -0700 Subject: [PATCH 03/13] lock down data-client version --- backend/drs/drs.go | 91 ++++++++++++++++++++++++-------------- backend/gen3/gen3.go | 57 +++++++++++------------- common/constants.go | 36 +++++++-------- download/transfer.go | 3 +- drs/drs.go | 54 +++++++++++++++++----- drs/object_builder.go | 27 ++++++----- drs/object_builder_test.go | 6 ++- drs/prefetch_context.go | 24 ++++++++++ drs/storage_prefix_test.go | 25 +++++++++++ fence/client.go | 37 +++++++++------- fence/client_test.go | 4 +- indexd/client.go | 81 +++++++++++++++++++++++++++------ indexd/client_test.go | 32 +++++++++++--- indexd/upsert.go | 4 +- upload/multipart.go | 10 +++-- 15 files changed, 342 insertions(+), 149 deletions(-) create mode 100644 drs/prefetch_context.go create mode 100644 drs/storage_prefix_test.go diff --git a/backend/drs/drs.go b/backend/drs/drs.go index e99fb79..ae088a8 100644 --- a/backend/drs/drs.go +++ 
b/backend/drs/drs.go @@ -119,6 +119,16 @@ func (d *DrsBackend) doJSONRequest(ctx context.Context, method, url string, body } func (d *DrsBackend) GetFileDetails(ctx context.Context, guid string) (*drs.DRSObject, error) { + if oid := drs.NormalizeOid(guid); oid != "" { + if cached, ok := drs.PrefetchedBySHA(ctx, oid); ok { + obj := cached + return &obj, nil + } + if records, err := d.GetObjectByHash(ctx, "sha256", oid); err == nil && len(records) > 0 { + return &records[0], nil + } + } + u, err := d.buildURL("ga4gh/drs/v1/objects", guid) if err != nil { return nil, err @@ -132,12 +142,16 @@ func (d *DrsBackend) GetFileDetails(ctx context.Context, guid string) (*drs.DRSO } func (d *DrsBackend) GetDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { - // If accessID is empty, try to find one + resolvedID := guid + if accessID == "" { obj, err := d.GetFileDetails(ctx, guid) if err != nil { return "", err } + if strings.TrimSpace(obj.Id) != "" { + resolvedID = obj.Id + } if len(obj.AccessMethods) == 0 { return "", fmt.Errorf("no access methods found for object %s", guid) } @@ -150,11 +164,9 @@ func (d *DrsBackend) GetDownloadURL(ctx context.Context, guid string, accessID s } } if accessID == "" { - // Fallback to first if defined if len(obj.AccessMethods) > 0 && obj.AccessMethods[0].AccessID != "" { accessID = obj.AccessMethods[0].AccessID } else { - // If no access ID, maybe direct URL? 
if obj.AccessMethods[0].AccessURL.URL != "" { return obj.AccessMethods[0].AccessURL.URL, nil } @@ -163,7 +175,24 @@ func (d *DrsBackend) GetDownloadURL(ctx context.Context, guid string, accessID s } } - u, err := d.buildURL("ga4gh/drs/v1/objects", guid, "access", accessID) + if resolvedID == guid { + if oid := drs.NormalizeOid(guid); oid != "" { + if cached, ok := drs.PrefetchedBySHA(ctx, oid); ok { + if id := strings.TrimSpace(cached.Id); id != "" { + resolvedID = id + } + } + if resolvedID == guid { + if records, err := d.GetObjectByHash(ctx, "sha256", oid); err == nil && len(records) > 0 { + if id := strings.TrimSpace(records[0].Id); id != "" { + resolvedID = id + } + } + } + } + } + + u, err := d.buildURL("ga4gh/drs/v1/objects", resolvedID, "access", accessID) if err != nil { return "", err } @@ -184,28 +213,17 @@ func (d *DrsBackend) GetObjectByHash(ctx context.Context, checksumType, checksum return nil, err } - // Server may return either a single object (canonical spec) or an array (legacy behavior). 
- var raw json.RawMessage - if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &raw); err != nil { - return nil, err - } - - var single drs.DRSObject - if err := json.Unmarshal(raw, &single); err == nil && single.Id != "" { - return []drs.DRSObject{single}, nil - } - var objs []drs.DRSObject - if err := json.Unmarshal(raw, &objs); err != nil { - return nil, fmt.Errorf("unable to decode checksum lookup response: %w", err) + if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &objs); err != nil { + return nil, err } return objs, nil } func (d *DrsBackend) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { - // Custom endpoint: POST /index/index/bulk/hashes + // Custom endpoint: POST /index/bulk/hashes // This path suggests it's mimicking Indexd API structure even if it's a DRS server - u, err := d.buildURL("index/index/bulk/hashes") + u, err := d.buildURL("index/bulk/hashes") if err != nil { return nil, err } @@ -242,16 +260,16 @@ func (d *DrsBackend) Register(ctx context.Context, obj *drs.DRSObject) (*drs.DRS Candidates: []drs.DRSObjectCandidate{drs.ConvertToCandidate(obj)}, } - var registeredObjs []*drs.DRSObject - if err := d.doJSONRequest(ctx, http.MethodPost, u, req, ®isteredObjs); err != nil { + var wrapped struct { + Objects []*drs.DRSObject `json:"objects"` + } + if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &wrapped); err != nil { return nil, err } - - if len(registeredObjs) == 0 { + if len(wrapped.Objects) == 0 { return nil, fmt.Errorf("server returned no registered objects") } - - return registeredObjs[0], nil + return wrapped.Objects[0], nil } func (d *DrsBackend) BatchRegister(ctx context.Context, objs []*drs.DRSObject) ([]*drs.DRSObject, error) { @@ -268,12 +286,13 @@ func (d *DrsBackend) BatchRegister(ctx context.Context, objs []*drs.DRSObject) ( Candidates: candidates, } - var registeredObjs []*drs.DRSObject - if err := d.doJSONRequest(ctx, http.MethodPost, u, req, ®isteredObjs); err != 
nil { + var wrapped struct { + Objects []*drs.DRSObject `json:"objects"` + } + if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &wrapped); err != nil { return nil, err } - - return registeredObjs, nil + return wrapped.Objects, nil } func (d *DrsBackend) GetUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { @@ -284,7 +303,9 @@ func (d *DrsBackend) GetUploadURL(ctx context.Context, guid string, filename str } // Add filename/bucket hints q := url.Values{} - q.Set("file_name", filename) + if strings.TrimSpace(filename) != "" { + q.Set("file_name", filename) + } // Evaluate bucket from argument or struct effectiveBucket := bucket @@ -292,7 +313,9 @@ func (d *DrsBackend) GetUploadURL(ctx context.Context, guid string, filename str q.Set("bucket", effectiveBucket) } - u += "?" + q.Encode() + if encoded := q.Encode(); encoded != "" { + u += "?" + encoded + } var res struct { URL string `json:"url"` @@ -304,7 +327,7 @@ func (d *DrsBackend) GetUploadURL(ctx context.Context, guid string, filename str } func (d *DrsBackend) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { - u, err := d.buildURL("user/data/multipart/init") + u, err := d.buildURL("data/multipart/init") if err != nil { return nil, err } @@ -337,7 +360,7 @@ func (d *DrsBackend) InitMultipartUpload(ctx context.Context, guid string, filen } func (d *DrsBackend) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { - u, err := d.buildURL("user/data/multipart/upload") + u, err := d.buildURL("data/multipart/upload") if err != nil { return "", err } @@ -367,7 +390,7 @@ func (d *DrsBackend) GetMultipartUploadURL(ctx context.Context, key string, uplo } func (d *DrsBackend) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error 
{ - u, err := d.buildURL("user/data/multipart/complete") + u, err := d.buildURL("data/multipart/complete") if err != nil { return err } diff --git a/backend/gen3/gen3.go b/backend/gen3/gen3.go index e7cddaf..4213601 100644 --- a/backend/gen3/gen3.go +++ b/backend/gen3/gen3.go @@ -70,35 +70,16 @@ func resolveRange(fdr *common.FileDownloadResponseObject) (start int64, end *int } func (g *Gen3Backend) GetFileDetails(ctx context.Context, guid string) (*drs.DRSObject, error) { - // 1. Try Shepherd - hasShepherd, err := g.client.Fence().CheckForShepherdAPI(ctx) - if err == nil && hasShepherd { - endpoint := strings.TrimSuffix(g.client.GetCredential().APIEndpoint, "/") + common.ShepherdEndpoint + "/objects/" + guid - rb := g.client.New(http.MethodGet, endpoint) - resp, err := g.client.Do(ctx, rb) - if err == nil && resp.StatusCode == http.StatusOK { - defer resp.Body.Close() - var shepherdResp struct { - Record struct { - FileName string `json:"file_name"` - Size int64 `json:"size"` - Did string `json:"did"` - } `json:"record"` - } - if err := json.NewDecoder(resp.Body).Decode(&shepherdResp); err == nil { - return &drs.DRSObject{ - Name: shepherdResp.Record.FileName, - Size: shepherdResp.Record.Size, - Id: shepherdResp.Record.Did, - }, nil - } + if oid := drs.NormalizeOid(guid); oid != "" { + if cached, ok := drs.PrefetchedBySHA(ctx, oid); ok { + obj := cached + return &obj, nil } - if err != nil { - g.Logger().Warn("Shepherd lookup failed, falling back to Indexd", "guid", guid, "error", err) + if recs, err := g.client.Indexd().GetObjectByHash(ctx, "sha256", oid); err == nil && len(recs) > 0 { + return &recs[0], nil } } - // 2. 
Fallback to Indexd return g.client.Indexd().GetObject(ctx, guid) } @@ -111,16 +92,13 @@ func (g *Gen3Backend) BatchGetObjectsByHash(ctx context.Context, hashes []string } func (g *Gen3Backend) GetDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { - // For Gen3, often "accessID" is used as a protocol hint like "s3", "gs", or "?protocol=s3" - // 1. Try Fence first + // Try Fence first. url, err := g.client.Fence().GetDownloadPresignedUrl(ctx, guid, accessID) if err == nil && url != "" { return url, nil } - // 2. Fallback to Indexd - // Indexd expects "s3", "gs", "ftp", "http", "https" etc. - // We need to clean up accessID if it contains query params like "?protocol=" + // Resolve access type for Indexd. accessType := "s3" // default if strings.Contains(accessID, "protocol=") { parts := strings.Split(accessID, "=") @@ -131,7 +109,24 @@ func (g *Gen3Backend) GetDownloadURL(ctx context.Context, guid string, accessID accessType = accessID } - resp, errIdx := g.client.Indexd().GetDownloadURL(ctx, guid, accessType) + // Checksum-first resolution. 
+ resolvedID := guid + if oid := drs.NormalizeOid(guid); oid != "" { + if cached, ok := drs.PrefetchedBySHA(ctx, oid); ok { + if id := strings.TrimSpace(cached.Id); id != "" { + resolvedID = id + } + } + if resolvedID == guid { + if recs, hashErr := g.client.Indexd().GetObjectByHash(ctx, "sha256", oid); hashErr == nil && len(recs) > 0 { + if id := strings.TrimSpace(recs[0].Id); id != "" { + resolvedID = id + } + } + } + } + + resp, errIdx := g.client.Indexd().GetDownloadURL(ctx, resolvedID, accessType) if errIdx == nil && resp != nil && resp.URL != "" { return resp.URL, nil } diff --git a/common/constants.go b/common/constants.go index 1191795..6abde97 100644 --- a/common/constants.go +++ b/common/constants.go @@ -7,15 +7,15 @@ import ( const ( // B is bytes - B int64 = iota + B int64 = 1 // KB is kilobytes - KB int64 = 1 << (10 * iota) + KB int64 = 1024 * B // MB is megabytes - MB + MB int64 = 1024 * KB // GB is gigabytes - GB + GB int64 = 1024 * MB // TB is terabytes - TB + TB int64 = 1024 * GB ) const ( // DefaultUseShepherd sets whether gen3client will attempt to use the Shepherd / Object Management API @@ -36,31 +36,31 @@ const ( ShepherdVersionEndpoint = "/mds/version" // IndexdIndexEndpoint is the endpoint postfix for INDEXD index - IndexdIndexEndpoint = "/index/index" + IndexdIndexEndpoint = "/index" // FenceUserEndpoint is the endpoint postfix for FENCE user FenceUserEndpoint = "/user/user" - // FenceDataEndpoint is the endpoint postfix for FENCE data - FenceDataEndpoint = "/user/data" + // FenceDataEndpoint is the canonical endpoint prefix for upload/delete flows + FenceDataEndpoint = "/data/upload" // FenceAccessTokenEndpoint is the endpoint postfix for FENCE access token FenceAccessTokenEndpoint = "/user/credentials/api/access_token" - // FenceDataUploadEndpoint is the endpoint postfix for FENCE data upload - FenceDataUploadEndpoint = FenceDataEndpoint + "/upload" + // FenceDataUploadEndpoint is the endpoint postfix for upload init/presigned-url + 
FenceDataUploadEndpoint = FenceDataEndpoint - // FenceDataDownloadEndpoint is the endpoint postfix for FENCE data download - FenceDataDownloadEndpoint = FenceDataEndpoint + "/download" + // FenceDataDownloadEndpoint is the endpoint postfix for download presigned-url + FenceDataDownloadEndpoint = "/data/download" - // FenceDataMultipartInitEndpoint is the endpoint postfix for FENCE multipart init - FenceDataMultipartInitEndpoint = FenceDataEndpoint + "/multipart/init" + // FenceDataMultipartInitEndpoint is the endpoint postfix for multipart init + FenceDataMultipartInitEndpoint = "/data/multipart/init" - // FenceDataMultipartUploadEndpoint is the endpoint postfix for FENCE multipart upload - FenceDataMultipartUploadEndpoint = FenceDataEndpoint + "/multipart/upload" + // FenceDataMultipartUploadEndpoint is the endpoint postfix for multipart upload + FenceDataMultipartUploadEndpoint = "/data/multipart/upload" - // FenceDataMultipartCompleteEndpoint is the endpoint postfix for FENCE multipart complete - FenceDataMultipartCompleteEndpoint = FenceDataEndpoint + "/multipart/complete" + // FenceDataMultipartCompleteEndpoint is the endpoint postfix for multipart complete + FenceDataMultipartCompleteEndpoint = "/data/multipart/complete" // PathSeparator is os dependent path separator char PathSeparator = string(os.PathSeparator) diff --git a/download/transfer.go b/download/transfer.go index 75f3b99..b879075 100644 --- a/download/transfer.go +++ b/download/transfer.go @@ -153,8 +153,7 @@ func DownloadToPathWithOptions( } if err := downloadToPathMultipart(ctx, bk, logger, guid, dstPath, protocol, info.Size, opts); err != nil { - logger.Warn("multipart download failed, falling back to single stream", "guid", guid, "error", err) - return downloadToPathSingle(ctx, bk, logger, guid, dstPath, protocol) + return err } return nil diff --git a/drs/drs.go b/drs/drs.go index da21e1b..2087b8c 100644 --- a/drs/drs.go +++ b/drs/drs.go @@ -11,6 +11,33 @@ import ( // NAMESPACE is the UUID 
namespace used for generating DRS UUIDs var NAMESPACE = uuid.NewMD5(uuid.NameSpaceURL, []byte("calypr.org")) +func sanitizePathComponent(v string) string { + v = strings.TrimSpace(v) + v = strings.Trim(v, "/") + v = strings.ReplaceAll(v, "\\", "/") + return strings.ReplaceAll(v, " ", "_") +} + +// StoragePrefix returns the bucket key prefix used for object placement. +// Preferred layout is "/" when organization is provided. +// When organization is empty, it falls back to "/" for hyphenated +// project IDs or "default/" otherwise. +func StoragePrefix(org, project string) string { + org = sanitizePathComponent(org) + project = sanitizePathComponent(project) + if project == "" { + return "" + } + if org != "" { + return org + "/" + project + } + if strings.Contains(project, "-") { + parts := strings.SplitN(project, "-", 2) + return sanitizePathComponent(parts[0]) + "/" + sanitizePathComponent(parts[1]) + } + return "default/" + project +} + func ProjectToResource(org, project string) (string, error) { if org != "" { return "/programs/" + org + "/projects/" + project, nil @@ -44,12 +71,7 @@ func FindMatchingRecord(records []DRSObject, organization, projectId string) (*D return nil, fmt.Errorf("error converting project ID to resource format: %v", err) } - var fallback *DRSObject for _, record := range records { - if fallback == nil { - r := record - fallback = &r - } for _, access := range record.AccessMethods { if access.Authorizations == nil { continue @@ -66,8 +88,7 @@ func FindMatchingRecord(records []DRSObject, organization, projectId string) (*D } } } - - return fallback, nil + return nil, nil } // DRS UUID generation using SHA1 (compatible with git-drs) @@ -76,13 +97,27 @@ func GenerateDrsID(projectId, hash string) string { } func BuildDrsObj(fileName string, checksum string, size int64, drsId string, bucketName string, org string, projectId string) (*DRSObject, error) { + return BuildDrsObjWithPrefix(fileName, checksum, size, drsId, bucketName, org, projectId, 
"") +} + +func BuildDrsObjWithPrefix(fileName string, checksum string, size int64, drsId string, bucketName string, org string, projectId string, storagePrefix string) (*DRSObject, error) { if bucketName == "" { return nil, fmt.Errorf("error: bucket name is empty") } checksum = NormalizeOid(checksum) - // Standard Gen3-style storage path: s3://bucket/guid/checksum - fileURL := fmt.Sprintf("s3://%s/%s/%s", bucketName, drsId, checksum) + prefix := strings.Trim(strings.TrimSpace(storagePrefix), "/") + if prefix == "" { + prefix = StoragePrefix(org, projectId) + } + var fileURL string + // Canonical CAS-style storage path: + // s3://bucket/{org}/{project}/sha256 + if prefix != "" { + fileURL = fmt.Sprintf("s3://%s/%s/%s", bucketName, prefix, checksum) + } else { + fileURL = fmt.Sprintf("s3://%s/%s", bucketName, checksum) + } authzStr, err := ProjectToResource(org, projectId) if err != nil { @@ -135,7 +170,6 @@ func ConvertToCandidate(obj *DRSObject) DRSObjectCandidate { } return DRSObjectCandidate{ - Id: obj.Id, Name: obj.Name, Size: obj.Size, Version: obj.Version, diff --git a/drs/object_builder.go b/drs/object_builder.go index b3b0c93..82a1b81 100644 --- a/drs/object_builder.go +++ b/drs/object_builder.go @@ -8,11 +8,12 @@ import ( ) type ObjectBuilder struct { - Bucket string - ProjectID string - Organization string - AccessType string - PathStyle string // "CAS" or "" (Gen3 default) + Bucket string + ProjectID string + Organization string + StoragePrefix string + AccessType string + PathStyle string // "CAS" or "" (Gen3 default) } func NewObjectBuilder(bucket, projectID string) ObjectBuilder { @@ -33,16 +34,20 @@ func (b ObjectBuilder) Build(fileName string, checksum string, size int64, drsID accessType = "s3" } - // Remove sha256: prefix if present for clean S3 key + // Remove sha256: prefix if present for clean S3 key. 
checksum = strings.TrimPrefix(checksum, "sha256:") + prefix := strings.Trim(strings.TrimSpace(b.StoragePrefix), "/") + if prefix == "" { + prefix = StoragePrefix(b.Organization, b.ProjectID) + } var fileURL string - if b.PathStyle == "CAS" { - // CAS-style (s3://bucket/checksum) - fileURL = fmt.Sprintf("s3://%s/%s", b.Bucket, checksum) + // Canonical CAS-style (s3://bucket/{org}/{project}/sha256). + // PathStyle is kept for compatibility, but object identity is checksum-first. + if prefix != "" { + fileURL = fmt.Sprintf("s3://%s/%s/%s", b.Bucket, prefix, checksum) } else { - // Gen3-style (s3://bucket/guid/checksum) - fileURL = fmt.Sprintf("s3://%s/%s/%s", b.Bucket, drsID, checksum) + fileURL = fmt.Sprintf("s3://%s/%s", b.Bucket, checksum) } authzStr, err := ProjectToResource(b.Organization, b.ProjectID) diff --git a/drs/object_builder_test.go b/drs/object_builder_test.go index e196e00..b02a385 100644 --- a/drs/object_builder_test.go +++ b/drs/object_builder_test.go @@ -1,7 +1,6 @@ package drs import ( - "path/filepath" "strings" "testing" ) @@ -31,9 +30,12 @@ func TestObjectBuilderBuildSuccess(t *testing.T) { if len(obj.AccessMethods) != 1 { t.Fatalf("expected 1 access method, got %d", len(obj.AccessMethods)) } - if !strings.Contains(obj.AccessMethods[0].AccessURL.URL, filepath.Join("bucket", "did-1", "sha-256")) { + if !strings.Contains(obj.AccessMethods[0].AccessURL.URL, "bucket/test/project/sha-256") { t.Fatalf("unexpected access URL: %s", obj.AccessMethods[0].AccessURL.URL) } + if len(obj.Aliases) != 0 { + t.Fatalf("expected no aliases, got: %#v", obj.Aliases) + } if obj.AccessMethods[0].Type != "s3" { t.Fatalf("unexpected access method type: %s", obj.AccessMethods[0].Type) } diff --git a/drs/prefetch_context.go b/drs/prefetch_context.go new file mode 100644 index 0000000..316a565 --- /dev/null +++ b/drs/prefetch_context.go @@ -0,0 +1,24 @@ +package drs + +import "context" + +type prefetchedBySHAContextKey struct{} + +// WithPrefetchedBySHA stores pre-resolved 
DRS records keyed by normalized sha256. +func WithPrefetchedBySHA(ctx context.Context, bySHA map[string]DRSObject) context.Context { + if len(bySHA) == 0 { + return ctx + } + return context.WithValue(ctx, prefetchedBySHAContextKey{}, bySHA) +} + +// PrefetchedBySHA returns a pre-resolved DRS record for a normalized sha256. +func PrefetchedBySHA(ctx context.Context, sha256 string) (DRSObject, bool) { + m, ok := ctx.Value(prefetchedBySHAContextKey{}).(map[string]DRSObject) + if !ok || len(m) == 0 { + return DRSObject{}, false + } + obj, exists := m[sha256] + return obj, exists +} + diff --git a/drs/storage_prefix_test.go b/drs/storage_prefix_test.go new file mode 100644 index 0000000..4276a70 --- /dev/null +++ b/drs/storage_prefix_test.go @@ -0,0 +1,25 @@ +package drs + +import "testing" + +func TestStoragePrefix(t *testing.T) { + tests := []struct { + name string + org string + project string + want string + }{ + {name: "org + project", org: "cbdsTest", project: "git_drs_e2e_test", want: "cbdsTest/git_drs_e2e_test"}, + {name: "hyphenated project", org: "", project: "prog-proj", want: "prog/proj"}, + {name: "plain project", org: "", project: "projonly", want: "default/projonly"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := StoragePrefix(tt.org, tt.project) + if got != tt.want { + t.Fatalf("StoragePrefix(%q,%q)=%q want=%q", tt.org, tt.project, got, tt.want) + } + }) + } +} diff --git a/fence/client.go b/fence/client.go index 4a5cacf..6e9a71c 100644 --- a/fence/client.go +++ b/fence/client.go @@ -21,7 +21,7 @@ import ( ) // FenceBucketEndpoint is the endpoint postfix for FENCE bucket list -const FenceBucketEndpoint = "/user/data/buckets" +const FenceBucketEndpoint = "/data/buckets" //go:generate mockgen -destination=../mocks/mock_fence.go -package=mocks github.com/calypr/data-client/fence FenceInterface @@ -311,7 +311,7 @@ func (f *FenceClient) resolveFromFence(ctx context.Context, guid, protocolText s } func (f *FenceClient) 
GetBucketDetails(ctx context.Context, bucket string) (*S3Bucket, error) { - url := f.cred.APIEndpoint + "/user/data/buckets" + url := f.cred.APIEndpoint + "/data/buckets" resp, err := f.Do(ctx, &request.RequestBuilder{ Method: http.MethodGet, Url: url, @@ -506,31 +506,36 @@ func (f *FenceClient) ParseFenceURLResponse(resp *http.Response) (FenceResponse, return msg, fmt.Errorf("failed to read response body: %w", err) } bodyStr := string(bodyBytes) - - if len(bodyBytes) > 0 { - err = json.Unmarshal(bodyBytes, &msg) - if err != nil { - return msg, fmt.Errorf("failed to decode JSON: %w (Raw body: %s)", err, bodyStr) - } + strURL := "" + if resp.Request != nil && resp.Request.URL != nil { + strURL = resp.Request.URL.String() } + // Handle HTTP error statuses first so plain-text error bodies (for example: + // "Unauthorized") are reported accurately instead of as JSON decode failures. if !(resp.StatusCode == 200 || resp.StatusCode == 201 || resp.StatusCode == 204) { - strUrl := resp.Request.URL.String() switch resp.StatusCode { case http.StatusUnauthorized: - return msg, fmt.Errorf("401 Unauthorized: %s (URL: %s)", bodyStr, strUrl) + return msg, fmt.Errorf("401 Unauthorized: %s (URL: %s)", bodyStr, strURL) case http.StatusForbidden: - return msg, fmt.Errorf("403 Forbidden: %s (URL: %s)", bodyStr, strUrl) + return msg, fmt.Errorf("403 Forbidden: %s (URL: %s)", bodyStr, strURL) case http.StatusNotFound: - return msg, fmt.Errorf("404 Not Found: %s (URL: %s)", bodyStr, strUrl) + return msg, fmt.Errorf("404 Not Found: %s (URL: %s)", bodyStr, strURL) case http.StatusInternalServerError: - return msg, fmt.Errorf("500 Internal Server Error: %s (URL: %s)", bodyStr, strUrl) + return msg, fmt.Errorf("500 Internal Server Error: %s (URL: %s)", bodyStr, strURL) case http.StatusServiceUnavailable: - return msg, fmt.Errorf("503 Service Unavailable: %s (URL: %s)", bodyStr, strUrl) + return msg, fmt.Errorf("503 Service Unavailable: %s (URL: %s)", bodyStr, strURL) case 
http.StatusBadGateway: - return msg, fmt.Errorf("502 Bad Gateway: %s (URL: %s)", bodyStr, strUrl) + return msg, fmt.Errorf("502 Bad Gateway: %s (URL: %s)", bodyStr, strURL) default: - return msg, fmt.Errorf("unexpected error (%d): %s (URL: %s)", resp.StatusCode, bodyStr, strUrl) + return msg, fmt.Errorf("unexpected error (%d): %s (URL: %s)", resp.StatusCode, bodyStr, strURL) + } + } + + if len(bodyBytes) > 0 { + err = json.Unmarshal(bodyBytes, &msg) + if err != nil { + return msg, fmt.Errorf("failed to decode JSON response (status=%d, url=%s): %w (raw body: %s)", resp.StatusCode, strURL, err, bodyStr) } } diff --git a/fence/client_test.go b/fence/client_test.go index 6a85de3..9b01203 100644 --- a/fence/client_test.go +++ b/fence/client_test.go @@ -55,7 +55,7 @@ func (m *mockFenceServer) handler(t *testing.T) http.HandlerFunc { w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(FenceResponse{URL: "https://download.url"}) return - case r.Method == http.MethodGet && path == "/user/data/buckets": + case r.Method == http.MethodGet && path == "/data/buckets": w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(S3BucketsResponse{ S3Buckets: map[string]*S3Bucket{ @@ -244,7 +244,7 @@ func TestFenceClient_UserPing(t *testing.T) { } if resp.BucketPrograms["test-bucket"] != "" { - // Our mock for /user/data/buckets returns a bucket but no programs by default unless we update it + // Our mock for /data/buckets returns a bucket but no programs by default unless we update it // In my update to types.go, I added Programs to S3Bucket. 
} } diff --git a/indexd/client.go b/indexd/client.go index 0dacf78..c2114d8 100644 --- a/indexd/client.go +++ b/indexd/client.go @@ -8,6 +8,7 @@ import ( "io" "log/slog" "net/http" + "strings" "github.com/calypr/data-client/conf" "github.com/calypr/data-client/drs" @@ -93,7 +94,7 @@ func (c *IndexdClient) RegisterIndexdRecord(ctx context.Context, indexdObj *Inde return nil, err } - url := fmt.Sprintf("%s/index/index", c.cred.APIEndpoint) + url := fmt.Sprintf("%s/index", c.cred.APIEndpoint) resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodPost, Url: url, @@ -124,7 +125,7 @@ func (c *IndexdClient) DeleteIndexdRecord(ctx context.Context, did string) error return err } - url := fmt.Sprintf("%s/index/index/%s?rev=%s", c.cred.APIEndpoint, did, record.Rev) + url := fmt.Sprintf("%s/index/%s?rev=%s", c.cred.APIEndpoint, did, record.Rev) resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodDelete, Url: url, @@ -147,7 +148,7 @@ func (c *IndexdClient) DeleteIndexdRecord(ctx context.Context, did string) error } func (c *IndexdClient) getIndexdRecordByDID(ctx context.Context, did string) (*OutputInfo, error) { - url := fmt.Sprintf("%s/index/index/%s", c.cred.APIEndpoint, did) + url := fmt.Sprintf("%s/index/%s", c.cred.APIEndpoint, did) resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodGet, Url: url, @@ -171,7 +172,7 @@ func (c *IndexdClient) getIndexdRecordByDID(ctx context.Context, did string) (*O } func (c *IndexdClient) GetObjectByHash(ctx context.Context, hashType, hashValue string) ([]drs.DRSObject, error) { - url := fmt.Sprintf("%s/index/index?hash=%s:%s", c.cred.APIEndpoint, hashType, hashValue) + url := fmt.Sprintf("%s/index?hash=%s:%s", c.cred.APIEndpoint, hashType, hashValue) resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodGet, Url: url, @@ -246,7 +247,7 @@ func (c *IndexdClient) ListObjectsByProject(ctx context.Context, projectId strin active := true for active { - url := 
fmt.Sprintf("%s/index/index?authz=%s&limit=%d&page=%d", + url := fmt.Sprintf("%s/index?authz=%s&limit=%d&page=%d", c.cred.APIEndpoint, resourcePath, PAGESIZE, pageNum) resp, err := c.Do(ctx, &request.RequestBuilder{ @@ -348,7 +349,7 @@ func (c *IndexdClient) UpdateRecord(ctx context.Context, updateInfo *drs.DRSObje return nil, fmt.Errorf("error marshaling indexd update payload: %v", err) } - url := fmt.Sprintf("%s/index/index/%s?rev=%s", c.cred.APIEndpoint, did, record.Rev) + url := fmt.Sprintf("%s/index/%s?rev=%s", c.cred.APIEndpoint, did, record.Rev) resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodPut, Url: url, @@ -458,19 +459,51 @@ func (c *IndexdClient) DeleteRecordsByProject(ctx context.Context, projectId str if err != nil { return err } + + // Snapshot and dedupe IDs first so pagination isn't affected by deletes-in-flight. + ids := make([]string, 0, 128) + seen := make(map[string]struct{}) for rec := range recs { if rec.Error != nil { return rec.Error } - err := c.DeleteIndexdRecord(ctx, rec.Object.Id) + + if rec.Object == nil || rec.Object.Id == "" { + continue + } + if _, ok := seen[rec.Object.Id]; ok { + continue + } + seen[rec.Object.Id] = struct{}{} + ids = append(ids, rec.Object.Id) + } + + for _, id := range ids { + err := c.DeleteIndexdRecord(ctx, id) if err != nil { - c.logger.Error(fmt.Sprintf("DeleteRecordsByProject Error for %s: %v", rec.Object.Id, err)) + // Project-wide cleanup should be idempotent; stale/deleted IDs are expected. 
+ if isNotFoundErr(err) { + c.logger.Info(fmt.Sprintf("DeleteRecordsByProject: record already absent %s", id)) + continue + } + c.logger.Error(fmt.Sprintf("DeleteRecordsByProject Error for %s: %v", id, err)) continue } } return nil } +func isNotFoundErr(err error) bool { + if err == nil { + return false + } + msg := err.Error() + return strings.Contains(msg, "status: 404") || + strings.Contains(msg, "status=404") || + strings.Contains(msg, "Object not found") || + strings.Contains(msg, "not found") +} + func (c *IndexdClient) DeleteRecordByHash(ctx context.Context, hashValue string, projectId string) error { records, err := c.GetObjectByHash(ctx, "sha256", hashValue) if err != nil { @@ -540,15 +573,37 @@ func (c *IndexdClient) RegisterRecords(ctx context.Context, records []*drs.DRSOb return nil, fmt.Errorf("failed to register records: %s (status: %d)", string(body), resp.StatusCode) } - var registered []*drs.DRSObject - if err := json.NewDecoder(resp.Body).Decode(®istered); err != nil { - // Fallback: If server returns DrsObjectWithAuthz slice (which it might based on my service.go implementation), decode that - return nil, fmt.Errorf("error decoding registered objects: %v", err) + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error reading registered objects response: %v", err) } + registered, err := decodeRegisteredObjects(body) + if err != nil { + return nil, fmt.Errorf("error decoding registered objects: %v", err) + } return registered, nil } +func decodeRegisteredObjects(body []byte) ([]*drs.DRSObject, error) { + trimmed := bytes.TrimSpace(body) + if len(trimmed) == 0 { + return nil, fmt.Errorf("empty response body") + } + + // Canonical shape from DRS register API. 
+ var wrapped struct { + Objects []*drs.DRSObject `json:"objects"` + } + if err := json.Unmarshal(trimmed, &wrapped); err != nil { + return nil, fmt.Errorf("unsupported response payload: %s", string(trimmed)) + } + if len(wrapped.Objects) == 0 { + return nil, fmt.Errorf("register response did not include objects") + } + return wrapped.Objects, nil +} + func (c *IndexdClient) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { if len(hashes) == 0 { return nil, nil @@ -565,7 +620,7 @@ func (c *IndexdClient) BatchGetObjectsByHash(ctx context.Context, hashes []strin return nil, err } - url := fmt.Sprintf("%s/index/index/bulk/hashes", c.cred.APIEndpoint) + url := fmt.Sprintf("%s/index/bulk/hashes", c.cred.APIEndpoint) resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodPost, Url: url, diff --git a/indexd/client_test.go b/indexd/client_test.go index 3c20985..7536742 100644 --- a/indexd/client_test.go +++ b/indexd/client_test.go @@ -28,7 +28,7 @@ func (m *mockIndexdServer) handler(t *testing.T) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { path := r.URL.Path switch { - case r.Method == http.MethodGet && path == "/index/index": + case r.Method == http.MethodGet && path == "/index": if hashQuery := r.URL.Query().Get("hash"); hashQuery != "" { record := sampleOutputInfo() page := ListRecords{Records: []OutputInfo{record}} @@ -50,7 +50,7 @@ func (m *mockIndexdServer) handler(t *testing.T) http.HandlerFunc { return } - case r.Method == http.MethodPost && path == "/index/index": + case r.Method == http.MethodPost && path == "/index": w.WriteHeader(http.StatusOK) _, _ = w.Write([]byte(`{"did":"did-1"}`)) return @@ -72,13 +72,13 @@ func (m *mockIndexdServer) handler(t *testing.T) http.HandlerFunc { w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(obj) return - case r.Method == http.MethodGet && strings.HasPrefix(path, "/index/index/"): + case r.Method == http.MethodGet && 
strings.HasPrefix(path, "/index/"): record := sampleOutputInfo() record.Rev = "rev-1" w.WriteHeader(http.StatusOK) _ = json.NewEncoder(w).Encode(record) return - case r.Method == http.MethodPut && strings.HasPrefix(path, "/index/index/"): + case r.Method == http.MethodPut && strings.HasPrefix(path, "/index/"): body, _ := io.ReadAll(r.Body) payload := UpdateInputInfo{} _ = json.Unmarshal(body, &payload) @@ -87,7 +87,7 @@ func (m *mockIndexdServer) handler(t *testing.T) http.HandlerFunc { m.mu.Unlock() w.WriteHeader(http.StatusOK) return - case r.Method == http.MethodDelete && strings.HasPrefix(path, "/index/index/"): + case r.Method == http.MethodDelete && strings.HasPrefix(path, "/index/"): w.WriteHeader(http.StatusNoContent) return } @@ -264,3 +264,25 @@ func TestIndexdClient_GetObjectDirect(t *testing.T) { t.Fatalf("unexpected record: %+v", record) } } + +func TestDecodeRegisteredObjects_Wrapped(t *testing.T) { + payload := []byte(`{"objects":[{"id":"did-1","name":"file.txt","size":123,"checksums":[{"type":"sha256","checksum":"sha-256"}],"access_methods":[{"type":"s3","access_url":{"url":"s3://bucket/key"}}]}]}`) + objs, err := decodeRegisteredObjects(payload) + if err != nil { + t.Fatalf("decodeRegisteredObjects wrapped payload error: %v", err) + } + if len(objs) != 1 { + t.Fatalf("expected 1 object, got %d", len(objs)) + } + if objs[0] == nil || objs[0].Id != "did-1" { + t.Fatalf("unexpected object: %+v", objs[0]) + } +} + +func TestDecodeRegisteredObjects_ArrayRejected(t *testing.T) { + payload := []byte(`[{"id":"did-1","name":"file.txt","size":123,"checksums":[{"type":"sha256","checksum":"sha-256"}],"access_methods":[{"type":"s3","access_url":{"url":"s3://bucket/key"}}]}]`) + _, err := decodeRegisteredObjects(payload) + if err == nil { + t.Fatal("expected error for non-canonical array register response, got nil") + } +} diff --git a/indexd/upsert.go b/indexd/upsert.go index cdd1e03..8fd52ff 100644 --- a/indexd/upsert.go +++ b/indexd/upsert.go @@ -11,6 +11,7 
@@ import ( // UpsertIndexdRecord creates or updates an indexd record with a new URL. func (c *IndexdClient) UpsertIndexdRecord(ctx context.Context, url string, sha256 string, fileSize int64, projectId string) (*drs.DRSObject, error) { + sha256 = drs.NormalizeOid(sha256) uuid := drs.DrsUUID(projectId, sha256) records, err := c.GetObjectByHash(ctx, "sha256", sha256) @@ -20,7 +21,8 @@ func (c *IndexdClient) UpsertIndexdRecord(ctx context.Context, url string, sha25 var matchingRecord *drs.DRSObject for i := range records { - if records[i].Id == uuid { + // Hard cutover: checksum is content identity; do not match by record ID shape. + if records[i].Checksums.SHA256 == sha256 { matchingRecord = &records[i] break } diff --git a/upload/multipart.go b/upload/multipart.go index b0e3cce..dded504 100644 --- a/upload/multipart.go +++ b/upload/multipart.go @@ -54,10 +54,12 @@ func MultipartUpload(ctx context.Context, g3 client.Gen3Interface, req common.Fi return fmt.Errorf("failed to initiate multipart upload: %w", err) } - // 2. Construct the S3 Key correctly - // Ensure finalGUID is not empty to avoid a leading slash - key := fmt.Sprintf("%s/%s", finalGUID, req.ObjectKey) - g3.Logger().InfoContext(ctx, "Initialized Upload", "id", uploadID, "key", key) + // 2. Use the exact key passed during multipart init. + // The server creates the multipart session for `file_name=req.ObjectKey`. + // Rewriting the key client-side (for example prefixing GUID again) can cause + // "NoSuchUpload" because UploadPart/Complete then target a different object key. 
+ key := req.ObjectKey + g3.Logger().InfoContext(ctx, "Initialized Upload", "id", uploadID, "guid", finalGUID, "key", key) chunkSize := OptimalChunkSize(fileSize) From aa49fb79a583edf960b9e1244f3ea7deb2f58629 Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Thu, 26 Mar 2026 13:34:21 -0700 Subject: [PATCH 04/13] rearrange data client to support drs server --- .gitmodules | 4 + Makefile | 151 ++++++ backend/drs/drs.go | 463 ------------------ backend/gen3/gen3.go | 296 ------------ backend/interface.go | 63 --- cmd/auth.go | 6 +- cmd/collaborator.go | 6 +- cmd/configure.go | 2 +- cmd/delete.go | 2 +- cmd/download-multiple.go | 47 +- cmd/download-single.go | 34 +- cmd/retry-upload.go | 3 +- cmd/upload-multipart.go | 3 +- cmd/upload-multiple.go | 15 +- cmd/upload-single.go | 3 +- cmd/upload.go | 17 +- common/types.go | 18 + conf/config.go | 6 + credentials/interface.go | 18 + download/batch.go | 6 +- download/downloader.go | 21 +- download/file_info.go | 16 +- download/orchestrator.go | 18 + download/transfer.go | 126 ++++- download/transfer_test.go | 276 ++++++++++- download/url_resolution.go | 8 +- {indexd => drs}/client.go | 647 +++++++++++++------------- drs/convert.go | 107 +++++ drs/drs.go | 52 +-- drs/interface.go | 54 +++ drs/internal_types.go | 107 +++++ drs/object_builder.go | 16 +- drs/object_builder_test.go | 10 +- drs/resolve.go | 62 +++ drs/server_client.go | 23 + drs/types.go | 49 +- drs/upsert.go | 61 +++ fence/client_test.go | 4 + fence/types.go | 1 + g3client/client.go | 82 +++- ga4gh/data-repository-service-schemas | 1 + go.mod | 69 ++- go.sum | 173 ++++++- hash/hash.go | 25 + indexd/client_test.go | 288 ------------ indexd/convert.go | 99 ---- indexd/records.go | 97 ---- indexd/types.go | 70 --- indexd/types_test.go | 60 --- indexd/upsert.go | 56 --- localclient/client.go | 63 +++ mocks/mock_drs_client.go | 215 +++++++++ mocks/mock_gen3interface.go | 84 ++-- mocks/mock_indexd.go | 296 ------------ runtime/client.go | 48 ++ s3utils/s3_utils.go | 
122 +++-- tests/download-multiple_test.go | 118 ++--- tests/utils_test.go | 59 ++- transfer/http.go | 69 +++ transfer/interface.go | 41 ++ transfer/service.go | 76 +++ transfer/signer/gen3/signer.go | 130 ++++++ transfer/signer/local/signer.go | 206 ++++++++ transfer/storage/gocloud.go | 89 ++++ upload/batch.go | 51 +- upload/multipart.go | 190 ++++++-- upload/multipart_test.go | 209 +++++++-- upload/orchestrator.go | 121 +++++ upload/request.go | 84 ++-- upload/retry.go | 29 +- upload/singleFile.go | 35 +- upload/types.go | 19 - upload/upload.go | 104 ++--- upload/utils.go | 16 +- 74 files changed, 3454 insertions(+), 2861 deletions(-) create mode 100644 .gitmodules delete mode 100644 backend/drs/drs.go delete mode 100644 backend/gen3/gen3.go delete mode 100644 backend/interface.go create mode 100644 credentials/interface.go create mode 100644 download/orchestrator.go rename {indexd => drs}/client.go (52%) create mode 100644 drs/convert.go create mode 100644 drs/interface.go create mode 100644 drs/internal_types.go create mode 100644 drs/resolve.go create mode 100644 drs/server_client.go create mode 100644 drs/upsert.go create mode 160000 ga4gh/data-repository-service-schemas delete mode 100644 indexd/client_test.go delete mode 100644 indexd/convert.go delete mode 100644 indexd/records.go delete mode 100644 indexd/types.go delete mode 100644 indexd/types_test.go delete mode 100644 indexd/upsert.go create mode 100644 localclient/client.go create mode 100644 mocks/mock_drs_client.go delete mode 100644 mocks/mock_indexd.go create mode 100644 runtime/client.go create mode 100644 transfer/http.go create mode 100644 transfer/interface.go create mode 100644 transfer/service.go create mode 100644 transfer/signer/gen3/signer.go create mode 100644 transfer/signer/local/signer.go create mode 100644 transfer/storage/gocloud.go create mode 100644 upload/orchestrator.go diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..177c5c6 --- /dev/null +++ 
b/.gitmodules @@ -0,0 +1,4 @@ +[submodule "ga4gh/data-repository-service-schemas"] + path = ga4gh/data-repository-service-schemas + url = https://github.com/kellrott/data-repository-service-schemas.git + branch = feature/get-by-checksum diff --git a/Makefile b/Makefile index c936cd2..693e65a 100644 --- a/Makefile +++ b/Makefile @@ -14,6 +14,22 @@ BIN_DIR := ./bin COVERAGE_THRESHOLD := 30 PACKAGE_COVERAGE_THRESHOLD := 20 +# OpenAPI Generation Variables +OPENAPI ?= ga4gh/data-repository-service-schemas/openapi/data_repository_service.openapi.yaml +OAG_IMAGE ?= openapitools/openapi-generator-cli:latest +REDOCLY_IMAGE ?= redocly/cli:latest +YQ_IMAGE ?= mikefarah/yq:latest +GEN_OUT ?= .tmp/apigen.gen +LFS_OPENAPI ?= apigen/api/lfs.openapi.yaml +LFS_GEN_OUT ?= .tmp/apigen-lfs.gen +BUCKET_OPENAPI ?= apigen/api/bucket.openapi.yaml +BUCKET_GEN_OUT ?= .tmp/apigen-bucket.gen +METRICS_OPENAPI ?= apigen/api/metrics.openapi.yaml +METRICS_GEN_OUT ?= .tmp/apigen-metrics.gen +INTERNAL_OPENAPI ?= apigen/api/internal.openapi.yaml +INTERNAL_GEN_OUT ?= .tmp/apigen-internal.gen +SCHEMAS_SUBMODULE ?= ga4gh/data-repository-service-schemas + # --- Targets --- .PHONY: all build test test-coverage coverage-html coverage-check generate tidy clean help @@ -55,6 +71,140 @@ generate: @echo "--> Running code generation (go generate)..." @go generate ./... +## gen: Generates Go models from OpenAPI specs +gen: + @set -euo pipefail; \ + mkdir -p .tmp; \ + spec="$(OPENAPI)"; \ + if [[ ! -f "$$spec" ]]; then \ + echo "ERROR: OpenAPI spec '$$spec' not found. Run: make init-schemas"; \ + exit 1; \ + fi; \ + if ! 
command -v docker >/dev/null 2>&1; then \ + echo "ERROR: docker is required for 'make gen'."; \ + exit 1; \ + fi; \ + echo "Bundling canonical OpenAPI spec with Redocly..."; \ + docker run --rm \ + --user "$$(id -u):$$(id -g)" \ + -v "$(PWD):/local" \ + $(REDOCLY_IMAGE) bundle /local/$$spec --output /local/.tmp/drs.base.yaml --ext yaml; \ + echo "Merging internal Extensions with yq..."; \ + docker run --rm \ + --user "$$(id -u):$$(id -g)" \ + -v "$(PWD):/local" \ + $(YQ_IMAGE) eval-all 'select(fileIndex == 0) * select(fileIndex == 1)' /local/.tmp/drs.base.yaml /local/apigen/specs/drs-extensions-overlay.yaml > apigen/api/openapi.yaml; \ + rm -rf "$(GEN_OUT)"; \ + docker run --rm --pull=missing \ + --user "$$(id -u):$$(id -g)" \ + -v "$(PWD):/local" \ + $(OAG_IMAGE) generate \ + -g go \ + --skip-validate-spec \ + --git-repo-id data-client \ + --git-user-id calypr \ + -i /local/apigen/api/openapi.yaml \ + -o /local/$(GEN_OUT) \ + --global-property models,modelDocs=false,modelTests=false,supportingFiles=utils.go \ + --additional-properties packageName=drs,enumClassPrefix=true; \ + mkdir -p apigen/api apigen; \ + rm -rf apigen/drs; \ + mkdir -p apigen/drs; \ + find "$(GEN_OUT)" -maxdepth 1 -type f -name '*.go' -exec mv {} apigen/drs/ \; ; \ + echo "Generated DRS client models into ./apigen/drs"; \ + if [[ -f "$(LFS_OPENAPI)" ]]; then $(MAKE) gen-lfs; fi; \ + if [[ -f "$(BUCKET_OPENAPI)" ]]; then $(MAKE) gen-bucket; fi; \ + if [[ -f "$(METRICS_OPENAPI)" ]]; then $(MAKE) gen-metrics; fi; \ + if [[ -f "$(INTERNAL_OPENAPI)" ]]; then $(MAKE) gen-internal; fi + +.PHONY: gen-lfs +gen-lfs: + @set -euo pipefail; \ + rm -rf "$(LFS_GEN_OUT)"; \ + docker run --rm --pull=missing \ + --user "$$(id -u):$$(id -g)" \ + -v "$(PWD):/local" \ + $(OAG_IMAGE) generate \ + -g go \ + --skip-validate-spec \ + --git-repo-id data-client \ + --git-user-id calypr \ + -i /local/apigen/api/lfs.openapi.yaml \ + -o /local/$(LFS_GEN_OUT) \ + --global-property 
models,modelDocs=false,modelTests=false,supportingFiles=utils.go \ + --additional-properties packageName=lfsapi,enumClassPrefix=true; \ + rm -rf apigen/lfsapi; \ + mkdir -p apigen/lfsapi; \ + find "$(LFS_GEN_OUT)" -maxdepth 1 -type f -name '*.go' -exec mv {} apigen/lfsapi/ \; ; \ + echo "Generated LFS models into ./apigen/lfsapi" + +.PHONY: gen-bucket +gen-bucket: + @set -euo pipefail; \ + rm -rf "$(BUCKET_GEN_OUT)"; \ + docker run --rm --pull=missing \ + --user "$$(id -u):$$(id -g)" \ + -v "$(PWD):/local" \ + $(OAG_IMAGE) generate \ + -g go \ + --skip-validate-spec \ + --git-repo-id data-client \ + --git-user-id calypr \ + -i /local/apigen/api/bucket.openapi.yaml \ + -o /local/$(BUCKET_GEN_OUT) \ + --global-property models,modelDocs=false,modelTests=false,supportingFiles=utils.go \ + --additional-properties packageName=bucketapi,enumClassPrefix=true; \ + rm -rf apigen/bucketapi; \ + mkdir -p apigen/bucketapi; \ + find "$(BUCKET_GEN_OUT)" -maxdepth 1 -type f -name '*.go' -exec mv {} apigen/bucketapi/ \; ; \ + echo "Generated Bucket models into ./apigen/bucketapi" + +.PHONY: gen-metrics +gen-metrics: + @set -euo pipefail; \ + rm -rf "$(METRICS_GEN_OUT)"; \ + docker run --rm --pull=missing \ + --user "$$(id -u):$$(id -g)" \ + -v "$(PWD):/local" \ + $(OAG_IMAGE) generate \ + -g go \ + --skip-validate-spec \ + --git-repo-id data-client \ + --git-user-id calypr \ + -i /local/apigen/api/metrics.openapi.yaml \ + -o /local/$(METRICS_GEN_OUT) \ + --global-property models,modelDocs=false,modelTests=false,supportingFiles=utils.go \ + --additional-properties packageName=metricsapi,enumClassPrefix=true; \ + rm -rf apigen/metricsapi; \ + mkdir -p apigen/metricsapi; \ + find "$(METRICS_GEN_OUT)" -maxdepth 1 -type f -name '*.go' -exec mv {} apigen/metricsapi/ \; ; \ + echo "Generated Metrics models into ./apigen/metricsapi" + +.PHONY: gen-internal +gen-internal: + @set -euo pipefail; \ + rm -rf "$(INTERNAL_GEN_OUT)"; \ + docker run --rm --pull=missing \ + --user "$$(id -u):$$(id 
-g)" \ + -v "$(PWD):/local" \ + $(OAG_IMAGE) generate \ + -g go \ + --skip-validate-spec \ + --git-repo-id data-client \ + --git-user-id calypr \ + -i /local/apigen/api/internal.openapi.yaml \ + -o /local/$(INTERNAL_GEN_OUT) \ + --global-property models,modelDocs=false,modelTests=false,supportingFiles=utils.go \ + --additional-properties packageName=internalapi,enumClassPrefix=true; \ + rm -rf apigen/internalapi; \ + mkdir -p apigen/internalapi; \ + find "$(INTERNAL_GEN_OUT)" -maxdepth 1 -type f -name '*.go' -exec mv {} apigen/internalapi/ \; ; \ + echo "Generated Internal models into ./apigen/internalapi" + +.PHONY: init-schemas +init-schemas: + @git submodule update --init --recursive --depth 1 "$(SCHEMAS_SUBMODULE)" + ## tidy: Cleans up module dependencies and formats go files tidy: @echo "--> Tidying go.mod and formatting files..." @@ -66,4 +216,5 @@ clean: @echo "--> Cleaning up..." @rm -f $(BIN_DIR)/$(TARGET_NAME) @rm -f coverage.out coverage.html + @rm -rf .tmp diff --git a/backend/drs/drs.go b/backend/drs/drs.go deleted file mode 100644 index ae088a8..0000000 --- a/backend/drs/drs.go +++ /dev/null @@ -1,463 +0,0 @@ -package drs_backend - -import ( - "context" - "encoding/json" - "fmt" - "io" - "log/slog" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/calypr/data-client/backend" - "github.com/calypr/data-client/common" - drs "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/request" -) - -type DrsBackend struct { - BaseURL string - logger *slog.Logger - req request.RequestInterface -} - -func NewDrsBackend(baseURL string, logger *slog.Logger, req request.RequestInterface) backend.Backend { - return &DrsBackend{ - BaseURL: baseURL, - logger: logger, - req: req, - } -} - -func (d *DrsBackend) Name() string { - return "DRS" -} - -func (d *DrsBackend) Logger() *slog.Logger { - return d.logger -} - -func (d *DrsBackend) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { - skipAuth 
:= common.IsCloudPresignedURL(fdr.PresignedURL) - - rb := d.req.New(http.MethodGet, fdr.PresignedURL) - start, end, hasRange := resolveRange(fdr) - if hasRange { - rangeHeader := "bytes=" + strconv.FormatInt(start, 10) + "-" - if end != nil { - rangeHeader += strconv.FormatInt(*end, 10) - } - rb.WithHeader("Range", rangeHeader) - } - - if skipAuth { - rb.WithSkipAuth(true) - } - - return d.req.Do(ctx, rb) -} - -func resolveRange(fdr *common.FileDownloadResponseObject) (start int64, end *int64, ok bool) { - if fdr == nil { - return 0, nil, false - } - if fdr.RangeStart != nil { - return *fdr.RangeStart, fdr.RangeEnd, true - } - if fdr.Range > 0 { - return fdr.Range, nil, true - } - return 0, nil, false -} - -func (d *DrsBackend) buildURL(paths ...string) (string, error) { - u, err := url.Parse(d.BaseURL) - if err != nil { - return "", err - } - // path.Join collapses //, which mangles access_id if it's a URL. - // We join manually but ensure we don't end up with triple slashes if a part starts/ends with /. 
- fullPath := u.Path - for _, p := range paths { - if p == "" { - continue - } - if !strings.HasSuffix(fullPath, "/") && !strings.HasPrefix(p, "/") { - fullPath += "/" - } - fullPath += p - } - u.Path = fullPath - return u.String(), nil -} - -func (d *DrsBackend) doJSONRequest(ctx context.Context, method, url string, body interface{}, dst interface{}) error { - rb := d.req.New(method, url) - if body != nil { - if _, err := rb.WithJSONBody(body); err != nil { - return err - } - } - - resp, err := d.req.Do(ctx, rb) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - bodyBytes, _ := io.ReadAll(resp.Body) - return fmt.Errorf("request to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) - } - - if dst != nil { - return json.NewDecoder(resp.Body).Decode(dst) - } - return nil -} - -func (d *DrsBackend) GetFileDetails(ctx context.Context, guid string) (*drs.DRSObject, error) { - if oid := drs.NormalizeOid(guid); oid != "" { - if cached, ok := drs.PrefetchedBySHA(ctx, oid); ok { - obj := cached - return &obj, nil - } - if records, err := d.GetObjectByHash(ctx, "sha256", oid); err == nil && len(records) > 0 { - return &records[0], nil - } - } - - u, err := d.buildURL("ga4gh/drs/v1/objects", guid) - if err != nil { - return nil, err - } - - var obj drs.DRSObject - if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &obj); err != nil { - return nil, err - } - return &obj, nil -} - -func (d *DrsBackend) GetDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { - resolvedID := guid - - if accessID == "" { - obj, err := d.GetFileDetails(ctx, guid) - if err != nil { - return "", err - } - if strings.TrimSpace(obj.Id) != "" { - resolvedID = obj.Id - } - if len(obj.AccessMethods) == 0 { - return "", fmt.Errorf("no access methods found for object %s", guid) - } - - // Prefer one with AccessID - for _, am := range obj.AccessMethods { - if am.AccessID != "" { - accessID = am.AccessID - break - 
} - } - if accessID == "" { - if len(obj.AccessMethods) > 0 && obj.AccessMethods[0].AccessID != "" { - accessID = obj.AccessMethods[0].AccessID - } else { - if obj.AccessMethods[0].AccessURL.URL != "" { - return obj.AccessMethods[0].AccessURL.URL, nil - } - return "", fmt.Errorf("no suitable access method found for object %s", guid) - } - } - } - - if resolvedID == guid { - if oid := drs.NormalizeOid(guid); oid != "" { - if cached, ok := drs.PrefetchedBySHA(ctx, oid); ok { - if id := strings.TrimSpace(cached.Id); id != "" { - resolvedID = id - } - } - if resolvedID == guid { - if records, err := d.GetObjectByHash(ctx, "sha256", oid); err == nil && len(records) > 0 { - if id := strings.TrimSpace(records[0].Id); id != "" { - resolvedID = id - } - } - } - } - } - - u, err := d.buildURL("ga4gh/drs/v1/objects", resolvedID, "access", accessID) - if err != nil { - return "", err - } - - var accessURL drs.AccessURL - if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &accessURL); err != nil { - return "", err - } - return accessURL.URL, nil -} - -func (d *DrsBackend) GetObjectByHash(ctx context.Context, checksumType, checksum string) ([]drs.DRSObject, error) { - // Query: GET /ga4gh/drs/v1/objects/checksum/ - // Note: checksumType is ignored here as per original implementation in LocalClient relying on checksum only in path. - // Or should we use checksumType? 
- u, err := d.buildURL("ga4gh/drs/v1/objects", "checksum", checksum) - if err != nil { - return nil, err - } - - var objs []drs.DRSObject - if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &objs); err != nil { - return nil, err - } - return objs, nil -} - -func (d *DrsBackend) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { - // Custom endpoint: POST /index/bulk/hashes - // This path suggests it's mimicking Indexd API structure even if it's a DRS server - u, err := d.buildURL("index/bulk/hashes") - if err != nil { - return nil, err - } - - req := struct { - Hashes []string `json:"hashes"` - }{ - Hashes: hashes, - } - - var list struct { - Records []drs.DRSObject `json:"records"` - } - if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &list); err != nil { - return nil, err - } - - result := make(map[string][]drs.DRSObject) - for _, obj := range list.Records { - if obj.Checksums.SHA256 != "" { - result[obj.Checksums.SHA256] = append(result[obj.Checksums.SHA256], obj) - } - } - return result, nil -} - -func (d *DrsBackend) Register(ctx context.Context, obj *drs.DRSObject) (*drs.DRSObject, error) { - u, err := d.buildURL("ga4gh/drs/v1/objects/register") - if err != nil { - return nil, err - } - - req := drs.RegisterObjectsRequest{ - Candidates: []drs.DRSObjectCandidate{drs.ConvertToCandidate(obj)}, - } - - var wrapped struct { - Objects []*drs.DRSObject `json:"objects"` - } - if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &wrapped); err != nil { - return nil, err - } - if len(wrapped.Objects) == 0 { - return nil, fmt.Errorf("server returned no registered objects") - } - return wrapped.Objects[0], nil -} - -func (d *DrsBackend) BatchRegister(ctx context.Context, objs []*drs.DRSObject) ([]*drs.DRSObject, error) { - u, err := d.buildURL("ga4gh/drs/v1/objects/register") - if err != nil { - return nil, err - } - - var candidates []drs.DRSObjectCandidate - for _, obj := range objs { - candidates = 
append(candidates, drs.ConvertToCandidate(obj)) - } - req := drs.RegisterObjectsRequest{ - Candidates: candidates, - } - - var wrapped struct { - Objects []*drs.DRSObject `json:"objects"` - } - if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &wrapped); err != nil { - return nil, err - } - return wrapped.Objects, nil -} - -func (d *DrsBackend) GetUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { - // Hits the server's clean /data/upload/{file_id} endpoint - u, err := d.buildURL("data/upload", guid) - if err != nil { - return "", err - } - // Add filename/bucket hints - q := url.Values{} - if strings.TrimSpace(filename) != "" { - q.Set("file_name", filename) - } - - // Evaluate bucket from argument or struct - effectiveBucket := bucket - if effectiveBucket != "" { - q.Set("bucket", effectiveBucket) - } - - if encoded := q.Encode(); encoded != "" { - u += "?" + encoded - } - - var res struct { - URL string `json:"url"` - } - if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &res); err != nil { - return "", err - } - return res.URL, nil -} - -func (d *DrsBackend) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { - u, err := d.buildURL("data/multipart/init") - if err != nil { - return nil, err - } - - req := struct { - GUID string `json:"guid,omitempty"` - FileName string `json:"file_name,omitempty"` - Bucket string `json:"bucket,omitempty"` - }{ - GUID: guid, - FileName: filename, - Bucket: bucket, - } - - var res struct { - GUID string `json:"guid"` - UploadID string `json:"uploadId"` - } - if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &res); err != nil { - return nil, err - } - if res.UploadID == "" { - return nil, fmt.Errorf("server did not return uploadId") - } - - return &common.MultipartUploadInit{ - GUID: res.GUID, - UploadID: res.UploadID, - }, nil -} - -func (d *DrsBackend) 
GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { - u, err := d.buildURL("data/multipart/upload") - if err != nil { - return "", err - } - - req := struct { - Key string `json:"key"` - Bucket string `json:"bucket,omitempty"` - UploadID string `json:"uploadId"` - PartNumber int32 `json:"partNumber"` - }{ - Key: key, - Bucket: bucket, - UploadID: uploadID, - PartNumber: partNumber, - } - - var res struct { - PresignedURL string `json:"presigned_url"` - } - if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &res); err != nil { - return "", err - } - if res.PresignedURL == "" { - return "", fmt.Errorf("server did not return presigned_url") - } - return res.PresignedURL, nil -} - -func (d *DrsBackend) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { - u, err := d.buildURL("data/multipart/complete") - if err != nil { - return err - } - - reqParts := make([]struct { - PartNumber int32 `json:"PartNumber"` - ETag string `json:"ETag"` - }, len(parts)) - for i, p := range parts { - reqParts[i] = struct { - PartNumber int32 `json:"PartNumber"` - ETag string `json:"ETag"` - }{ - PartNumber: p.PartNumber, - ETag: p.ETag, - } - } - - req := struct { - Key string `json:"key"` - Bucket string `json:"bucket,omitempty"` - UploadID string `json:"uploadId"` - Parts any `json:"parts"` - }{ - Key: key, - Bucket: bucket, - UploadID: uploadID, - Parts: reqParts, - } - - return d.doJSONRequest(ctx, http.MethodPost, u, req, nil) -} - -func (d *DrsBackend) doUpload(ctx context.Context, url string, body io.Reader, size int64) (string, error) { - rb := d.req.New(http.MethodPut, url). - WithBody(body). 
- WithSkipAuth(true) // S3 presigned URLs don't need our bearer token - if size > 0 { - rb.PartSize = size - } - - resp, err := d.req.Do(ctx, rb) - if err != nil { - return "", fmt.Errorf("upload to %s failed: %w", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - bodyBytes, _ := io.ReadAll(resp.Body) - return "", fmt.Errorf("upload to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) - } - - return strings.Trim(resp.Header.Get("ETag"), `"`), nil -} - -func (d *DrsBackend) Upload(ctx context.Context, url string, body io.Reader, size int64) error { - _, err := d.doUpload(ctx, url, body, size) - return err -} - -func (d *DrsBackend) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { - etag, err := d.doUpload(ctx, url, body, size) - if err != nil { - return "", err - } - if etag == "" { - return "", fmt.Errorf("multipart upload part returned empty ETag") - } - return etag, nil -} diff --git a/backend/gen3/gen3.go b/backend/gen3/gen3.go deleted file mode 100644 index 4213601..0000000 --- a/backend/gen3/gen3.go +++ /dev/null @@ -1,296 +0,0 @@ -package gen3 - -import ( - "context" - "encoding/json" - "fmt" - "io" - "log/slog" - "net/http" - "strconv" - "strings" - - "github.com/calypr/data-client/backend" - "github.com/calypr/data-client/common" - drs "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/fence" - "github.com/calypr/data-client/g3client" - "github.com/calypr/data-client/request" -) - -type Gen3Backend struct { - client g3client.Gen3Interface -} - -func NewGen3Backend(client g3client.Gen3Interface) backend.Backend { - return &Gen3Backend{ - client: client, - } -} - -func (g *Gen3Backend) Name() string { - return "Gen3" -} - -func (g *Gen3Backend) Logger() *slog.Logger { - return g.client.Logger().Logger -} - -func (g *Gen3Backend) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { - skipAuth := 
common.IsCloudPresignedURL(fdr.PresignedURL) - - rb := g.client.New(http.MethodGet, fdr.PresignedURL) - start, end, hasRange := resolveRange(fdr) - if hasRange { - rangeHeader := "bytes=" + strconv.FormatInt(start, 10) + "-" - if end != nil { - rangeHeader += strconv.FormatInt(*end, 10) - } - rb.WithHeader("Range", rangeHeader) - } - - if skipAuth { - rb.WithSkipAuth(true) - } - - return g.client.Do(ctx, rb) -} - -func resolveRange(fdr *common.FileDownloadResponseObject) (start int64, end *int64, ok bool) { - if fdr == nil { - return 0, nil, false - } - if fdr.RangeStart != nil { - return *fdr.RangeStart, fdr.RangeEnd, true - } - if fdr.Range > 0 { - return fdr.Range, nil, true - } - return 0, nil, false -} - -func (g *Gen3Backend) GetFileDetails(ctx context.Context, guid string) (*drs.DRSObject, error) { - if oid := drs.NormalizeOid(guid); oid != "" { - if cached, ok := drs.PrefetchedBySHA(ctx, oid); ok { - obj := cached - return &obj, nil - } - if recs, err := g.client.Indexd().GetObjectByHash(ctx, "sha256", oid); err == nil && len(recs) > 0 { - return &recs[0], nil - } - } - - return g.client.Indexd().GetObject(ctx, guid) -} - -func (g *Gen3Backend) GetObjectByHash(ctx context.Context, checksumType, checksum string) ([]drs.DRSObject, error) { - return g.client.Indexd().GetObjectByHash(ctx, checksumType, checksum) -} - -func (g *Gen3Backend) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { - return g.client.Indexd().BatchGetObjectsByHash(ctx, hashes) -} - -func (g *Gen3Backend) GetDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { - // Try Fence first. - url, err := g.client.Fence().GetDownloadPresignedUrl(ctx, guid, accessID) - if err == nil && url != "" { - return url, nil - } - - // Resolve access type for Indexd. 
- accessType := "s3" // default - if strings.Contains(accessID, "protocol=") { - parts := strings.Split(accessID, "=") - if len(parts) > 1 { - accessType = parts[len(parts)-1] - } - } else if accessID != "" { - accessType = accessID - } - - // Checksum-first resolution. - resolvedID := guid - if oid := drs.NormalizeOid(guid); oid != "" { - if cached, ok := drs.PrefetchedBySHA(ctx, oid); ok { - if id := strings.TrimSpace(cached.Id); id != "" { - resolvedID = id - } - } - if resolvedID == guid { - if recs, hashErr := g.client.Indexd().GetObjectByHash(ctx, "sha256", oid); hashErr == nil && len(recs) > 0 { - if id := strings.TrimSpace(recs[0].Id); id != "" { - resolvedID = id - } - } - } - } - - resp, errIdx := g.client.Indexd().GetDownloadURL(ctx, resolvedID, accessType) - if errIdx == nil && resp != nil && resp.URL != "" { - return resp.URL, nil - } - - if err != nil { - return "", err - } - if errIdx != nil { - return "", errIdx - } - return "", fmt.Errorf("failed to resolve download URL for %s", guid) -} - -func (g *Gen3Backend) Register(ctx context.Context, obj *drs.DRSObject) (*drs.DRSObject, error) { - return g.client.Indexd().RegisterRecord(ctx, obj) -} - -func (g *Gen3Backend) BatchRegister(ctx context.Context, objs []*drs.DRSObject) ([]*drs.DRSObject, error) { - return g.client.Indexd().RegisterRecords(ctx, objs) -} - -// ShepherdInitRequestObject copied from upload/types.go to avoid circular dependency -type ShepherdInitRequestObject struct { - Filename string `json:"file_name"` - Authz ShepherdAuthz `json:"authz"` - Aliases []string `json:"aliases"` - Metadata map[string]any `json:"metadata"` -} - -type ShepherdAuthz struct { - Version string `json:"version"` - ResourcePaths []string `json:"resource_paths"` -} - -type PresignedURLResponse struct { - GUID string `json:"guid"` - URL string `json:"upload_url"` -} - -func (g *Gen3Backend) GetUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) 
{ - hasShepherd, err := g.client.Fence().CheckForShepherdAPI(ctx) - if err != nil || !hasShepherd { - // Fallback to Fence - var msg fence.FenceResponse - - if guid != "" { - msg, err = g.client.Fence().GetUploadPresignedUrl(ctx, guid, filename, bucket) - } else { - // Init upload if no GUID - msg, err = g.client.Fence().InitUpload(ctx, filename, bucket, "") - } - - if err != nil { - return "", err - } - if msg.URL == "" { - return "", fmt.Errorf("error in generating presigned URL for %s", filename) - } - return msg.URL, nil - } - - // Shepherd Logic - shepherdPayload := ShepherdInitRequestObject{ - Filename: filename, - Authz: ShepherdAuthz{ - Version: "0", ResourcePaths: metadata.Authz, - }, - Aliases: metadata.Aliases, - Metadata: metadata.Metadata, - } - - reader, err := common.ToJSONReader(shepherdPayload) - if err != nil { - return "", err - } - - cred := g.client.GetCredential() - r, err := g.client.Fence().Do( - ctx, - &request.RequestBuilder{ - Url: cred.APIEndpoint + common.ShepherdEndpoint + "/objects", - Method: http.MethodPost, - Body: reader, - Token: cred.AccessToken, - }) - if err != nil { - return "", fmt.Errorf("shepherd upload init failed: %w", err) - } - defer r.Body.Close() - - if r.StatusCode != http.StatusCreated && r.StatusCode != http.StatusOK { - return "", fmt.Errorf("shepherd upload init failed with status %d", r.StatusCode) - } - - var res PresignedURLResponse - if err := json.NewDecoder(r.Body).Decode(&res); err != nil { - return "", err - } - return res.URL, nil -} - -func (g *Gen3Backend) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { - res, err := g.client.Fence().InitMultipartUpload(ctx, filename, bucket, guid) - if err != nil { - return nil, err - } - if res.UploadID == "" { - return nil, fmt.Errorf("fence multipart init did not return uploadId") - } - return &common.MultipartUploadInit{ - GUID: res.GUID, - UploadID: res.UploadID, - }, nil -} - -func (g 
*Gen3Backend) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { - return g.client.Fence().GenerateMultipartPresignedURL(ctx, key, uploadID, int(partNumber), bucket) -} - -func (g *Gen3Backend) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { - fParts := make([]fence.MultipartPart, len(parts)) - for i, p := range parts { - fParts[i] = fence.MultipartPart{ - PartNumber: int(p.PartNumber), - ETag: p.ETag, - } - } - return g.client.Fence().CompleteMultipartUpload(ctx, key, uploadID, fParts, bucket) -} - -func (g *Gen3Backend) doUpload(ctx context.Context, url string, body io.Reader, size int64) (string, error) { - rb := g.client.New(http.MethodPut, url). - WithBody(body). - WithSkipAuth(true) - if size > 0 { - rb.PartSize = size - } - - resp, err := g.client.Do(ctx, rb) - if err != nil { - return "", fmt.Errorf("upload to %s failed: %w", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - bodyBytes, _ := io.ReadAll(resp.Body) - return "", fmt.Errorf("upload to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) - } - - return strings.Trim(resp.Header.Get("ETag"), `"`), nil -} - -func (g *Gen3Backend) Upload(ctx context.Context, url string, body io.Reader, size int64) error { - _, err := g.doUpload(ctx, url, body, size) - return err -} - -func (g *Gen3Backend) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { - etag, err := g.doUpload(ctx, url, body, size) - if err != nil { - return "", err - } - if etag == "" { - return "", fmt.Errorf("multipart upload part returned empty ETag") - } - return etag, nil -} diff --git a/backend/interface.go b/backend/interface.go deleted file mode 100644 index 20b1aa1..0000000 --- a/backend/interface.go +++ /dev/null @@ -1,63 +0,0 @@ -package backend - -import ( - "context" - "io" - "log/slog" - 
"net/http" - - "github.com/calypr/data-client/common" - drs "github.com/calypr/data-client/drs" -) - -// Backend abstract the interaction with underlying data service (Gen3 or standard DRS) -type Backend interface { - Name() string - Logger() *slog.Logger - - // --- Read Operations --- - - // GetFileDetails retrieves the DRS object for a given GUID/DID - GetFileDetails(ctx context.Context, guid string) (*drs.DRSObject, error) - - // GetObjectByHash retrieves objects matching a checksum - GetObjectByHash(ctx context.Context, checksumType, checksum string) ([]drs.DRSObject, error) - - // BatchGetObjectsByHash retrieves objects matching a list of hashes - BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) - - // GetDownloadURL retrieves a signed URL for downloading the file content - // accessID is optional (used for DRS objects with multiple access methods) - GetDownloadURL(ctx context.Context, guid string, accessID string) (string, error) - - // Download performs the HTTP GET for the file content using the backend's preferred request engine. - Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) - - // --- Write Operations --- - - // Register registers a new file metadata record. - // Returns the registered object (with populated GUID/DID if it was new) - Register(ctx context.Context, obj *drs.DRSObject) (*drs.DRSObject, error) - - // BatchRegister registers multiple file metadata records. - BatchRegister(ctx context.Context, objs []*drs.DRSObject) ([]*drs.DRSObject, error) - - // GetUploadURL retrieves a presigned URL for uploading file content. - // implementation handles provider-specific logic (e.g. Fence vs Shepherd vs DRS-Upload) - GetUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) - - // InitMultipartUpload initializes multipart upload and returns upload metadata. 
- InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) - - // GetMultipartUploadURL retrieves a presigned URL for an individual multipart part. - GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) - - // CompleteMultipartUpload finalizes a multipart upload with uploaded parts. - CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error - - // Upload performs the HTTP PUT for the file content to the presigned URL. - Upload(ctx context.Context, url string, body io.Reader, size int64) error - - // UploadPart performs a multipart upload part PUT and returns the ETag. - UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) -} diff --git a/cmd/auth.go b/cmd/auth.go index 6e0398a..156030d 100644 --- a/cmd/auth.go +++ b/cmd/auth.go @@ -32,14 +32,14 @@ func init() { log.Fatalf("Fatal NewGen3Interface error: %s\n", err) } - resourceAccess, err := g3i.Fence().CheckPrivileges(context.Background()) + resourceAccess, err := g3i.FenceClient().CheckPrivileges(context.Background()) if err != nil { g3i.Logger().Fatalf("Fatal authentication error: %s\n", err) } else { if len(resourceAccess) == 0 { - g3i.Logger().Printf("\nYou don't currently have access to any resources at %s\n", g3i.GetCredential().APIEndpoint) + g3i.Logger().Printf("\nYou don't currently have access to any resources at %s\n", g3i.Credentials().Current().APIEndpoint) } else { - g3i.Logger().Printf("\nYou have access to the following resource(s) at %s:\n", g3i.GetCredential().APIEndpoint) + g3i.Logger().Printf("\nYou have access to the following resource(s) at %s:\n", g3i.Credentials().Current().APIEndpoint) // Sort by resource name resources := make([]string, 0, len(resourceAccess)) diff --git a/cmd/collaborator.go b/cmd/collaborator.go index 7fc1528..c307423 100644 --- 
a/cmd/collaborator.go +++ b/cmd/collaborator.go @@ -52,15 +52,15 @@ func getRequestorClient() (requestor.RequestorInterface, func()) { // Initialize logger logger, logCloser := logs.New(profile) - // Initialize Gen3Interface handles selective initialization - g3i, err := g3client.NewGen3Interface(profile, logger, g3client.WithClients(g3client.RequestorClient)) + // Initialize base Gen3 interface and build requestor client from it. + g3i, err := g3client.NewGen3Interface(profile, logger) if err != nil { fmt.Printf("Error accessing Gen3: %v\n", err) logCloser() os.Exit(1) } - return g3i.Requestor(), logCloser + return requestor.NewRequestorClient(g3i, g3i.Credentials().Current()), logCloser } var collaboratorListCmd = &cobra.Command{ diff --git a/cmd/configure.go b/cmd/configure.go index b6eb564..6305696 100644 --- a/cmd/configure.go +++ b/cmd/configure.go @@ -51,7 +51,7 @@ func init() { } g3i := g3client.NewGen3InterfaceFromCredential(cred, logger, g3client.WithClients()) - err := g3i.ExportCredential(context.Background(), cred) + err := g3i.Credentials().Export(context.Background(), cred) if err != nil { logger.Println(err.Error()) } diff --git a/cmd/delete.go b/cmd/delete.go index 4589577..dc9171e 100644 --- a/cmd/delete.go +++ b/cmd/delete.go @@ -29,7 +29,7 @@ If no profile is specified, "default" profile is used for authentication.`, logger.Fatalf("Fatal NewGen3Interface error: %s\n", err) } - msg, err := g3i.Fence().DeleteRecord(context.Background(), guid) + msg, err := g3i.FenceClient().DeleteRecord(context.Background(), guid) if err != nil { logger.Fatal(err) } diff --git a/cmd/download-multiple.go b/cmd/download-multiple.go index 01d5c47..ffdb3aa 100644 --- a/cmd/download-multiple.go +++ b/cmd/download-multiple.go @@ -7,13 +7,13 @@ import ( "log" "os" - "github.com/calypr/data-client/backend" - drsbackend "github.com/calypr/data-client/backend/drs" - gen3backend "github.com/calypr/data-client/backend/gen3" "github.com/calypr/data-client/common" 
"github.com/calypr/data-client/download" + "github.com/calypr/data-client/drs" "github.com/calypr/data-client/g3client" + "github.com/calypr/data-client/localclient" "github.com/calypr/data-client/logs" + "github.com/calypr/data-client/transfer" "github.com/vbauerster/mpb/v8" "github.com/vbauerster/mpb/v8/decor" @@ -41,22 +41,35 @@ func init() { logger, logCloser := logs.New(profile, logs.WithConsole(), logs.WithFailedLog(), logs.WithScoreboard(), logs.WithSucceededLog()) defer logCloser() - g3i, err := g3client.NewGen3Interface(profile, logger) - if err != nil { - log.Fatalf("Failed to parse config on profile %s, %v", profile, err) + var dc drs.Client + var bk transfer.Backend + if backendType == "drs" { + lc, err := localclient.NewLocalInterface(profile, logger) + if err != nil { + log.Fatalf("Failed to parse config on profile %s, %v", profile, err) + } + dc = lc.DRSClient() + bk = lc.DRSClient() + } else { + g3i, err := g3client.NewGen3Interface(profile, logger) + if err != nil { + log.Fatalf("Failed to parse config on profile %s, %v", profile, err) + } + dc = g3i.DRSClient() + bk = g3i.DRSClient() } manifestPath, _ = common.GetAbsolutePath(manifestPath) manifestFile, err := os.Open(manifestPath) if err != nil { - g3i.Logger().Fatalf("Failed to open manifest file %s, %v\n", manifestPath, err) + logger.Fatalf("Failed to open manifest file %s, %v\n", manifestPath, err) } defer manifestFile.Close() manifestFileStat, err := manifestFile.Stat() if err != nil { - g3i.Logger().Fatalf("Failed to get manifest file stats %s, %v\n", manifestPath, err) + logger.Fatalf("Failed to get manifest file stats %s, %v\n", manifestPath, err) } - g3i.Logger().Println("Reading manifest...") + logger.Println("Reading manifest...") manifestFileSize := manifestFileStat.Size() manifestProgress := mpb.New(mpb.WithOutput(os.Stdout)) manifestFileBar := manifestProgress.AddBar(manifestFileSize, @@ -71,27 +84,19 @@ func init() { manifestBytes, err := io.ReadAll(manifestFileReader) if err != nil 
{ - g3i.Logger().Fatalf("Failed reading manifest %s, %v\n", manifestPath, err) + logger.Fatalf("Failed reading manifest %s, %v\n", manifestPath, err) } manifestProgress.Wait() var objects []common.ManifestObject err = json.Unmarshal(manifestBytes, &objects) if err != nil { - g3i.Logger().Fatalf("Error has occurred during unmarshalling manifest object: %v\n", err) - } - - var bk backend.Backend - if backendType == "drs" { - cred := g3i.GetCredential() - // Use the API endpoint from the profile as the DRS server URL - bk = drsbackend.NewDrsBackend(cred.APIEndpoint, logger.Logger, g3i) - } else { - bk = gen3backend.NewGen3Backend(g3i) + logger.Fatalf("Error has occurred during unmarshalling manifest object: %v\n", err) } err = download.DownloadMultiple( context.Background(), + dc, bk, objects, downloadPath, @@ -103,7 +108,7 @@ func init() { skipCompleted, ) if err != nil { - g3i.Logger().Fatal(err.Error()) + logger.Fatal(err.Error()) } }, } diff --git a/cmd/download-single.go b/cmd/download-single.go index b94ea01..69bf48b 100644 --- a/cmd/download-single.go +++ b/cmd/download-single.go @@ -4,13 +4,13 @@ import ( "context" "log" - "github.com/calypr/data-client/backend" - drsbackend "github.com/calypr/data-client/backend/drs" - gen3backend "github.com/calypr/data-client/backend/gen3" "github.com/calypr/data-client/common" "github.com/calypr/data-client/download" + "github.com/calypr/data-client/drs" "github.com/calypr/data-client/g3client" + "github.com/calypr/data-client/localclient" "github.com/calypr/data-client/logs" + "github.com/calypr/data-client/transfer" "github.com/spf13/cobra" ) @@ -35,17 +35,22 @@ func init() { logger, logCloser := logs.New(profile, logs.WithConsole(), logs.WithFailedLog(), logs.WithSucceededLog(), logs.WithScoreboard()) defer logCloser() - g3I, err := g3client.NewGen3Interface(profile, logger) - if err != nil { - log.Fatalf("Failed to parse config on profile %s, %v", profile, err) - } - - var bk backend.Backend + var dc drs.Client + var 
bk transfer.Backend if backendType == "drs" { - cred := g3I.GetCredential() - bk = drsbackend.NewDrsBackend(cred.APIEndpoint, logger.Logger, g3I) + lc, err := localclient.NewLocalInterface(profile, logger) + if err != nil { + log.Fatalf("Failed to parse config on profile %s, %v", profile, err) + } + dc = lc.DRSClient() + bk = lc.DRSClient() } else { - bk = gen3backend.NewGen3Backend(g3I) + g3I, err := g3client.NewGen3Interface(profile, logger) + if err != nil { + log.Fatalf("Failed to parse config on profile %s, %v", profile, err) + } + dc = g3I.DRSClient() + bk = g3I.DRSClient() } objects := []common.ManifestObject{ @@ -53,8 +58,9 @@ func init() { GUID: guid, }, } - err = download.DownloadMultiple( + err := download.DownloadMultiple( context.Background(), + dc, bk, objects, downloadPath, @@ -66,7 +72,7 @@ func init() { skipCompleted, ) if err != nil { - g3I.Logger().Println(err.Error()) + logger.Println(err.Error()) } }, } diff --git a/cmd/retry-upload.go b/cmd/retry-upload.go index 042922b..bdc73c3 100644 --- a/cmd/retry-upload.go +++ b/cmd/retry-upload.go @@ -32,6 +32,7 @@ func init() { if err != nil { Logger.Fatalf("Failed to initialize client: %v", err) } + bk := g3.DRSClient() logger := g3.Logger() @@ -44,7 +45,7 @@ func init() { logger.Fatalf("Cannot read failed log: %v", err) } - upload.RetryFailedUploads(context.Background(), g3, failedMap) + upload.RetryFailedUploads(context.Background(), bk, logger, failedMap) sb.PrintSB() }, } diff --git a/cmd/upload-multipart.go b/cmd/upload-multipart.go index 5f020e5..e80983f 100644 --- a/cmd/upload-multipart.go +++ b/cmd/upload-multipart.go @@ -43,6 +43,7 @@ This method is resilient to network interruptions and supports resume capability if err != nil { logger.Fatalf("failed to initialize Gen3 interface: %v", err) } + bk := g3.DRSClient() absPath, err := common.GetAbsolutePath(filePath) if err != nil { @@ -62,7 +63,7 @@ This method is resilient to network interruptions and supports resume capability } defer 
file.Close() - err = upload.MultipartUpload(context.Background(), g3, fileInfo, file, true) + err = upload.MultipartUpload(context.Background(), bk, fileInfo, file, true) if err != nil { logger.Fatal(err) } diff --git a/cmd/upload-multiple.go b/cmd/upload-multiple.go index 99e58ff..24aff9b 100644 --- a/cmd/upload-multiple.go +++ b/cmd/upload-multiple.go @@ -45,9 +45,10 @@ Options to run multipart uploads for large files and parallel batch uploading ar if err != nil { logger.Fatalf("Failed to parse config on profile %s: %v", profile, err) } + bk := g3i.DRSClient() // Basic config validation - profileConfig := g3i.GetCredential() + profileConfig := g3i.Credentials().Current() if profileConfig.APIEndpoint == "" { logger.Fatal("No APIEndpoint found in configuration. Run \"./data-client configure\" first.") } @@ -100,7 +101,7 @@ Options to run multipart uploads for large files and parallel batch uploading ar } // Classify single vs multipart - single, multi := upload.SeparateSingleAndMultipartUploads(g3i, requests) + single, multi := upload.SeparateSingleAndMultipartUploads(bk, requests) // Upload single-part files if batch { @@ -110,16 +111,16 @@ Options to run multipart uploads for large files and parallel batch uploading ar if len(batchFURObjects) < workers { batchFURObjects = append(batchFURObjects, furObject) } else { - upload.BatchUpload(ctx, g3i, batchFURObjects, workers, respCh, errCh, bucketName) + upload.BatchUpload(ctx, bk, logger, batchFURObjects, workers, respCh, errCh, bucketName) batchFURObjects = []common.FileUploadRequestObject{furObject} } if i == len(single)-1 && len(batchFURObjects) > 0 { - upload.BatchUpload(ctx, g3i, batchFURObjects, workers, respCh, errCh, bucketName) + upload.BatchUpload(ctx, bk, logger, batchFURObjects, workers, respCh, errCh, bucketName) } } } else { for _, req := range single { - upload.UploadSingle(ctx, g3i, req, true) + upload.UploadSingle(ctx, bk, logger, req, true) } } @@ -132,7 +133,7 @@ Options to run multipart uploads 
for large files and parallel batch uploading ar continue } - err = upload.MultipartUpload(ctx, g3i, req, file, true) + err = upload.MultipartUpload(ctx, bk, req, file, true) if err != nil { logger.Println("Multipart upload failed:", err) } @@ -142,7 +143,7 @@ Options to run multipart uploads for large files and parallel batch uploading ar if len(logger.GetSucceededLogMap()) == 0 { failed := logger.GetFailedLogMap() if len(failed) > 0 { - upload.RetryFailedUploads(ctx, g3i, failed) + upload.RetryFailedUploads(ctx, bk, logger, failed) } } diff --git a/cmd/upload-single.go b/cmd/upload-single.go index 34eb9ba..d1cb0e1 100644 --- a/cmd/upload-single.go +++ b/cmd/upload-single.go @@ -31,6 +31,7 @@ func init() { if err != nil { log.Fatalf("Failed to parse config on profile %s: %v", profile, err) } + bk := g3i.DRSClient() req := common.FileUploadRequestObject{ SourcePath: filePath, @@ -38,7 +39,7 @@ func init() { Bucket: bucketName, GUID: guid, } - err = upload.UploadSingle(context.Background(), g3i, req, true) + err = upload.UploadSingle(context.Background(), bk, logger, req, true) if err != nil { log.Fatalln(err.Error()) } diff --git a/cmd/upload.go b/cmd/upload.go index a99fdc0..6d123f8 100644 --- a/cmd/upload.go +++ b/cmd/upload.go @@ -41,10 +41,11 @@ func init() { if err != nil { log.Fatalf("Failed to parse config on profile %s, %v", profile, err) } + bk := g3i.DRSClient() logger := g3i.Logger() if hasMetadata { - hasShepherd, err := g3i.Fence().CheckForShepherdAPI(ctx) + hasShepherd, err := g3i.FenceClient().CheckForShepherdAPI(ctx) if err != nil { logger.Printf("WARNING: Error when checking for Shepherd API: %v", err) } else { @@ -93,7 +94,7 @@ func init() { return } - singlePartObjects, multipartObjects := upload.SeparateSingleAndMultipartUploads(g3i, uploadRequestObjects) + singlePartObjects, multipartObjects := upload.SeparateSingleAndMultipartUploads(bk, uploadRequestObjects) if batch { workers, respCh, errCh, batchFURObjects := 
upload.InitBatchUploadChannels(numParallel, len(singlePartObjects)) @@ -102,12 +103,12 @@ func init() { if len(batchFURObjects) < workers { batchFURObjects = append(batchFURObjects, furObject) } else { - upload.BatchUpload(ctx, g3i, batchFURObjects, workers, respCh, errCh, bucketName) + upload.BatchUpload(ctx, bk, Logger, batchFURObjects, workers, respCh, errCh, bucketName) batchFURObjects = []common.FileUploadRequestObject{furObject} } } if len(batchFURObjects) > 0 { - upload.BatchUpload(ctx, g3i, batchFURObjects, workers, respCh, errCh, bucketName) + upload.BatchUpload(ctx, bk, Logger, batchFURObjects, workers, respCh, errCh, bucketName) } if len(errCh) > 0 { @@ -133,12 +134,12 @@ func init() { logger.Println("File stat error for file" + fi.Name() + ", file may be missing or unreadable because of permissions.\n") continue } - upload.UploadSingle(ctx, g3i, furObject, true) + upload.UploadSingle(ctx, bk, Logger, furObject, true) } } if len(multipartObjects) > 0 { - cred := g3i.GetCredential() + cred := g3i.Credentials().Current() if cred.UseShepherd == "true" || cred.UseShepherd == "" && common.DefaultUseShepherd == true { logger.Printf("error: Shepherd currently does not support multipart uploads. 
For the moment, please disable Shepherd with\n $ data-client configure --profile=%v --use-shepherd=false\nand try again", cred.Profile) @@ -152,7 +153,7 @@ func init() { logger.Println("File open error: " + err.Error()) continue } - err = upload.MultipartUpload(ctx, g3i, furObject, file, true) + err = upload.MultipartUpload(ctx, bk, furObject, file, true) if err != nil { g3i.Logger().Println(err.Error()) } else { @@ -161,7 +162,7 @@ func init() { } } if len(g3i.Logger().GetSucceededLogMap()) == 0 { - upload.RetryFailedUploads(ctx, g3i, g3i.Logger().GetFailedLogMap()) + upload.RetryFailedUploads(ctx, bk, Logger, g3i.Logger().GetFailedLogMap()) } g3i.Logger().Scoreboard().PrintSB() }, diff --git a/common/types.go b/common/types.go index 1684528..54b67d3 100644 --- a/common/types.go +++ b/common/types.go @@ -73,3 +73,21 @@ type ManifestObject struct { Title string `json:"title"` Size int64 `json:"size"` } + +// ShepherdInitRequestObject represents the payload sent to Shepherd +type ShepherdInitRequestObject struct { + Filename string `json:"file_name"` + Authz ShepherdAuthz `json:"authz"` + Aliases []string `json:"aliases"` + Metadata map[string]any `json:"metadata"` +} + +type ShepherdAuthz struct { + Version string `json:"version"` + ResourcePaths []string `json:"resource_paths"` +} + +type PresignedURLResponse struct { + GUID string `json:"guid"` + URL string `json:"upload_url"` +} diff --git a/conf/config.go b/conf/config.go index 6c40967..9e59eea 100644 --- a/conf/config.go +++ b/conf/config.go @@ -25,6 +25,8 @@ type Credential struct { APIEndpoint string UseShepherd string MinShepherdVersion string + Bucket string + ProjectID string } type Manager struct { @@ -127,6 +129,8 @@ func (man *Manager) Load(profile string) (*Credential, error) { APIEndpoint: sec.Key("api_endpoint").String(), UseShepherd: sec.Key("use_shepherd").String(), MinShepherdVersion: sec.Key("min_shepherd_version").String(), + Bucket: sec.Key("bucket").String(), + ProjectID: 
sec.Key("project_id").String(), } if profileConfig.KeyID == "" && profileConfig.APIKey == "" && profileConfig.AccessToken == "" { @@ -178,6 +182,8 @@ func (man *Manager) Save(profileConfig *Credential) error { section.Key("use_shepherd").SetValue(profileConfig.UseShepherd) section.Key("min_shepherd_version").SetValue(profileConfig.MinShepherdVersion) + section.Key("bucket").SetValue(profileConfig.Bucket) + section.Key("project_id").SetValue(profileConfig.ProjectID) err = cfg.SaveTo(configPath) if err != nil { errs := fmt.Errorf("error occurred when saving config file: %s", err.Error()) diff --git a/credentials/interface.go b/credentials/interface.go new file mode 100644 index 0000000..e827c0a --- /dev/null +++ b/credentials/interface.go @@ -0,0 +1,18 @@ +package credentials + +import ( + "context" + + "github.com/calypr/data-client/conf" +) + +// Reader exposes current in-memory credential state. +type Reader interface { + Current() *conf.Credential +} + +// Manager exposes read and export operations for credentials. 
+type Manager interface { + Reader + Export(ctx context.Context, cred *conf.Credential) error +} diff --git a/download/batch.go b/download/batch.go index 9159cef..bdd6605 100644 --- a/download/batch.go +++ b/download/batch.go @@ -9,9 +9,9 @@ import ( "sync" "sync/atomic" - "github.com/calypr/data-client/backend" "github.com/calypr/data-client/common" "github.com/calypr/data-client/logs" + "github.com/calypr/data-client/transfer" "github.com/hashicorp/go-multierror" "github.com/vbauerster/mpb/v8" "github.com/vbauerster/mpb/v8/decor" @@ -21,7 +21,7 @@ import ( // downloadFiles performs bounded parallel downloads and collects ALL errors func downloadFiles( ctx context.Context, - bk backend.Backend, + bk transfer.Downloader, files []common.FileDownloadResponseObject, numParallel int, protocol string, @@ -38,7 +38,7 @@ func downloadFiles( } // Scoreboard: maxRetries = 0 for now (no retry logic yet) - sb := logs.NewSB(0, logger) + sb := logs.NewSB(0, logger.Logger) progress := common.GetProgress(ctx) useProgressBars := (progress == nil) diff --git a/download/downloader.go b/download/downloader.go index 7e3fc73..d7fcbef 100644 --- a/download/downloader.go +++ b/download/downloader.go @@ -7,8 +7,9 @@ import ( "os" "strings" - "github.com/calypr/data-client/backend" "github.com/calypr/data-client/common" + "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/transfer" "github.com/vbauerster/mpb/v8" "github.com/vbauerster/mpb/v8/decor" ) @@ -16,7 +17,8 @@ import ( // DownloadMultiple is the public entry point called from g3cmd func DownloadMultiple( ctx context.Context, - bk backend.Backend, + dc drs.Client, + bk transfer.Downloader, objects []common.ManifestObject, downloadPath string, filenameFormat string, @@ -52,7 +54,7 @@ func DownloadMultiple( } // === Warnings and user confirmation === - if err := handleWarningsAndConfirmation(ctx, logger, downloadPath, filenameFormat, rename, noPrompt); err != nil { + if err := handleWarningsAndConfirmation(ctx, 
logger.Logger, downloadPath, filenameFormat, rename, noPrompt); err != nil { return err // aborted by user } @@ -62,7 +64,7 @@ func DownloadMultiple( } // === Prepare files (metadata + local validation) === - toDownload, skipped, renamed, err := prepareFiles(ctx, bk, objects, downloadPath, filenameFormat, rename, skipCompleted, protocol) + toDownload, skipped, renamed, err := prepareFiles(ctx, dc, bk, objects, downloadPath, filenameFormat, rename, skipCompleted, protocol) if err != nil { return err } @@ -77,8 +79,8 @@ func DownloadMultiple( // === Final summary === logger.InfoContext(ctx, fmt.Sprintf("%d files downloaded successfully.", downloaded)) - printRenamed(ctx, logger, renamed) - printSkipped(ctx, logger, skipped) + printRenamed(ctx, logger.Logger, renamed) + printSkipped(ctx, logger.Logger, skipped) if downloadErr != nil { logger.WarnContext(ctx, "Some downloads failed. See errors above.") @@ -109,7 +111,8 @@ func handleWarningsAndConfirmation(ctx context.Context, logger *slog.Logger, dow // prepareFiles gathers metadata, checks local files, collects skips/renames func prepareFiles( ctx context.Context, - bk backend.Backend, + dc drs.Client, + bk transfer.Downloader, objects []common.ManifestObject, downloadPath, filenameFormat string, rename, skipCompleted bool, @@ -137,7 +140,7 @@ func prepareFiles( var err error if info.Name == "" || info.Size == 0 { // Very strict object id checking - info, err = GetFileInfo(ctx, bk, obj.GUID, protocol, downloadPath, filenameFormat, rename, &renamed) + info, err = GetFileInfo(ctx, dc, logger, obj.GUID, protocol, downloadPath, filenameFormat, rename, &renamed) if err != nil { return nil, nil, nil, err } @@ -150,7 +153,7 @@ func prepareFiles( } if !rename { - validateLocalFileStat(logger, &fdr, int64(info.Size), skipCompleted) + validateLocalFileStat(logger.Logger, &fdr, int64(info.Size), skipCompleted) } if fdr.Skip { diff --git a/download/file_info.go b/download/file_info.go index 58f955e..54cf92b 100644 --- 
a/download/file_info.go +++ b/download/file_info.go @@ -3,27 +3,31 @@ package download import ( "context" - "github.com/calypr/data-client/backend" + "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/logs" ) func GetFileInfo( ctx context.Context, - bk backend.Backend, + dc drs.Client, + logger *logs.Gen3Logger, guid, protocol, downloadPath, filenameFormat string, rename bool, renamedFiles *[]RenamedOrSkippedFileInfo, ) (*IndexdResponse, error) { - // Use Backend to get object details - drsObj, err := bk.GetFileDetails(ctx, guid) + drsObj, err := drs.ResolveObject(ctx, dc, guid) if err != nil { - bk.Logger().Warn("Failed to get file details", "guid", guid, "error", err) + logger.Warn("Failed to get file details", "guid", guid, "error", err) // Fallback: use GUID as filename if failed? // Original code: "All meta-data lookups failed... Using GUID as default" *renamedFiles = append(*renamedFiles, RenamedOrSkippedFileInfo{GUID: guid, OldFilename: guid, NewFilename: guid}) return &IndexdResponse{Name: guid, Size: 0}, nil } - name := drsObj.Name + name := "" + if drsObj.Name != nil { + name = *drsObj.Name + } if name == "" { // If name is empty (some DRS servers might not return it?), use GUID name = guid diff --git a/download/orchestrator.go b/download/orchestrator.go new file mode 100644 index 0000000..47634e7 --- /dev/null +++ b/download/orchestrator.go @@ -0,0 +1,18 @@ +package download + +import ( + "context" + + "github.com/calypr/data-client/common" + "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/transfer" +) + +// DownloadFile is a high-level orchestrator that downloads a file using the provided backend. 
+func DownloadFile(ctx context.Context, dc drs.Client, bk transfer.Downloader, guid, destPath string) error { + opts := DownloadOptions{ + MultipartThreshold: int64(5 * common.GB), + } + // Note: We could expose more options here if needed + return DownloadToPathWithOptions(ctx, dc, bk, bk.Logger().Logger, guid, destPath, "", opts) +} diff --git a/download/transfer.go b/download/transfer.go index b879075..8c15bd8 100644 --- a/download/transfer.go +++ b/download/transfer.go @@ -2,16 +2,20 @@ package download import ( "context" + "errors" "fmt" "io" "log/slog" + "net/http" "os" "path/filepath" + "strconv" "strings" "sync/atomic" - "github.com/calypr/data-client/backend" "github.com/calypr/data-client/common" + "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/transfer" "golang.org/x/sync/errgroup" ) @@ -32,7 +36,8 @@ func defaultDownloadOptions() DownloadOptions { // DownloadSingleWithProgress downloads a single object while emitting progress events. func DownloadSingleWithProgress( ctx context.Context, - bk backend.Backend, + dc drs.Client, + bk transfer.Downloader, guid string, downloadPath string, protocol string, @@ -48,7 +53,7 @@ func DownloadSingleWithProgress( } renamed := make([]RenamedOrSkippedFileInfo, 0) - info, err := GetFileInfo(ctx, bk, guid, protocol, downloadPath, "original", false, &renamed) + info, err := GetFileInfo(ctx, dc, bk.Logger(), guid, protocol, downloadPath, "original", false, &renamed) if err != nil { return err } @@ -113,19 +118,21 @@ func DownloadSingleWithProgress( // DownloadToPath downloads a single object using the provided backend func DownloadToPath( ctx context.Context, - bk backend.Backend, + dc drs.Client, + bk transfer.Downloader, logger *slog.Logger, guid string, dstPath string, protocol string, ) error { opts := defaultDownloadOptions() - return DownloadToPathWithOptions(ctx, bk, logger, guid, dstPath, protocol, opts) + return DownloadToPathWithOptions(ctx, dc, bk, logger, guid, dstPath, protocol, opts) } 
func DownloadToPathWithOptions( ctx context.Context, - bk backend.Backend, + dc drs.Client, + bk transfer.Downloader, logger *slog.Logger, guid string, dstPath string, @@ -142,14 +149,25 @@ func DownloadToPathWithOptions( opts.Concurrency = defaultDownloadOptions().Concurrency } - info, err := bk.GetFileDetails(ctx, guid) + info, err := drs.ResolveObject(ctx, dc, guid) if err != nil { return fmt.Errorf("get file details failed: %w", err) } // If size is unknown or small, single stream is safest. if info.Size <= 0 || info.Size < opts.MultipartThreshold { - return downloadToPathSingle(ctx, bk, logger, guid, dstPath, protocol) + return downloadToPathSingle(ctx, bk, logger, guid, dstPath, protocol, info.Size) + } + + // If a partial file already exists, resumable single-stream download is safer than + // parallel range writes and avoids restarting from zero. + if st, statErr := os.Stat(dstPath); statErr == nil { + if st.Size() == info.Size { + return nil + } + if st.Size() > 0 && st.Size() < info.Size { + return downloadToPathSingle(ctx, bk, logger, guid, dstPath, protocol, info.Size) + } } if err := downloadToPathMultipart(ctx, bk, logger, guid, dstPath, protocol, info.Size, opts); err != nil { @@ -161,18 +179,30 @@ func DownloadToPathWithOptions( func downloadToPathSingle( ctx context.Context, - bk backend.Backend, + bk transfer.Downloader, logger *slog.Logger, guid string, dstPath string, protocol string, + expectedSize int64, ) error { progress := common.GetProgress(ctx) hash := common.GetOid(ctx) + var existingSize int64 + if st, err := os.Stat(dstPath); err == nil { + existingSize = st.Size() + if expectedSize > 0 && existingSize == expectedSize { + return nil + } + } + fdr := common.FileDownloadResponseObject{ GUID: guid, } + if existingSize > 0 { + fdr.Range = existingSize + } protocolText := "" if protocol != "" { @@ -190,6 +220,11 @@ func downloadToPathSingle( } defer fdr.Response.Body.Close() + if existingSize > 0 && fdr.Response.StatusCode == http.StatusOK 
{ + // Server ignored range; restart from zero. + existingSize = 0 + } + if dir := filepath.Dir(dstPath); dir != "." { if err := os.MkdirAll(dir, 0766); err != nil { logger.Error("Mkdir failed", "error", err, "path", dstPath) @@ -197,7 +232,13 @@ func downloadToPathSingle( } } - file, err := os.Create(dstPath) + flags := os.O_CREATE | os.O_WRONLY + if existingSize > 0 { + flags |= os.O_APPEND + } else { + flags |= os.O_TRUNC + } + file, err := os.OpenFile(dstPath, flags, 0666) if err != nil { logger.Error("Create file failed", "error", err, "path", dstPath) return fmt.Errorf("create local file %s: %w", dstPath, err) @@ -206,25 +247,82 @@ func downloadToPathSingle( var writer io.Writer = file if progress != nil { - total := fdr.Response.ContentLength + total := fdr.Response.ContentLength + existingSize tracker := newProgressWriter(file, progress, hash, total) + if existingSize > 0 { + tracker.bytesSoFar = existingSize + } writer = tracker defer tracker.Finalize() } - if _, err := io.Copy(writer, fdr.Response.Body); err != nil { + reader := io.Reader(fdr.Response.Body) + if failAfter := parseInjectedDownloadFailureBytes(); failAfter > 0 { + reader = &failAfterReader{ + r: reader, + remaining: failAfter, + } + } + + if _, err := io.Copy(writer, reader); err != nil { logger.Error("Copy failed", "error", err, "path", dstPath) return fmt.Errorf("copy to %s: %w", dstPath, err) } + if expectedSize > 0 { + if st, err := os.Stat(dstPath); err == nil && st.Size() != expectedSize { + return fmt.Errorf("download incomplete for %s: expected %d bytes, got %d", dstPath, expectedSize, st.Size()) + } + } // Success logging is up to caller or we can do simple info // logger.Info("Download succeeded", "path", dstPath, "guid", guid) return nil } +func parseInjectedDownloadFailureBytes() int64 { + raw := strings.TrimSpace(os.Getenv("DATA_CLIENT_TEST_FAIL_DOWNLOAD_AFTER_BYTES")) + if raw == "" { + return 0 + } + n, err := strconv.ParseInt(raw, 10, 64) + if err != nil || n <= 0 { + return 
0 + } + return n +} + +type failAfterReader struct { + r io.Reader + remaining int64 + failed bool +} + +func (f *failAfterReader) Read(p []byte) (int, error) { + if f.failed { + return 0, errors.New("injected test interruption during download") + } + if f.remaining <= 0 { + f.failed = true + return 0, errors.New("injected test interruption during download") + } + if int64(len(p)) > f.remaining { + p = p[:f.remaining] + } + n, err := f.r.Read(p) + f.remaining -= int64(n) + if err != nil { + return n, err + } + if f.remaining <= 0 { + f.failed = true + return n, errors.New("injected test interruption during download") + } + return n, nil +} + func downloadToPathMultipart( ctx context.Context, - bk backend.Backend, + bk transfer.Downloader, logger *slog.Logger, guid string, dstPath string, @@ -237,7 +335,7 @@ func downloadToPathMultipart( protocolText = "?protocol=" + protocol } - signedURL, err := bk.GetDownloadURL(ctx, guid, protocolText) + signedURL, err := bk.ResolveDownloadURL(ctx, guid, protocolText) if err != nil { return fmt.Errorf("failed to resolve download URL for %s: %w", guid, err) } diff --git a/download/transfer_test.go b/download/transfer_test.go index ecd8a10..40132cf 100644 --- a/download/transfer_test.go +++ b/download/transfer_test.go @@ -5,7 +5,6 @@ import ( "context" "errors" "io" - "log/slog" "net/http" "net/url" "os" @@ -15,44 +14,48 @@ import ( "github.com/calypr/data-client/common" "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/hash" "github.com/calypr/data-client/logs" + "github.com/calypr/data-client/request" ) type fakeBackend struct { logger *logs.Gen3Logger doFunc func(context.Context, *common.FileDownloadResponseObject) (*http.Response, error) data []byte + size int64 } -func (f *fakeBackend) Name() string { return "Fake" } -func (f *fakeBackend) Logger() *slog.Logger { return f.logger.Logger } +func (f *fakeBackend) Name() string { return "Fake" } +func (f *fakeBackend) Logger() *logs.Gen3Logger { return f.logger 
} -func (f *fakeBackend) GetFileDetails(ctx context.Context, guid string) (*drs.DRSObject, error) { +func (f *fakeBackend) fileDetails(guid string) *drs.DRSObject { + size := f.size + if size == 0 && len(f.data) > 0 { + size = int64(len(f.data)) + } + if size == 0 { + size = 64 + } + name := "payload.bin" + accessID := "s3" return &drs.DRSObject{ - Name: "payload.bin", - Size: 64, + Name: &name, + Size: size, AccessMethods: []drs.AccessMethod{ - {AccessID: "s3", Type: "s3"}, + {AccessId: &accessID, Type: "s3"}, }, - }, nil + } } -func (f *fakeBackend) GetDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { +func (f *fakeBackend) ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { if guid == "test-fallback" { return "", errors.New("fallback") } return "https://download.example.com/object", nil } -func (f *fakeBackend) Register(ctx context.Context, obj *drs.DRSObject) (*drs.DRSObject, error) { - return obj, nil -} - -func (f *fakeBackend) BatchRegister(ctx context.Context, objs []*drs.DRSObject) ([]*drs.DRSObject, error) { - return objs, nil -} - -func (f *fakeBackend) GetUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { +func (f *fakeBackend) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { return "", errors.New("not implemented") } @@ -76,10 +79,24 @@ func (f *fakeBackend) UploadPart(ctx context.Context, url string, body io.Reader return "", errors.New("not implemented") } +func (f *fakeBackend) DeleteFile(ctx context.Context, guid string) (string, error) { + return "", errors.New("not implemented") +} + func (f *fakeBackend) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { if f.doFunc != nil { return f.doFunc(ctx, fdr) } + if fdr.Range > 0 { + start := fdr.Range + if start < 0 || start > 
int64(len(f.data)) { + return nil, errors.New("invalid resume range") + } + if start == int64(len(f.data)) { + return newDownloadResponse(fdr.PresignedURL, []byte{}, http.StatusPartialContent), nil + } + return newDownloadResponse(fdr.PresignedURL, f.data[start:], http.StatusPartialContent), nil + } if fdr.RangeStart != nil && fdr.RangeEnd != nil { start, end := *fdr.RangeStart, *fdr.RangeEnd if start < 0 || end >= int64(len(f.data)) || start > end { @@ -90,11 +107,81 @@ func (f *fakeBackend) Download(ctx context.Context, fdr *common.FileDownloadResp return newDownloadResponse(fdr.PresignedURL, f.data, http.StatusOK), nil } -func (f *fakeBackend) GetObjectByHash(ctx context.Context, checksumType, checksum string) ([]drs.DRSObject, error) { +type fakeDrsClient struct { + backend *fakeBackend +} + +func (f *fakeDrsClient) GetObject(ctx context.Context, id string) (*drs.DRSObject, error) { + return f.backend.fileDetails(id), nil +} + +func (f *fakeDrsClient) GetObjectByHash(ctx context.Context, checksum *hash.Checksum) ([]drs.DRSObject, error) { + obj := f.backend.fileDetails(checksum.Checksum) + return []drs.DRSObject{*obj}, nil +} + +func (f *fakeDrsClient) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { + return map[string][]drs.DRSObject{}, nil +} + +func (f *fakeDrsClient) ListObjects(ctx context.Context) (chan drs.DRSObjectResult, error) { + return nil, errors.New("not implemented") +} + +func (f *fakeDrsClient) ListObjectsByProject(ctx context.Context, projectId string) (chan drs.DRSObjectResult, error) { + return nil, errors.New("not implemented") +} + +func (f *fakeDrsClient) GetProjectSample(ctx context.Context, projectId string, limit int) ([]drs.DRSObject, error) { + return nil, errors.New("not implemented") +} + +func (f *fakeDrsClient) RegisterRecord(ctx context.Context, record *drs.DRSObject) (*drs.DRSObject, error) { + return nil, errors.New("not implemented") +} + +func (f *fakeDrsClient) 
RegisterRecords(ctx context.Context, records []*drs.DRSObject) ([]*drs.DRSObject, error) { + return nil, errors.New("not implemented") +} + +func (f *fakeDrsClient) UpdateRecord(ctx context.Context, updateInfo *drs.DRSObject, did string) (*drs.DRSObject, error) { + return nil, errors.New("not implemented") +} + +func (f *fakeDrsClient) DeleteRecord(ctx context.Context, did string) error { + return errors.New("not implemented") +} + +func (f *fakeDrsClient) DeleteRecordsByProject(ctx context.Context, projectId string) error { + return errors.New("not implemented") +} + +func (f *fakeDrsClient) GetDownloadURL(ctx context.Context, id string, accessType string) (*drs.AccessURL, error) { + return nil, errors.New("not implemented") +} + +func (f *fakeDrsClient) AddURL(ctx context.Context, blobURL, sha256 string, opts ...drs.AddURLOption) (*drs.DRSObject, error) { return nil, errors.New("not implemented") } -func (f *fakeBackend) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { +func (f *fakeDrsClient) UpsertRecord(ctx context.Context, url string, sha256 string, fileSize int64, projectId string) (*drs.DRSObject, error) { + return nil, errors.New("not implemented") +} + +func (f *fakeDrsClient) BuildDrsObj(fileName string, checksum string, size int64, drsId string) (*drs.DRSObject, error) { + return nil, errors.New("not implemented") +} + +func (f *fakeDrsClient) GetProjectId() string { return "" } +func (f *fakeDrsClient) GetBucketName() string { return "" } +func (f *fakeDrsClient) GetOrganization() string { return "" } +func (f *fakeDrsClient) WithProject(projectId string) drs.Client { return f } +func (f *fakeDrsClient) WithOrganization(organization string) drs.Client { return f } +func (f *fakeDrsClient) WithBucket(bucketName string) drs.Client { return f } +func (f *fakeDrsClient) New(method, url string) *request.RequestBuilder { + return &request.RequestBuilder{Method: method, Url: url} +} +func (f *fakeDrsClient) 
Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { return nil, errors.New("not implemented") } @@ -113,9 +200,10 @@ func TestDownloadSingleWithProgressEmitsEvents(t *testing.T) { logger: logs.NewGen3Logger(nil, "", ""), data: payload, } + dc := &fakeDrsClient{backend: fake} ctx := common.WithProgress(context.Background(), progress) - err := DownloadSingleWithProgress(ctx, fake, "guid-123", downloadPath, "") + err := DownloadSingleWithProgress(ctx, dc, fake, "guid-123", downloadPath, "") if err != nil { t.Fatalf("download failed: %v", err) } @@ -151,10 +239,12 @@ func TestDownloadSingleWithProgressFinalizeOnError(t *testing.T) { fake := &fakeBackend{ logger: logs.NewGen3Logger(nil, "", ""), data: []byte("short"), + size: 64, } + dc := &fakeDrsClient{backend: fake} ctx := common.WithProgress(context.Background(), progress) - err := DownloadSingleWithProgress(ctx, fake, "guid-123", downloadPath, "") + err := DownloadSingleWithProgress(ctx, dc, fake, "guid-123", downloadPath, "") if err == nil { t.Fatal("expected download error") } @@ -189,12 +279,15 @@ func TestDownloadToPathMultipart(t *testing.T) { fake := &fakeBackend{ logger: logs.NewGen3Logger(nil, "", ""), data: payload, + size: int64(len(payload)), } + dc := &fakeDrsClient{backend: fake} err := DownloadToPathWithOptions( context.Background(), + dc, fake, - fake.Logger(), + fake.Logger().Logger, "guid-789", dst, "", @@ -217,6 +310,143 @@ func TestDownloadToPathMultipart(t *testing.T) { } } +func TestDownloadToPathSingleResumeFromPartial(t *testing.T) { + payload := bytes.Repeat([]byte("r"), 1024) + tmpDir := t.TempDir() + dst := filepath.Join(tmpDir, "resume.bin") + prefix := payload[:300] + if err := os.WriteFile(dst, prefix, 0o666); err != nil { + t.Fatalf("write partial file: %v", err) + } + + var gotRange int64 = -1 + fake := &fakeBackend{ + logger: logs.NewGen3Logger(nil, "", ""), + data: payload, + size: int64(len(payload)), + doFunc: func(_ context.Context, fdr 
*common.FileDownloadResponseObject) (*http.Response, error) { + gotRange = fdr.Range + if fdr.Range <= 0 { + return nil, errors.New("expected resume range") + } + return newDownloadResponse(fdr.PresignedURL, payload[fdr.Range:], http.StatusPartialContent), nil + }, + } + dc := &fakeDrsClient{backend: fake} + + err := DownloadToPathWithOptions( + context.Background(), + dc, + fake, + fake.Logger().Logger, + "guid-resume", + dst, + "", + DownloadOptions{ + MultipartThreshold: 1 * common.GB, // force single-stream path + ChunkSize: 64 * common.MB, + Concurrency: 2, + }, + ) + if err != nil { + t.Fatalf("resume download failed: %v", err) + } + if gotRange != int64(len(prefix)) { + t.Fatalf("expected range %d, got %d", len(prefix), gotRange) + } + + got, err := os.ReadFile(dst) + if err != nil { + t.Fatalf("read result: %v", err) + } + if !bytes.Equal(got, payload) { + t.Fatal("resumed file mismatch") + } +} + +func TestDownloadToPathSingleRangeIgnoredRestarts(t *testing.T) { + payload := bytes.Repeat([]byte("k"), 2048) + tmpDir := t.TempDir() + dst := filepath.Join(tmpDir, "range-ignored.bin") + if err := os.WriteFile(dst, payload[:500], 0o666); err != nil { + t.Fatalf("write partial: %v", err) + } + + fake := &fakeBackend{ + logger: logs.NewGen3Logger(nil, "", ""), + data: payload, + size: int64(len(payload)), + doFunc: func(_ context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { + // Simulate server ignoring Range and returning full body with 200. 
+ if fdr.Range <= 0 { + return nil, errors.New("expected range request") + } + return newDownloadResponse(fdr.PresignedURL, payload, http.StatusOK), nil + }, + } + dc := &fakeDrsClient{backend: fake} + + err := DownloadToPathWithOptions( + context.Background(), + dc, + fake, + fake.Logger().Logger, + "guid-range-ignored", + dst, + "", + DownloadOptions{MultipartThreshold: 1 * common.GB, ChunkSize: 64 * common.MB, Concurrency: 2}, + ) + if err != nil { + t.Fatalf("download failed: %v", err) + } + + got, err := os.ReadFile(dst) + if err != nil { + t.Fatalf("read result: %v", err) + } + if !bytes.Equal(got, payload) { + t.Fatal("range-ignored restart did not produce full file") + } +} + +func TestDownloadToPathAlreadyCompleteSkipsDownload(t *testing.T) { + payload := bytes.Repeat([]byte("c"), 512) + tmpDir := t.TempDir() + dst := filepath.Join(tmpDir, "complete.bin") + if err := os.WriteFile(dst, payload, 0o666); err != nil { + t.Fatalf("write complete file: %v", err) + } + + calls := 0 + fake := &fakeBackend{ + logger: logs.NewGen3Logger(nil, "", ""), + data: payload, + size: int64(len(payload)), + doFunc: func(_ context.Context, _ *common.FileDownloadResponseObject) (*http.Response, error) { + calls++ + return newDownloadResponse("https://download.example.com/object", payload, http.StatusOK), nil + }, + } + dc := &fakeDrsClient{backend: fake} + + err := DownloadToPathWithOptions( + context.Background(), + dc, + fake, + fake.Logger().Logger, + "guid-complete", + dst, + "", + DownloadOptions{MultipartThreshold: 1 * common.GB, ChunkSize: 64 * common.MB, Concurrency: 2}, + ) + if err != nil { + t.Fatalf("download call failed: %v", err) + } + if calls != 0 { + t.Fatalf("expected no backend download calls, got %d", calls) + } +} + func newDownloadResponse(rawURL string, payload []byte, status int) *http.Response { parsedURL, err := url.Parse(rawURL) if err != nil { diff --git a/download/url_resolution.go b/download/url_resolution.go index 2506981..e18ba5a 100644 --- 
a/download/url_resolution.go +++ b/download/url_resolution.go @@ -8,13 +8,13 @@ import ( "net/http" "strings" - "github.com/calypr/data-client/backend" "github.com/calypr/data-client/common" + "github.com/calypr/data-client/transfer" ) // GetDownloadResponse gets presigned URL and prepares HTTP response -func GetDownloadResponse(ctx context.Context, bk backend.Backend, fdr *common.FileDownloadResponseObject, protocolText string) error { - url, err := bk.GetDownloadURL(ctx, fdr.GUID, protocolText) +func GetDownloadResponse(ctx context.Context, bk transfer.Downloader, fdr *common.FileDownloadResponseObject, protocolText string) error { + url, err := bk.ResolveDownloadURL(ctx, fdr.GUID, protocolText) if err != nil { return fmt.Errorf("failed to resolve download URL for %s: %w", fdr.GUID, err) } @@ -23,7 +23,7 @@ func GetDownloadResponse(ctx context.Context, bk backend.Backend, fdr *common.Fi return makeDownloadRequest(ctx, bk, fdr) } -func makeDownloadRequest(ctx context.Context, bk backend.Backend, fdr *common.FileDownloadResponseObject) error { +func makeDownloadRequest(ctx context.Context, bk transfer.Downloader, fdr *common.FileDownloadResponseObject) error { resp, err := bk.Download(ctx, fdr) if err != nil { diff --git a/indexd/client.go b/drs/client.go similarity index 52% rename from indexd/client.go rename to drs/client.go index c2114d8..df96d07 100644 --- a/indexd/client.go +++ b/drs/client.go @@ -1,4 +1,4 @@ -package indexd +package drs import ( "bytes" @@ -11,175 +11,123 @@ import ( "strings" "github.com/calypr/data-client/conf" - "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/hash" "github.com/calypr/data-client/request" ) -//go:generate mockgen -destination=../mocks/mock_indexd.go -package=mocks github.com/calypr/data-client/indexd IndexdInterface - -// IndexdInterface defines the interface for Indexd client -type IndexdInterface interface { +type DrsClient struct { request.RequestInterface - - GetObject(ctx context.Context, id 
string) (*drs.DRSObject, error) - RegisterIndexdRecord(ctx context.Context, indexdObj *IndexdRecord) (*drs.DRSObject, error) - DeleteIndexdRecord(ctx context.Context, did string) error - GetObjectByHash(ctx context.Context, hashType, hashValue string) ([]drs.DRSObject, error) - GetDownloadURL(ctx context.Context, did string, accessType string) (*drs.AccessURL, error) - ListObjectsByProject(ctx context.Context, projectId string) (chan drs.DRSObjectResult, error) - UpdateRecord(ctx context.Context, updateInfo *drs.DRSObject, did string) (*drs.DRSObject, error) - - ListObjects(ctx context.Context) (chan drs.DRSObjectResult, error) - GetProjectSample(ctx context.Context, projectId string, limit int) ([]drs.DRSObject, error) - DeleteRecordsByProject(ctx context.Context, projectId string) error - DeleteRecordByHash(ctx context.Context, hashValue string, projectId string) error - RegisterRecord(ctx context.Context, record *drs.DRSObject) (*drs.DRSObject, error) - RegisterRecords(ctx context.Context, records []*drs.DRSObject) ([]*drs.DRSObject, error) - UpsertIndexdRecord(ctx context.Context, url string, sha256 string, fileSize int64, projectId string) (*drs.DRSObject, error) - - BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) + provider endpointProvider + logger *slog.Logger + projectId string + organization string + bucketName string } -// IndexdClient implements IndexdInterface -type IndexdClient struct { - request.RequestInterface - cred *conf.Credential - logger *slog.Logger +type endpointProvider interface { + APIEndpoint() string + AccessToken() string } -// NewIndexdClient creates a new IndexdClient -func NewIndexdClient(req request.RequestInterface, cred *conf.Credential, logger *slog.Logger) IndexdInterface { - return &IndexdClient{ - RequestInterface: req, - cred: cred, - logger: logger, - } +type gen3Provider struct { + cred *conf.Credential } -func (c *IndexdClient) GetObject(ctx context.Context, id string) 
(*drs.DRSObject, error) { - url := fmt.Sprintf("%s/ga4gh/drs/v1/objects/%s", c.cred.APIEndpoint, id) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodGet, - Url: url, - Token: c.cred.AccessToken, - }) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound { - return nil, fmt.Errorf("object %s not found", id) - } - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to get object %s: %s (status: %d)", id, string(body), resp.StatusCode) - } +func (p gen3Provider) APIEndpoint() string { return p.cred.APIEndpoint } +func (p gen3Provider) AccessToken() string { return p.cred.AccessToken } - var out OutputObject - if err := json.NewDecoder(resp.Body).Decode(&out); err != nil { - return nil, err - } - return ConvertOutputObjectToDRSObject(&out), nil +type localProvider struct { + endpoint string } -func (c *IndexdClient) RegisterIndexdRecord(ctx context.Context, indexdObj *IndexdRecord) (*drs.DRSObject, error) { - indexdObjForm := IndexdRecordForm{ - IndexdRecord: *indexdObj, - Form: "object", - } +func (p localProvider) APIEndpoint() string { return p.endpoint } +func (p localProvider) AccessToken() string { return "" } - jsonBytes, err := json.Marshal(indexdObjForm) - if err != nil { - return nil, err +// NewDrsClient creates a new DrsClient +func NewDrsClient(req request.RequestInterface, cred *conf.Credential, logger *slog.Logger) Client { + return &DrsClient{ + RequestInterface: req, + provider: gen3Provider{cred: cred}, + logger: logger, } +} - url := fmt.Sprintf("%s/index", c.cred.APIEndpoint) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodPost, - Url: url, - Body: bytes.NewBuffer(jsonBytes), - Headers: map[string]string{ - "Content-Type": "application/json", - "Accept": "application/json", - }, - Token: c.cred.AccessToken, - }) - if err != nil { - return nil, err +// NewLocalDrsClient creates a DRS client for 
local/non-Gen3 mode. +// It intentionally carries no bearer token. +func NewLocalDrsClient(req request.RequestInterface, endpoint string, logger *slog.Logger) Client { + return &DrsClient{ + RequestInterface: req, + provider: localProvider{endpoint: endpoint}, + logger: logger, } - defer resp.Body.Close() +} - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to register record %s: %s (status: %d)", indexdObj.Did, string(body), resp.StatusCode) - } +func (c *DrsClient) apiEndpoint() string { return c.provider.APIEndpoint() } +func (c *DrsClient) token() string { return c.provider.AccessToken() } - return IndexdRecordToDrsObject(indexdObj) +func (c *DrsClient) GetProjectId() string { + return c.projectId } -func (c *IndexdClient) DeleteIndexdRecord(ctx context.Context, did string) error { - // First get the record to get the revision (rev) - record, err := c.getIndexdRecordByDID(ctx, did) - if err != nil { - return err - } +func (c *DrsClient) GetBucketName() string { + return c.bucketName +} - url := fmt.Sprintf("%s/index/%s?rev=%s", c.cred.APIEndpoint, did, record.Rev) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodDelete, - Url: url, - Headers: map[string]string{ - "Accept": "application/json", - }, - Token: c.cred.AccessToken, - }) - if err != nil { - return err - } - defer resp.Body.Close() +func (c *DrsClient) GetOrganization() string { + return c.organization +} - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("failed to delete record %s: %s (status: %d)", did, string(body), resp.StatusCode) - } +func (c *DrsClient) WithProject(projectId string) Client { + c.projectId = projectId + return c +} - return nil +func (c *DrsClient) WithOrganization(organization string) Client { + c.organization = organization + return c +} + +func (c *DrsClient) 
WithBucket(bucketName string) Client { + c.bucketName = bucketName + return c } -func (c *IndexdClient) getIndexdRecordByDID(ctx context.Context, did string) (*OutputInfo, error) { - url := fmt.Sprintf("%s/index/%s", c.cred.APIEndpoint, did) +func (c *DrsClient) GetObject(ctx context.Context, id string) (*DRSObject, error) { + url := fmt.Sprintf("%s/ga4gh/drs/v1/objects/%s", c.apiEndpoint(), id) resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodGet, Url: url, - Token: c.cred.AccessToken, + Token: c.token(), }) if err != nil { return nil, err } defer resp.Body.Close() + if resp.StatusCode == http.StatusNotFound { + return nil, fmt.Errorf("object %s not found", id) + } if resp.StatusCode != http.StatusOK { body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to get indexd record %s: %s (status: %d)", did, string(body), resp.StatusCode) + return nil, fmt.Errorf("failed to get object %s: %s (status: %d)", id, string(body), resp.StatusCode) } - var info OutputInfo - if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { + var out OutputObject + if err := json.NewDecoder(resp.Body).Decode(&out); err != nil { return nil, err } - return &info, nil + return ConvertOutputObjectToDRSObject(&out), nil } -func (c *IndexdClient) GetObjectByHash(ctx context.Context, hashType, hashValue string) ([]drs.DRSObject, error) { - url := fmt.Sprintf("%s/index?hash=%s:%s", c.cred.APIEndpoint, hashType, hashValue) +func (c *DrsClient) GetObjectByHash(ctx context.Context, checksum *hash.Checksum) ([]DRSObject, error) { + url := fmt.Sprintf("%s/index?hash=%s:%s", c.apiEndpoint(), string(checksum.Type), checksum.Checksum) resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodGet, Url: url, Headers: map[string]string{ "Accept": "application/json", }, - Token: c.cred.AccessToken, + Token: c.token(), }) if err != nil { return nil, err @@ -188,7 +136,7 @@ func (c *IndexdClient) GetObjectByHash(ctx context.Context, hashType, hashValue if 
resp.StatusCode != http.StatusOK { body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to query by hash %s:%s: %s (status: %d)", hashType, hashValue, string(body), resp.StatusCode) + return nil, fmt.Errorf("failed to query by hash %s:%s: %s (status: %d)", checksum.Type, checksum.Checksum, string(body), resp.StatusCode) } var records ListRecords @@ -196,9 +144,9 @@ func (c *IndexdClient) GetObjectByHash(ctx context.Context, hashType, hashValue return nil, err } - out := make([]drs.DRSObject, 0, len(records.Records)) + out := make([]DRSObject, 0, len(records.Records)) for _, r := range records.Records { - drsObj, err := IndexdRecordToDrsObject(r.ToIndexdRecord()) + drsObj, err := r.ToDrsObject() if err != nil { return nil, err } @@ -207,12 +155,12 @@ func (c *IndexdClient) GetObjectByHash(ctx context.Context, hashType, hashValue return out, nil } -func (c *IndexdClient) GetDownloadURL(ctx context.Context, did string, accessType string) (*drs.AccessURL, error) { - url := fmt.Sprintf("%s/ga4gh/drs/v1/objects/%s/access/%s", c.cred.APIEndpoint, did, accessType) +func (c *DrsClient) GetDownloadURL(ctx context.Context, id string, accessType string) (*AccessURL, error) { + url := fmt.Sprintf("%s/ga4gh/drs/v1/objects/%s/access/%s", c.apiEndpoint(), id, accessType) resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodGet, Url: url, - Token: c.cred.AccessToken, + Token: c.token(), }) if err != nil { return nil, err @@ -221,25 +169,25 @@ func (c *IndexdClient) GetDownloadURL(ctx context.Context, did string, accessTyp if resp.StatusCode != http.StatusOK { body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to get download URL for %s: %s (status: %d)", did, string(body), resp.StatusCode) + return nil, fmt.Errorf("failed to get download URL for %s: %s (status: %d)", id, string(body), resp.StatusCode) } - var accessURL drs.AccessURL + var accessURL AccessURL if err := json.NewDecoder(resp.Body).Decode(&accessURL); err != nil { return nil, 
err } return &accessURL, nil } -func (c *IndexdClient) ListObjectsByProject(ctx context.Context, projectId string) (chan drs.DRSObjectResult, error) { +func (c *DrsClient) ListObjectsByProject(ctx context.Context, projectId string) (chan DRSObjectResult, error) { const PAGESIZE = 50 - resourcePath, err := drs.ProjectToResource("", projectId) + resourcePath, err := ProjectToResource("", projectId) if err != nil { return nil, err } - out := make(chan drs.DRSObjectResult, PAGESIZE) + out := make(chan DRSObjectResult, PAGESIZE) go func() { defer close(out) @@ -248,7 +196,7 @@ func (c *IndexdClient) ListObjectsByProject(ctx context.Context, projectId strin for active { url := fmt.Sprintf("%s/index?authz=%s&limit=%d&page=%d", - c.cred.APIEndpoint, resourcePath, PAGESIZE, pageNum) + c.apiEndpoint(), resourcePath, PAGESIZE, pageNum) resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodGet, @@ -256,18 +204,18 @@ func (c *IndexdClient) ListObjectsByProject(ctx context.Context, projectId strin Headers: map[string]string{ "Accept": "application/json", }, - Token: c.cred.AccessToken, + Token: c.token(), }) if err != nil { - out <- drs.DRSObjectResult{Error: err} + out <- DRSObjectResult{Error: err} break } if resp.StatusCode != http.StatusOK { body, _ := io.ReadAll(resp.Body) resp.Body.Close() - out <- drs.DRSObjectResult{Error: fmt.Errorf("api error %d: %s", resp.StatusCode, string(body))} + out <- DRSObjectResult{Error: fmt.Errorf("api error %d: %s", resp.StatusCode, string(body))} break } @@ -276,7 +224,7 @@ func (c *IndexdClient) ListObjectsByProject(ctx context.Context, projectId strin resp.Body.Close() if err != nil { - out <- drs.DRSObjectResult{Error: err} + out <- DRSObjectResult{Error: err} break } @@ -286,12 +234,12 @@ func (c *IndexdClient) ListObjectsByProject(ctx context.Context, projectId strin } for _, elem := range page.Records { - drsObj, err := elem.ToIndexdRecord().ToDrsObject() + drsObj, err := elem.ToDrsObject() if err != nil { - out <- 
drs.DRSObjectResult{Error: err} + out <- DRSObjectResult{Error: err} continue } - out <- drs.DRSObjectResult{Object: drsObj} + out <- DRSObjectResult{Object: drsObj} } pageNum++ } @@ -300,83 +248,10 @@ func (c *IndexdClient) ListObjectsByProject(ctx context.Context, projectId strin return out, nil } -func (c *IndexdClient) UpdateRecord(ctx context.Context, updateInfo *drs.DRSObject, did string) (*drs.DRSObject, error) { - // Get current revision from existing record - record, err := c.getIndexdRecordByDID(ctx, did) - if err != nil { - return nil, fmt.Errorf("could not retrieve existing record for DID %s: %v", did, err) - } - - // Build update payload starting with existing record values - updatePayload := UpdateInputInfo{ - URLs: record.URLs, - FileName: record.FileName, - Version: record.Version, - Authz: record.Authz, - ACL: record.ACL, - Metadata: record.Metadata, - } - - // Apply updates from updateInfo - if len(updateInfo.AccessMethods) > 0 { - newURLs := make([]string, 0, len(updateInfo.AccessMethods)) - for _, a := range updateInfo.AccessMethods { - newURLs = append(newURLs, a.AccessURL.URL) - } - updatePayload.URLs = appendUnique(updatePayload.URLs, newURLs) - - authz := IndexdAuthzFromDrsAccessMethods(updateInfo.AccessMethods) - updatePayload.Authz = appendUnique(updatePayload.Authz, authz) - } - - if updateInfo.Name != "" { - updatePayload.FileName = updateInfo.Name - } - - if updateInfo.Version != "" { - updatePayload.Version = updateInfo.Version - } - - if updateInfo.Description != "" { - if updatePayload.Metadata == nil { - updatePayload.Metadata = make(map[string]any) - } - updatePayload.Metadata["description"] = updateInfo.Description - } - - jsonBytes, err := json.Marshal(updatePayload) - if err != nil { - return nil, fmt.Errorf("error marshaling indexd update payload: %v", err) - } - - url := fmt.Sprintf("%s/index/%s?rev=%s", c.cred.APIEndpoint, did, record.Rev) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodPut, - Url: url, 
- Body: bytes.NewBuffer(jsonBytes), - Headers: map[string]string{ - "Content-Type": "application/json", - "Accept": "application/json", - }, - Token: c.cred.AccessToken, - }) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to update record %s: %s (status: %d)", did, string(body), resp.StatusCode) - } - - return c.GetObject(ctx, did) -} - -func (c *IndexdClient) ListObjects(ctx context.Context) (chan drs.DRSObjectResult, error) { - url := fmt.Sprintf("%s/ga4gh/drs/v1/objects", c.cred.APIEndpoint) +func (c *DrsClient) ListObjects(ctx context.Context) (chan DRSObjectResult, error) { + url := fmt.Sprintf("%s/ga4gh/drs/v1/objects", c.apiEndpoint()) const PAGESIZE = 50 - out := make(chan drs.DRSObjectResult, 10) + out := make(chan DRSObjectResult, 10) go func() { defer close(out) @@ -387,27 +262,27 @@ func (c *IndexdClient) ListObjects(ctx context.Context) (chan drs.DRSObjectResul resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodGet, Url: fullURL, - Token: c.cred.AccessToken, + Token: c.token(), }) if err != nil { - out <- drs.DRSObjectResult{Error: err} + out <- DRSObjectResult{Error: err} return } if resp.StatusCode != http.StatusOK { body, _ := io.ReadAll(resp.Body) resp.Body.Close() - out <- drs.DRSObjectResult{Error: fmt.Errorf("api error %d: %s", resp.StatusCode, string(body))} + out <- DRSObjectResult{Error: fmt.Errorf("api error %d: %s", resp.StatusCode, string(body))} return } - var page drs.DRSPage + var page DRSPage err = json.NewDecoder(resp.Body).Decode(&page) resp.Body.Close() if err != nil { - out <- drs.DRSObjectResult{Error: err} + out <- DRSObjectResult{Error: err} return } @@ -417,7 +292,8 @@ func (c *IndexdClient) ListObjects(ctx context.Context) (chan drs.DRSObjectResul } for _, elem := range page.DRSObjects { - out <- drs.DRSObjectResult{Object: &elem} + elemCopy := elem + out <- DRSObjectResult{Object: 
&elemCopy} } pageNum++ } @@ -425,7 +301,7 @@ func (c *IndexdClient) ListObjects(ctx context.Context) (chan drs.DRSObjectResul return out, nil } -func (c *IndexdClient) GetProjectSample(ctx context.Context, projectId string, limit int) ([]drs.DRSObject, error) { +func (c *DrsClient) GetProjectSample(ctx context.Context, projectId string, limit int) ([]DRSObject, error) { if limit <= 0 { limit = 1 } @@ -435,7 +311,7 @@ func (c *IndexdClient) GetProjectSample(ctx context.Context, projectId string, l return nil, err } - result := make([]drs.DRSObject, 0, limit) + result := make([]DRSObject, 0, limit) for objResult := range objChan { if objResult.Error != nil { return nil, objResult.Error @@ -450,100 +326,64 @@ func (c *IndexdClient) GetProjectSample(ctx context.Context, projectId string, l break } } - return result, nil } -func (c *IndexdClient) DeleteRecordsByProject(ctx context.Context, projectId string) error { - recs, err := c.ListObjectsByProject(ctx, projectId) +func (c *DrsClient) RegisterRecord(ctx context.Context, record *DRSObject) (*DRSObject, error) { + indexdRecord, err := InternalRecordFromDrsObject(record) if err != nil { - return err - } - - // Snapshot and dedupe IDs first so pagination isn't affected by deletes-in-flight. - ids := make([]string, 0, 128) - seen := make(map[string]struct{}) - for rec := range recs { - if rec.Error != nil { - return rec.Error - } - - if rec.Object == nil || rec.Object.Id == "" { - continue - } - if _, ok := seen[rec.Object.Id]; ok { - continue - } - seen[rec.Object.Id] = struct{}{} - ids = append(ids, rec.Object.Id) - } - - for _, id := range ids { - err := c.DeleteIndexdRecord(ctx, id) - if err != nil { - // Project-wide cleanup should be idempotent; stale/deleted IDs are expected. 
- if isNotFoundErr(err) { - c.logger.Info(fmt.Sprintf("DeleteRecordsByProject: record already absent %s", id)) - continue - } - c.logger.Error(fmt.Sprintf("DeleteRecordsByProject Error for %s: %v", id, err)) - continue - } + return nil, fmt.Errorf("error converting DRS object to internal record: %v", err) } - return nil -} -func isNotFoundErr(err error) bool { - if err == nil { - return false + indexdObjForm := InternalRecordForm{ + InternalRecord: *indexdRecord, + Form: "object", } - msg := err.Error() - return strings.Contains(msg, "status: 404") || - strings.Contains(msg, "status=404") || - strings.Contains(msg, "Object not found") || - strings.Contains(msg, "not found") -} -func (c *IndexdClient) DeleteRecordByHash(ctx context.Context, hashValue string, projectId string) error { - records, err := c.GetObjectByHash(ctx, "sha256", hashValue) + jsonBytes, err := json.Marshal(indexdObjForm) if err != nil { - return fmt.Errorf("error getting records for hash %s: %v", hashValue, err) - } - if len(records) == 0 { - return fmt.Errorf("no records found for hash %s", hashValue) + return nil, err } - matchingRecord, err := drs.FindMatchingRecord(records, "", projectId) + url := fmt.Sprintf("%s/index", c.apiEndpoint()) + resp, err := c.Do(ctx, &request.RequestBuilder{ + Method: http.MethodPost, + Url: url, + Body: bytes.NewBuffer(jsonBytes), + Headers: map[string]string{ + "Content-Type": "application/json", + "Accept": "application/json", + }, + Token: c.token(), + }) if err != nil { - return fmt.Errorf("error finding matching record for project %s: %v", projectId, err) - } - if matchingRecord == nil { - return fmt.Errorf("no matching record found for project %s", projectId) + return nil, err } + defer resp.Body.Close() - return c.DeleteIndexdRecord(ctx, matchingRecord.Id) -} - -func (c *IndexdClient) RegisterRecord(ctx context.Context, record *drs.DRSObject) (*drs.DRSObject, error) { - indexdRecord, err := IndexdRecordFromDrsObject(record) - if err != nil { - return nil, 
fmt.Errorf("error converting DRS object to indexd record: %v", err) + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { + body, _ := io.ReadAll(resp.Body) + did := "" + if indexdRecord.Did != nil { + did = *indexdRecord.Did + } + return nil, fmt.Errorf("failed to register record %s: %s (status: %d)", did, string(body), resp.StatusCode) } - return c.RegisterIndexdRecord(ctx, indexdRecord) + return InternalRecordToDrsObject(indexdRecord) } -func (c *IndexdClient) RegisterRecords(ctx context.Context, records []*drs.DRSObject) ([]*drs.DRSObject, error) { +func (c *DrsClient) RegisterRecords(ctx context.Context, records []*DRSObject) ([]*DRSObject, error) { if len(records) == 0 { return nil, nil } - candidates := make([]drs.DRSObjectCandidate, len(records)) + candidates := make([]DRSObjectCandidate, len(records)) for i, r := range records { - candidates[i] = drs.ConvertToCandidate(r) + candidates[i] = ConvertToCandidate(r) } - reqBody := drs.RegisterObjectsRequest{ + reqBody := RegisterObjectsRequest{ Candidates: candidates, } @@ -552,7 +392,7 @@ func (c *IndexdClient) RegisterRecords(ctx context.Context, records []*drs.DRSOb return nil, err } - url := fmt.Sprintf("%s/ga4gh/drs/v1/objects/register", c.cred.APIEndpoint) + url := fmt.Sprintf("%s/ga4gh/drs/v1/objects/register", c.apiEndpoint()) resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodPost, Url: url, @@ -561,7 +401,7 @@ func (c *IndexdClient) RegisterRecords(ctx context.Context, records []*drs.DRSOb "Content-Type": "application/json", "Accept": "application/json", }, - Token: c.cred.AccessToken, + Token: c.token(), }) if err != nil { return nil, err @@ -578,33 +418,189 @@ func (c *IndexdClient) RegisterRecords(ctx context.Context, records []*drs.DRSOb return nil, fmt.Errorf("error reading registered objects response: %v", err) } - registered, err := decodeRegisteredObjects(body) + // Canonical shape from DRS register API. 
+ var wrapped struct { + Objects []*DRSObject `json:"objects"` + } + if err := json.Unmarshal(body, &wrapped); err != nil { + return nil, fmt.Errorf("unsupported response payload: %s", string(body)) + } + if len(wrapped.Objects) == 0 { + return nil, fmt.Errorf("register response did not include objects") + } + return wrapped.Objects, nil +} + +func (c *DrsClient) UpdateRecord(ctx context.Context, updateInfo *DRSObject, did string) (*DRSObject, error) { + // Get current revision from existing record + record, err := c.getInternalRecordByDID(ctx, did) + if err != nil { + return nil, fmt.Errorf("could not retrieve existing record for DID %s: %v", did, err) + } + + // Build update payload starting with existing record values + updatePayload := UpdateInputInfo{ + URLs: record.Urls, + FileName: record.FileName, + Authz: record.Authz, + } + + // Apply updates from updateInfo + if len(updateInfo.AccessMethods) > 0 { + newURLs := make([]string, 0, len(updateInfo.AccessMethods)) + for _, a := range updateInfo.AccessMethods { + if a.AccessUrl != nil { + newURLs = append(newURLs, a.AccessUrl.Url) + } + } + updatePayload.URLs = appendUnique(updatePayload.URLs, newURLs) + + authz := InternalAuthzFromDrsAccessMethods(updateInfo.AccessMethods) + updatePayload.Authz = appendUnique(updatePayload.Authz, authz) + } + + if updateInfo.Name != nil && *updateInfo.Name != "" { + updatePayload.FileName = updateInfo.Name + } + + if updateInfo.Version != nil && *updateInfo.Version != "" { + updatePayload.Version = updateInfo.Version + } + + if updateInfo.Description != nil && *updateInfo.Description != "" { + if updatePayload.Metadata == nil { + updatePayload.Metadata = make(map[string]any) + } + updatePayload.Metadata["description"] = *updateInfo.Description + } + + jsonBytes, err := json.Marshal(updatePayload) + if err != nil { + return nil, fmt.Errorf("error marshaling indexd update payload: %v", err) + } + + rev := "" + if record.Rev != nil { + rev = *record.Rev + } + url := 
fmt.Sprintf("%s/index/%s?rev=%s", c.apiEndpoint(), did, rev) + resp, err := c.Do(ctx, &request.RequestBuilder{ + Method: http.MethodPut, + Url: url, + Body: bytes.NewBuffer(jsonBytes), + Headers: map[string]string{ + "Content-Type": "application/json", + "Accept": "application/json", + }, + Token: c.token(), + }) if err != nil { - return nil, fmt.Errorf("error decoding registered objects: %v", err) + return nil, err } - return registered, nil + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("failed to update record %s: %s (status: %d)", did, string(body), resp.StatusCode) + } + + return c.GetObject(ctx, did) } -func decodeRegisteredObjects(body []byte) ([]*drs.DRSObject, error) { - trimmed := bytes.TrimSpace(body) - if len(trimmed) == 0 { - return nil, fmt.Errorf("empty response body") +func (c *DrsClient) DeleteRecord(ctx context.Context, did string) error { + // First get the record to get the revision (rev) + record, err := c.getInternalRecordByDID(ctx, did) + if err != nil { + return err } - // Canonical shape from DRS register API. 
- var wrapped struct { - Objects []*drs.DRSObject `json:"objects"` + rev := "" + if record.Rev != nil { + rev = *record.Rev } - if err := json.Unmarshal(trimmed, &wrapped); err != nil { - return nil, fmt.Errorf("unsupported response payload: %s", string(trimmed)) + url := fmt.Sprintf("%s/index/%s?rev=%s", c.apiEndpoint(), did, rev) + resp, err := c.Do(ctx, &request.RequestBuilder{ + Method: http.MethodDelete, + Url: url, + Headers: map[string]string{ + "Accept": "application/json", + }, + Token: c.token(), + }) + if err != nil { + return err } - if len(wrapped.Objects) == 0 { - return nil, fmt.Errorf("register response did not include objects") + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("failed to delete record %s: %s (status: %d)", did, string(body), resp.StatusCode) } - return wrapped.Objects, nil + + return nil } -func (c *IndexdClient) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { +func (c *DrsClient) DeleteRecordsByProject(ctx context.Context, projectId string) error { + recs, err := c.ListObjectsByProject(ctx, projectId) + if err != nil { + return err + } + + ids := make([]string, 0, 128) + seen := make(map[string]struct{}) + for rec := range recs { + if rec.Error != nil { + return rec.Error + } + + if rec.Object == nil || rec.Object.Id == "" { + continue + } + if _, ok := seen[rec.Object.Id]; ok { + continue + } + seen[rec.Object.Id] = struct{}{} + ids = append(ids, rec.Object.Id) + } + + for _, id := range ids { + err := c.DeleteRecord(ctx, id) + if err != nil { + if strings.Contains(err.Error(), "404") { + continue + } + c.logger.Error(fmt.Sprintf("DeleteRecordsByProject Error for %s: %v", id, err)) + continue + } + } + return nil +} + +func (c *DrsClient) getInternalRecordByDID(ctx context.Context, did string) (*OutputInfo, error) { + url := fmt.Sprintf("%s/index/%s", 
c.apiEndpoint(), did) + resp, err := c.Do(ctx, &request.RequestBuilder{ + Method: http.MethodGet, + Url: url, + Token: c.token(), + }) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("failed to get internal record %s: %s (status: %d)", did, string(body), resp.StatusCode) + } + + var info OutputInfo + if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { + return nil, err + } + return &info, nil +} + +func (c *DrsClient) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]DRSObject, error) { if len(hashes) == 0 { return nil, nil } @@ -620,7 +616,7 @@ func (c *IndexdClient) BatchGetObjectsByHash(ctx context.Context, hashes []strin return nil, err } - url := fmt.Sprintf("%s/index/bulk/hashes", c.cred.APIEndpoint) + url := fmt.Sprintf("%s/index/bulk/hashes", c.apiEndpoint()) resp, err := c.Do(ctx, &request.RequestBuilder{ Method: http.MethodPost, Url: url, @@ -629,7 +625,7 @@ func (c *IndexdClient) BatchGetObjectsByHash(ctx context.Context, hashes []strin "Content-Type": "application/json", "Accept": "application/json", }, - Token: c.cred.AccessToken, + Token: c.token(), }) if err != nil { return nil, err @@ -646,15 +642,15 @@ func (c *IndexdClient) BatchGetObjectsByHash(ctx context.Context, hashes []strin return nil, err } - result := make(map[string][]drs.DRSObject) + result := make(map[string][]DRSObject) for _, r := range list.Records { - drsObj, err := r.ToIndexdRecord().ToDrsObject() + drsObj, err := r.ToDrsObject() if err != nil { continue } - // Group by hash. We use the SHA256 as the key. 
- if drsObj.Checksums.SHA256 != "" { - result[drsObj.Checksums.SHA256] = append(result[drsObj.Checksums.SHA256], *drsObj) + hInfo := hash.ConvertDrsChecksumsToHashInfo(drsObj.Checksums) + if hInfo.SHA256 != "" { + result[hInfo.SHA256] = append(result[hInfo.SHA256], *drsObj) } } @@ -674,3 +670,18 @@ func appendUnique(existing []string, toAdd []string) []string { } return existing } + +// BuildDrsObj matches git-drs behavior but moved to core +func (c *DrsClient) BuildDrsObj(fileName string, checksum string, size int64, drsId string) (*DRSObject, error) { + return BuildDrsObj(fileName, checksum, size, drsId, c.GetBucketName(), c.GetOrganization(), c.GetProjectId()) +} + +// RegisterFile matches git-drs behavior but moved to core +func (c *DrsClient) RegisterFile(ctx context.Context, oid string, path string) (*DRSObject, error) { + // Base implementation without LFS specifics + return nil, fmt.Errorf("RegisterFile needs specific implementation (e.g. for LFS or cloud)") +} + +func (c *DrsClient) DownloadFile(ctx context.Context, oid string, destPath string) error { + return fmt.Errorf("DownloadFile implementation moved to high-level client") +} diff --git a/drs/convert.go b/drs/convert.go new file mode 100644 index 0000000..6a45938 --- /dev/null +++ b/drs/convert.go @@ -0,0 +1,107 @@ +package drs + +import ( + "fmt" + "net/url" + + "github.com/calypr/data-client/apigen/internalapi" + "github.com/calypr/data-client/hash" +) + +// InternalRecordFromDrsObject conversion purposes +func InternalRecordFromDrsObject(drsObj *DRSObject) (*InternalRecord, error) { + hashesMap := hash.ConvertDrsChecksumsToMap(drsObj.Checksums) + + indexdObj := &InternalRecord{ + InternalRecord: internalapi.InternalRecord{ + Did: internalapi.PtrString(drsObj.Id), + Size: internalapi.PtrInt64(drsObj.Size), + FileName: drsObj.Name, + Urls: InternalURLFromDrsAccessURLs(drsObj.AccessMethods), + Authz: InternalAuthzFromDrsAccessMethods(drsObj.AccessMethods), + Hashes: &hashesMap, + }, + } + return 
indexdObj, nil +} + +func getVal[T any](p *T) T { + if p == nil { + var zero T + return zero + } + return *p +} + +func InternalRecordToDrsObject(indexdObj *InternalRecord) (*DRSObject, error) { + authz := indexdObj.Authz + urls := indexdObj.Urls + + accessMethods, err := DRSAccessMethodsFromInternalURLs(urls, authz) + if err != nil { + return nil, err + } + + res := &DRSObject{ + Id: getVal(indexdObj.Did), + Size: getVal(indexdObj.Size), + Name: indexdObj.FileName, + AccessMethods: accessMethods, + } + + if indexdObj.Hashes != nil { + res.Checksums = hash.ConvertMapToDrsChecksums(*indexdObj.Hashes) + } + + return res, nil +} + +func DRSAccessMethodsFromInternalURLs(urls []string, authz []string) ([]AccessMethod, error) { + var accessMethods []AccessMethod + for _, urlString := range urls { + var method AccessMethod + method.AccessUrl = &AccessURL{Url: urlString} + + parsed, err := url.Parse(urlString) + if err != nil { + return nil, fmt.Errorf("failed to parse url %q: %v", urlString, err) + } + if parsed.Scheme == "" { + method.Type = "https" + } else { + method.Type = parsed.Scheme + } + + if len(authz) > 0 { + method.Authorizations = &Authorizations{BearerAuthIssuers: []string{authz[0]}} + } + accessMethods = append(accessMethods, method) + } + return accessMethods, nil +} + +// InternalAuthzFromDrsAccessMethods extracts authz values from DRS access methods +func InternalAuthzFromDrsAccessMethods(accessMethods []AccessMethod) []string { + var authz []string + for _, drsURL := range accessMethods { + if drsURL.Authorizations != nil && len(drsURL.Authorizations.BearerAuthIssuers) > 0 { + authz = append(authz, drsURL.Authorizations.BearerAuthIssuers[0]) + } + } + return authz +} + +func InternalURLFromDrsAccessURLs(accessMethods []AccessMethod) []string { + var urls []string + for _, drsURL := range accessMethods { + if drsURL.AccessUrl != nil { + urls = append(urls, drsURL.AccessUrl.Url) + } + } + return urls +} + +// ToDrsObject converts an InternalRecordResponse 
(OutputInfo) to a DRSObject +func (outputInfo *OutputInfo) ToDrsObject() (*DRSObject, error) { + return InternalRecordToDrsObject(outputInfo.ToInternalRecord()) +} diff --git a/drs/drs.go b/drs/drs.go index 2087b8c..d86f4d9 100644 --- a/drs/drs.go +++ b/drs/drs.go @@ -4,7 +4,6 @@ import ( "fmt" "strings" - "github.com/calypr/data-client/hash" "github.com/google/uuid" ) @@ -129,15 +128,18 @@ func BuildDrsObjWithPrefix(fileName string, checksum string, size int64, drsId s drsObj := DRSObject{ Id: drsId, - Name: fileName, + Name: &fileName, AccessMethods: []AccessMethod{{ Type: "s3", - AccessURL: AccessURL{ - URL: fileURL, + AccessUrl: &AccessURL{ + Url: fileURL, }, Authorizations: &authorizations, }}, - Checksums: hash.HashInfo{SHA256: checksum}, + Checksums: []Checksum{{ + Type: "sha256", + Checksum: checksum, + }}, Size: size, } @@ -148,36 +150,32 @@ func BuildDrsObjWithPrefix(fileName string, checksum string, size int64, drsId s // This is needed because the server expects checksums as an array of Checksum objects, // while DRSObject uses HashInfo (which marshals to the correct format but has different Go types). 
func ConvertToCandidate(obj *DRSObject) DRSObjectCandidate { - // Convert HashInfo to []Checksum - var checksums []Checksum - if obj.Checksums.MD5 != "" { - checksums = append(checksums, Checksum{Type: hash.ChecksumTypeMD5, Checksum: NormalizeOid(obj.Checksums.MD5)}) - } - if obj.Checksums.SHA != "" { - checksums = append(checksums, Checksum{Type: hash.ChecksumTypeSHA1, Checksum: NormalizeOid(obj.Checksums.SHA)}) - } - if obj.Checksums.SHA256 != "" { - checksums = append(checksums, Checksum{Type: hash.ChecksumTypeSHA256, Checksum: NormalizeOid(obj.Checksums.SHA256)}) + var name string + if obj.Name != nil { + name = *obj.Name } - if obj.Checksums.SHA512 != "" { - checksums = append(checksums, Checksum{Type: hash.ChecksumTypeSHA512, Checksum: NormalizeOid(obj.Checksums.SHA512)}) + var version string + if obj.Version != nil { + version = *obj.Version } - if obj.Checksums.CRC != "" { - checksums = append(checksums, Checksum{Type: hash.ChecksumTypeCRC32C, Checksum: NormalizeOid(obj.Checksums.CRC)}) + var mimeType string + if obj.MimeType != nil { + mimeType = *obj.MimeType } - if obj.Checksums.ETag != "" { - checksums = append(checksums, Checksum{Type: hash.ChecksumTypeETag, Checksum: NormalizeOid(obj.Checksums.ETag)}) + var description string + if obj.Description != nil { + description = *obj.Description } return DRSObjectCandidate{ - Name: obj.Name, + Name: name, Size: obj.Size, - Version: obj.Version, - MimeType: obj.MimeType, - Checksums: checksums, + Version: version, + MimeType: mimeType, + Checksums: obj.Checksums, AccessMethods: obj.AccessMethods, - Contents: obj.Contents, - Description: obj.Description, + Contents: nil, // ContentsObject in gen is different + Description: description, Aliases: obj.Aliases, } } diff --git a/drs/interface.go b/drs/interface.go new file mode 100644 index 0000000..babcf4d --- /dev/null +++ b/drs/interface.go @@ -0,0 +1,54 @@ +package drs + +import ( + "context" + + "github.com/calypr/data-client/hash" + 
"github.com/calypr/data-client/request" +) + +// Client is the primary interface for interacting with a Calypr DRS server. +// It replaces the legacy IndexdInterface with a modern DRS-first API. +type Client interface { + request.RequestInterface + + // Metadata retrieval + GetObject(ctx context.Context, id string) (*DRSObject, error) + GetObjectByHash(ctx context.Context, checksum *hash.Checksum) ([]DRSObject, error) + BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]DRSObject, error) + + // Listing + ListObjects(ctx context.Context) (chan DRSObjectResult, error) + ListObjectsByProject(ctx context.Context, projectId string) (chan DRSObjectResult, error) + GetProjectSample(ctx context.Context, projectId string, limit int) ([]DRSObject, error) + + // Mutations + RegisterRecord(ctx context.Context, record *DRSObject) (*DRSObject, error) + RegisterRecords(ctx context.Context, records []*DRSObject) ([]*DRSObject, error) + UpdateRecord(ctx context.Context, updateInfo *DRSObject, did string) (*DRSObject, error) + DeleteRecord(ctx context.Context, did string) error + DeleteRecordsByProject(ctx context.Context, projectId string) error + + // Download/URL resolution + GetDownloadURL(ctx context.Context, id string, accessType string) (*AccessURL, error) + + // Extensions + // Add an object storage URL to an existing record + AddURL(ctx context.Context, blobURL, sha256 string, opts ...AddURLOption) (*DRSObject, error) + + // Utility operations + UpsertRecord(ctx context.Context, url string, sha256 string, fileSize int64, projectId string) (*DRSObject, error) + BuildDrsObj(fileName string, checksum string, size int64, drsId string) (*DRSObject, error) + + // Runtime context info + GetProjectId() string + GetBucketName() string + GetOrganization() string + + // Fluent configuration + WithProject(projectId string) Client + WithOrganization(organization string) Client + WithBucket(bucketName string) Client +} + +type AddURLOption func(map[string]any) diff 
--git a/drs/internal_types.go b/drs/internal_types.go new file mode 100644 index 0000000..906b5c3 --- /dev/null +++ b/drs/internal_types.go @@ -0,0 +1,107 @@ +package drs + +import ( + "time" + + "github.com/calypr/data-client/apigen/internalapi" + "github.com/calypr/data-client/hash" +) + +// Internal compatibility types for Internal DRS servers. +// These are used internally by DrsClient to communicate with the server's /index and /ga4gh endpoints. + +type OutputObject struct { + Id string `json:"id"` + Name string `json:"name"` + SelfURI string `json:"self_uri,omitempty"` + Size int64 `json:"size"` + CreatedTime string `json:"created_time,omitempty"` + UpdatedTime string `json:"updated_time,omitempty"` + Version string `json:"version,omitempty"` + MimeType string `json:"mime_type,omitempty"` + Checksums []hash.Checksum `json:"checksums"` + AccessMethods []AccessMethod `json:"access_methods"` + Contents []Contents `json:"contents,omitempty"` + Description string `json:"description,omitempty"` + Aliases []string `json:"aliases,omitempty"` +} + +func ConvertOutputObjectToDRSObject(in *OutputObject) *DRSObject { + if in == nil { + return nil + } + + drsChecksums := make([]Checksum, len(in.Checksums)) + for i, c := range in.Checksums { + drsChecksums[i] = Checksum{ + Checksum: c.Checksum, + Type: string(c.Type), + } + } + + createdTime, _ := time.Parse(time.RFC3339, in.CreatedTime) + var updatedTimePtr *time.Time + if ut, err := time.Parse(time.RFC3339, in.UpdatedTime); err == nil { + updatedTimePtr = &ut + } + + return &DRSObject{ + Id: in.Id, + Name: internalapi.PtrString(in.Name), + SelfUri: in.SelfURI, + Size: in.Size, + CreatedTime: createdTime, + UpdatedTime: updatedTimePtr, + Version: internalapi.PtrString(in.Version), + MimeType: internalapi.PtrString(in.MimeType), + Checksums: drsChecksums, + AccessMethods: in.AccessMethods, + Contents: in.Contents, + Description: internalapi.PtrString(in.Description), + Aliases: in.Aliases, + } +} + +// InternalRecord 
embeds InternalRecord for backward compatibility +type InternalRecord struct { + internalapi.InternalRecord +} + +type ListRecords struct { + Records []OutputInfo `json:"records"` +} + +type OutputInfo struct { + internalapi.InternalRecordResponse +} + +// InternalRecordForm is used for legacy /index registration +type InternalRecordForm struct { + InternalRecord + Form string `json:"form"` + Rev string `json:"rev,omitempty"` +} + +func (outputInfo *OutputInfo) ToInternalRecord() *InternalRecord { + return &InternalRecord{ + InternalRecord: internalapi.InternalRecord{ + Did: outputInfo.Did, + Size: outputInfo.Size, + FileName: outputInfo.FileName, + Urls: outputInfo.Urls, + Authz: outputInfo.Authz, + Hashes: outputInfo.Hashes, + }, + } +} + +// UpdateInputInfo is the put object for index records +type UpdateInputInfo struct { + FileName *string `json:"file_name,omitempty"` + Metadata map[string]any `json:"metadata,omitempty"` + URLsMetadata map[string]any `json:"urls_metadata,omitempty"` + Version *string `json:"version,omitempty"` + URLs []string `json:"urls,omitempty"` + ACL []string `json:"acl,omitempty"` + Authz []string `json:"authz,omitempty"` +} diff --git a/drs/object_builder.go b/drs/object_builder.go index 82a1b81..a653085 100644 --- a/drs/object_builder.go +++ b/drs/object_builder.go @@ -4,7 +4,6 @@ import ( "fmt" "strings" - "github.com/calypr/data-client/hash" ) type ObjectBuilder struct { @@ -60,14 +59,19 @@ func (b ObjectBuilder) Build(fileName string, checksum string, size int64, drsID drsObj := DRSObject{ Id: drsID, - Name: fileName, + Name: &fileName, AccessMethods: []AccessMethod{{ - Type: accessType, - AccessURL: AccessURL{URL: fileURL}, + Type: accessType, + AccessUrl: &AccessURL{ + Url: fileURL, + }, Authorizations: &authorizations, }}, - Checksums: hash.HashInfo{SHA256: checksum}, - Size: size, + Checksums: []Checksum{{ + Type: "sha256", + Checksum: checksum, + }}, + Size: size, } return &drsObj, nil diff --git a/drs/object_builder_test.go 
b/drs/object_builder_test.go index b02a385..4b7136e 100644 --- a/drs/object_builder_test.go +++ b/drs/object_builder_test.go @@ -18,10 +18,10 @@ func TestObjectBuilderBuildSuccess(t *testing.T) { if obj.Id != "did-1" { t.Fatalf("unexpected Id: %s", obj.Id) } - if obj.Name != "file.txt" { - t.Fatalf("unexpected Name: %s", obj.Name) + if *obj.Name != "file.txt" { + t.Fatalf("unexpected Name: %s", *obj.Name) } - if obj.Checksums.SHA256 != "sha-256" { + if obj.Checksums[0].Checksum != "sha-256" { t.Fatalf("unexpected checksum: %v", obj.Checksums) } if obj.Size != 12 { @@ -30,8 +30,8 @@ func TestObjectBuilderBuildSuccess(t *testing.T) { if len(obj.AccessMethods) != 1 { t.Fatalf("expected 1 access method, got %d", len(obj.AccessMethods)) } - if !strings.Contains(obj.AccessMethods[0].AccessURL.URL, "bucket/test/project/sha-256") { - t.Fatalf("unexpected access URL: %s", obj.AccessMethods[0].AccessURL.URL) + if !strings.Contains(obj.AccessMethods[0].AccessUrl.Url, "bucket/test/project/sha-256") { + t.Fatalf("unexpected access URL: %s", obj.AccessMethods[0].AccessUrl.Url) } if len(obj.Aliases) != 0 { t.Fatalf("expected no aliases, got: %#v", obj.Aliases) diff --git a/drs/resolve.go b/drs/resolve.go new file mode 100644 index 0000000..44ff31f --- /dev/null +++ b/drs/resolve.go @@ -0,0 +1,62 @@ +package drs + +import ( + "context" + "fmt" + "strings" + + "github.com/calypr/data-client/hash" +) + +// ResolveObject centralizes object-id vs checksum resolution logic. 
+func ResolveObject(ctx context.Context, client Client, guid string) (*DRSObject, error) { + if oid := NormalizeOid(guid); oid != "" { + if cached, ok := PrefetchedBySHA(ctx, oid); ok { + obj := cached + return &obj, nil + } + if recs, err := client.GetObjectByHash(ctx, &hash.Checksum{Type: "sha256", Checksum: oid}); err == nil && len(recs) > 0 { + return &recs[0], nil + } + } + return client.GetObject(ctx, guid) +} + +// ResolveDownloadURL resolves access method and object id when caller does not already provide a concrete access id. +func ResolveDownloadURL(ctx context.Context, client Client, guid string, accessID string) (string, error) { + obj, err := ResolveObject(ctx, client, guid) + if err != nil { + return "", err + } + + resolvedID := strings.TrimSpace(obj.Id) + if resolvedID == "" { + resolvedID = guid + } + + if accessID == "" { + for _, am := range obj.AccessMethods { + if am.AccessId != nil && *am.AccessId != "" { + accessID = *am.AccessId + break + } + } + if accessID == "" { + for _, am := range obj.AccessMethods { + if am.AccessUrl != nil && am.AccessUrl.Url != "" { + return am.AccessUrl.Url, nil + } + } + return "", fmt.Errorf("no suitable access method found for object %s", guid) + } + } + + accessURL, err := client.GetDownloadURL(ctx, resolvedID, accessID) + if err != nil { + return "", err + } + if accessURL == nil || accessURL.Url == "" { + return "", fmt.Errorf("empty access URL for object %s", guid) + } + return accessURL.Url, nil +} diff --git a/drs/server_client.go b/drs/server_client.go new file mode 100644 index 0000000..2e6df3c --- /dev/null +++ b/drs/server_client.go @@ -0,0 +1,23 @@ +package drs + +import "github.com/calypr/data-client/transfer" + +// ServerClient composes DRS metadata operations and transfer operations +// against the same server endpoint/runtime mode. 
+type ServerClient interface { + Client + transfer.Backend +} + +type composedServerClient struct { + Client + transfer.Backend +} + +func ComposeServerClient(c Client, b transfer.Backend) ServerClient { + return &composedServerClient{ + Client: c, + Backend: b, + } +} + diff --git a/drs/types.go b/drs/types.go index 218e399..a41e1d6 100644 --- a/drs/types.go +++ b/drs/types.go @@ -1,32 +1,19 @@ package drs -import "github.com/calypr/data-client/hash" +import ( + "github.com/calypr/data-client/apigen/drs" + "github.com/calypr/data-client/hash" +) -type ChecksumType = hash.ChecksumType -type Checksum = hash.Checksum +type ChecksumType = string +type Checksum = drs.Checksum type HashInfo = hash.HashInfo -type AccessURL struct { - URL string `json:"url"` - Headers []string `json:"headers"` -} - -type Authorizations struct { - BearerAuthIssuers []string `json:"bearer_auth_issuers,omitempty"` -} +type AccessURL = drs.AccessMethodAccessUrl +type Authorizations = drs.AccessMethodAuthorizations +type AccessMethod = drs.AccessMethod -type AccessMethod struct { - Type string `json:"type"` - AccessURL AccessURL `json:"access_url"` - AccessID string `json:"access_id,omitempty"` - Cloud string `json:"cloud,omitempty"` - Region string `json:"region,omitempty"` - Available string `json:"available,omitempty"` - Authorizations *Authorizations `json:"authorizations,omitempty"` -} - -type Contents struct { -} +type Contents = drs.ContentsObject type DRSPage struct { DRSObjects []DRSObject `json:"drs_objects"` @@ -37,21 +24,7 @@ type DRSObjectResult struct { Error error } -type DRSObject struct { - Id string `json:"id,omitempty"` - Name string `json:"name"` - SelfURI string `json:"self_uri,omitempty"` - Size int64 `json:"size"` - CreatedTime string `json:"created_time,omitempty"` - UpdatedTime string `json:"updated_time,omitempty"` - Version string `json:"version,omitempty"` - MimeType string `json:"mime_type,omitempty"` - Checksums hash.HashInfo `json:"checksums"` - AccessMethods 
[]AccessMethod `json:"access_methods"` - Contents []Contents `json:"contents,omitempty"` - Description string `json:"description,omitempty"` - Aliases []string `json:"aliases,omitempty"` -} +type DRSObject = drs.DrsObject // DRSObjectCandidate represents a DRS object candidate for registration. // This matches the server's expected format where checksums is an array of Checksum objects. diff --git a/drs/upsert.go b/drs/upsert.go new file mode 100644 index 0000000..70cf0a6 --- /dev/null +++ b/drs/upsert.go @@ -0,0 +1,61 @@ +package drs + +import ( + "context" + "fmt" + "slices" + + "github.com/calypr/data-client/hash" +) + +// UpsertRecord creates or updates a record with a new URL. +func (c *DrsClient) UpsertRecord(ctx context.Context, url string, sha256 string, fileSize int64, projectId string) (*DRSObject, error) { + sha256 = NormalizeOid(sha256) + + // Query current state + records, err := c.GetObjectByHash(ctx, &hash.Checksum{Type: hash.ChecksumTypeSHA256, Checksum: sha256}) + if err != nil { + return nil, fmt.Errorf("error querying DRS server: %v", err) + } + + var matchingRecord *DRSObject + for i := range records { + // Match by checksum content identity + if hash.ConvertDrsChecksumsToHashInfo(records[i].Checksums).SHA256 == sha256 { + matchingRecord = &records[i] + break + } + } + + if matchingRecord != nil { + existingURLs := InternalURLFromDrsAccessURLs(matchingRecord.AccessMethods) + if slices.Contains(existingURLs, url) { + return matchingRecord, nil + } + + c.logger.Debug("updating existing record with new url") + updatedRecord := DRSObject{AccessMethods: []AccessMethod{{AccessUrl: &AccessURL{Url: url}}}} + return c.UpdateRecord(ctx, &updatedRecord, matchingRecord.Id) + } + + // If no record exists, create one + c.logger.Debug("creating new record") + uuid := GenerateDrsID(projectId, sha256) + + // Use simplified BuildDrsObj (helper in same package) + drsObj, err := BuildDrsObj("", sha256, fileSize, uuid, c.GetBucketName(), c.GetOrganization(), 
projectId) + if err != nil { + return nil, err + } + + return c.RegisterRecord(ctx, drsObj) +} + +// Internal methods to support specialized behaviors from git-drs +// (These can be overridden or extended) + +func (c *DrsClient) AddURL(ctx context.Context, blobURL, sha256 string, opts ...AddURLOption) (*DRSObject, error) { + // Simple wrapper for UpsertRecord for now, but allows for more complex logic if needed + // In a real implementation, this would handle cloud inspection too. + return c.UpsertRecord(ctx, blobURL, sha256, 0, c.GetProjectId()) +} diff --git a/fence/client_test.go b/fence/client_test.go index 9b01203..1baa09e 100644 --- a/fence/client_test.go +++ b/fence/client_test.go @@ -61,6 +61,7 @@ func (m *mockFenceServer) handler(t *testing.T) http.HandlerFunc { S3Buckets: map[string]*S3Bucket{ "test-bucket": { EndpointURL: "https://s3.amazonaws.com", + Provider: "s3", Region: "us-east-1", }, }, @@ -173,6 +174,9 @@ func TestFenceClient_GetBucketDetails(t *testing.T) { if info.Region != "us-east-1" { t.Errorf("expected region us-east-1, got %s", info.Region) } + if info.Provider != "s3" { + t.Errorf("expected provider s3, got %s", info.Provider) + } info, err = client.GetBucketDetails(context.Background(), "unknown-bucket") if err != nil { diff --git a/fence/types.go b/fence/types.go index 2352dbb..ef4956b 100644 --- a/fence/types.go +++ b/fence/types.go @@ -43,6 +43,7 @@ type MultipartCompleteRequestObject struct { type S3Bucket struct { EndpointURL string `json:"endpoint_url"` + Provider string `json:"provider,omitempty"` Programs []string `json:"programs,omitempty"` Region string `json:"region"` } diff --git a/g3client/client.go b/g3client/client.go index 868c451..115374b 100644 --- a/g3client/client.go +++ b/g3client/client.go @@ -7,12 +7,15 @@ import ( "strings" "github.com/calypr/data-client/conf" + "github.com/calypr/data-client/credentials" + "github.com/calypr/data-client/drs" "github.com/calypr/data-client/fence" - 
"github.com/calypr/data-client/indexd" "github.com/calypr/data-client/logs" "github.com/calypr/data-client/request" "github.com/calypr/data-client/requestor" "github.com/calypr/data-client/sower" + "github.com/calypr/data-client/transfer" + gen3signer "github.com/calypr/data-client/transfer/signer/gen3" version "github.com/hashicorp/go-version" ) @@ -20,13 +23,12 @@ import ( type Gen3Interface interface { request.RequestInterface - GetCredential() *conf.Credential Logger() *logs.Gen3Logger - ExportCredential(ctx context.Context, cred *conf.Credential) error - Fence() fence.FenceInterface - Indexd() indexd.IndexdInterface - Sower() sower.SowerInterface - Requestor() requestor.RequestorInterface + Credentials() credentials.Manager + DRSClient() drs.ServerClient + FenceClient() fence.FenceInterface + RequestorClient() requestor.RequestorInterface + SowerClient() sower.SowerInterface } func NewGen3InterfaceFromCredential(cred *conf.Credential, logger *logs.Gen3Logger, opts ...Option) Gen3Interface { @@ -66,7 +68,7 @@ func (g *Gen3Client) initializeClients() { g.fence = fence.NewFenceClient(g.RequestInterface, g.credential, g.logger.Logger) } if shouldInit(IndexdClient) { - g.indexd = indexd.NewIndexdClient(g.RequestInterface, g.credential, g.logger.Logger) + g.indexd = drs.NewDrsClient(g.RequestInterface, g.credential, g.logger.Logger) } if shouldInit(SowerClient) { g.sower = sower.NewSowerClient(g.RequestInterface, g.credential.APIEndpoint) @@ -79,14 +81,17 @@ func (g *Gen3Client) initializeClients() { type Gen3Client struct { Ctx context.Context fence fence.FenceInterface - indexd indexd.IndexdInterface + indexd drs.Client + server drs.ServerClient sower sower.SowerInterface requestor requestor.RequestorInterface config conf.ManagerInterface request.RequestInterface credential *conf.Credential + creds credentials.Manager logger *logs.Gen3Logger + transfer transfer.Backend requestedClients []ClientType } @@ -108,31 +113,38 @@ func WithClients(clients ...ClientType) 
Option { } } -func (g *Gen3Client) Fence() fence.FenceInterface { - return g.fence -} - -func (g *Gen3Client) Indexd() indexd.IndexdInterface { - return g.indexd +func (g *Gen3Client) DRSClient() drs.ServerClient { + if g.server != nil { + return g.server + } + if g.transfer != nil { + g.server = drs.ComposeServerClient(g.indexd, g.transfer) + return g.server + } + if g.fence == nil { + g.fence = fence.NewFenceClient(g.RequestInterface, g.credential, g.logger.Logger) + } + if g.indexd == nil { + g.indexd = drs.NewDrsClient(g.RequestInterface, g.credential, g.logger.Logger) + } + g.transfer = transfer.New(g.RequestInterface, g.logger, gen3signer.New(g.RequestInterface, g.credential, g.indexd, g.fence)) + g.server = drs.ComposeServerClient(g.indexd, g.transfer) + return g.server } -func (g *Gen3Client) Sower() sower.SowerInterface { - return g.sower +func (g *Gen3Client) FenceClient() fence.FenceInterface { + return g.fence } -func (g *Gen3Client) Requestor() requestor.RequestorInterface { +func (g *Gen3Client) RequestorClient() requestor.RequestorInterface { return g.requestor } -func (g *Gen3Client) Logger() *logs.Gen3Logger { - return g.logger -} - -func (g *Gen3Client) GetCredential() *conf.Credential { - return g.credential +func (g *Gen3Client) SowerClient() sower.SowerInterface { + return g.sower } -func (g *Gen3Client) ExportCredential(ctx context.Context, cred *conf.Credential) error { +func (g *Gen3Client) exportCredential(ctx context.Context, cred *conf.Credential) error { if cred.Profile == "" { return fmt.Errorf("profile name is required") } @@ -188,6 +200,25 @@ func (g *Gen3Client) ExportCredential(ctx context.Context, cred *conf.Credential return nil } +type gen3Credentials struct { + client *Gen3Client +} + +func (c *gen3Credentials) Current() *conf.Credential { + return c.client.credential +} + +func (c *gen3Credentials) Export(ctx context.Context, cred *conf.Credential) error { + return c.client.exportCredential(ctx, cred) +} + +func (g *Gen3Client) 
Credentials() credentials.Manager { + if g.creds == nil { + g.creds = &gen3Credentials{client: g} + } + return g.creds +} + // EnsureValidCredential checks if the credential is valid and refreshes it if the access token is expired but the API key is valid. // It accepts an optional fClient; if nil, it will initialize one internally if needed for refresh. func EnsureValidCredential(ctx context.Context, cred *conf.Credential, config conf.ManagerInterface, logger *logs.Gen3Logger, fClient fence.FenceInterface) error { @@ -245,3 +276,4 @@ func NewGen3Interface(profile string, logger *logs.Gen3Logger, opts ...Option) ( return client, nil } +func (g *Gen3Client) Logger() *logs.Gen3Logger { return g.logger } diff --git a/ga4gh/data-repository-service-schemas b/ga4gh/data-repository-service-schemas new file mode 160000 index 0000000..935a209 --- /dev/null +++ b/ga4gh/data-repository-service-schemas @@ -0,0 +1 @@ +Subproject commit 935a20952e1071421c28d569b8c8e0e940bc001f diff --git a/go.mod b/go.mod index c39b763..e418828 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,8 @@ module github.com/calypr/data-client -go 1.24.2 +go 1.26.1 require ( - github.com/aws/aws-sdk-go-v2 v1.41.1 - github.com/aws/aws-sdk-go-v2/config v1.32.7 - github.com/aws/aws-sdk-go-v2/credentials v1.19.7 github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1 github.com/golang-jwt/jwt/v5 v5.3.0 github.com/google/uuid v1.6.0 @@ -15,16 +12,40 @@ require ( github.com/spf13/cobra v1.10.2 github.com/vbauerster/mpb/v8 v8.11.2 go.uber.org/mock v0.6.0 + gocloud.dev v0.45.0 golang.org/x/sync v0.19.0 gopkg.in/ini.v1 v1.67.0 + gopkg.in/validator.v2 v2.0.1 gopkg.in/yaml.v3 v3.0.1 ) require ( + cel.dev/expr v0.25.1 // indirect + cloud.google.com/go v0.123.0 // indirect + cloud.google.com/go/auth v0.17.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect + cloud.google.com/go/iam v1.5.3 // indirect + cloud.google.com/go/monitoring v1.24.3 // indirect 
+ cloud.google.com/go/storage v1.57.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest/to v0.4.1 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect + github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect + github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.12 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect @@ -38,13 +59,49 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect github.com/aws/smithy-go v1.24.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/clipperhouse/stringish v0.1.1 // indirect github.com/clipperhouse/uax29/v2 v2.3.0 // indirect + github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e // indirect + 
github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-jose/go-jose/v4 v4.1.3 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/google/s2a-go v0.1.9 // indirect + github.com/google/wire v0.7.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect + github.com/googleapis/gax-go/v2 v2.15.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mattn/go-runewidth v0.0.19 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/spf13/pflag v1.0.10 // indirect - github.com/stretchr/testify v1.11.1 // indirect - golang.org/x/sys v0.39.0 // indirect + github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/sdk v1.40.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.33.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.14.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // 
indirect + google.golang.org/api v0.256.0 // indirect + google.golang.org/genproto v0.0.0-20251124214823-79d6a2a48846 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251124214823-79d6a2a48846 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 // indirect + google.golang.org/grpc v1.77.0 // indirect + google.golang.org/protobuf v1.36.10 // indirect ) diff --git a/go.sum b/go.sum index d4cffb0..15f6d6d 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,53 @@ +cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= +cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= +cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= +cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= +cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= +cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/logging v1.13.1 h1:O7LvmO0kGLaHY/gq8cV7T0dyp6zJhYAOtZPX4TF3QtY= +cloud.google.com/go/logging v1.13.1/go.mod h1:XAQkfkMBxQRjQek96WLPNze7vsOmay9H5PqfsNYDqvw= +cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E= +cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY= +cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE= 
+cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI= +cloud.google.com/go/storage v1.57.2 h1:sVlym3cHGYhrp6XZKkKb+92I1V42ks2qKKpB0CF5Mb4= +cloud.google.com/go/storage v1.57.2/go.mod h1:n5ijg4yiRXXpCu0sJTD6k+eMf7GRrJmPyr9YxLXGHOk= +cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= +cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3 h1:ZJJNFaQ86GVKQ9ehwqyAFE6pIfyicpuJ8IkVaPBc6/4= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.3/go.mod h1:URuDvhmATVKqHBH9/0nOiNKk0+YcwfQ3WkK5PqHKxc8= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod 
h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/to v0.4.1 h1:CxNHBqdzTr7rLtdrtb5CMjJcDut+WNGCVv7OmS5+lTc= +github.com/Azure/go-autorest/autorest/to v0.4.1/go.mod h1:EtaofgU4zmtvn1zT2ARsjRFdq9vXx0YWtmElwL+GZ9M= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 h1:lhhYARPUu3LmHysQ/igznQphfzynnqI3D75oUyw1HXk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0/go.mod h1:l9rva3ApbBpEJxSNYnwT9N4CDLrWgtq3u8736C5hyJw= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.54.0 h1:xfK3bbi6F2RDtaZFtUdKO3osOBIhNb+xTs8lFW6yx9o= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.54.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 h1:s0WlVbf9qpvkh1c/uDAPElam0WrL7fHRIidgZJ7UqZI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod 
h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= @@ -12,6 +62,8 @@ github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUT github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.12 h1:Zy6Tme1AA13kX8x3CnkHx5cqdGWGaj/anwOiWGnA0Xo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.12/go.mod h1:ql4uXYKoTM9WUAUSmthY4AtPVrlTBZOvnBJTiCUdPxI= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik= @@ -40,19 +92,58 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/ github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ= github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= github.com/clipperhouse/uax29/v2 v2.3.0/go.mod 
h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= +github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e h1:gt7U1Igw0xbJdyaCM5H2CnlAlPSkzrhsebQB6WQWjLA= +github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= +github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= +github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= +github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod 
h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= +github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-replayers/grpcreplay v1.3.0 h1:1Keyy0m1sIpqstQmgz307zhiJ1pV4uIlFds5weTmxbo= +github.com/google/go-replayers/grpcreplay v1.3.0/go.mod h1:v6NgKtkijC0d3e3RW8il6Sy5sqRVUwoQa4mHOGEy8DI= +github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk= +github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/wire v0.7.0 h1:JxUKI6+CVBgCO2WToKy/nQk0sS+amI9z9EjVmdaocj4= +github.com/google/wire v0.7.0/go.mod h1:n6YbUQD9cPKTnHXEBN2DXlOp/mVADhVErcMFb0v3J18= +github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= +github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -68,34 +159,102 @@ github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bP github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= +github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.10 
h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= +github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/vbauerster/mpb/v8 v8.11.2 h1:OqLoHznUVU7SKS/WV+1dB5/hm20YLheYupiHhL5+M1Y= github.com/vbauerster/mpb/v8 v8.11.2/go.mod h1:mEB/M353al1a7wMUNtiymmPsEkGlJgeJmtlbY5adCJ8= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= +go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0 h1:YH4g8lQroajqUwWbq/tr2QX1JFmEXaDLgG+ew9bLMWo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0 h1:6VjV6Et+1Hd2iLZEPtdV7vie80Yyqf7oikJLjQ/myi0= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.37.0/go.mod h1:u8hcp8ji5gaM/RfcOo8z9NMnf1pVLfVY7lBY2VOGuUU= 
+go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +gocloud.dev v0.45.0 h1:WknIK8IbRdmynDvara3Q7G6wQhmEiOGwpgJufbM39sY= +gocloud.dev v0.45.0/go.mod h1:0kXKmkCLG6d31N7NyLZWzt7jDSQura9zD/mWgiB6THI= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo= +golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 
h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= +golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/api v0.256.0 h1:u6Khm8+F9sxbCTYNoBHg6/Hwv0N/i+V94MvkOSor6oI= +google.golang.org/api v0.256.0/go.mod h1:KIgPhksXADEKJlnEoRa9qAII4rXcy40vfI8HRqcU964= +google.golang.org/genproto v0.0.0-20251124214823-79d6a2a48846 h1:dDbsTLIK7EzwUq36kCSAsk0slouq/S0tWHeeGi97cD8= +google.golang.org/genproto v0.0.0-20251124214823-79d6a2a48846/go.mod h1:PP0g88Dz3C7hRAfbQCQggeWAXjuqGsNPLE4s7jh0RGU= +google.golang.org/genproto/googleapis/api v0.0.0-20251124214823-79d6a2a48846 h1:ZdyUkS9po3H7G0tuh955QVyyotWvOD4W0aEapeGeUYk= +google.golang.org/genproto/googleapis/api v0.0.0-20251124214823-79d6a2a48846/go.mod h1:Fk4kyraUvqD7i5H6S43sj2W98fbZa75lpZz/eUyhfO0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 h1:Wgl1rcDNThT+Zn47YyCXOXyX/COgMTIdhJ717F0l4xk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc 
v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY= +gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/hash/hash.go b/hash/hash.go index 566a4f0..eaf2250 100644 --- a/hash/hash.go +++ b/hash/hash.go @@ -3,6 +3,7 @@ package hash import ( "encoding/json" "fmt" + "github.com/calypr/data-client/apigen/drs" ) // ChecksumType represents the digest method used to create the checksum @@ -142,3 +143,27 @@ func ConvertChecksumsToHashInfo(checksums []Checksum) HashInfo { checksumMap := ConvertChecksumsToMap(checksums) return ConvertStringMapToHashInfo(checksumMap) } + +func ConvertDrsChecksumsToMap(checksums []drs.Checksum) map[string]string { + result := make(map[string]string, len(checksums)) + for _, c := range checksums { + result[c.Type] = c.Checksum + } + return result +} + +func ConvertDrsChecksumsToHashInfo(checksums []drs.Checksum) HashInfo { + checksumMap := ConvertDrsChecksumsToMap(checksums) + return ConvertStringMapToHashInfo(checksumMap) +} + +func ConvertMapToDrsChecksums(hashes map[string]string) []drs.Checksum { + result := 
make([]drs.Checksum, 0, len(hashes)) + for t, c := range hashes { + result = append(result, drs.Checksum{ + Type: t, + Checksum: c, + }) + } + return result +} diff --git a/indexd/client_test.go b/indexd/client_test.go deleted file mode 100644 index 7536742..0000000 --- a/indexd/client_test.go +++ /dev/null @@ -1,288 +0,0 @@ -package indexd - -import ( - "context" - "encoding/json" - "io" - "net/http" - "net/http/httptest" - "strings" - "sync" - "testing" - - "github.com/calypr/data-client/conf" - drs "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/hash" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/request" -) - -type mockIndexdServer struct { - mu sync.Mutex - listProjectPages int - listObjectsPages int - lastUpdatePayload UpdateInputInfo -} - -func (m *mockIndexdServer) handler(t *testing.T) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - path := r.URL.Path - switch { - case r.Method == http.MethodGet && path == "/index": - if hashQuery := r.URL.Query().Get("hash"); hashQuery != "" { - record := sampleOutputInfo() - page := ListRecords{Records: []OutputInfo{record}} - w.WriteHeader(http.StatusOK) - _ = json.NewEncoder(w).Encode(page) - return - } - if r.URL.Query().Get("authz") != "" { - m.mu.Lock() - page := m.listProjectPages - m.listProjectPages++ - m.mu.Unlock() - w.WriteHeader(http.StatusOK) - if page == 0 { - _ = json.NewEncoder(w).Encode(ListRecords{Records: []OutputInfo{sampleOutputInfo()}}) - } else { - _ = json.NewEncoder(w).Encode(ListRecords{Records: []OutputInfo{}}) - } - return - } - - case r.Method == http.MethodPost && path == "/index": - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte(`{"did":"did-1"}`)) - return - case r.Method == http.MethodGet && strings.HasPrefix(path, "/ga4gh/drs/v1/objects"): - if path == "/ga4gh/drs/v1/objects" { - m.mu.Lock() - page := m.listObjectsPages - m.listObjectsPages++ - m.mu.Unlock() - w.WriteHeader(http.StatusOK) - if page == 0 { - 
_ = json.NewEncoder(w).Encode(drs.DRSPage{DRSObjects: []drs.DRSObject{sampleDRSObject()}}) - } else { - _ = json.NewEncoder(w).Encode(drs.DRSPage{DRSObjects: []drs.DRSObject{}}) - } - return - } - obj := sampleOutputObject() - w.WriteHeader(http.StatusOK) - _ = json.NewEncoder(w).Encode(obj) - return - case r.Method == http.MethodGet && strings.HasPrefix(path, "/index/"): - record := sampleOutputInfo() - record.Rev = "rev-1" - w.WriteHeader(http.StatusOK) - _ = json.NewEncoder(w).Encode(record) - return - case r.Method == http.MethodPut && strings.HasPrefix(path, "/index/"): - body, _ := io.ReadAll(r.Body) - payload := UpdateInputInfo{} - _ = json.Unmarshal(body, &payload) - m.mu.Lock() - m.lastUpdatePayload = payload - m.mu.Unlock() - w.WriteHeader(http.StatusOK) - return - case r.Method == http.MethodDelete && strings.HasPrefix(path, "/index/"): - w.WriteHeader(http.StatusNoContent) - return - } - w.WriteHeader(http.StatusNotFound) - } -} - -func sampleOutputInfo() OutputInfo { - return OutputInfo{ - Did: "did-1", - FileName: "file.txt", - URLs: []string{"s3://bucket/key"}, - Authz: []string{"/programs/test/projects/proj"}, - Hashes: hash.HashInfo{SHA256: "sha-256"}, - Size: 123, - } -} - -func sampleDRSObject() drs.DRSObject { - return drs.DRSObject{ - Id: "did-1", - Name: "file.txt", - Size: 123, - Checksums: hash.HashInfo{ - SHA256: "sha-256", - }, - AccessMethods: []drs.AccessMethod{ - { - Type: "s3", - AccessURL: drs.AccessURL{URL: "s3://bucket/key"}, - Authorizations: &drs.Authorizations{BearerAuthIssuers: []string{"/programs/test/projects/proj"}}, - }, - }, - } -} - -func sampleOutputObject() OutputObject { - return OutputObject{ - Id: "did-1", - Name: "file.txt", - Size: 123, - Checksums: []hash.Checksum{ - {Checksum: "sha-256", Type: hash.ChecksumTypeSHA256}, - }, - } -} - -func newTestClient(server *httptest.Server) IndexdInterface { - cred := &conf.Credential{APIEndpoint: server.URL, Profile: "test", AccessToken: "test-token"} - logger, _ := 
logs.New("test") - config := conf.NewConfigure(logger.Logger) - req := request.NewRequestInterface(logger, cred, config) - return NewIndexdClient(req, cred, logger.Logger) -} - -func TestIndexdClient_ListAndQueryDirect(t *testing.T) { - mock := &mockIndexdServer{} - server := httptest.NewServer(mock.handler(t)) - defer server.Close() - - client := newTestClient(server) - - records, err := client.GetObjectByHash(context.Background(), "sha256", "sha-256") - if err != nil { - t.Fatalf("GetObjectByHash error: %v", err) - } - if len(records) != 1 || records[0].Id != "did-1" { - t.Fatalf("unexpected records: %+v", records) - } - - objChan, err := client.ListObjectsByProject(context.Background(), "test-proj") - if err != nil { - t.Fatalf("ListObjectsByProject error: %v", err) - } - var found bool - for res := range objChan { - if res.Error != nil { - t.Fatalf("ListObjectsByProject result error: %v", res.Error) - } - if res.Object != nil && res.Object.Id == "did-1" { - found = true - } - } - if !found { - t.Fatalf("expected object from ListObjectsByProject") - } - - listChan, err := client.ListObjects(context.Background()) - if err != nil { - t.Fatalf("ListObjects error: %v", err) - } - var listCount int - for res := range listChan { - if res.Error != nil { - t.Fatalf("ListObjects result error: %v", res.Error) - } - if res.Object != nil { - listCount++ - } - } - if listCount != 1 { - t.Fatalf("expected 1 object from ListObjects, got %d", listCount) - } -} - -func TestIndexdClient_RegisterAndUpdateDirect(t *testing.T) { - mock := &mockIndexdServer{} - server := httptest.NewServer(mock.handler(t)) - defer server.Close() - - client := newTestClient(server) - - drsObj := &drs.DRSObject{ - Id: "did-1", - Name: "file.txt", - Size: 123, - Checksums: hash.HashInfo{SHA256: "sha-256"}, - AccessMethods: []drs.AccessMethod{ - { - Type: "s3", - AccessURL: drs.AccessURL{URL: "s3://bucket/key"}, - Authorizations: &drs.Authorizations{BearerAuthIssuers: 
[]string{"/programs/test/projects/proj"}}, - }, - }, - } - - obj, err := client.RegisterRecord(context.Background(), drsObj) - if err != nil { - t.Fatalf("RegisterRecord error: %v", err) - } - if obj.Id != "did-1" { - t.Fatalf("unexpected DRS object: %+v", obj) - } - - update := &drs.DRSObject{ - Name: "file-updated.txt", - Version: "v2", - Description: "updated", - AccessMethods: []drs.AccessMethod{ - { - Type: "s3", - AccessURL: drs.AccessURL{URL: "s3://bucket/other"}, - Authorizations: &drs.Authorizations{BearerAuthIssuers: []string{"/programs/test/projects/proj"}}, - }, - }, - } - - _, err = client.UpdateRecord(context.Background(), update, "did-1") - if err != nil { - t.Fatalf("UpdateRecord error: %v", err) - } - - mock.mu.Lock() - payload := mock.lastUpdatePayload - mock.mu.Unlock() - - if len(payload.URLs) != 2 { - t.Fatalf("expected URLs to include appended entries, got %+v", payload.URLs) - } -} - -func TestIndexdClient_GetObjectDirect(t *testing.T) { - mock := &mockIndexdServer{} - server := httptest.NewServer(mock.handler(t)) - defer server.Close() - - client := newTestClient(server) - - record, err := client.GetObject(context.Background(), "did-1") - if err != nil { - t.Fatalf("GetObject error: %v", err) - } - if record.Id != "did-1" { - t.Fatalf("unexpected record: %+v", record) - } -} - -func TestDecodeRegisteredObjects_Wrapped(t *testing.T) { - payload := []byte(`{"objects":[{"id":"did-1","name":"file.txt","size":123,"checksums":[{"type":"sha256","checksum":"sha-256"}],"access_methods":[{"type":"s3","access_url":{"url":"s3://bucket/key"}}]}]}`) - objs, err := decodeRegisteredObjects(payload) - if err != nil { - t.Fatalf("decodeRegisteredObjects wrapped payload error: %v", err) - } - if len(objs) != 1 { - t.Fatalf("expected 1 object, got %d", len(objs)) - } - if objs[0] == nil || objs[0].Id != "did-1" { - t.Fatalf("unexpected object: %+v", objs[0]) - } -} - -func TestDecodeRegisteredObjects_ArrayRejected(t *testing.T) { - payload := 
[]byte(`[{"id":"did-1","name":"file.txt","size":123,"checksums":[{"type":"sha256","checksum":"sha-256"}],"access_methods":[{"type":"s3","access_url":{"url":"s3://bucket/key"}}]}]`) - _, err := decodeRegisteredObjects(payload) - if err == nil { - t.Fatal("expected error for non-canonical array register response, got nil") - } -} diff --git a/indexd/convert.go b/indexd/convert.go deleted file mode 100644 index a786276..0000000 --- a/indexd/convert.go +++ /dev/null @@ -1,99 +0,0 @@ -package indexd - -// Conversion functions between drs.DRSObject and IndexdRecord - -import ( - "fmt" - "net/url" - - "github.com/calypr/data-client/drs" -) - -// IndexdRecordFromDrsObject represents a simplified version of an indexd record for conversion purposes -func IndexdRecordFromDrsObject(drsObj *drs.DRSObject) (*IndexdRecord, error) { - indexdObj := &IndexdRecord{ - Did: drsObj.Id, - Size: drsObj.Size, - FileName: drsObj.Name, - URLs: IndexdURLFromDrsAccessURLs(drsObj.AccessMethods), - Authz: IndexdAuthzFromDrsAccessMethods(drsObj.AccessMethods), - Hashes: drsObj.Checksums, - } - return indexdObj, nil -} - -func IndexdRecordToDrsObject(indexdObj *IndexdRecord) (*drs.DRSObject, error) { - accessMethods, err := DRSAccessMethodsFromIndexdURLs(indexdObj.URLs, indexdObj.Authz) - if err != nil { - return nil, err - } - for _, am := range accessMethods { - if am.Authorizations == nil || len(am.Authorizations.BearerAuthIssuers) == 0 { - return nil, fmt.Errorf("access method missing authorization %v, %v", indexdObj, indexdObj.Authz) - } - } - - return &drs.DRSObject{ - Id: indexdObj.Did, - Size: indexdObj.Size, - Name: indexdObj.FileName, - AccessMethods: accessMethods, - Checksums: indexdObj.Hashes, - }, nil -} - -func DRSAccessMethodsFromIndexdURLs(urls []string, authz []string) ([]drs.AccessMethod, error) { - var accessMethods []drs.AccessMethod - for _, urlString := range urls { - var method drs.AccessMethod - method.AccessURL = drs.AccessURL{URL: urlString} - - parsed, err := 
url.Parse(urlString) - if err != nil { - return nil, fmt.Errorf("failed to parse url %q: %v", urlString, err) - } - if parsed.Scheme == "" { - // default to https if no scheme or parse error - method.Type = "https" - } else { - method.Type = parsed.Scheme - } - - // check if authz is null or 0-length, then error - if authz == nil { - return nil, fmt.Errorf("authz is required") - } - - // NOTE: a record can only have 1 authz entry atm - method.Authorizations = &drs.Authorizations{BearerAuthIssuers: []string{authz[0]}} - accessMethods = append(accessMethods, method) - } - return accessMethods, nil -} - -// IndexdAuthzFromDrsAccessMethods extracts authz values from DRS access methods -func IndexdAuthzFromDrsAccessMethods(accessMethods []drs.AccessMethod) []string { - var authz []string - for _, drsURL := range accessMethods { - if drsURL.Authorizations != nil && len(drsURL.Authorizations.BearerAuthIssuers) > 0 { - authz = append(authz, drsURL.Authorizations.BearerAuthIssuers[0]) - } - } - return authz -} - -func IndexdURLFromDrsAccessURLs(accessMethods []drs.AccessMethod) []string { - var urls []string - for _, drsURL := range accessMethods { - urls = append(urls, drsURL.AccessURL.URL) - } - return urls -} - -func (inr *IndexdRecord) ToDrsObject() (*drs.DRSObject, error) { - o, err := IndexdRecordToDrsObject(inr) - if err != nil { - return nil, err - } - return o, nil -} diff --git a/indexd/records.go b/indexd/records.go deleted file mode 100644 index 7f03613..0000000 --- a/indexd/records.go +++ /dev/null @@ -1,97 +0,0 @@ -package indexd - -// https://github.com/uc-cdis/indexd/blob/master/openapis/swagger.yaml - -import ( - "github.com/calypr/data-client/hash" -) - -// subset of the OpenAPI spec for the InputInfo object in indexd -// TODO: make another object based on VersionInputInfo that has content_created_date and so can handle a POST of dates via indexd/ -type IndexdRecord struct { - // Unique identifier for the record (UUID) - Did string `json:"did"` - - // 
Human-readable file name - FileName string `json:"file_name,omitempty"` - - // List of URLs where the file can be accessed - URLs []string `json:"urls"` - - // Hashes of the file (e.g., md5, sha256) - Size int64 `json:"size"` - - // List of access control lists (ACLs) - ACL []string `json:"acl,omitempty"` - - // List of authorization policies - Authz []string `json:"authz,omitempty"` - - Hashes hash.HashInfo `json:"hashes,omitzero"` - - // Additional metadata as key-value pairs - Metadata map[string]string `json:"metadata,omitempty"` - - // Version of the record (optional) - Version string `json:"version,omitempty"` -} - -// create indexd record struct used for POSTs that is IndexdRecord with form field -type IndexdRecordForm struct { - IndexdRecord - Form string `json:"form"` - Rev string `json:"rev,omitempty"` -} - -type ListRecordsResult struct { - Record *OutputInfo - Error error -} - -type ListRecords struct { - IDs []string `json:"ids"` - Records []OutputInfo `json:"records"` - Size int64 `json:"size"` - Start int64 `json:"start"` - Limit int64 `json:"limit"` - FileName string `json:"file_name"` - URLs []string `json:"urls"` - ACL []string `json:"acl"` - Authz []string `json:"authz"` - Hashes hash.HashInfo `json:"hashes"` - Metadata map[string]any `json:"metadata"` - Version string `json:"version"` -} - -type OutputInfo struct { - Did string `json:"did"` - BaseID string `json:"baseid"` - Rev string `json:"rev"` - Form string `json:"form"` - Size int64 `json:"size"` - FileName string `json:"file_name"` - Version string `json:"version"` - Uploader string `json:"uploader"` - URLs []string `json:"urls"` - ACL []string `json:"acl"` - Authz []string `json:"authz"` - Hashes hash.HashInfo `json:"hashes"` - UpdatedDate string `json:"updated_date"` - CreatedDate string `json:"created_date"` - Metadata map[string]any `json:"metadata"` - URLsMetadata map[string]any `json:"urls_metadata"` -} - -func (outputInfo *OutputInfo) ToIndexdRecord() *IndexdRecord { - return 
&IndexdRecord{ - Did: outputInfo.Did, - Size: outputInfo.Size, - FileName: outputInfo.FileName, - URLs: outputInfo.URLs, - ACL: outputInfo.ACL, - Authz: outputInfo.Authz, - Hashes: outputInfo.Hashes, - //Metadata: outputInfo.Metadata, //TODO: re-enable metadata. One is map[string]string, the other is map[string]interface{} - Version: outputInfo.Version, - } -} diff --git a/indexd/types.go b/indexd/types.go deleted file mode 100644 index 54c601a..0000000 --- a/indexd/types.go +++ /dev/null @@ -1,70 +0,0 @@ -package indexd - -import ( - "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/hash" -) - -type OutputObject struct { - Id string `json:"id"` - Name string `json:"name"` - SelfURI string `json:"self_uri,omitempty"` - Size int64 `json:"size"` - CreatedTime string `json:"created_time,omitempty"` - UpdatedTime string `json:"updated_time,omitempty"` - Version string `json:"version,omitempty"` - MimeType string `json:"mime_type,omitempty"` - Checksums []hash.Checksum `json:"checksums"` - AccessMethods []drs.AccessMethod `json:"access_methods"` - Contents []drs.Contents `json:"contents,omitempty"` - Description string `json:"description,omitempty"` - Aliases []string `json:"aliases,omitempty"` -} - -func ConvertOutputObjectToDRSObject(in *OutputObject) *drs.DRSObject { - if in == nil { - return nil - } - - hashInfo := hash.ConvertChecksumsToHashInfo(in.Checksums) - - return &drs.DRSObject{ - Id: in.Id, - Name: in.Name, - SelfURI: in.SelfURI, - Size: in.Size, - CreatedTime: in.CreatedTime, - UpdatedTime: in.UpdatedTime, - Version: in.Version, - MimeType: in.MimeType, - Checksums: hashInfo, - AccessMethods: in.AccessMethods, - Contents: in.Contents, - Description: in.Description, - Aliases: in.Aliases, - } -} - -// UpdateInputInfo is the put object for index records -type UpdateInputInfo struct { - // Human-readable file name - FileName string `json:"file_name,omitempty"` - - // Additional metadata as key-value pairs - Metadata map[string]any 
`json:"metadata,omitempty"` - - // URL-specific metadata as key-value pairs - URLsMetadata map[string]any `json:"urls_metadata,omitempty"` - - // Version of the record - Version string `json:"version,omitempty"` - - // List of URLs where the file can be accessed - URLs []string `json:"urls,omitempty"` - - // List of access control lists (ACLs) - ACL []string `json:"acl,omitempty"` - - // List of authorization policies - Authz []string `json:"authz,omitempty"` -} diff --git a/indexd/types_test.go b/indexd/types_test.go deleted file mode 100644 index c81536c..0000000 --- a/indexd/types_test.go +++ /dev/null @@ -1,60 +0,0 @@ -package indexd - -import ( - "testing" - - "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/hash" -) - -func TestConvertOutputObjectToDRSObject(t *testing.T) { - out := &OutputObject{ - Id: "did-1", - Name: "file.txt", - SelfURI: "drs://server/did-1", - Size: 12345, - CreatedTime: "2023-01-01T00:00:00Z", - UpdatedTime: "2023-01-02T00:00:00Z", - Version: "v1", - MimeType: "text/plain", - Checksums: []hash.Checksum{ - {Type: hash.ChecksumTypeSHA256, Checksum: "sha256-hash"}, - {Type: hash.ChecksumTypeMD5, Checksum: "md5-hash"}, - }, - AccessMethods: []drs.AccessMethod{ - { - Type: "s3", - AccessURL: drs.AccessURL{ - URL: "s3://bucket/key", - }, - }, - }, - Description: "A test file", - Aliases: []string{"alias1"}, - } - - drsObj := ConvertOutputObjectToDRSObject(out) - - if drsObj.Id != out.Id { - t.Errorf("expected Id %s, got %s", out.Id, drsObj.Id) - } - if drsObj.Name != out.Name { - t.Errorf("expected Name %s, got %s", out.Name, drsObj.Name) - } - if drsObj.Size != out.Size { - t.Errorf("expected Size %d, got %d", out.Size, drsObj.Size) - } - // Verify Checksums conversion (slice to HashInfo) - if drsObj.Checksums.SHA256 != "sha256-hash" { - t.Errorf("expected SHA256 %s, got %s", "sha256-hash", drsObj.Checksums.SHA256) - } - if drsObj.Checksums.MD5 != "md5-hash" { - t.Errorf("expected MD5 %s, got %s", "md5-hash", 
drsObj.Checksums.MD5) - } - if len(drsObj.AccessMethods) != 1 { - t.Errorf("expected 1 access method, got %d", len(drsObj.AccessMethods)) - } - if drsObj.AccessMethods[0].AccessURL.URL != "s3://bucket/key" { - t.Errorf("expected access URL s3://bucket/key, got %s", drsObj.AccessMethods[0].AccessURL.URL) - } -} diff --git a/indexd/upsert.go b/indexd/upsert.go deleted file mode 100644 index 8fd52ff..0000000 --- a/indexd/upsert.go +++ /dev/null @@ -1,56 +0,0 @@ -package indexd - -import ( - "context" - "fmt" - "slices" - - "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/s3utils" -) - -// UpsertIndexdRecord creates or updates an indexd record with a new URL. -func (c *IndexdClient) UpsertIndexdRecord(ctx context.Context, url string, sha256 string, fileSize int64, projectId string) (*drs.DRSObject, error) { - sha256 = drs.NormalizeOid(sha256) - uuid := drs.DrsUUID(projectId, sha256) - - records, err := c.GetObjectByHash(ctx, "sha256", sha256) - if err != nil { - return nil, fmt.Errorf("error querying indexd server: %v", err) - } - - var matchingRecord *drs.DRSObject - for i := range records { - // Hard cutover: checksum is content identity; do not match by record ID shape. 
- if records[i].Checksums.SHA256 == sha256 { - matchingRecord = &records[i] - break - } - } - - if matchingRecord != nil { - existingURLs := IndexdURLFromDrsAccessURLs(matchingRecord.AccessMethods) - if slices.Contains(existingURLs, url) { - c.logger.Debug("Nothing to do: file already registered") - return matchingRecord, nil - } - - c.logger.Debug("updating existing record with new url") - updatedRecord := drs.DRSObject{AccessMethods: []drs.AccessMethod{{AccessURL: drs.AccessURL{URL: url}}}} - return c.UpdateRecord(ctx, &updatedRecord, matchingRecord.Id) - } - - // If no record exists, create one - c.logger.Debug("creating new record") - _, key, err := s3utils.ParseS3URL(url) - if err != nil { - return nil, err - } - - drsObj, err := drs.BuildDrsObj(key, sha256, fileSize, uuid, "placeholder-bucket", "", projectId) - if err != nil { - return nil, err - } - - return c.RegisterRecord(ctx, drsObj) -} diff --git a/localclient/client.go b/localclient/client.go new file mode 100644 index 0000000..a38cb86 --- /dev/null +++ b/localclient/client.go @@ -0,0 +1,63 @@ +package localclient + +import ( + "github.com/calypr/data-client/conf" + "github.com/calypr/data-client/credentials" + "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/logs" + "github.com/calypr/data-client/request" + "github.com/calypr/data-client/transfer" + localsigner "github.com/calypr/data-client/transfer/signer/local" +) + +// LocalInterface is the local-mode top-level facade. +// It mirrors the Gen3 facade shape for indexing and transfer operations. 
+type LocalInterface interface { + request.RequestInterface + Logger() *logs.Gen3Logger + Credentials() credentials.Reader + DRSClient() drs.ServerClient +} + +type LocalClient struct { + request.RequestInterface + + credential *conf.Credential + creds credentials.Reader + logger *logs.Gen3Logger + server drs.ServerClient +} + +func NewLocalInterface(profile string, logger *logs.Gen3Logger) (LocalInterface, error) { + config := conf.NewConfigure(logger.Logger) + cred, err := config.Load(profile) + if err != nil { + return nil, err + } + return NewLocalInterfaceFromCredential(cred, logger), nil +} + +func NewLocalInterfaceFromCredential(cred *conf.Credential, logger *logs.Gen3Logger) LocalInterface { + config := conf.NewConfigure(logger.Logger) + req := request.NewRequestInterface(logger, cred, config) + dc := drs.NewLocalDrsClient(req, cred.APIEndpoint, logger.Logger) + tb := transfer.New(req, logger, localsigner.New(cred.APIEndpoint, req, dc)) + + return &LocalClient{ + RequestInterface: req, + credential: cred, + creds: &staticCredentials{cred: cred}, + logger: logger, + server: drs.ComposeServerClient(dc, tb), + } +} + +type staticCredentials struct { + cred *conf.Credential +} + +func (c *staticCredentials) Current() *conf.Credential { return c.cred } + +func (l *LocalClient) Logger() *logs.Gen3Logger { return l.logger } +func (l *LocalClient) Credentials() credentials.Reader { return l.creds } +func (l *LocalClient) DRSClient() drs.ServerClient { return l.server } diff --git a/mocks/mock_drs_client.go b/mocks/mock_drs_client.go new file mode 100644 index 0000000..6e02ffc --- /dev/null +++ b/mocks/mock_drs_client.go @@ -0,0 +1,215 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/calypr/data-client/drs (interfaces: Client) + +package mocks + +import ( + context "context" + http "net/http" + reflect "reflect" + + drs "github.com/calypr/data-client/drs" + hash "github.com/calypr/data-client/hash" + request "github.com/calypr/data-client/request" + gomock "go.uber.org/mock/gomock" +) + +// MockDrsClient is a mock of Client interface. +type MockDrsClient struct { + ctrl *gomock.Controller + recorder *MockDrsClientMockRecorder +} + +// MockDrsClientMockRecorder is the mock recorder for MockDrsClient. +type MockDrsClientMockRecorder struct { + mock *MockDrsClient +} + +// NewMockDrsClient creates a new mock instance. +func NewMockDrsClient(ctrl *gomock.Controller) *MockDrsClient { + mock := &MockDrsClient{ctrl: ctrl} + mock.recorder = &MockDrsClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDrsClient) EXPECT() *MockDrsClientMockRecorder { + return m.recorder +} + +func (m *MockDrsClient) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BatchGetObjectsByHash", ctx, hashes) + ret0, _ := ret[0].(map[string][]drs.DRSObject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (mr *MockDrsClientMockRecorder) BatchGetObjectsByHash(ctx, hashes any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetObjectsByHash", reflect.TypeOf((*MockDrsClient)(nil).BatchGetObjectsByHash), ctx, hashes) +} + +func (m *MockDrsClient) DeleteRecord(ctx context.Context, did string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteRecord", ctx, did) + ret0, _ := ret[0].(error) + return ret0 +} + +func (mr *MockDrsClientMockRecorder) DeleteRecord(ctx, did any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRecord", 
reflect.TypeOf((*MockDrsClient)(nil).DeleteRecord), ctx, did) +} + +func (m *MockDrsClient) GetDownloadURL(ctx context.Context, id string, accessType string) (*drs.AccessURL, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDownloadURL", ctx, id, accessType) + ret0, _ := ret[0].(*drs.AccessURL) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (mr *MockDrsClientMockRecorder) GetDownloadURL(ctx, id, accessType any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDownloadURL", reflect.TypeOf((*MockDrsClient)(nil).GetDownloadURL), ctx, id, accessType) +} + +func (m *MockDrsClient) GetObject(ctx context.Context, id string) (*drs.DRSObject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObject", ctx, id) + ret0, _ := ret[0].(*drs.DRSObject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (mr *MockDrsClientMockRecorder) GetObject(ctx, id any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockDrsClient)(nil).GetObject), ctx, id) +} + +func (m *MockDrsClient) GetObjectByHash(ctx context.Context, checksum *hash.Checksum) ([]drs.DRSObject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetObjectByHash", ctx, checksum) + ret0, _ := ret[0].([]drs.DRSObject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (mr *MockDrsClientMockRecorder) GetObjectByHash(ctx, checksum any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectByHash", reflect.TypeOf((*MockDrsClient)(nil).GetObjectByHash), ctx, checksum) +} + +func (m *MockDrsClient) RegisterRecord(ctx context.Context, record *drs.DRSObject) (*drs.DRSObject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterRecord", ctx, record) + ret0, _ := ret[0].(*drs.DRSObject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (mr *MockDrsClientMockRecorder) RegisterRecord(ctx, record 
any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRecord", reflect.TypeOf((*MockDrsClient)(nil).RegisterRecord), ctx, record) +} + +func (m *MockDrsClient) UpdateRecord(ctx context.Context, updateInfo *drs.DRSObject, did string) (*drs.DRSObject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateRecord", ctx, updateInfo, did) + ret0, _ := ret[0].(*drs.DRSObject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (mr *MockDrsClientMockRecorder) UpdateRecord(ctx, updateInfo, did any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRecord", reflect.TypeOf((*MockDrsClient)(nil).UpdateRecord), ctx, updateInfo, did) +} + +func (m *MockDrsClient) UpsertRecord(ctx context.Context, url string, sha256 string, fileSize int64, projectId string) (*drs.DRSObject, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpsertRecord", ctx, url, sha256, fileSize, projectId) + ret0, _ := ret[0].(*drs.DRSObject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (mr *MockDrsClientMockRecorder) UpsertRecord(ctx, url, sha256, fileSize, projectId any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertRecord", reflect.TypeOf((*MockDrsClient)(nil).UpsertRecord), ctx, url, sha256, fileSize, projectId) +} + +func (m *MockDrsClient) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Do", ctx, req) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (mr *MockDrsClientMockRecorder) Do(ctx, req any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockDrsClient)(nil).Do), ctx, req) +} + +func (m *MockDrsClient) RegisterRecords(ctx context.Context, records []*drs.DRSObject) ([]*drs.DRSObject, error) { + m.ctrl.T.Helper() 
+ ret := m.ctrl.Call(m, "RegisterRecords", ctx, records) + ret0, _ := ret[0].([]*drs.DRSObject) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (mr *MockDrsClientMockRecorder) RegisterRecords(ctx, records any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRecords", reflect.TypeOf((*MockDrsClient)(nil).RegisterRecords), ctx, records) +} + +func (m *MockDrsClient) ListObjects(ctx context.Context) (chan drs.DRSObjectResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListObjects", ctx) + ret0, _ := ret[0].(chan drs.DRSObjectResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (mr *MockDrsClientMockRecorder) ListObjects(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjects", reflect.TypeOf((*MockDrsClient)(nil).ListObjects), ctx) +} + +func (m *MockDrsClient) New(method, url string) *request.RequestBuilder { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "New", method, url) + ret0, _ := ret[0].(*request.RequestBuilder) + return ret0 +} + +func (mr *MockDrsClientMockRecorder) New(method, url any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockDrsClient)(nil).New), method, url) +} + +func (m *MockDrsClient) WithProject(projectId string) drs.Client { + return m +} + +func (m *MockDrsClient) WithOrganization(organization string) drs.Client { + return m +} + +func (m *MockDrsClient) WithBucket(bucketName string) drs.Client { + return m +} + +func (m *MockDrsClient) GetProjectId() string { return "" } +func (m *MockDrsClient) GetBucketName() string { return "" } +func (m *MockDrsClient) GetOrganization() string { return "" } +func (m *MockDrsClient) RegisterFile(ctx context.Context, oid, path string) (*drs.DRSObject, error) { return nil, nil } +func (m *MockDrsClient) AddURL(ctx context.Context, blobURL, sha256 string, opts ...drs.AddURLOption) 
(*drs.DRSObject, error) { return nil, nil } +func (m *MockDrsClient) ListObjectsByProject(ctx context.Context, projectId string) (chan drs.DRSObjectResult, error) { return nil, nil } +func (m *MockDrsClient) GetProjectSample(ctx context.Context, projectId string, limit int) ([]drs.DRSObject, error) { return nil, nil } +func (m *MockDrsClient) DeleteRecordsByProject(ctx context.Context, projectId string) error { return nil } +func (m *MockDrsClient) BuildDrsObj(fileName string, checksum string, size int64, drsId string) (*drs.DRSObject, error) { return nil, nil } +func (m *MockDrsClient) DownloadFile(ctx context.Context, oid, destPath string) error { return nil } diff --git a/mocks/mock_gen3interface.go b/mocks/mock_gen3interface.go index 999daa7..4364179 100644 --- a/mocks/mock_gen3interface.go +++ b/mocks/mock_gen3interface.go @@ -14,9 +14,9 @@ import ( http "net/http" reflect "reflect" - conf "github.com/calypr/data-client/conf" + credentials "github.com/calypr/data-client/credentials" + drs "github.com/calypr/data-client/drs" fence "github.com/calypr/data-client/fence" - indexd "github.com/calypr/data-client/indexd" logs "github.com/calypr/data-client/logs" request "github.com/calypr/data-client/request" requestor "github.com/calypr/data-client/requestor" @@ -63,60 +63,60 @@ func (mr *MockGen3InterfaceMockRecorder) Do(ctx, req any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockGen3Interface)(nil).Do), ctx, req) } -// ExportCredential mocks base method. -func (m *MockGen3Interface) ExportCredential(ctx context.Context, cred *conf.Credential) error { +// DRSClient mocks base method. +func (m *MockGen3Interface) DRSClient() drs.ServerClient { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportCredential", ctx, cred) - ret0, _ := ret[0].(error) + ret := m.ctrl.Call(m, "DRSClient") + ret0, _ := ret[0].(drs.ServerClient) return ret0 } -// ExportCredential indicates an expected call of ExportCredential. 
-func (mr *MockGen3InterfaceMockRecorder) ExportCredential(ctx, cred any) *gomock.Call { +// DRSClient indicates an expected call of DRSClient. +func (mr *MockGen3InterfaceMockRecorder) DRSClient() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportCredential", reflect.TypeOf((*MockGen3Interface)(nil).ExportCredential), ctx, cred) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DRSClient", reflect.TypeOf((*MockGen3Interface)(nil).DRSClient)) } -// Fence mocks base method. -func (m *MockGen3Interface) Fence() fence.FenceInterface { +// FenceClient mocks base method. +func (m *MockGen3Interface) FenceClient() fence.FenceInterface { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Fence") + ret := m.ctrl.Call(m, "FenceClient") ret0, _ := ret[0].(fence.FenceInterface) return ret0 } -// Fence indicates an expected call of Fence. -func (mr *MockGen3InterfaceMockRecorder) Fence() *gomock.Call { +// FenceClient indicates an expected call of FenceClient. +func (mr *MockGen3InterfaceMockRecorder) FenceClient() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fence", reflect.TypeOf((*MockGen3Interface)(nil).Fence)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FenceClient", reflect.TypeOf((*MockGen3Interface)(nil).FenceClient)) } -// GetCredential mocks base method. -func (m *MockGen3Interface) GetCredential() *conf.Credential { +// Credentials mocks base method. +func (m *MockGen3Interface) Credentials() credentials.Manager { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCredential") - ret0, _ := ret[0].(*conf.Credential) + ret := m.ctrl.Call(m, "Credentials") + ret0, _ := ret[0].(credentials.Manager) return ret0 } -// GetCredential indicates an expected call of GetCredential. -func (mr *MockGen3InterfaceMockRecorder) GetCredential() *gomock.Call { +// Credentials indicates an expected call of Credentials. 
+func (mr *MockGen3InterfaceMockRecorder) Credentials() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCredential", reflect.TypeOf((*MockGen3Interface)(nil).GetCredential)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Credentials", reflect.TypeOf((*MockGen3Interface)(nil).Credentials)) } -// Indexd mocks base method. -func (m *MockGen3Interface) Indexd() indexd.IndexdInterface { +// RequestorClient mocks base method. +func (m *MockGen3Interface) RequestorClient() requestor.RequestorInterface { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Indexd") - ret0, _ := ret[0].(indexd.IndexdInterface) + ret := m.ctrl.Call(m, "RequestorClient") + ret0, _ := ret[0].(requestor.RequestorInterface) return ret0 } -// Indexd indicates an expected call of Indexd. -func (mr *MockGen3InterfaceMockRecorder) Indexd() *gomock.Call { +// RequestorClient indicates an expected call of RequestorClient. +func (mr *MockGen3InterfaceMockRecorder) RequestorClient() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Indexd", reflect.TypeOf((*MockGen3Interface)(nil).Indexd)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestorClient", reflect.TypeOf((*MockGen3Interface)(nil).RequestorClient)) } // Logger mocks base method. @@ -147,30 +147,16 @@ func (mr *MockGen3InterfaceMockRecorder) New(method, url any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockGen3Interface)(nil).New), method, url) } -// Requestor mocks base method. -func (m *MockGen3Interface) Requestor() requestor.RequestorInterface { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Requestor") - ret0, _ := ret[0].(requestor.RequestorInterface) - return ret0 -} - -// Requestor indicates an expected call of Requestor. 
-func (mr *MockGen3InterfaceMockRecorder) Requestor() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Requestor", reflect.TypeOf((*MockGen3Interface)(nil).Requestor)) -} - -// Sower mocks base method. -func (m *MockGen3Interface) Sower() sower.SowerInterface { +// SowerClient mocks base method. +func (m *MockGen3Interface) SowerClient() sower.SowerInterface { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Sower") + ret := m.ctrl.Call(m, "SowerClient") ret0, _ := ret[0].(sower.SowerInterface) return ret0 } -// Sower indicates an expected call of Sower. -func (mr *MockGen3InterfaceMockRecorder) Sower() *gomock.Call { +// SowerClient indicates an expected call of SowerClient. +func (mr *MockGen3InterfaceMockRecorder) SowerClient() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sower", reflect.TypeOf((*MockGen3Interface)(nil).Sower)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SowerClient", reflect.TypeOf((*MockGen3Interface)(nil).SowerClient)) } diff --git a/mocks/mock_indexd.go b/mocks/mock_indexd.go deleted file mode 100644 index 0bab244..0000000 --- a/mocks/mock_indexd.go +++ /dev/null @@ -1,296 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/calypr/data-client/indexd (interfaces: IndexdInterface) -// -// Generated by this command: -// -// mockgen -destination=../mocks/mock_indexd.go -package=mocks github.com/calypr/data-client/indexd IndexdInterface -// - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - http "net/http" - reflect "reflect" - - drs "github.com/calypr/data-client/drs" - indexd "github.com/calypr/data-client/indexd" - request "github.com/calypr/data-client/request" - gomock "go.uber.org/mock/gomock" -) - -// MockIndexdInterface is a mock of IndexdInterface interface. 
-type MockIndexdInterface struct { - ctrl *gomock.Controller - recorder *MockIndexdInterfaceMockRecorder - isgomock struct{} -} - -// MockIndexdInterfaceMockRecorder is the mock recorder for MockIndexdInterface. -type MockIndexdInterfaceMockRecorder struct { - mock *MockIndexdInterface -} - -// NewMockIndexdInterface creates a new mock instance. -func NewMockIndexdInterface(ctrl *gomock.Controller) *MockIndexdInterface { - mock := &MockIndexdInterface{ctrl: ctrl} - mock.recorder = &MockIndexdInterfaceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockIndexdInterface) EXPECT() *MockIndexdInterfaceMockRecorder { - return m.recorder -} - -// BatchGetObjectsByHash mocks base method. -func (m *MockIndexdInterface) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BatchGetObjectsByHash", ctx, hashes) - ret0, _ := ret[0].(map[string][]drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// BatchGetObjectsByHash indicates an expected call of BatchGetObjectsByHash. -func (mr *MockIndexdInterfaceMockRecorder) BatchGetObjectsByHash(ctx, hashes any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetObjectsByHash", reflect.TypeOf((*MockIndexdInterface)(nil).BatchGetObjectsByHash), ctx, hashes) -} - -// DeleteIndexdRecord mocks base method. -func (m *MockIndexdInterface) DeleteIndexdRecord(ctx context.Context, did string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteIndexdRecord", ctx, did) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteIndexdRecord indicates an expected call of DeleteIndexdRecord. 
-func (mr *MockIndexdInterfaceMockRecorder) DeleteIndexdRecord(ctx, did any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteIndexdRecord", reflect.TypeOf((*MockIndexdInterface)(nil).DeleteIndexdRecord), ctx, did) -} - -// DeleteRecordByHash mocks base method. -func (m *MockIndexdInterface) DeleteRecordByHash(ctx context.Context, hashValue, projectId string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteRecordByHash", ctx, hashValue, projectId) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteRecordByHash indicates an expected call of DeleteRecordByHash. -func (mr *MockIndexdInterfaceMockRecorder) DeleteRecordByHash(ctx, hashValue, projectId any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRecordByHash", reflect.TypeOf((*MockIndexdInterface)(nil).DeleteRecordByHash), ctx, hashValue, projectId) -} - -// DeleteRecordsByProject mocks base method. -func (m *MockIndexdInterface) DeleteRecordsByProject(ctx context.Context, projectId string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteRecordsByProject", ctx, projectId) - ret0, _ := ret[0].(error) - return ret0 -} - -// DeleteRecordsByProject indicates an expected call of DeleteRecordsByProject. -func (mr *MockIndexdInterfaceMockRecorder) DeleteRecordsByProject(ctx, projectId any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRecordsByProject", reflect.TypeOf((*MockIndexdInterface)(nil).DeleteRecordsByProject), ctx, projectId) -} - -// Do mocks base method. -func (m *MockIndexdInterface) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Do", ctx, req) - ret0, _ := ret[0].(*http.Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Do indicates an expected call of Do. 
-func (mr *MockIndexdInterfaceMockRecorder) Do(ctx, req any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockIndexdInterface)(nil).Do), ctx, req) -} - -// GetDownloadURL mocks base method. -func (m *MockIndexdInterface) GetDownloadURL(ctx context.Context, did, accessType string) (*drs.AccessURL, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDownloadURL", ctx, did, accessType) - ret0, _ := ret[0].(*drs.AccessURL) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDownloadURL indicates an expected call of GetDownloadURL. -func (mr *MockIndexdInterfaceMockRecorder) GetDownloadURL(ctx, did, accessType any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDownloadURL", reflect.TypeOf((*MockIndexdInterface)(nil).GetDownloadURL), ctx, did, accessType) -} - -// GetObject mocks base method. -func (m *MockIndexdInterface) GetObject(ctx context.Context, id string) (*drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObject", ctx, id) - ret0, _ := ret[0].(*drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObject indicates an expected call of GetObject. -func (mr *MockIndexdInterfaceMockRecorder) GetObject(ctx, id any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockIndexdInterface)(nil).GetObject), ctx, id) -} - -// GetObjectByHash mocks base method. -func (m *MockIndexdInterface) GetObjectByHash(ctx context.Context, hashType, hashValue string) ([]drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectByHash", ctx, hashType, hashValue) - ret0, _ := ret[0].([]drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectByHash indicates an expected call of GetObjectByHash. 
-func (mr *MockIndexdInterfaceMockRecorder) GetObjectByHash(ctx, hashType, hashValue any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectByHash", reflect.TypeOf((*MockIndexdInterface)(nil).GetObjectByHash), ctx, hashType, hashValue) -} - -// GetProjectSample mocks base method. -func (m *MockIndexdInterface) GetProjectSample(ctx context.Context, projectId string, limit int) ([]drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetProjectSample", ctx, projectId, limit) - ret0, _ := ret[0].([]drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetProjectSample indicates an expected call of GetProjectSample. -func (mr *MockIndexdInterfaceMockRecorder) GetProjectSample(ctx, projectId, limit any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProjectSample", reflect.TypeOf((*MockIndexdInterface)(nil).GetProjectSample), ctx, projectId, limit) -} - -// ListObjects mocks base method. -func (m *MockIndexdInterface) ListObjects(ctx context.Context) (chan drs.DRSObjectResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjects", ctx) - ret0, _ := ret[0].(chan drs.DRSObjectResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjects indicates an expected call of ListObjects. -func (mr *MockIndexdInterfaceMockRecorder) ListObjects(ctx any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjects", reflect.TypeOf((*MockIndexdInterface)(nil).ListObjects), ctx) -} - -// ListObjectsByProject mocks base method. 
-func (m *MockIndexdInterface) ListObjectsByProject(ctx context.Context, projectId string) (chan drs.DRSObjectResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectsByProject", ctx, projectId) - ret0, _ := ret[0].(chan drs.DRSObjectResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjectsByProject indicates an expected call of ListObjectsByProject. -func (mr *MockIndexdInterfaceMockRecorder) ListObjectsByProject(ctx, projectId any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsByProject", reflect.TypeOf((*MockIndexdInterface)(nil).ListObjectsByProject), ctx, projectId) -} - -// New mocks base method. -func (m *MockIndexdInterface) New(method, url string) *request.RequestBuilder { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "New", method, url) - ret0, _ := ret[0].(*request.RequestBuilder) - return ret0 -} - -// New indicates an expected call of New. -func (mr *MockIndexdInterfaceMockRecorder) New(method, url any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockIndexdInterface)(nil).New), method, url) -} - -// RegisterIndexdRecord mocks base method. -func (m *MockIndexdInterface) RegisterIndexdRecord(ctx context.Context, indexdObj *indexd.IndexdRecord) (*drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterIndexdRecord", ctx, indexdObj) - ret0, _ := ret[0].(*drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RegisterIndexdRecord indicates an expected call of RegisterIndexdRecord. -func (mr *MockIndexdInterfaceMockRecorder) RegisterIndexdRecord(ctx, indexdObj any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterIndexdRecord", reflect.TypeOf((*MockIndexdInterface)(nil).RegisterIndexdRecord), ctx, indexdObj) -} - -// RegisterRecord mocks base method. 
-func (m *MockIndexdInterface) RegisterRecord(ctx context.Context, record *drs.DRSObject) (*drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterRecord", ctx, record) - ret0, _ := ret[0].(*drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RegisterRecord indicates an expected call of RegisterRecord. -func (mr *MockIndexdInterfaceMockRecorder) RegisterRecord(ctx, record any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRecord", reflect.TypeOf((*MockIndexdInterface)(nil).RegisterRecord), ctx, record) -} - -// RegisterRecords mocks base method. -func (m *MockIndexdInterface) RegisterRecords(ctx context.Context, records []*drs.DRSObject) ([]*drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterRecords", ctx, records) - ret0, _ := ret[0].([]*drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RegisterRecords indicates an expected call of RegisterRecords. -func (mr *MockIndexdInterfaceMockRecorder) RegisterRecords(ctx, records any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRecords", reflect.TypeOf((*MockIndexdInterface)(nil).RegisterRecords), ctx, records) -} - -// UpdateRecord mocks base method. -func (m *MockIndexdInterface) UpdateRecord(ctx context.Context, updateInfo *drs.DRSObject, did string) (*drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateRecord", ctx, updateInfo, did) - ret0, _ := ret[0].(*drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpdateRecord indicates an expected call of UpdateRecord. 
-func (mr *MockIndexdInterfaceMockRecorder) UpdateRecord(ctx, updateInfo, did any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRecord", reflect.TypeOf((*MockIndexdInterface)(nil).UpdateRecord), ctx, updateInfo, did) -} - -// UpsertIndexdRecord mocks base method. -func (m *MockIndexdInterface) UpsertIndexdRecord(ctx context.Context, url, sha256 string, fileSize int64, projectId string) (*drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpsertIndexdRecord", ctx, url, sha256, fileSize, projectId) - ret0, _ := ret[0].(*drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UpsertIndexdRecord indicates an expected call of UpsertIndexdRecord. -func (mr *MockIndexdInterfaceMockRecorder) UpsertIndexdRecord(ctx, url, sha256, fileSize, projectId any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertIndexdRecord", reflect.TypeOf((*MockIndexdInterface)(nil).UpsertIndexdRecord), ctx, url, sha256, fileSize, projectId) -} diff --git a/runtime/client.go b/runtime/client.go new file mode 100644 index 0000000..2dc8ab7 --- /dev/null +++ b/runtime/client.go @@ -0,0 +1,48 @@ +package runtime + +import ( + "fmt" + "strings" + + "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/g3client" + "github.com/calypr/data-client/localclient" + "github.com/calypr/data-client/logs" + "github.com/calypr/data-client/transfer" +) + +// Client composes metadata and transfer concerns for a selected runtime mode. 
+type Client struct { + g3 g3client.Gen3Interface + drs drs.ServerClient +} + +func New(profile string, mode string, logger *logs.Gen3Logger) (*Client, error) { + mode = strings.ToLower(strings.TrimSpace(mode)) + switch mode { + case "", "gen3": + g3, err := g3client.NewGen3Interface(profile, logger) + if err != nil { + return nil, err + } + return &Client{ + g3: g3, + drs: g3.DRSClient(), + }, nil + case "drs": + lc, err := localclient.NewLocalInterface(profile, logger) + if err != nil { + return nil, err + } + return &Client{ + g3: nil, + drs: lc.DRSClient(), + }, nil + default: + return nil, fmt.Errorf("unsupported backend mode %q", mode) + } +} + +func (c *Client) Gen3() g3client.Gen3Interface { return c.g3 } +func (c *Client) DRS() drs.ServerClient { return c.drs } +func (c *Client) Transfer() transfer.Backend { return c.drs } diff --git a/s3utils/s3_utils.go b/s3utils/s3_utils.go index 9e47805..8aefc78 100644 --- a/s3utils/s3_utils.go +++ b/s3utils/s3_utils.go @@ -7,37 +7,52 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go-v2/aws" - awsConfig "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" + "net/url" + + "gocloud.dev/blob" + _ "gocloud.dev/blob/azureblob" + _ "gocloud.dev/blob/fileblob" + _ "gocloud.dev/blob/gcsblob" + _ "gocloud.dev/blob/s3blob" + "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/calypr/data-client/fence" ) -// ParseS3URL parses a URL like s3://bucket/key and returns (bucket, key, error). -func ParseS3URL(s3url string) (string, string, error) { - s3Prefix := "s3://" - if !strings.HasPrefix(s3url, s3Prefix) { - return "", "", fmt.Errorf("S3 URL requires prefix 's3://': %s", s3url) +// ParseBlobURL parses a URL like s3://bucket/key and returns (bucket, key, error). 
+func ParseBlobURL(blobURL string) (string, string, error) {
+	u, err := url.Parse(blobURL)
+	if err != nil {
+		return "", "", fmt.Errorf("invalid blob URL %s: %w", blobURL, err)
+	}
+	if u.Scheme == "" {
+		return "", "", fmt.Errorf("URL requires a scheme prefix (e.g. s3://, gs://): %s", blobURL)
+	}
+
+	bucket := u.Host
+	if u.Scheme == "file" {
+		bucket = "file:///"
	}
-	trimmed := strings.TrimPrefix(s3url, s3Prefix)
-	slashIndex := strings.Index(trimmed, "/")
-	if slashIndex == -1 || slashIndex == len(trimmed)-1 {
-		return "", "", fmt.Errorf("invalid S3 file URL: %s", s3url)
+
+	key := strings.TrimPrefix(u.Path, "/")
+	if key == "" {
+		return "", "", fmt.Errorf("invalid blob URL (missing key/path): %s", blobURL)
	}
-	return trimmed[:slashIndex], trimmed[slashIndex+1:], nil
+
+	return bucket, key, nil
}

-// ValidateInputs checks if S3 URL and SHA256 hash are valid.
+// ValidateInputs checks if the Blob URL and SHA256 hash are valid.
func ValidateInputs(s3URL, sha256 string) error {
	if s3URL == "" {
-		return fmt.Errorf("S3 URL is required")
+		return fmt.Errorf("Blob URL is required")
	}
	if sha256 == "" {
		return fmt.Errorf("SHA256 hash is required")
	}
-	if !strings.HasPrefix(s3URL, "s3://") {
-		return fmt.Errorf("invalid S3 URL: must start with s3://")
+	u, err := url.Parse(s3URL)
+	if err != nil || u.Scheme == "" {
+		return fmt.Errorf("invalid Blob URL: must have a scheme (like s3://, gs://)")
	}
	if len(sha256) != 64 {
		return fmt.Errorf("invalid SHA256 hash: must be 64 characters")
@@ -46,6 +61,8 @@ func ValidateInputs(s3URL, sha256 string) error {
}

// FetchS3MetadataWithBucketDetails fetches S3 metadata (size and modified date) for a given S3 URL.
+// It now fetches metadata via generic go-cloud blob capabilities, making it
+// compatible with multiple cloud providers rather than being S3-specific. 
func FetchS3MetadataWithBucketDetails( ctx context.Context, s3URL string, @@ -54,73 +71,38 @@ func FetchS3MetadataWithBucketDetails( region string, endpoint string, bucketDetails *fence.S3Bucket, - s3Client *s3.Client, + s3Client *s3.Client, // kept for backward compatibility signature, though it is no longer strictly required for basic HEAD. logger *slog.Logger, ) (int64, string, error) { - bucket, key, err := ParseS3URL(s3URL) + u, err := url.Parse(s3URL) if err != nil { - return 0, "", err + return 0, "", fmt.Errorf("failed to parse url %s: %w", s3URL, err) } - if s3Client == nil { - var configOptions []func(*awsConfig.LoadOptions) error - if awsAccessKey != "" && awsSecretKey != "" { - configOptions = append(configOptions, - awsConfig.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(awsAccessKey, awsSecretKey, "")), - ) - } - - regionToUse := "" - if region != "" { - regionToUse = region - } else if bucketDetails != nil && bucketDetails.Region != "" { - regionToUse = bucketDetails.Region - } - if regionToUse != "" { - configOptions = append(configOptions, awsConfig.WithRegion(regionToUse)) - } - - cfg, err := awsConfig.LoadDefaultConfig(ctx, configOptions...) - if err != nil { - return 0, "", fmt.Errorf("unable to load AWS SDK config: %w", err) - } - - endpointToUse := "" - if endpoint != "" { - endpointToUse = endpoint - } else if bucketDetails != nil && bucketDetails.EndpointURL != "" { - endpointToUse = bucketDetails.EndpointURL - } - - s3Client = s3.NewFromConfig(cfg, func(o *s3.Options) { - if endpointToUse != "" { - o.BaseEndpoint = aws.String(endpointToUse) - } - o.UsePathStyle = true - }) - } + bucketURL := fmt.Sprintf("%s://%s", u.Scheme, u.Host) + key := strings.TrimPrefix(u.Path, "/") - input := &s3.HeadObjectInput{ - Bucket: &bucket, - Key: aws.String(key), - } + // Optionally pass credentials logic. By default go-cloud checks environment. 
+ // For AWS, you could override credentials, but typically users want standard config loading + // which go-cloud openers handle out of the box (e.g. AWS_PROFILE, AWS_REGION, AWS_ACCESS_KEY_ID). - resp, err := s3Client.HeadObject(ctx, input) + bucket, err := blob.OpenBucket(ctx, bucketURL) if err != nil { - return 0, "", fmt.Errorf("failed to head object: %w", err) + return 0, "", fmt.Errorf("failed to open bucket via go-cloud string %s: %w", bucketURL, err) } + defer bucket.Close() - var contentLength int64 - if resp.ContentLength != nil { - contentLength = *resp.ContentLength + attrs, err := bucket.Attributes(ctx, key) + if err != nil { + return 0, "", fmt.Errorf("failed to get attributes for %s: %w", key, err) } - var lastModified string - if resp.LastModified != nil { - lastModified = resp.LastModified.Format(time.RFC3339) + lastMod := "" + if !attrs.ModTime.IsZero() { + lastMod = attrs.ModTime.Format(time.RFC3339) } - return contentLength, lastModified, nil + return attrs.Size, lastMod, nil } type S3Meta struct { diff --git a/tests/download-multiple_test.go b/tests/download-multiple_test.go index 166285a..f04fca0 100644 --- a/tests/download-multiple_test.go +++ b/tests/download-multiple_test.go @@ -3,18 +3,12 @@ package tests import ( "context" "fmt" - "io" - "net/http" - "strings" "testing" - "github.com/calypr/data-client/backend/gen3" - "github.com/calypr/data-client/conf" "github.com/calypr/data-client/download" drs "github.com/calypr/data-client/drs" "github.com/calypr/data-client/logs" "github.com/calypr/data-client/mocks" - req "github.com/calypr/data-client/request" "go.uber.org/mock/gomock" ) @@ -26,49 +20,21 @@ func Test_askGen3ForFileInfo_withShepherd(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - mockGen3 := mocks.NewMockGen3Interface(mockCtrl) - mockFence := mocks.NewMockFenceInterface(mockCtrl) - - // Expect credential access - mockGen3.EXPECT().GetCredential().Return(&conf.Credential{}).AnyTimes() - 
mockGen3.EXPECT().Fence().Return(mockFence).AnyTimes() - - // Shepherd is available - mockFence.EXPECT(). - CheckForShepherdAPI(gomock.Any()). - Return(true, nil) - - // Mock successful Shepherd response - testBody := `{ - "record": { - "file_name": "test-file", - "size": 120, - "did": "000000-0000000-0000000-000000" - } - }` - resp := &http.Response{ - StatusCode: 200, - Body: io.NopCloser(strings.NewReader(testBody)), - } + mockIndexd := mocks.NewMockDrsClient(mockCtrl) - // Expect request to Shepherd - mockGen3.EXPECT(). - New(gomock.Any(), gomock.Any()). - Return(&req.RequestBuilder{Url: "/objects/" + testGUID}). - AnyTimes() + // New behavior: tries GetObjectByHash first + mockIndexd.EXPECT(). + GetObjectByHash(gomock.Any(), gomock.Any()). + Return(nil, fmt.Errorf("not a hash")) - mockGen3.EXPECT(). - Do(gomock.Any(), gomock.Any()). - DoAndReturn(func(ctx any, rb *req.RequestBuilder) (*http.Response, error) { - return resp, nil - }) + mockIndexd.EXPECT(). + GetObject(gomock.Any(), testGUID). 
+ Return(&drs.DRSObject{Id: testGUID, Name: &testFileName, Size: testFileSize}, nil) - // Optional: logger - mockGen3.EXPECT().Logger().Return(logs.NewGen3Logger(nil, "", "test")).AnyTimes() + logger := logs.NewGen3Logger(nil, "", "test") skipped := []download.RenamedOrSkippedFileInfo{} - bk := gen3.NewGen3Backend(mockGen3) - info, err := download.GetFileInfo(context.Background(), bk, testGUID, "", "", "original", true, &skipped) + info, err := download.GetFileInfo(context.Background(), mockIndexd, logger, testGUID, "", "", "original", true, &skipped) if err != nil { t.Error(err) } @@ -90,46 +56,21 @@ func Test_askGen3ForFileInfo_withShepherd_shepherdError(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - mockGen3 := mocks.NewMockGen3Interface(mockCtrl) - mockFence := mocks.NewMockFenceInterface(mockCtrl) - - dummyCred := &conf.Credential{} - mockGen3.EXPECT().GetCredential().Return(dummyCred).AnyTimes() - mockGen3.EXPECT().Fence().Return(mockFence).AnyTimes() - - // 1. Shepherd is available - mockFence.EXPECT(). - CheckForShepherdAPI(gomock.Any()). - Return(true, nil). - Times(1) - - // 2. Shepherd request fails → triggers fallback to Indexd - mockGen3.EXPECT(). - New(gomock.Any(), gomock.Any()). - Return(&req.RequestBuilder{Url: "/objects/" + testGUID}). - AnyTimes() - - mockGen3.EXPECT(). - Do(gomock.Any(), gomock.Any()). - Return(nil, fmt.Errorf("Shepherd error")). - Times(1) // only the Shepherd call - - // 3. Fallback: Indexd request - mockIndexd := mocks.NewMockIndexdInterface(mockCtrl) - mockGen3.EXPECT().Indexd().Return(mockIndexd).AnyTimes() + mockIndexd := mocks.NewMockDrsClient(mockCtrl) + + // New behavior: tries GetObjectByHash first + mockIndexd.EXPECT(). + GetObjectByHash(gomock.Any(), gomock.Any()). + Return(nil, fmt.Errorf("not a hash")) + mockIndexd.EXPECT(). GetObject(gomock.Any(), testGUID). Return(nil, fmt.Errorf("Indexd error")) - // Logger - mockGen3.EXPECT(). - Logger(). 
- Return(logs.NewGen3Logger(nil, "", "test")). - AnyTimes() + logger := logs.NewGen3Logger(nil, "", "test") skipped := []download.RenamedOrSkippedFileInfo{} - bk := gen3.NewGen3Backend(mockGen3) - info, err := download.GetFileInfo(context.Background(), bk, testGUID, "", "", "original", true, &skipped) + info, err := download.GetFileInfo(context.Background(), mockIndexd, logger, testGUID, "", "", "original", true, &skipped) if err != nil { t.Fatal(err) } @@ -157,26 +98,21 @@ func Test_askGen3ForFileInfo_noShepherd(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - mockGen3 := mocks.NewMockGen3Interface(mockCtrl) - mockFence := mocks.NewMockFenceInterface(mockCtrl) + mockIndexd := mocks.NewMockDrsClient(mockCtrl) - mockGen3.EXPECT().GetCredential().Return(&conf.Credential{}).AnyTimes() - mockGen3.EXPECT().Fence().Return(mockFence).AnyTimes() - - // No Shepherd - mockFence.EXPECT().CheckForShepherdAPI(gomock.Any()).Return(false, nil) + // New behavior: tries GetObjectByHash first + mockIndexd.EXPECT(). + GetObjectByHash(gomock.Any(), gomock.Any()). + Return(nil, fmt.Errorf("not a hash")) - mockIndexd := mocks.NewMockIndexdInterface(mockCtrl) - mockGen3.EXPECT().Indexd().Return(mockIndexd).AnyTimes() mockIndexd.EXPECT(). GetObject(gomock.Any(), testGUID). 
- Return(&drs.DRSObject{Id: testGUID, Name: testFileName, Size: testFileSize}, nil) + Return(&drs.DRSObject{Id: testGUID, Name: &testFileName, Size: testFileSize}, nil) - mockGen3.EXPECT().Logger().Return(logs.NewGen3Logger(nil, "", "test")).AnyTimes() + logger := logs.NewGen3Logger(nil, "", "test") skipped := []download.RenamedOrSkippedFileInfo{} - bk := gen3.NewGen3Backend(mockGen3) - info, err := download.GetFileInfo(context.Background(), bk, testGUID, "", "", "original", true, &skipped) + info, err := download.GetFileInfo(context.Background(), mockIndexd, logger, testGUID, "", "", "original", true, &skipped) if err != nil { t.Fatal(err) } diff --git a/tests/utils_test.go b/tests/utils_test.go index bb62c3b..7687e9e 100644 --- a/tests/utils_test.go +++ b/tests/utils_test.go @@ -8,17 +8,28 @@ import ( "strings" "testing" - "github.com/calypr/data-client/backend/gen3" "github.com/calypr/data-client/common" "github.com/calypr/data-client/conf" "github.com/calypr/data-client/download" "github.com/calypr/data-client/fence" + "github.com/calypr/data-client/logs" "github.com/calypr/data-client/mocks" "github.com/calypr/data-client/request" + "github.com/calypr/data-client/transfer" + gen3signer "github.com/calypr/data-client/transfer/signer/gen3" "github.com/calypr/data-client/upload" "go.uber.org/mock/gomock" ) +type staticCredentialsManager struct { + cred *conf.Credential +} + +func (s *staticCredentialsManager) Current() *conf.Credential { return s.cred } +func (s *staticCredentialsManager) Export(ctx context.Context, cred *conf.Credential) error { + return nil +} + func TestGetDownloadResponse_withShepherd(t *testing.T) { testGUID := "000000-0000000-0000000-000000" testFilename := "test-file" @@ -29,10 +40,13 @@ func TestGetDownloadResponse_withShepherd(t *testing.T) { mockGen3 := mocks.NewMockGen3Interface(mockCtrl) mockFence := mocks.NewMockFenceInterface(mockCtrl) + mockDrs := mocks.NewMockDrsClient(mockCtrl) // Mock credential - 
mockGen3.EXPECT().GetCredential().Return(&conf.Credential{}).AnyTimes() - mockGen3.EXPECT().Fence().Return(mockFence).AnyTimes() + mockGen3.EXPECT().Credentials().Return(&staticCredentialsManager{cred: &conf.Credential{}}).AnyTimes() + mockGen3.EXPECT().FenceClient().Return(mockFence).AnyTimes() + mockGen3.EXPECT().DRSClient().Return(mockDrs).AnyTimes() + mockGen3.EXPECT().Logger().Return(logs.NewGen3Logger(nil, "", "test")).AnyTimes() mockFence.EXPECT(). GetDownloadPresignedUrl(gomock.Any(), testGUID, ""). @@ -62,7 +76,7 @@ func TestGetDownloadResponse_withShepherd(t *testing.T) { Range: 0, } - bk := gen3.NewGen3Backend(mockGen3) + bk := transfer.New(mockGen3, logs.NewGen3Logger(nil, "", "test"), gen3signer.New(mockGen3, &conf.Credential{}, mockDrs, mockFence)) err := download.GetDownloadResponse(context.Background(), bk, &mockFDRObj, "") if err != nil { t.Fatalf("Unexpected error: %v", err) @@ -83,9 +97,12 @@ func TestGetDownloadResponse_noShepherd(t *testing.T) { mockGen3 := mocks.NewMockGen3Interface(mockCtrl) mockFence := mocks.NewMockFenceInterface(mockCtrl) + mockDrs := mocks.NewMockDrsClient(mockCtrl) - mockGen3.EXPECT().GetCredential().Return(&conf.Credential{}).AnyTimes() - mockGen3.EXPECT().Fence().Return(mockFence).AnyTimes() + mockGen3.EXPECT().Credentials().Return(&staticCredentialsManager{cred: &conf.Credential{}}).AnyTimes() + mockGen3.EXPECT().FenceClient().Return(mockFence).AnyTimes() + mockGen3.EXPECT().DRSClient().Return(mockDrs).AnyTimes() + mockGen3.EXPECT().Logger().Return(logs.NewGen3Logger(nil, "", "test")).AnyTimes() mockFence.EXPECT(). GetDownloadPresignedUrl(gomock.Any(), testGUID, ""). 
@@ -115,7 +132,7 @@ func TestGetDownloadResponse_noShepherd(t *testing.T) { Range: 0, } - bk := gen3.NewGen3Backend(mockGen3) + bk := transfer.New(mockGen3, logs.NewGen3Logger(nil, "", "test"), gen3signer.New(mockGen3, &conf.Credential{}, mockDrs, mockFence)) err := download.GetDownloadResponse(context.Background(), bk, &mockFDRObj, "") if err != nil { t.Fatalf("Unexpected error: %v", err) @@ -137,9 +154,12 @@ func TestGeneratePresignedUploadURL_noShepherd(t *testing.T) { mockGen3 := mocks.NewMockGen3Interface(mockCtrl) mockFence := mocks.NewMockFenceInterface(mockCtrl) + mockDrs := mocks.NewMockDrsClient(mockCtrl) - mockGen3.EXPECT().GetCredential().Return(&conf.Credential{}).AnyTimes() - mockGen3.EXPECT().Fence().Return(mockFence).AnyTimes() + mockGen3.EXPECT().Credentials().Return(&staticCredentialsManager{cred: &conf.Credential{}}).AnyTimes() + mockGen3.EXPECT().FenceClient().Return(mockFence).AnyTimes() + mockGen3.EXPECT().DRSClient().Return(mockDrs).AnyTimes() + mockGen3.EXPECT().Logger().Return(logs.NewGen3Logger(nil, "", "test")).AnyTimes() // No Shepherd mockFence.EXPECT(). 
@@ -153,7 +173,8 @@ func TestGeneratePresignedUploadURL_noShepherd(t *testing.T) { GUID: mockGUID, }, nil) - resp, err := upload.GeneratePresignedUploadURL(context.Background(), mockGen3, testFilename, common.FileMetadata{}, testBucketname) + bk := transfer.New(mockGen3, logs.NewGen3Logger(nil, "", "test"), gen3signer.New(mockGen3, &conf.Credential{}, mockDrs, mockFence)) + resp, err := upload.GeneratePresignedUploadURL(context.Background(), bk, testFilename, common.FileMetadata{}, testBucketname) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -161,8 +182,8 @@ func TestGeneratePresignedUploadURL_noShepherd(t *testing.T) { if resp.URL != mockPresignedURL { t.Errorf("Wanted URL %s, got %s", mockPresignedURL, resp.URL) } - if resp.GUID != mockGUID { - t.Errorf("Wanted GUID %s, got %s", mockGUID, resp.GUID) + if resp.GUID != "" { + t.Errorf("Wanted empty GUID, got %s", resp.GUID) } } @@ -183,9 +204,12 @@ func TestGeneratePresignedUploadURL_withShepherd(t *testing.T) { mockGen3 := mocks.NewMockGen3Interface(mockCtrl) mockFence := mocks.NewMockFenceInterface(mockCtrl) + mockDrs := mocks.NewMockDrsClient(mockCtrl) - mockGen3.EXPECT().GetCredential().Return(&conf.Credential{AccessToken: "token"}).AnyTimes() - mockGen3.EXPECT().Fence().Return(mockFence).AnyTimes() + mockGen3.EXPECT().Credentials().Return(&staticCredentialsManager{cred: &conf.Credential{AccessToken: "token"}}).AnyTimes() + mockGen3.EXPECT().FenceClient().Return(mockFence).AnyTimes() + mockGen3.EXPECT().DRSClient().Return(mockDrs).AnyTimes() + mockGen3.EXPECT().Logger().Return(logs.NewGen3Logger(nil, "", "test")).AnyTimes() // Shepherd is deployed mockFence.EXPECT(). @@ -204,7 +228,8 @@ func TestGeneratePresignedUploadURL_withShepherd(t *testing.T) { Do(gomock.Any(), gomock.Any()). 
Return(shepherdResp, nil) - respObj, err := upload.GeneratePresignedUploadURL(context.Background(), mockGen3, testFilename, testMetadata, testBucketname) + bk := transfer.New(mockGen3, logs.NewGen3Logger(nil, "", "test"), gen3signer.New(mockGen3, &conf.Credential{AccessToken: "token", APIEndpoint: "https://example.com"}, mockDrs, mockFence)) + respObj, err := upload.GeneratePresignedUploadURL(context.Background(), bk, testFilename, testMetadata, testBucketname) if err != nil { t.Fatalf("Unexpected error: %v", err) } @@ -212,7 +237,7 @@ func TestGeneratePresignedUploadURL_withShepherd(t *testing.T) { if respObj.URL != mockPresignedURL { t.Errorf("Wanted URL %s, got %s", mockPresignedURL, respObj.URL) } - if respObj.GUID != mockGUID { - t.Errorf("Wanted GUID %s, got %s", mockGUID, respObj.GUID) + if respObj.GUID != "" { + t.Errorf("Wanted empty GUID, got %s", respObj.GUID) } } diff --git a/transfer/http.go b/transfer/http.go new file mode 100644 index 0000000..3653e2a --- /dev/null +++ b/transfer/http.go @@ -0,0 +1,69 @@ +package transfer + +import ( + "context" + "fmt" + "io" + "net/http" + "strconv" + "strings" + + "github.com/calypr/data-client/common" + "github.com/calypr/data-client/request" +) + +// ResolveRange parses range information from FileDownloadResponseObject. +func ResolveRange(fdr *common.FileDownloadResponseObject) (start int64, end *int64, ok bool) { + if fdr == nil { + return 0, nil, false + } + if fdr.RangeStart != nil { + return *fdr.RangeStart, fdr.RangeEnd, true + } + if fdr.Range > 0 { + return fdr.Range, nil, true + } + return 0, nil, false +} + +// DoUpload performs a presigned PUT request and returns ETag when available. 
+func DoUpload(ctx context.Context, req request.RequestInterface, url string, body io.Reader, size int64) (string, error) { + rb := req.New(http.MethodPut, url).WithBody(body).WithSkipAuth(true) + if size > 0 { + rb.PartSize = size + } + + resp, err := req.Do(ctx, rb) + if err != nil { + return "", fmt.Errorf("upload to %s failed: %w", url, err) + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + bodyBytes, _ := io.ReadAll(resp.Body) + return "", fmt.Errorf("upload to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) + } + + return strings.Trim(resp.Header.Get("ETag"), `"`), nil +} + +// GenericDownload performs GET (optionally ranged) against a signed URL. +func GenericDownload(ctx context.Context, req request.RequestInterface, fdr *common.FileDownloadResponseObject) (*http.Response, error) { + skipAuth := common.IsCloudPresignedURL(fdr.PresignedURL) + + rb := req.New(http.MethodGet, fdr.PresignedURL) + start, end, hasRange := ResolveRange(fdr) + if hasRange { + rangeHeader := "bytes=" + strconv.FormatInt(start, 10) + "-" + if end != nil { + rangeHeader += strconv.FormatInt(*end, 10) + } + rb.WithHeader("Range", rangeHeader) + } + + if skipAuth { + rb.WithSkipAuth(true) + } + + return req.Do(ctx, rb) +} diff --git a/transfer/interface.go b/transfer/interface.go new file mode 100644 index 0000000..3df9d8b --- /dev/null +++ b/transfer/interface.go @@ -0,0 +1,41 @@ +package transfer + +import ( + "context" + "io" + "net/http" + + "github.com/calypr/data-client/common" + "github.com/calypr/data-client/logs" +) + +// Service captures identity and logging for a transfer implementation. +type Service interface { + Name() string + Logger() *logs.Gen3Logger +} + +// Downloader is the signed URL resolution and byte download surface. 
+type Downloader interface { + Service + ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) + Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) +} + +// Uploader is the signed URL and multipart upload surface. +type Uploader interface { + Service + ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) + InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) + GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) + CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error + Upload(ctx context.Context, url string, body io.Reader, size int64) error + UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) + DeleteFile(ctx context.Context, guid string) (string, error) +} + +// Backend is the composed transfer surface used by upload/download workflows. +type Backend interface { + Downloader + Uploader +} diff --git a/transfer/service.go b/transfer/service.go new file mode 100644 index 0000000..edb8d3a --- /dev/null +++ b/transfer/service.go @@ -0,0 +1,76 @@ +package transfer + +import ( + "context" + "io" + "net/http" + + "github.com/calypr/data-client/common" + "github.com/calypr/data-client/logs" + "github.com/calypr/data-client/request" +) + +// Signer defines mode-specific signed URL and multipart orchestration. 
+type Signer interface { + Name() string + ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) + ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) + InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) + GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) + CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error + DeleteFile(ctx context.Context, guid string) (string, error) +} + +type client struct { + req request.RequestInterface + logger *logs.Gen3Logger + signer Signer +} + +func New(req request.RequestInterface, logger *logs.Gen3Logger, signer Signer) Backend { + return &client{ + req: req, + logger: logger, + signer: signer, + } +} + +func (c *client) Name() string { return c.signer.Name() } +func (c *client) Logger() *logs.Gen3Logger { return c.logger } + +func (c *client) DeleteFile(ctx context.Context, guid string) (string, error) { + return c.signer.DeleteFile(ctx, guid) +} + +func (c *client) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { + return GenericDownload(ctx, c.req, fdr) +} + +func (c *client) ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { + return c.signer.ResolveDownloadURL(ctx, guid, accessID) +} + +func (c *client) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { + return c.signer.ResolveUploadURL(ctx, guid, filename, metadata, bucket) +} + +func (c *client) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { + return c.signer.InitMultipartUpload(ctx, guid, filename, bucket) +} + 
+func (c *client) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { + return c.signer.GetMultipartUploadURL(ctx, key, uploadID, partNumber, bucket) +} + +func (c *client) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { + return c.signer.CompleteMultipartUpload(ctx, key, uploadID, parts, bucket) +} + +func (c *client) Upload(ctx context.Context, url string, body io.Reader, size int64) error { + _, err := DoUpload(ctx, c.req, url, body, size) + return err +} + +func (c *client) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { + return DoUpload(ctx, c.req, url, body, size) +} diff --git a/transfer/signer/gen3/signer.go b/transfer/signer/gen3/signer.go new file mode 100644 index 0000000..859aabf --- /dev/null +++ b/transfer/signer/gen3/signer.go @@ -0,0 +1,130 @@ +package gen3 + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "strings" + + "github.com/calypr/data-client/common" + "github.com/calypr/data-client/conf" + "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/fence" + "github.com/calypr/data-client/request" +) + +type Signer struct { + req request.RequestInterface + cred *conf.Credential + drs drs.Client + fence fence.FenceInterface +} + +func New(req request.RequestInterface, cred *conf.Credential, dc drs.Client, fc fence.FenceInterface) *Signer { + return &Signer{ + req: req, + cred: cred, + drs: dc, + fence: fc, + } +} + +func (g *Signer) Name() string { + return "Gen3" +} + +func (g *Signer) DeleteFile(ctx context.Context, guid string) (string, error) { + return g.fence.DeleteRecord(ctx, guid) +} + +func (g *Signer) ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { + url, err := g.fence.GetDownloadPresignedUrl(ctx, guid, accessID) + if err == nil && url != "" { + return url, nil + } + 
resolved, errIdx := drs.ResolveDownloadURL(ctx, g.drs, guid, accessID) + if errIdx == nil { + return resolved, nil + } + if err != nil { + return "", err + } + return "", errIdx +} + +func (g *Signer) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { + hasShepherd, err := g.fence.CheckForShepherdAPI(ctx) + if err != nil || !hasShepherd { + var msg fence.FenceResponse + if guid != "" { + msg, err = g.fence.GetUploadPresignedUrl(ctx, guid, filename, bucket) + } else { + msg, err = g.fence.InitUpload(ctx, filename, bucket, "") + } + if err != nil { + return "", err + } + if msg.URL == "" { + return "", fmt.Errorf("error generating presigned upload URL for %s", filename) + } + return msg.URL, nil + } + + payload := common.ShepherdInitRequestObject{ + Filename: filename, + Authz: common.ShepherdAuthz{ + Version: "0", ResourcePaths: metadata.Authz, + }, + Aliases: metadata.Aliases, + Metadata: metadata.Metadata, + } + reader, err := common.ToJSONReader(payload) + if err != nil { + return "", err + } + + resp, err := g.fence.Do(ctx, &request.RequestBuilder{ + Url: g.cred.APIEndpoint + common.ShepherdEndpoint + "/objects", + Method: http.MethodPost, + Body: reader, + Token: g.cred.AccessToken, + }) + if err != nil { + return "", fmt.Errorf("shepherd upload init failed: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("shepherd upload init failed with status %d", resp.StatusCode) + } + + var res common.PresignedURLResponse + if err := json.NewDecoder(resp.Body).Decode(&res); err != nil { + return "", err + } + return res.URL, nil +} + +func (g *Signer) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { + res, err := g.fence.InitMultipartUpload(ctx, filename, bucket, guid) + if err != nil { + return nil, err + } + if 
strings.TrimSpace(res.UploadID) == "" { + return nil, fmt.Errorf("fence multipart init did not return uploadId") + } + return &common.MultipartUploadInit{GUID: res.GUID, UploadID: res.UploadID}, nil +} + +func (g *Signer) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { + return g.fence.GenerateMultipartPresignedURL(ctx, key, uploadID, int(partNumber), bucket) +} + +func (g *Signer) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { + fParts := make([]fence.MultipartPart, len(parts)) + for i, p := range parts { + fParts[i] = fence.MultipartPart{PartNumber: int(p.PartNumber), ETag: p.ETag} + } + return g.fence.CompleteMultipartUpload(ctx, key, uploadID, fParts, bucket) +} diff --git a/transfer/signer/local/signer.go b/transfer/signer/local/signer.go new file mode 100644 index 0000000..822b38a --- /dev/null +++ b/transfer/signer/local/signer.go @@ -0,0 +1,206 @@ +package local + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/calypr/data-client/common" + drs "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/request" +) + +type Signer struct { + baseURL string + req request.RequestInterface + client drs.Client +} + +func New(baseURL string, req request.RequestInterface, dc drs.Client) *Signer { + return &Signer{ + baseURL: baseURL, + req: req, + client: dc, + } +} + +func (d *Signer) Name() string { return "DRS" } + +func (d *Signer) DeleteFile(ctx context.Context, guid string) (string, error) { + return "", fmt.Errorf("DeleteFile not implemented for local DRS signer") +} + +func (d *Signer) buildURL(paths ...string) (string, error) { + u, err := url.Parse(d.baseURL) + if err != nil { + return "", err + } + fullPath := u.Path + for _, p := range paths { + if p == "" { + continue + } + if !strings.HasSuffix(fullPath, "/") && 
!strings.HasPrefix(p, "/") { + fullPath += "/" + } + fullPath += p + } + u.Path = fullPath + return u.String(), nil +} + +func (d *Signer) doJSONRequest(ctx context.Context, method, url string, body interface{}, dst interface{}) error { + rb := d.req.New(method, url) + if body != nil { + if _, err := rb.WithJSONBody(body); err != nil { + return err + } + } + + resp, err := d.req.Do(ctx, rb) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + bodyBytes, _ := io.ReadAll(resp.Body) + return fmt.Errorf("request to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) + } + + if dst != nil { + return json.NewDecoder(resp.Body).Decode(dst) + } + return nil +} + +func (d *Signer) ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { + return drs.ResolveDownloadURL(ctx, d.client, guid, accessID) +} + +func (d *Signer) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { + u, err := d.buildURL("data/upload", guid) + if err != nil { + return "", err + } + q := url.Values{} + if strings.TrimSpace(filename) != "" { + q.Set("file_name", filename) + } + if bucket != "" { + q.Set("bucket", bucket) + } + if encoded := q.Encode(); encoded != "" { + u += "?" 
+ encoded + } + + var res struct { + URL string `json:"url"` + } + if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &res); err != nil { + return "", err + } + return res.URL, nil +} + +func (d *Signer) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { + u, err := d.buildURL("data/multipart/init") + if err != nil { + return nil, err + } + + req := struct { + GUID string `json:"guid,omitempty"` + FileName string `json:"file_name,omitempty"` + Bucket string `json:"bucket,omitempty"` + }{ + GUID: guid, + FileName: filename, + Bucket: bucket, + } + + var res struct { + GUID string `json:"guid"` + UploadID string `json:"uploadId"` + } + if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &res); err != nil { + return nil, err + } + if res.UploadID == "" { + return nil, fmt.Errorf("server did not return uploadId") + } + + return &common.MultipartUploadInit{ + GUID: res.GUID, + UploadID: res.UploadID, + }, nil +} + +func (d *Signer) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { + u, err := d.buildURL("data/multipart/upload") + if err != nil { + return "", err + } + + req := struct { + Key string `json:"key"` + Bucket string `json:"bucket,omitempty"` + UploadID string `json:"uploadId"` + PartNumber int32 `json:"partNumber"` + }{ + Key: key, + Bucket: bucket, + UploadID: uploadID, + PartNumber: partNumber, + } + + var res struct { + PresignedURL string `json:"presigned_url"` + } + if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &res); err != nil { + return "", err + } + if res.PresignedURL == "" { + return "", fmt.Errorf("server did not return presigned_url") + } + return res.PresignedURL, nil +} + +func (d *Signer) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { + u, err := d.buildURL("data/multipart/complete") + if err != 
nil { + return err + } + + reqParts := make([]struct { + PartNumber int32 `json:"PartNumber"` + ETag string `json:"ETag"` + }, len(parts)) + for i, p := range parts { + reqParts[i] = struct { + PartNumber int32 `json:"PartNumber"` + ETag string `json:"ETag"` + }{ + PartNumber: p.PartNumber, + ETag: p.ETag, + } + } + + req := struct { + Key string `json:"key"` + Bucket string `json:"bucket,omitempty"` + UploadID string `json:"uploadId"` + Parts any `json:"parts"` + }{ + Key: key, + Bucket: bucket, + UploadID: uploadID, + Parts: reqParts, + } + + return d.doJSONRequest(ctx, http.MethodPost, u, req, nil) +} diff --git a/transfer/storage/gocloud.go b/transfer/storage/gocloud.go new file mode 100644 index 0000000..a0c1dd9 --- /dev/null +++ b/transfer/storage/gocloud.go @@ -0,0 +1,89 @@ +package storage + +import ( + "bytes" + "context" + "fmt" + "io" + "time" + + "gocloud.dev/blob" + _ "gocloud.dev/blob/azureblob" + _ "gocloud.dev/blob/gcsblob" + _ "gocloud.dev/blob/s3blob" +) + +// Bucket abstracts cross-cloud object operations used by transfer paths. 
+type Bucket interface { + Upload(ctx context.Context, key string, body io.Reader) error + Download(ctx context.Context, key string) ([]byte, error) + SignedDownloadURL(ctx context.Context, key string, ttl time.Duration) (string, error) + SignedUploadURL(ctx context.Context, key string, ttl time.Duration) (string, error) + Close() error +} + +type GoCloudBucket struct { + b *blob.Bucket +} + +// Open opens a go-cloud bucket URL, e.g.: +// s3://bucket, gs://bucket, azblob://container +func Open(ctx context.Context, bucketURL string) (Bucket, error) { + b, err := blob.OpenBucket(ctx, bucketURL) + if err != nil { + return nil, err + } + return &GoCloudBucket{b: b}, nil +} + +func (g *GoCloudBucket) Upload(ctx context.Context, key string, body io.Reader) error { + w, err := g.b.NewWriter(ctx, key, nil) + if err != nil { + return err + } + if _, err = io.Copy(w, body); err != nil { + _ = w.Close() + return err + } + return w.Close() +} + +func (g *GoCloudBucket) Download(ctx context.Context, key string) ([]byte, error) { + r, err := g.b.NewReader(ctx, key, nil) + if err != nil { + return nil, err + } + defer r.Close() + var buf bytes.Buffer + if _, err := io.Copy(&buf, r); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (g *GoCloudBucket) SignedDownloadURL(ctx context.Context, key string, ttl time.Duration) (string, error) { + u, err := g.b.SignedURL(ctx, key, &blob.SignedURLOptions{ + Method: "GET", + Expiry: ttl, + ContentType: "", + }) + if err != nil { + return "", fmt.Errorf("signed download url failed: %w", err) + } + return u, nil +} + +func (g *GoCloudBucket) SignedUploadURL(ctx context.Context, key string, ttl time.Duration) (string, error) { + u, err := g.b.SignedURL(ctx, key, &blob.SignedURLOptions{ + Method: "PUT", + Expiry: ttl, + }) + if err != nil { + return "", fmt.Errorf("signed upload url failed: %w", err) + } + return u, nil +} + +func (g *GoCloudBucket) Close() error { + return g.b.Close() +} diff --git a/upload/batch.go 
b/upload/batch.go index 41aea65..bfa8c13 100644 --- a/upload/batch.go +++ b/upload/batch.go @@ -3,14 +3,13 @@ package upload import ( "context" "fmt" - "io" "net/http" "os" "sync" "github.com/calypr/data-client/common" - client "github.com/calypr/data-client/g3client" - "github.com/calypr/data-client/request" + "github.com/calypr/data-client/logs" + "github.com/calypr/data-client/transfer" "github.com/vbauerster/mpb/v8" "github.com/vbauerster/mpb/v8/decor" ) @@ -33,7 +32,8 @@ func InitBatchUploadChannels(numParallel int, inputSliceLen int) (int, chan *htt func BatchUpload( ctx context.Context, - g3i client.Gen3Interface, + bk transfer.Uploader, + logger *logs.Gen3Logger, furObjects []common.FileUploadRequestObject, workers int, respCh chan *http.Response, @@ -63,21 +63,21 @@ func BatchUpload( for fur := range workCh { // --- Ensure presigned URL --- if fur.PresignedURL == "" { - resp, err := GeneratePresignedUploadURL(ctx, g3i, fur.ObjectKey, fur.FileMetadata, fur.Bucket) + resp, err := GeneratePresignedUploadURL(ctx, bk, fur.ObjectKey, fur.FileMetadata, fur.Bucket) if err != nil { - g3i.Logger().Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, "", 0, false) + logger.Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, "", 0, false) errCh <- err continue } fur.PresignedURL = resp.URL fur.GUID = resp.GUID - g3i.Logger().Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, resp.GUID, 0, false) // update log + logger.Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, resp.GUID, 0, false) // update log } // --- Open file --- file, err := os.Open(fur.SourcePath) if err != nil { - g3i.Logger().Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, fur.GUID, 0, false) + logger.Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, fur.GUID, 0, false) errCh <- fmt.Errorf("file open error: %w", err) continue } @@ -85,14 +85,14 @@ func BatchUpload( fi, err := file.Stat() if err != nil { file.Close() - g3i.Logger().Failed(fur.SourcePath, fur.ObjectKey, 
fur.FileMetadata, fur.GUID, 0, false) + logger.Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, fur.GUID, 0, false) errCh <- fmt.Errorf("file stat error: %w", err) continue } if fi.Size() > common.FileSizeLimit { file.Close() - g3i.Logger().Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, fur.GUID, 0, false) + logger.Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, fur.GUID, 0, false) errCh <- fmt.Errorf("file size exceeds limit: %s", fur.ObjectKey) continue } @@ -111,42 +111,23 @@ func BatchUpload( proxyReader := bar.ProxyReader(file) - // --- Upload using DoAuthenticatedRequest (no manual http.Request!) --- - resp, err := g3i.Fence().Do( - ctx, - &request.RequestBuilder{ - Method: http.MethodPut, - Url: fur.PresignedURL, - Body: proxyReader, - }, - ) + // --- Upload --- + err = bk.Upload(ctx, fur.PresignedURL, proxyReader, fi.Size()) // Cleanup file.Close() bar.Abort(false) if err != nil { - g3i.Logger().Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, fur.GUID, 0, false) + logger.Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, fur.GUID, 0, false) errCh <- err continue } - if resp.StatusCode != http.StatusOK { - bodyBytes, _ := io.ReadAll(resp.Body) - resp.Body.Close() - errMsg := fmt.Errorf("upload failed with status %d: %s", resp.StatusCode, string(bodyBytes)) - g3i.Logger().Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, fur.GUID, 0, false) - errCh <- errMsg - continue - } - - resp.Body.Close() - // Success - respCh <- resp - g3i.Logger().DeleteFromFailedLog(fur.SourcePath) - g3i.Logger().Succeeded(fur.SourcePath, fur.GUID) - g3i.Logger().Scoreboard().IncrementSB(0) + logger.DeleteFromFailedLog(fur.SourcePath) + logger.Succeeded(fur.SourcePath, fur.GUID) + logger.Scoreboard().IncrementSB(0) } }() } diff --git a/upload/multipart.go b/upload/multipart.go index dded504..277c367 100644 --- a/upload/multipart.go +++ b/upload/multipart.go @@ -2,11 +2,15 @@ package upload import ( "context" + "crypto/sha256" + 
"encoding/hex" + "encoding/json" "errors" "fmt" "io" "net/http" "os" + "path/filepath" "sort" "strings" "sync" @@ -14,13 +18,29 @@ import ( "github.com/calypr/data-client/common" "github.com/calypr/data-client/fence" - client "github.com/calypr/data-client/g3client" + "github.com/calypr/data-client/transfer" "github.com/vbauerster/mpb/v8" "github.com/vbauerster/mpb/v8/decor" ) -func MultipartUpload(ctx context.Context, g3 client.Gen3Interface, req common.FileUploadRequestObject, file *os.File, showProgress bool) error { - g3.Logger().InfoContext(ctx, "File Upload Request", "request", req) +type multipartResumeState struct { + SourcePath string `json:"source_path"` + ObjectKey string `json:"object_key"` + GUID string `json:"guid"` + Bucket string `json:"bucket"` + FileSize int64 `json:"file_size"` + FileModUnixNano int64 `json:"file_mod_unix_nano"` + ChunkSize int64 `json:"chunk_size"` + UploadID string `json:"upload_id"` + FinalGUID string `json:"final_guid"` + Key string `json:"key"` + Completed map[int]string `json:"completed"` +} + +func MultipartUpload(ctx context.Context, bk transfer.Uploader, req common.FileUploadRequestObject, file *os.File, showProgress bool) error { + bk.Logger().DebugContext(ctx, "File Multipart Upload Request", "request", req) + failUploadOnce := strings.TrimSpace(os.Getenv("DATA_CLIENT_TEST_FAIL_UPLOAD_PART_ONCE")) == "1" + var injectedUploadFailure atomic.Bool stat, err := file.Stat() if err != nil { @@ -48,36 +68,63 @@ func MultipartUpload(ctx context.Context, g3 client.Gen3Interface, req common.Fi ) } - // 1. Initialize multipart upload - uploadID, finalGUID, err := initMultipartUpload(ctx, g3, req, req.Bucket) + chunkSize := OptimalChunkSize(fileSize) + checkpointPath, err := multipartCheckpointPath(req) if err != nil { - return fmt.Errorf("failed to initiate multipart upload: %w", err) + return err } - - // 2. Use the exact key passed during multipart init. 
- // The server creates the multipart session for `file_name=req.ObjectKey`. - // Rewriting the key client-side (for example prefixing GUID again) can cause - // "NoSuchUpload" because UploadPart/Complete then target a different object key. - key := req.ObjectKey - g3.Logger().InfoContext(ctx, "Initialized Upload", "id", uploadID, "guid", finalGUID, "key", key) - - chunkSize := OptimalChunkSize(fileSize) + state, loaded := loadMultipartState(checkpointPath) + if !loaded || !state.matches(req, stat, chunkSize) { + uploadID, finalGUID, initErr := initMultipartUpload(ctx, bk, req, req.Bucket) + if initErr != nil { + return fmt.Errorf("failed to initiate multipart upload: %w", initErr) + } + state = &multipartResumeState{ + SourcePath: req.SourcePath, + ObjectKey: req.ObjectKey, + GUID: req.GUID, + Bucket: req.Bucket, + FileSize: fileSize, + FileModUnixNano: stat.ModTime().UnixNano(), + ChunkSize: chunkSize, + UploadID: uploadID, + FinalGUID: finalGUID, + Key: req.ObjectKey, + Completed: map[int]string{}, + } + if saveErr := saveMultipartState(checkpointPath, state); saveErr != nil { + return saveErr + } + } + uploadID := state.UploadID + key := state.Key + bk.Logger().DebugContext(ctx, "Initialized Upload", "id", uploadID, "guid", state.FinalGUID, "key", key) numChunks := int((fileSize + chunkSize - 1) / chunkSize) chunks := make(chan int, numChunks) - for i := 1; i <= numChunks; i++ { - chunks <- i + for partNum := 1; partNum <= numChunks; partNum++ { + if _, ok := state.Completed[partNum]; ok { + continue + } + chunks <- partNum } close(chunks) var ( wg sync.WaitGroup mu sync.Mutex - parts []fence.MultipartPart uploadErrors []error totalBytes int64 // Atomic counter for monotonically increasing BytesSoFar ) + for partNum := range state.Completed { + offset := int64(partNum-1) * chunkSize + size := chunkSize + if offset+size > fileSize { + size = fileSize - offset + } + totalBytes += size + } progressCallback := common.GetProgress(ctx) oid := common.GetOid(ctx) @@ 
-90,6 +137,12 @@ func MultipartUpload(ctx context.Context, g3 client.Gen3Interface, req common.Fi defer wg.Done() for partNum := range chunks { + if failUploadOnce && injectedUploadFailure.CompareAndSwap(false, true) { + mu.Lock() + uploadErrors = append(uploadErrors, fmt.Errorf("injected test interruption before multipart part %d", partNum)) + mu.Unlock() + return + } offset := int64(partNum-1) * chunkSize size := chunkSize @@ -101,7 +154,7 @@ func MultipartUpload(ctx context.Context, g3 client.Gen3Interface, req common.Fi // It allows each worker to read its own segment without a shared buffer. section := io.NewSectionReader(file, offset, size) - url, err := generateMultipartPresignedURL(ctx, g3, key, uploadID, partNum, req.Bucket) + url, err := generateMultipartPresignedURL(ctx, bk, key, uploadID, partNum, req.Bucket) if err != nil { mu.Lock() uploadErrors = append(uploadErrors, fmt.Errorf("URL generation failed part %d: %w", partNum, err)) @@ -119,10 +172,12 @@ func MultipartUpload(ctx context.Context, g3 client.Gen3Interface, req common.Fi } mu.Lock() - parts = append(parts, fence.MultipartPart{ - PartNumber: partNum, - ETag: etag, - }) + state.Completed[partNum] = etag + if err := saveMultipartState(checkpointPath, state); err != nil { + uploadErrors = append(uploadErrors, fmt.Errorf("failed to persist multipart resume checkpoint: %w", err)) + mu.Unlock() + return + } if bar != nil { bar.IncrInt64(size) } @@ -155,21 +210,28 @@ func MultipartUpload(ctx context.Context, g3 client.Gen3Interface, req common.Fi } // 5. 
Finalize the upload + parts := make([]fence.MultipartPart, 0, len(state.Completed)) + for partNum, etag := range state.Completed { + parts = append(parts, fence.MultipartPart{ + PartNumber: partNum, + ETag: etag, + }) + } sort.Slice(parts, func(i, j int) bool { return parts[i].PartNumber < parts[j].PartNumber }) - if err := CompleteMultipartUpload(ctx, g3, key, uploadID, parts, req.Bucket); err != nil { + if err := CompleteMultipartUpload(ctx, bk, key, uploadID, parts, req.Bucket); err != nil { return fmt.Errorf("failed to complete multipart upload: %w", err) } - g3.Logger().InfoContext(ctx, "Successfully uploaded", "file", req.ObjectKey, "key", key) - g3.Logger().SucceededContext(ctx, req.SourcePath, req.GUID) + bk.Logger().DebugContext(ctx, "Successfully uploaded", "file", req.ObjectKey, "key", key) + _ = os.Remove(checkpointPath) return nil } -func initMultipartUpload(ctx context.Context, g3 client.Gen3Interface, furObject common.FileUploadRequestObject, bucketName string) (string, string, error) { - msg, err := g3.Fence().InitMultipartUpload(ctx, furObject.ObjectKey, bucketName, furObject.GUID) +func initMultipartUpload(ctx context.Context, bk transfer.Uploader, furObject common.FileUploadRequestObject, bucketName string) (string, string, error) { + msg, err := bk.InitMultipartUpload(ctx, furObject.GUID, furObject.ObjectKey, bucketName) if err != nil { if strings.Contains(err.Error(), "404") { @@ -184,8 +246,8 @@ func initMultipartUpload(ctx context.Context, g3 client.Gen3Interface, furObject return msg.UploadID, msg.GUID, nil } -func generateMultipartPresignedURL(ctx context.Context, g3 client.Gen3Interface, key string, uploadID string, partNumber int, bucketName string) (string, error) { - url, err := g3.Fence().GenerateMultipartPresignedURL(ctx, key, uploadID, partNumber, bucketName) +func generateMultipartPresignedURL(ctx context.Context, bk transfer.Uploader, key string, uploadID string, partNumber int, bucketName string) (string, error) { + url, err := 
bk.GetMultipartUploadURL(ctx, key, uploadID, int32(partNumber), bucketName) if err != nil { return "", errors.New("Error has occurred during multipart upload presigned url generation, detailed error message: " + err.Error()) } @@ -196,8 +258,12 @@ func generateMultipartPresignedURL(ctx context.Context, g3 client.Gen3Interface, return url, nil } -func CompleteMultipartUpload(ctx context.Context, g3 client.Gen3Interface, key string, uploadID string, parts []fence.MultipartPart, bucketName string) error { - err := g3.Fence().CompleteMultipartUpload(ctx, key, uploadID, parts, bucketName) +func CompleteMultipartUpload(ctx context.Context, bk transfer.Uploader, key string, uploadID string, parts []fence.MultipartPart, bucketName string) error { + cParts := make([]common.MultipartUploadPart, len(parts)) + for i, p := range parts { + cParts[i] = common.MultipartUploadPart{PartNumber: int32(p.PartNumber), ETag: p.ETag} + } + err := bk.CompleteMultipartUpload(ctx, key, uploadID, cParts, bucketName) if err != nil { return errors.New("Error has occurred during completing multipart upload, detailed error message: " + err.Error()) } @@ -232,3 +298,63 @@ func uploadPart(ctx context.Context, url string, data io.Reader, partSize int64) return strings.Trim(etag, `"`), nil } + +func (s *multipartResumeState) matches(req common.FileUploadRequestObject, info os.FileInfo, chunkSize int64) bool { + if s == nil { + return false + } + return s.SourcePath == req.SourcePath && + s.ObjectKey == req.ObjectKey && + s.GUID == req.GUID && + s.Bucket == req.Bucket && + s.FileSize == info.Size() && + s.FileModUnixNano == info.ModTime().UnixNano() && + s.ChunkSize == chunkSize && + s.UploadID != "" && + s.Key != "" +} + +func multipartCheckpointPath(req common.FileUploadRequestObject) (string, error) { + cacheDir := strings.TrimSpace(os.Getenv("DATA_CLIENT_CACHE_DIR")) + if cacheDir == "" { + var err error + cacheDir, err = os.UserCacheDir() + if err != nil || cacheDir == "" { + cacheDir = 
os.TempDir() + } + } + base := filepath.Join(cacheDir, "calypr", "data-client", "multipart-resume") + if err := os.MkdirAll(base, 0o755); err != nil { + return "", err + } + sum := sha256.Sum256([]byte(req.SourcePath + "|" + req.ObjectKey + "|" + req.GUID + "|" + req.Bucket)) + name := hex.EncodeToString(sum[:]) + ".json" + return filepath.Join(base, name), nil +} + +func loadMultipartState(path string) (*multipartResumeState, bool) { + data, err := os.ReadFile(path) + if err != nil { + return nil, false + } + var st multipartResumeState + if err := json.Unmarshal(data, &st); err != nil { + return nil, false + } + if st.Completed == nil { + st.Completed = map[int]string{} + } + return &st, true +} + +func saveMultipartState(path string, state *multipartResumeState) error { + data, err := json.Marshal(state) + if err != nil { + return err + } + tmpPath := path + ".tmp" + if err := os.WriteFile(tmpPath, data, 0o644); err != nil { + return err + } + return os.Rename(tmpPath, path) +} diff --git a/upload/multipart_test.go b/upload/multipart_test.go index 2759f48..0e321a5 100644 --- a/upload/multipart_test.go +++ b/upload/multipart_test.go @@ -10,18 +10,16 @@ import ( "net/http/httptest" "net/url" "os" + "path/filepath" "strings" "sync" + "sync/atomic" "testing" "github.com/calypr/data-client/common" "github.com/calypr/data-client/conf" - "github.com/calypr/data-client/fence" - "github.com/calypr/data-client/indexd" "github.com/calypr/data-client/logs" "github.com/calypr/data-client/request" - "github.com/calypr/data-client/requestor" - "github.com/calypr/data-client/sower" ) type fakeGen3Upload struct { @@ -30,15 +28,8 @@ type fakeGen3Upload struct { doFunc func(context.Context, *request.RequestBuilder) (*http.Response, error) } -func (f *fakeGen3Upload) GetCredential() *conf.Credential { return f.cred } -func (f *fakeGen3Upload) Logger() *logs.Gen3Logger { return f.logger } -func (f *fakeGen3Upload) ExportCredential(ctx context.Context, cred *conf.Credential) error { 
- return nil -} -func (f *fakeGen3Upload) Fence() fence.FenceInterface { return &fakeFence{doFunc: f.doFunc} } -func (f *fakeGen3Upload) Indexd() indexd.IndexdInterface { return &fakeIndexd{doFunc: f.doFunc} } -func (f *fakeGen3Upload) Sower() sower.SowerInterface { return nil } -func (f *fakeGen3Upload) Requestor() requestor.RequestorInterface { return nil } +func (f *fakeGen3Upload) Name() string { return "fake" } +func (f *fakeGen3Upload) Logger() *logs.Gen3Logger { return f.logger } func (f *fakeGen3Upload) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { return f.doFunc(ctx, req) @@ -47,48 +38,51 @@ func (f *fakeGen3Upload) New(method, url string) *request.RequestBuilder { return &request.RequestBuilder{Method: method, Url: url} } -type fakeFence struct { - fence.FenceInterface - doFunc func(context.Context, *request.RequestBuilder) (*http.Response, error) +func (f *fakeGen3Upload) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { + return "", fmt.Errorf("not implemented") } -func (f *fakeFence) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { - return f.doFunc(ctx, req) -} -func (f *fakeFence) InitMultipartUpload(ctx context.Context, filename string, bucket string, guid string) (fence.FenceResponse, error) { +func (f *fakeGen3Upload) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { resp, err := f.Do(ctx, &request.RequestBuilder{Url: common.FenceDataMultipartInitEndpoint}) if err != nil { - return fence.FenceResponse{}, err + return nil, err } - return f.ParseFenceURLResponse(resp) + defer resp.Body.Close() + var msg struct { + UploadID string `json:"uploadId"` + GUID string `json:"guid"` + } + if err := json.NewDecoder(resp.Body).Decode(&msg); err != nil { + return nil, err + } + return &common.MultipartUploadInit{GUID: msg.GUID, UploadID: 
msg.UploadID}, nil } -func (f *fakeFence) GenerateMultipartPresignedURL(ctx context.Context, key string, uploadID string, partNumber int, bucket string) (string, error) { +func (f *fakeGen3Upload) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { resp, err := f.Do(ctx, &request.RequestBuilder{Url: common.FenceDataMultipartUploadEndpoint}) if err != nil { return "", err } - msg, err := f.ParseFenceURLResponse(resp) - return msg.PresignedURL, err + defer resp.Body.Close() + var msg struct { + PresignedURL string `json:"presigned_url"` + } + if err := json.NewDecoder(resp.Body).Decode(&msg); err != nil { + return "", err + } + return msg.PresignedURL, nil } -func (f *fakeFence) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []fence.MultipartPart, bucket string) error { +func (f *fakeGen3Upload) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { _, err := f.Do(ctx, &request.RequestBuilder{Url: common.FenceDataMultipartCompleteEndpoint}) return err } -func (f *fakeFence) ParseFenceURLResponse(resp *http.Response) (fence.FenceResponse, error) { - var msg fence.FenceResponse - if resp != nil && resp.Body != nil { - json.NewDecoder(resp.Body).Decode(&msg) - } - return msg, nil +func (f *fakeGen3Upload) Upload(ctx context.Context, url string, body io.Reader, size int64) error { + return nil } - -type fakeIndexd struct { - indexd.IndexdInterface - doFunc func(context.Context, *request.RequestBuilder) (*http.Response, error) +func (f *fakeGen3Upload) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { + return "", nil } - -func (f *fakeIndexd) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { - return f.doFunc(ctx, req) +func (f *fakeGen3Upload) DeleteFile(ctx context.Context, guid string) (string, error) { + return "", nil } 
func TestMultipartUploadProgressIntegration(t *testing.T) { @@ -181,6 +175,137 @@ func TestMultipartUploadProgressIntegration(t *testing.T) { } } +func TestMultipartUploadResumesWithoutReinit(t *testing.T) { + ctx := context.Background() + + var putCount atomic.Int64 + var failFirstPut atomic.Bool + failFirstPut.Store(true) + origTransport := http.DefaultClient.Transport + http.DefaultClient.Transport = roundTripFunc(func(req *http.Request) (*http.Response, error) { + if req.Method != http.MethodPut { + return &http.Response{ + StatusCode: http.StatusMethodNotAllowed, + Body: io.NopCloser(strings.NewReader("method not allowed")), + Header: make(http.Header), + Request: req, + }, nil + } + _, _ = io.Copy(io.Discard, req.Body) + _ = req.Body.Close() + + if failFirstPut.Load() && putCount.Load() == 0 { + putCount.Add(1) + return &http.Response{ + StatusCode: http.StatusInternalServerError, + Body: io.NopCloser(strings.NewReader("simulated failure")), + Header: make(http.Header), + Request: req, + }, nil + } + n := putCount.Add(1) + h := make(http.Header) + h.Set("ETag", fmt.Sprintf("etag-%d", n)) + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader("")), + Header: h, + Request: req, + }, nil + }) + defer func() { http.DefaultClient.Transport = origTransport }() + + tmp := t.TempDir() + t.Setenv("DATA_CLIENT_CACHE_DIR", tmp) + path := filepath.Join(tmp, "large.bin") + f, err := os.Create(path) + if err != nil { + t.Fatalf("create temp file: %v", err) + } + // Sparse file >100MB triggers multipart with multiple parts. 
+ if err := f.Truncate(120 * common.MB); err != nil { + _ = f.Close() + t.Fatalf("truncate temp file: %v", err) + } + if err := f.Close(); err != nil { + t.Fatalf("close temp file: %v", err) + } + + initCalls := 0 + completeCalls := 0 + logger := logs.NewGen3Logger(nil, "", "") + fake := &fakeGen3Upload{ + cred: &conf.Credential{ + APIEndpoint: "https://example.com", + AccessToken: "token", + }, + logger: logger, + doFunc: func(_ context.Context, req *request.RequestBuilder) (*http.Response, error) { + switch { + case strings.Contains(req.Url, common.FenceDataMultipartInitEndpoint): + initCalls++ + return newJSONResponse(req.Url, `{"uploadId":"upload-resume-1","guid":"guid-resume-1"}`), nil + case strings.Contains(req.Url, common.FenceDataMultipartUploadEndpoint): + return newJSONResponse(req.Url, `{"presigned_url":"https://upload.invalid/part"}`), nil + case strings.Contains(req.Url, common.FenceDataMultipartCompleteEndpoint): + completeCalls++ + return newJSONResponse(req.Url, `{}`), nil + default: + return nil, fmt.Errorf("unexpected request url: %s", req.Url) + } + }, + } + + req := common.FileUploadRequestObject{ + SourcePath: path, + ObjectKey: "resume.bin", + GUID: "guid-resume-1", + Bucket: "bucket", + } + checkpointPath, err := multipartCheckpointPath(req) + if err != nil { + t.Fatalf("checkpoint path: %v", err) + } + _ = os.Remove(checkpointPath) + + file1, err := os.Open(path) + if err != nil { + t.Fatalf("open file1: %v", err) + } + err = MultipartUpload(ctx, fake, req, file1, false) + _ = file1.Close() + if err == nil { + t.Fatal("expected first multipart upload to fail") + } + if initCalls != 1 { + t.Fatalf("expected one init after first run, got %d", initCalls) + } + if _, statErr := os.Stat(checkpointPath); statErr != nil { + t.Fatalf("expected checkpoint to exist after failure: %v", statErr) + } + + failFirstPut.Store(false) + file2, err := os.Open(path) + if err != nil { + t.Fatalf("open file2: %v", err) + } + err = MultipartUpload(ctx, fake, req, 
file2, false) + _ = file2.Close() + if err != nil { + t.Fatalf("resume multipart upload failed: %v", err) + } + + if initCalls != 1 { + t.Fatalf("expected resume to reuse existing upload init; init calls = %d", initCalls) + } + if completeCalls != 1 { + t.Fatalf("expected one complete call, got %d", completeCalls) + } + if _, statErr := os.Stat(checkpointPath); !os.IsNotExist(statErr) { + t.Fatalf("expected checkpoint cleanup after success, stat err: %v", statErr) + } +} + func newJSONResponse(rawURL, body string) *http.Response { parsedURL, err := url.Parse(rawURL) if err != nil { @@ -193,3 +318,9 @@ func newJSONResponse(rawURL, body string) *http.Response { Header: make(http.Header), } } + +type roundTripFunc func(*http.Request) (*http.Response, error) + +func (f roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return f(req) +} diff --git a/upload/orchestrator.go b/upload/orchestrator.go new file mode 100644 index 0000000..8da56f4 --- /dev/null +++ b/upload/orchestrator.go @@ -0,0 +1,121 @@ +package upload + +import ( + "context" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/calypr/data-client/common" + "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/transfer" +) + +// RegisterFile orchestrates the full registration and upload flow: +// 1. Build a DRS object from the local file (if not provided). +// 2. Register metadata with the DRS server via the provided drs.Client. +// 3. Upload the file content via the provided Backend. +func RegisterFile(ctx context.Context, bk UploadBackend, dc drs.Client, drsObject *drs.DRSObject, filePath string, bucketName string) (*drs.DRSObject, error) { + // 1. Ensure we have a valid OID/metadata. + // (Logic ported and generalized from git-drs/client/local/local_client.go) + + if drsObject == nil { + return nil, fmt.Errorf("drsObject must be provided (containing at least checksums/size)") + } + + // 2. 
Register with DRS server + res, err := dc.RegisterRecord(ctx, drsObject) + if err != nil { + return nil, fmt.Errorf("failed to register record: %w", err) + } + drsObject = res + + // 3. Check if file is already downloadable (optional but good optimization) + // (Skipping for now to prioritize core functionality, but can be added back) + + // 4. Determine upload filename/key + uploadFilename := filepath.Base(filePath) + if len(drsObject.AccessMethods) > 0 { + for _, am := range drsObject.AccessMethods { + if am.Type == "s3" || am.Type == "gs" { + if am.AccessUrl != nil && am.AccessUrl.Url != "" { + parts := strings.Split(am.AccessUrl.Url, "/") + if candidate := parts[len(parts)-1]; candidate != "" { + uploadFilename = candidate + break + } + } + } + } + } + + // 5. Perform Upload + file, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("failed to open file for upload: %w", err) + } + defer file.Close() + + stat, err := file.Stat() + if err != nil { + return nil, fmt.Errorf("failed to stat file: %w", err) + } + + threshold := int64(5 * common.GB) // Default threshold + if stat.Size() < threshold { + uploadURL, err := bk.ResolveUploadURL(ctx, drsObject.Id, uploadFilename, common.FileMetadata{}, bucketName) + if err != nil { + return nil, fmt.Errorf("failed to get upload URL: %w", err) + } + if err := bk.Upload(ctx, uploadURL, file, stat.Size()); err != nil { + return nil, fmt.Errorf("upload failed: %w", err) + } + } else { + if err := multipartUpload(ctx, bk, drsObject.Id, uploadFilename, bucketName, file, stat.Size()); err != nil { + return nil, fmt.Errorf("multipart upload failed: %w", err) + } + } + + return drsObject, nil +} + +func multipartUpload(ctx context.Context, bk UploadBackend, guid, filename, bucket string, file *os.File, size int64) error { + initResp, err := bk.InitMultipartUpload(ctx, guid, filename, bucket) + if err != nil { + return err + } + + chunkSize := OptimalChunkSize(size) + numChunks := int((size + chunkSize - 1) / 
chunkSize) + parts := make([]common.MultipartUploadPart, numChunks) + + for partNum := 1; partNum <= numChunks; partNum++ { + offset := int64(partNum-1) * chunkSize + partSize := chunkSize + if offset+partSize > size { + partSize = size - offset + } + + partURL, err := bk.GetMultipartUploadURL(ctx, "", initResp.UploadID, int32(partNum), bucket) + if err != nil { + return err + } + + section := io.NewSectionReader(file, offset, partSize) + etag, err := bk.UploadPart(ctx, partURL, section, partSize) + if err != nil { + return err + } + + parts[partNum-1] = common.MultipartUploadPart{ + PartNumber: int32(partNum), + ETag: etag, + } + } + + return bk.CompleteMultipartUpload(ctx, "", initResp.UploadID, parts, bucket) +} + +type UploadBackend = transfer.Uploader diff --git a/upload/request.go b/upload/request.go index 6036fed..e21dff2 100644 --- a/upload/request.go +++ b/upload/request.go @@ -2,85 +2,57 @@ package upload import ( "context" - "encoding/json" - "errors" "fmt" - "net/http" + // Added for io.Reader "os" - "strings" "github.com/calypr/data-client/common" - client "github.com/calypr/data-client/g3client" - req "github.com/calypr/data-client/request" + "github.com/calypr/data-client/transfer" "github.com/vbauerster/mpb/v8" + "github.com/vbauerster/mpb/v8/decor" ) // GeneratePresignedURL handles both Shepherd and Fence fallback -func GeneratePresignedUploadURL(ctx context.Context, g3 client.Gen3Interface, filename string, metadata common.FileMetadata, bucket string) (*PresignedURLResponse, error) { - hasShepherd, err := g3.Fence().CheckForShepherdAPI(ctx) - if err != nil || !hasShepherd { - msg, err := g3.Fence().InitUpload(ctx, filename, bucket, "") - if err != nil { - return nil, err - } - return &PresignedURLResponse{URL: msg.URL, GUID: msg.GUID}, nil - } - - shepherdPayload := ShepherdInitRequestObject{ - Filename: filename, - Authz: ShepherdAuthz{ - Version: "0", ResourcePaths: metadata.Authz, - }, - Aliases: metadata.Aliases, - Metadata: metadata.Metadata, 
- } - - reader, err := common.ToJSONReader(shepherdPayload) +func GeneratePresignedUploadURL(ctx context.Context, bk transfer.Uploader, filename string, metadata common.FileMetadata, bucket string) (*common.PresignedURLResponse, error) { + url, err := bk.ResolveUploadURL(ctx, "", filename, metadata, bucket) if err != nil { return nil, err } - - cred := g3.GetCredential() - r, err := g3.Fence().Do( - ctx, - &req.RequestBuilder{ - Url: cred.APIEndpoint + common.ShepherdEndpoint + "/objects", - Method: http.MethodPost, - Body: reader, - Token: cred.AccessToken, - }) - if err != nil || r.StatusCode != http.StatusCreated { - return nil, fmt.Errorf("shepherd upload init failed") - } - - var res PresignedURLResponse - if err := json.NewDecoder(r.Body).Decode(&res); err != nil { - return nil, err - } + var res common.PresignedURLResponse + res = common.PresignedURLResponse{URL: url, GUID: ""} return &res, nil } // GenerateUploadRequest helps preparing the HTTP request for upload and the progress bar for single part upload -func generateUploadRequest(ctx context.Context, g3 client.Gen3Interface, furObject common.FileUploadRequestObject, file *os.File, progress *mpb.Progress) (common.FileUploadRequestObject, error) { - if furObject.PresignedURL == "" { - msg, err := g3.Fence().GetUploadPresignedUrl(ctx, furObject.GUID, furObject.ObjectKey, furObject.Bucket) - if err != nil && !strings.Contains(err.Error(), "No GUID found") { - return furObject, fmt.Errorf("Upload error: %w", err) - } - if msg.URL == "" { - return furObject, errors.New("Upload error: error in generating presigned URL for " + furObject.ObjectKey) +func generateUploadRequest(ctx context.Context, bk transfer.Uploader, req common.FileUploadRequestObject, file *os.File, p *mpb.Progress) (common.FileUploadRequestObject, error) { + if req.PresignedURL == "" { + url, err := bk.ResolveUploadURL(ctx, req.GUID, req.ObjectKey, req.FileMetadata, req.Bucket) + if err != nil { + return req, fmt.Errorf("Upload error: %w", 
err) } - furObject.PresignedURL = msg.URL + req.PresignedURL = url } fi, err := file.Stat() if err != nil { - return furObject, errors.New("File stat error for file" + furObject.ObjectKey + ", file may be missing or unreadable because of permissions.\n") + return req, fmt.Errorf("stat failed: %w", err) } if fi.Size() > common.FileSizeLimit { - return furObject, errors.New("The file size of file " + furObject.ObjectKey + " exceeds the limit allowed and cannot be uploaded. The maximum allowed file size is " + FormatSize(common.FileSizeLimit) + ".\n") + return req, fmt.Errorf("file size exceeds limit") + } + + if p != nil { + p.AddBar(fi.Size(), + mpb.PrependDecorators( + decor.Name(req.ObjectKey, decor.WC{W: len(req.ObjectKey) + 1, C: decor.DindentRight}), + decor.CountersKibiByte("% .2f / % .2f"), + ), + mpb.AppendDecorators( + decor.OnComplete(decor.Percentage(decor.WC{W: 5}), "done"), + ), + ) } - return furObject, err + return req, nil } diff --git a/upload/retry.go b/upload/retry.go index 679a93d..15ff73f 100644 --- a/upload/retry.go +++ b/upload/retry.go @@ -7,7 +7,8 @@ import ( "time" "github.com/calypr/data-client/common" - client "github.com/calypr/data-client/g3client" + "github.com/calypr/data-client/logs" + "github.com/calypr/data-client/transfer" ) // GetWaitTime calculates exponential backoff with cap @@ -21,8 +22,7 @@ func GetWaitTime(retryCount int) time.Duration { } // RetryFailedUploads re-uploads previously failed files with exponential backoff -func RetryFailedUploads(ctx context.Context, g3 client.Gen3Interface, failedMap map[string]common.RetryObject) { - logger := g3.Logger() +func RetryFailedUploads(ctx context.Context, bk transfer.Uploader, logger *logs.Gen3Logger, failedMap map[string]common.RetryObject) { if len(failedMap) == 0 { logger.Println("No failed files to retry.") return @@ -52,7 +52,7 @@ func RetryFailedUploads(ctx context.Context, g3 client.Gen3Interface, failedMap // Clean up old record if exists if ro.GUID != "" { - if msg, err 
:= g3.Fence().DeleteRecord( + if msg, err := bk.DeleteFile( ctx, ro.GUID, ); err == nil { @@ -80,7 +80,7 @@ func RetryFailedUploads(ctx context.Context, g3 client.Gen3Interface, failedMap FileMetadata: ro.FileMetadata, Bucket: ro.Bucket, } - err = MultipartUpload(ctx, g3, req, file, true) + err = MultipartUpload(ctx, bk, req, file, true) if err == nil { logger.Succeeded(ro.SourcePath, req.GUID) if sb != nil { @@ -90,15 +90,15 @@ func RetryFailedUploads(ctx context.Context, g3 client.Gen3Interface, failedMap } } else { // Retry single-part - respObj, err := GeneratePresignedUploadURL(ctx, g3, ro.ObjectKey, ro.FileMetadata, ro.Bucket) + respObj, err := GeneratePresignedUploadURL(ctx, bk, ro.ObjectKey, ro.FileMetadata, ro.Bucket) if err != nil { - handleRetryFailure(ctx, g3, ro, retryChan, err) + handleRetryFailure(ctx, bk, logger, ro, retryChan, err) continue } file, err := os.Open(ro.SourcePath) if err != nil { - handleRetryFailure(ctx, g3, ro, retryChan, err) + handleRetryFailure(ctx, bk, logger, ro, retryChan, err) continue } stat, _ := file.Stat() @@ -118,13 +118,13 @@ func RetryFailedUploads(ctx context.Context, g3 client.Gen3Interface, failedMap PresignedURL: respObj.URL, } - fur, err = generateUploadRequest(ctx, g3, fur, nil, nil) + fur, err = generateUploadRequest(ctx, bk, fur, nil, nil) if err != nil { - handleRetryFailure(ctx, g3, ro, retryChan, err) + handleRetryFailure(ctx, bk, logger, ro, retryChan, err) continue } - err = UploadSingle(ctx, g3, fur, true) + err = UploadSingle(ctx, bk, logger, fur, true) if err == nil { logger.Succeeded(ro.SourcePath, fur.GUID) if sb != nil { @@ -135,13 +135,12 @@ func RetryFailedUploads(ctx context.Context, g3 client.Gen3Interface, failedMap } // On failure, requeue if retries remain - handleRetryFailure(ctx, g3, ro, retryChan, err) + handleRetryFailure(ctx, bk, logger, ro, retryChan, err) } } // handleRetryFailure logs failure and requeues if retries remain -func handleRetryFailure(ctx context.Context, g3 
client.Gen3Interface, ro common.RetryObject, retryChan chan common.RetryObject, err error) { - logger := g3.Logger() +func handleRetryFailure(ctx context.Context, bk transfer.Uploader, logger *logs.Gen3Logger, ro common.RetryObject, retryChan chan common.RetryObject, err error) { logger.Failed(ro.SourcePath, ro.ObjectKey, ro.FileMetadata, ro.GUID, ro.RetryCount, ro.Multipart) if err != nil { logger.Println("Retry error:", err) @@ -154,7 +153,7 @@ func handleRetryFailure(ctx context.Context, g3 client.Gen3Interface, ro common. // Max retries reached — final cleanup if ro.GUID != "" { - if msg, err := g3.Fence().DeleteRecord(ctx, ro.GUID); err == nil { + if msg, err := bk.DeleteFile(ctx, ro.GUID); err == nil { logger.Println("Cleaned up failed record:", msg) } else { logger.Println("Cleanup failed:", err) diff --git a/upload/singleFile.go b/upload/singleFile.go index 962a468..a531aa1 100644 --- a/upload/singleFile.go +++ b/upload/singleFile.go @@ -7,15 +7,12 @@ import ( "os" "github.com/calypr/data-client/common" - client "github.com/calypr/data-client/g3client" + "github.com/calypr/data-client/logs" + "github.com/calypr/data-client/transfer" ) -func UploadSingle(ctx context.Context, g3Client client.Gen3Interface, req common.FileUploadRequestObject, showProgress bool) error { - - // We use the provided client interface - g3i := g3Client - - g3i.Logger().InfoContext(ctx, "File Upload Request", "request", req) +func UploadSingle(ctx context.Context, bk transfer.Uploader, logger *logs.Gen3Logger, req common.FileUploadRequestObject, showProgress bool) error { + logger.InfoContext(ctx, "File Upload Request", "request", req) // Helper to handle * in path if it was passed, though optimally caller handles this. // We will trust the SourcePath in the request object mostly, but for safety we can check existence. 
@@ -25,14 +22,14 @@ func UploadSingle(ctx context.Context, g3Client client.Gen3Interface, req common file, err := os.Open(req.SourcePath) if err != nil { if showProgress { - sb := g3i.Logger().Scoreboard() + sb := logger.Scoreboard() if sb != nil { sb.IncrementSB(len(sb.Counts)) sb.PrintSB() } } - g3i.Logger().Failed(req.SourcePath, req.ObjectKey, common.FileMetadata{}, "", 0, false) - g3i.Logger().ErrorContext(ctx, "File open error", "file", req.SourcePath, "error", err) + logger.Failed(req.SourcePath, req.ObjectKey, common.FileMetadata{}, "", 0, false) + logger.ErrorContext(ctx, "File open error", "file", req.SourcePath, "error", err) return fmt.Errorf("[ERROR] when opening file path %s, an error occurred: %s\n", req.SourcePath, err.Error()) } defer file.Close() @@ -43,17 +40,17 @@ func UploadSingle(ctx context.Context, g3Client client.Gen3Interface, req common } fileSize := fi.Size() - furObject, err := generateUploadRequest(ctx, g3i, req, file, nil) + furObject, err := generateUploadRequest(ctx, bk, req, file, nil) if err != nil { if showProgress { - sb := g3i.Logger().Scoreboard() + sb := logger.Scoreboard() if sb != nil { sb.IncrementSB(len(sb.Counts)) sb.PrintSB() } } - g3i.Logger().Failed(req.SourcePath, req.ObjectKey, common.FileMetadata{}, req.GUID, 0, false) - g3i.Logger().ErrorContext(ctx, "Error occurred during request generation", "file", req.SourcePath, "error", err) + logger.Failed(req.SourcePath, req.ObjectKey, common.FileMetadata{}, req.GUID, 0, false) + logger.ErrorContext(ctx, "Error occurred during request generation", "file", req.SourcePath, "error", err) return fmt.Errorf("[ERROR] Error occurred during request generation for file %s: %s\n", req.SourcePath, err.Error()) } @@ -70,7 +67,7 @@ func UploadSingle(ctx context.Context, g3Client client.Gen3Interface, req common reader = progressTracker } - _, err = uploadPart(ctx, furObject.PresignedURL, reader, fileSize) + err = bk.Upload(ctx, furObject.PresignedURL, reader, fileSize) if 
progressTracker != nil { if finalizeErr := progressTracker.Finalize(); finalizeErr != nil && err == nil { err = finalizeErr @@ -78,15 +75,15 @@ func UploadSingle(ctx context.Context, g3Client client.Gen3Interface, req common } if err != nil { - g3i.Logger().ErrorContext(ctx, "Upload failed", "error", err) + logger.ErrorContext(ctx, "Upload failed", "error", err) return err } - g3i.Logger().InfoContext(ctx, "Successfully uploaded", "file", req.ObjectKey) - g3i.Logger().Succeeded(req.SourcePath, req.GUID) + logger.InfoContext(ctx, "Successfully uploaded", "file", req.ObjectKey) + logger.Succeeded(req.SourcePath, req.GUID) if showProgress { - sb := g3i.Logger().Scoreboard() + sb := logger.Scoreboard() if sb != nil { sb.IncrementSB(0) sb.PrintSB() diff --git a/upload/types.go b/upload/types.go index 8c69ce1..91697a5 100644 --- a/upload/types.go +++ b/upload/types.go @@ -2,11 +2,6 @@ package upload import "github.com/calypr/data-client/common" -type PresignedURLResponse struct { - GUID string `json:"guid"` - URL string `json:"upload_url"` -} - type UploadConfig struct { BucketName string NumParallel int @@ -16,20 +11,6 @@ type UploadConfig struct { ShowProgress bool } -// ShepherdInitRequestObject represents the payload that sends to Shepherd for getting a singlepart upload presignedURL or init a multipart upload for new object file -type ShepherdInitRequestObject struct { - Filename string `json:"file_name"` - Authz ShepherdAuthz `json:"authz"` - Aliases []string `json:"aliases"` - // Metadata is an encoded JSON string of any arbitrary metadata the user wishes to upload. 
- Metadata map[string]any `json:"metadata"` -} - -type ShepherdAuthz struct { - Version string `json:"version"` - ResourcePaths []string `json:"resource_paths"` -} - // FileInfo is a helper struct for including subdirname as filename type FileInfo struct { FilePath string diff --git a/upload/upload.go b/upload/upload.go index aec8d66..924c9b1 100644 --- a/upload/upload.go +++ b/upload/upload.go @@ -8,15 +8,15 @@ import ( "strings" "github.com/calypr/data-client/common" - drs "github.com/calypr/data-client/drs" // Imported for DRSObject - client "github.com/calypr/data-client/g3client" + drs "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/transfer" "github.com/vbauerster/mpb/v8" ) // Upload is a unified catch-all function that automatically chooses between // single-part and multipart upload based on file size. -func Upload(ctx context.Context, g3 client.Gen3Interface, req common.FileUploadRequestObject, showProgress bool) error { - g3.Logger().Printf("Processing Upload Request for: %s\n", req.SourcePath) +func Upload(ctx context.Context, bk transfer.Uploader, req common.FileUploadRequestObject, showProgress bool) error { + bk.Logger().DebugContext(ctx, "Processing Upload Request", "source", req.SourcePath) file, err := os.Open(req.SourcePath) if err != nil { @@ -36,15 +36,16 @@ func Upload(ctx context.Context, g3 client.Gen3Interface, req common.FileUploadR // Use Single-Part if file is smaller than 5GB (or your defined limit) if fileSize < 5*common.GB { - g3.Logger().Printf("File size %d bytes (< 5GB), performing single-part upload\n", fileSize) - return UploadSingle(ctx, g3, req, true) + bk.Logger().DebugContext(ctx, "performing single-part upload", "size", fileSize) + return UploadSingle(ctx, bk, bk.Logger(), req, true) } - g3.Logger().Printf("File size %d bytes (>= 5GB), performing multipart upload\n", fileSize) - return MultipartUpload(ctx, g3, req, file, showProgress) + bk.Logger().DebugContext(ctx, "performing multipart upload", "size", 
fileSize) + return MultipartUpload(ctx, bk, req, file, showProgress) } // UploadSingleFile handles single-part upload with progress -func UploadSingleFile(ctx context.Context, g3 client.Gen3Interface, req common.FileUploadRequestObject, showProgress bool) error { +func UploadSingleFile(ctx context.Context, bk transfer.Uploader, req common.FileUploadRequestObject, showProgress bool) error { + logger := bk.Logger() file, err := os.Open(req.SourcePath) if err != nil { return err @@ -56,10 +57,6 @@ func UploadSingleFile(ctx context.Context, g3 client.Gen3Interface, req common.F return fmt.Errorf("file exceeds 5GB limit") } - if fi.Size() > common.FileSizeLimit { - return fmt.Errorf("file exceeds 5GB limit") - } - // Generate request with progress bar var p *mpb.Progress if showProgress { @@ -67,88 +64,62 @@ func UploadSingleFile(ctx context.Context, g3 client.Gen3Interface, req common.F } // Populate PresignedURL and GUID if missing - fur, err := generateUploadRequest(ctx, g3, req, file, p) + fur, err := generateUploadRequest(ctx, bk, req, file, p) if err != nil { return err } - return MultipartUpload(ctx, g3, fur, file, showProgress) + if fi.Size() < int64(common.FileSizeLimit) { + return UploadSingle(ctx, bk, logger, fur, true) + } + return MultipartUpload(ctx, bk, fur, file, showProgress) } // RegisterAndUploadFile orchestrates registration with Indexd and uploading via Fence. // It handles checking for existing records, upsert logic, checking if file is already downloadable, and performing the upload. -func RegisterAndUploadFile(ctx context.Context, g3 client.Gen3Interface, drsObject *drs.DRSObject, filePath string, bucketName string, upsert bool) (*drs.DRSObject, error) { - // 1. Register with Indexd - // Note: The caller is responsible for converting local DRS object to data-client DRS object if needed. 
- - res, err := g3.Indexd().RegisterRecord(ctx, drsObject) +func RegisterAndUploadFile(ctx context.Context, dc drs.Client, bk transfer.Uploader, drsObject *drs.DRSObject, filePath string, bucketName string, upsert bool) (*drs.DRSObject, error) { + logger := bk.Logger() + res, err := dc.RegisterRecord(ctx, drsObject) if err != nil { if strings.Contains(err.Error(), "already exists") { if !upsert { - g3.Logger().Printf("indexd record already exists, proceeding for %s\n", drsObject.Id) + logger.DebugContext(ctx, "record already exists", "id", drsObject.Id) } else { - g3.Logger().Printf("indexd record already exists, deleting and re-adding for %s\n", drsObject.Id) - err = g3.Indexd().DeleteIndexdRecord(ctx, drsObject.Id) + logger.DebugContext(ctx, "record already exists, recreating", "id", drsObject.Id) + err = dc.DeleteRecord(ctx, drsObject.Id) if err != nil { return nil, fmt.Errorf("failed to delete existing record: %w", err) } - res, err = g3.Indexd().RegisterRecord(ctx, drsObject) + res, err = dc.RegisterRecord(ctx, drsObject) if err != nil { return nil, fmt.Errorf("failed to re-register record: %w", err) } } } else { - return nil, fmt.Errorf("error registering indexd record: %w", err) + return nil, fmt.Errorf("error registering record: %w", err) } - } else { - // If registration succeeded, use the returned object which might have updated fields (e.g. created time) - // although we typically reuse the ID for upload. } - // If we didn't get a new object (upsert=false case), we should fetch the existing one to be sure about its state? - // But we have the ID in drsObject.Id. - // 2. 
Check if file is downloadable - downloadable, err := isFileDownloadable(ctx, g3, drsObject.Id) + downloadable, err := isFileDownloadable(ctx, dc, drsObject.Id) if err != nil { return nil, fmt.Errorf("failed to check if file is downloadable: %w", err) } if downloadable { - g3.Logger().Printf("File %s is already downloadable, skipping upload.\n", drsObject.Id) - // Return the registered object (or the one passed in if we didn't re-register) - // If we re-registered, res is populated. If not, we might want to return the fetched object? - // For consistency, let's return res if set, or fetch it. + logger.DebugContext(ctx, "file already downloadable, skipping upload", "id", drsObject.Id) if res != nil { return res, nil } - return g3.Indexd().GetObject(ctx, drsObject.Id) + return dc.GetObject(ctx, drsObject.Id) } // 3. Upload File uploadFilename := filepath.Base(filePath) - - // Attempt to determine the correct upload filename from the registered object's URL. - // git-drs registers s3://bucket/GUID/SHA, so we want to upload to "SHA", not "filename.ext". 
if res != nil && len(res.AccessMethods) > 0 { for _, am := range res.AccessMethods { - if am.Type == "s3" && am.AccessURL.URL != "" { - // Parse s3://bucket/guid/sha -> sha - parts := strings.Split(am.AccessURL.URL, "/") - if len(parts) > 0 { - candidate := parts[len(parts)-1] - if candidate != "" { - uploadFilename = candidate - } - } - break - } - } - } else if len(drsObject.AccessMethods) > 0 { - // Fallback to checking the input object if res didn't have methods (unlikely for upsert=false) - for _, am := range drsObject.AccessMethods { - if am.Type == "s3" && am.AccessURL.URL != "" { - parts := strings.Split(am.AccessURL.URL, "/") + if (am.Type == "s3" || am.Type == "gs") && am.AccessUrl != nil && am.AccessUrl.Url != "" { + parts := strings.Split(am.AccessUrl.Url, "/") if len(parts) > 0 { candidate := parts[len(parts)-1] if candidate != "" { @@ -167,22 +138,19 @@ func RegisterAndUploadFile(ctx context.Context, g3 client.Gen3Interface, drsObje Bucket: bucketName, } - // Use Upload function which handles single/multipart selection - err = Upload(ctx, g3, req, false) + err = Upload(ctx, bk, req, false) if err != nil { return nil, fmt.Errorf("failed to upload file: %w", err) } - // Return the object if res != nil { return res, nil } - return g3.Indexd().GetObject(ctx, drsObject.Id) + return dc.GetObject(ctx, drsObject.Id) } -func isFileDownloadable(ctx context.Context, g3 client.Gen3Interface, did string) (bool, error) { - // Get the object to find access methods - obj, err := g3.Indexd().GetObject(ctx, did) +func isFileDownloadable(ctx context.Context, dc drs.Client, did string) (bool, error) { + obj, err := dc.GetObject(ctx, did) if err != nil { return false, err } @@ -192,17 +160,15 @@ func isFileDownloadable(ctx context.Context, g3 client.Gen3Interface, did string } accessType := obj.AccessMethods[0].Type - res, err := g3.Indexd().GetDownloadURL(ctx, did, accessType) + res, err := dc.GetDownloadURL(ctx, did, accessType) if err != nil { - // If we can't get a 
download URL, it's not downloadable return false, nil } - if res.URL == "" { + if res.Url == "" { return false, nil } - // Check if the URL is accessible - err = common.CanDownloadFile(res.URL) + err = common.CanDownloadFile(res.Url) return err == nil, nil } diff --git a/upload/utils.go b/upload/utils.go index 54cf836..ca985f6 100644 --- a/upload/utils.go +++ b/upload/utils.go @@ -9,12 +9,13 @@ import ( "strings" "github.com/calypr/data-client/common" - client "github.com/calypr/data-client/g3client" "github.com/calypr/data-client/logs" + "github.com/calypr/data-client/transfer" ) -func SeparateSingleAndMultipartUploads(g3i client.Gen3Interface, objects []common.FileUploadRequestObject) ([]common.FileUploadRequestObject, []common.FileUploadRequestObject) { +func SeparateSingleAndMultipartUploads(bk transfer.Uploader, objects []common.FileUploadRequestObject) ([]common.FileUploadRequestObject, []common.FileUploadRequestObject) { fileSizeLimit := common.FileSizeLimit + logger := bk.Logger() var singlepartObjects []common.FileUploadRequestObject var multipartObjects []common.FileUploadRequestObject @@ -23,22 +24,17 @@ func SeparateSingleAndMultipartUploads(g3i client.Gen3Interface, objects []commo fi, err := os.Stat(object.SourcePath) if err != nil { if os.IsNotExist(err) { - g3i.Logger().Printf("The file you specified \"%s\" does not exist locally\n", object.SourcePath) + logger.Error("The file you specified does not exist locally", "path", object.SourcePath) } else { - g3i.Logger().Println("File stat error: " + err.Error()) + logger.Error("File stat error", "path", object.SourcePath, "error", err) } - g3i.Logger().Failed(object.SourcePath, object.ObjectKey, object.FileMetadata, object.GUID, 0, false) continue } if fi.IsDir() { continue } - if _, ok := g3i.Logger().GetSucceededLogMap()[object.SourcePath]; ok { - g3i.Logger().Println("File \"" + object.SourcePath + "\" found in history. 
Skipping.") - continue - } if fi.Size() > common.MultipartFileSizeLimit { - g3i.Logger().Printf("File %s exceeds max limit\n", fi.Name()) + logger.Warn("File exceeds max limit", "name", fi.Name(), "size", fi.Size()) continue } if fi.Size() > int64(fileSizeLimit) { From 5c3493163fd38445e1ef995abd334aa4d6f32ab3 Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Thu, 26 Mar 2026 13:46:04 -0700 Subject: [PATCH 05/13] update apigen --- Makefile | 73 - apigen/api/internal.openapi.yaml | 621 ++++ apigen/api/openapi.yaml | 2733 +++++++++++++++++ apigen/drs/model_access_method.go | 380 +++ apigen/drs/model_access_method_access_url.go | 197 ++ .../drs/model_access_method_authorizations.go | 235 ++ .../drs/model_access_method_update_request.go | 197 ++ apigen/drs/model_access_url.go | 197 ++ apigen/drs/model_authorizations.go | 238 ++ ...model_bulk_access_method_update_request.go | 197 ++ ...ess_method_update_request_updates_inner.go | 189 ++ apigen/drs/model_bulk_access_url.go | 269 ++ apigen/drs/model_bulk_delete_request.go | 238 ++ apigen/drs/model_bulk_object_access_id.go | 163 + ..._access_id_bulk_object_access_ids_inner.go | 165 + .../drs/model_bulk_object_id_no_passport.go | 128 + ...bulk_update_access_methods_200_response.go | 160 + apigen/drs/model_checksum.go | 189 ++ apigen/drs/model_contents_object.go | 271 ++ apigen/drs/model_delete_request.go | 169 + apigen/drs/model_drs_object.go | 573 ++++ apigen/drs/model_drs_object_candidate.go | 448 +++ apigen/drs/model_drs_service.go | 224 ++ apigen/drs/model_drs_service_drs.go | 825 +++++ apigen/drs/model_drs_service_type.go | 159 + apigen/drs/model_error.go | 165 + .../model_get_bulk_access_url_200_response.go | 200 ++ .../model_get_bulk_objects_200_response.go | 200 ++ apigen/drs/model_get_bulk_objects_request.go | 197 ++ .../model_get_service_info_200_response.go | 562 ++++ .../model_options_bulk_object_200_response.go | 200 ++ apigen/drs/model_post_access_url_request.go | 128 + 
apigen/drs/model_post_object_request.go | 165 + .../model_register_objects_201_response.go | 160 + apigen/drs/model_register_objects_request.go | 197 ++ apigen/drs/model_service.go | 497 +++ apigen/drs/model_service_organization.go | 189 ++ apigen/drs/model_service_type.go | 218 ++ apigen/drs/model_summary.go | 202 ++ apigen/drs/model_unresolved_inner.go | 163 + apigen/drs/model_upload_method.go | 262 ++ apigen/drs/model_upload_method_access_url.go | 197 ++ apigen/drs/model_upload_request.go | 197 ++ apigen/drs/model_upload_request_object.go | 321 ++ apigen/drs/model_upload_response.go | 160 + apigen/drs/model_upload_response_object.go | 358 +++ apigen/drs/utils.go | 362 +++ .../internalapi/model_bulk_create_request.go | 158 + .../model_bulk_documents_request.go | 171 ++ .../model_bulk_documents_request_one_of.go | 162 + .../internalapi/model_bulk_hashes_request.go | 158 + .../model_bulk_sha256_validity_request.go | 162 + .../model_delete_by_query_response.go | 126 + ...del_internal_multipart_complete_request.go | 250 ++ .../model_internal_multipart_init_request.go | 198 ++ .../model_internal_multipart_init_response.go | 162 + .../model_internal_multipart_part.go | 186 ++ ...model_internal_multipart_upload_request.go | 250 ++ ...odel_internal_multipart_upload_response.go | 126 + apigen/internalapi/model_internal_record.go | 379 +++ .../model_internal_record_response.go | 559 ++++ .../internalapi/model_internal_signed_url.go | 126 + .../model_internal_upload_blank_request.go | 162 + .../model_internal_upload_blank_response.go | 162 + .../model_list_records_response.go | 126 + apigen/internalapi/utils.go | 361 +++ apigen/specs/drs-extensions-overlay.yaml | 25 + 67 files changed, 18474 insertions(+), 73 deletions(-) create mode 100644 apigen/api/internal.openapi.yaml create mode 100644 apigen/api/openapi.yaml create mode 100644 apigen/drs/model_access_method.go create mode 100644 apigen/drs/model_access_method_access_url.go create mode 100644 
apigen/drs/model_access_method_authorizations.go create mode 100644 apigen/drs/model_access_method_update_request.go create mode 100644 apigen/drs/model_access_url.go create mode 100644 apigen/drs/model_authorizations.go create mode 100644 apigen/drs/model_bulk_access_method_update_request.go create mode 100644 apigen/drs/model_bulk_access_method_update_request_updates_inner.go create mode 100644 apigen/drs/model_bulk_access_url.go create mode 100644 apigen/drs/model_bulk_delete_request.go create mode 100644 apigen/drs/model_bulk_object_access_id.go create mode 100644 apigen/drs/model_bulk_object_access_id_bulk_object_access_ids_inner.go create mode 100644 apigen/drs/model_bulk_object_id_no_passport.go create mode 100644 apigen/drs/model_bulk_update_access_methods_200_response.go create mode 100644 apigen/drs/model_checksum.go create mode 100644 apigen/drs/model_contents_object.go create mode 100644 apigen/drs/model_delete_request.go create mode 100644 apigen/drs/model_drs_object.go create mode 100644 apigen/drs/model_drs_object_candidate.go create mode 100644 apigen/drs/model_drs_service.go create mode 100644 apigen/drs/model_drs_service_drs.go create mode 100644 apigen/drs/model_drs_service_type.go create mode 100644 apigen/drs/model_error.go create mode 100644 apigen/drs/model_get_bulk_access_url_200_response.go create mode 100644 apigen/drs/model_get_bulk_objects_200_response.go create mode 100644 apigen/drs/model_get_bulk_objects_request.go create mode 100644 apigen/drs/model_get_service_info_200_response.go create mode 100644 apigen/drs/model_options_bulk_object_200_response.go create mode 100644 apigen/drs/model_post_access_url_request.go create mode 100644 apigen/drs/model_post_object_request.go create mode 100644 apigen/drs/model_register_objects_201_response.go create mode 100644 apigen/drs/model_register_objects_request.go create mode 100644 apigen/drs/model_service.go create mode 100644 apigen/drs/model_service_organization.go create mode 100644 
apigen/drs/model_service_type.go create mode 100644 apigen/drs/model_summary.go create mode 100644 apigen/drs/model_unresolved_inner.go create mode 100644 apigen/drs/model_upload_method.go create mode 100644 apigen/drs/model_upload_method_access_url.go create mode 100644 apigen/drs/model_upload_request.go create mode 100644 apigen/drs/model_upload_request_object.go create mode 100644 apigen/drs/model_upload_response.go create mode 100644 apigen/drs/model_upload_response_object.go create mode 100644 apigen/drs/utils.go create mode 100644 apigen/internalapi/model_bulk_create_request.go create mode 100644 apigen/internalapi/model_bulk_documents_request.go create mode 100644 apigen/internalapi/model_bulk_documents_request_one_of.go create mode 100644 apigen/internalapi/model_bulk_hashes_request.go create mode 100644 apigen/internalapi/model_bulk_sha256_validity_request.go create mode 100644 apigen/internalapi/model_delete_by_query_response.go create mode 100644 apigen/internalapi/model_internal_multipart_complete_request.go create mode 100644 apigen/internalapi/model_internal_multipart_init_request.go create mode 100644 apigen/internalapi/model_internal_multipart_init_response.go create mode 100644 apigen/internalapi/model_internal_multipart_part.go create mode 100644 apigen/internalapi/model_internal_multipart_upload_request.go create mode 100644 apigen/internalapi/model_internal_multipart_upload_response.go create mode 100644 apigen/internalapi/model_internal_record.go create mode 100644 apigen/internalapi/model_internal_record_response.go create mode 100644 apigen/internalapi/model_internal_signed_url.go create mode 100644 apigen/internalapi/model_internal_upload_blank_request.go create mode 100644 apigen/internalapi/model_internal_upload_blank_response.go create mode 100644 apigen/internalapi/model_list_records_response.go create mode 100644 apigen/internalapi/utils.go create mode 100644 apigen/specs/drs-extensions-overlay.yaml diff --git a/Makefile b/Makefile 
index 693e65a..d1246a2 100644 --- a/Makefile +++ b/Makefile @@ -20,12 +20,6 @@ OAG_IMAGE ?= openapitools/openapi-generator-cli:latest REDOCLY_IMAGE ?= redocly/cli:latest YQ_IMAGE ?= mikefarah/yq:latest GEN_OUT ?= .tmp/apigen.gen -LFS_OPENAPI ?= apigen/api/lfs.openapi.yaml -LFS_GEN_OUT ?= .tmp/apigen-lfs.gen -BUCKET_OPENAPI ?= apigen/api/bucket.openapi.yaml -BUCKET_GEN_OUT ?= .tmp/apigen-bucket.gen -METRICS_OPENAPI ?= apigen/api/metrics.openapi.yaml -METRICS_GEN_OUT ?= .tmp/apigen-metrics.gen INTERNAL_OPENAPI ?= apigen/api/internal.openapi.yaml INTERNAL_GEN_OUT ?= .tmp/apigen-internal.gen SCHEMAS_SUBMODULE ?= ga4gh/data-repository-service-schemas @@ -112,74 +106,8 @@ gen: mkdir -p apigen/drs; \ find "$(GEN_OUT)" -maxdepth 1 -type f -name '*.go' -exec mv {} apigen/drs/ \; ; \ echo "Generated DRS client models into ./apigen/drs"; \ - if [[ -f "$(LFS_OPENAPI)" ]]; then $(MAKE) gen-lfs; fi; \ - if [[ -f "$(BUCKET_OPENAPI)" ]]; then $(MAKE) gen-bucket; fi; \ - if [[ -f "$(METRICS_OPENAPI)" ]]; then $(MAKE) gen-metrics; fi; \ if [[ -f "$(INTERNAL_OPENAPI)" ]]; then $(MAKE) gen-internal; fi -.PHONY: gen-lfs -gen-lfs: - @set -euo pipefail; \ - rm -rf "$(LFS_GEN_OUT)"; \ - docker run --rm --pull=missing \ - --user "$$(id -u):$$(id -g)" \ - -v "$(PWD):/local" \ - $(OAG_IMAGE) generate \ - -g go \ - --skip-validate-spec \ - --git-repo-id data-client \ - --git-user-id calypr \ - -i /local/apigen/api/lfs.openapi.yaml \ - -o /local/$(LFS_GEN_OUT) \ - --global-property models,modelDocs=false,modelTests=false,supportingFiles=utils.go \ - --additional-properties packageName=lfsapi,enumClassPrefix=true; \ - rm -rf apigen/lfsapi; \ - mkdir -p apigen/lfsapi; \ - find "$(LFS_GEN_OUT)" -maxdepth 1 -type f -name '*.go' -exec mv {} apigen/lfsapi/ \; ; \ - echo "Generated LFS models into ./apigen/lfsapi" - -.PHONY: gen-bucket -gen-bucket: - @set -euo pipefail; \ - rm -rf "$(BUCKET_GEN_OUT)"; \ - docker run --rm --pull=missing \ - --user "$$(id -u):$$(id -g)" \ - -v "$(PWD):/local" \ - 
$(OAG_IMAGE) generate \ - -g go \ - --skip-validate-spec \ - --git-repo-id data-client \ - --git-user-id calypr \ - -i /local/apigen/api/bucket.openapi.yaml \ - -o /local/$(BUCKET_GEN_OUT) \ - --global-property models,modelDocs=false,modelTests=false,supportingFiles=utils.go \ - --additional-properties packageName=bucketapi,enumClassPrefix=true; \ - rm -rf apigen/bucketapi; \ - mkdir -p apigen/bucketapi; \ - find "$(BUCKET_GEN_OUT)" -maxdepth 1 -type f -name '*.go' -exec mv {} apigen/bucketapi/ \; ; \ - echo "Generated Bucket models into ./apigen/bucketapi" - -.PHONY: gen-metrics -gen-metrics: - @set -euo pipefail; \ - rm -rf "$(METRICS_GEN_OUT)"; \ - docker run --rm --pull=missing \ - --user "$$(id -u):$$(id -g)" \ - -v "$(PWD):/local" \ - $(OAG_IMAGE) generate \ - -g go \ - --skip-validate-spec \ - --git-repo-id data-client \ - --git-user-id calypr \ - -i /local/apigen/api/metrics.openapi.yaml \ - -o /local/$(METRICS_GEN_OUT) \ - --global-property models,modelDocs=false,modelTests=false,supportingFiles=utils.go \ - --additional-properties packageName=metricsapi,enumClassPrefix=true; \ - rm -rf apigen/metricsapi; \ - mkdir -p apigen/metricsapi; \ - find "$(METRICS_GEN_OUT)" -maxdepth 1 -type f -name '*.go' -exec mv {} apigen/metricsapi/ \; ; \ - echo "Generated Metrics models into ./apigen/metricsapi" - .PHONY: gen-internal gen-internal: @set -euo pipefail; \ @@ -217,4 +145,3 @@ clean: @rm -f $(BIN_DIR)/$(TARGET_NAME) @rm -f coverage.out coverage.html @rm -rf .tmp - diff --git a/apigen/api/internal.openapi.yaml b/apigen/api/internal.openapi.yaml new file mode 100644 index 0000000..27c1c9d --- /dev/null +++ b/apigen/api/internal.openapi.yaml @@ -0,0 +1,621 @@ +openapi: 3.0.3 +info: + title: Internal Compatibility API (DRS Server) + version: 1.0.0 + description: | + Consolidated internal API delta for drs-server. + This spec captures non-GA4GH DRS internal/compatibility routes. 
+servers: + - url: / +tags: + - name: internal-index + - name: internal-data +paths: + /index: + get: + tags: + - internal-index + operationId: internalList + parameters: + - in: query + name: hash + schema: + type: string + - in: query + name: authz + schema: + type: string + - in: query + name: organization + schema: + type: string + - in: query + name: program + schema: + type: string + - in: query + name: project + schema: + type: string + responses: + '200': + description: List response + content: + application/json: + schema: + $ref: '#/components/schemas/ListRecordsResponse' + '400': + description: Validation error + '401': + description: Authentication required + '403': + description: Forbidden + '500': + description: Internal server error + '501': + description: Listing mode not implemented without query params + post: + tags: + - internal-index + operationId: internalCreate + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/InternalRecord' + responses: + '201': + description: Created + content: + application/json: + schema: + $ref: '#/components/schemas/InternalRecordResponse' + '400': + description: Validation error + '401': + description: Authentication required + '403': + description: Forbidden + '500': + description: Internal server error + delete: + tags: + - internal-index + operationId: internalDeleteByQuery + parameters: + - in: query + name: authz + schema: + type: string + - in: query + name: organization + schema: + type: string + - in: query + name: program + schema: + type: string + - in: query + name: project + schema: + type: string + responses: + '200': + description: Deleted count + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteByQueryResponse' + '400': + description: Validation error + '401': + description: Authentication required + '403': + description: Forbidden + '500': + description: Internal server error + /index/{id}: + get: + tags: + - internal-index 
+ operationId: internalGet + parameters: + - in: path + name: id + required: true + schema: + type: string + responses: + '200': + description: Record + content: + application/json: + schema: + $ref: '#/components/schemas/InternalRecordResponse' + '404': + description: Not found + '500': + description: Internal server error + put: + tags: + - internal-index + operationId: internalUpdate + parameters: + - in: path + name: id + required: true + schema: + type: string + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/InternalRecord' + responses: + '200': + description: Updated + content: + application/json: + schema: + $ref: '#/components/schemas/InternalRecordResponse' + '400': + description: Validation error + '404': + description: Not found + '500': + description: Internal server error + delete: + tags: + - internal-index + operationId: internalDelete + parameters: + - in: path + name: id + required: true + schema: + type: string + responses: + '200': + description: Deleted + '404': + description: Not found + '500': + description: Internal server error + /index/bulk: + post: + tags: + - internal-index + operationId: internalBulkCreate + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BulkCreateRequest' + responses: + '201': + description: Created records + content: + application/json: + schema: + $ref: '#/components/schemas/ListRecordsResponse' + '400': + description: Validation error + '401': + description: Authentication required + '403': + description: Forbidden + '500': + description: Internal server error + /index/bulk/hashes: + post: + tags: + - internal-index + operationId: internalBulkHashes + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BulkHashesRequest' + responses: + '200': + description: Matched records + content: + application/json: + schema: + $ref: '#/components/schemas/ListRecordsResponse' 
+ '400': + description: Validation error + '500': + description: Internal server error + /index/bulk/sha256/validity: + post: + tags: + - internal-index + operationId: internalBulkSHA256Validity + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BulkSHA256ValidityRequest' + responses: + '200': + description: sha256 validity map + content: + application/json: + schema: + type: object + additionalProperties: + type: boolean + '400': + description: Validation error + '500': + description: Internal server error + /index/bulk/documents: + post: + tags: + - internal-index + operationId: internalBulkDocuments + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BulkDocumentsRequest' + responses: + '200': + description: Records + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/InternalRecordResponse' + '400': + description: Validation error + '500': + description: Internal server error + /data/download/{file_id}: + get: + tags: + - internal-data + operationId: internalDownloadData + parameters: + - in: path + name: file_id + required: true + schema: + type: string + responses: + '200': + description: Signed download URL + content: + application/json: + schema: + $ref: '#/components/schemas/InternalSignedURL' + '401': + description: Authentication required + '403': + description: Forbidden + '404': + description: Not found + '500': + description: Internal server error + /data/upload: + post: + tags: + - internal-data + operationId: internalUploadBlank + requestBody: + required: false + content: + application/json: + schema: + $ref: '#/components/schemas/InternalUploadBlankRequest' + responses: + '200': + description: Upload URL issued + content: + application/json: + schema: + $ref: '#/components/schemas/InternalUploadBlankResponse' + '400': + description: Validation error + '401': + description: Authentication required + '403': + 
description: Forbidden + '500': + description: Internal server error + /data/upload/{file_id}: + get: + tags: + - internal-data + operationId: internalUploadURL + parameters: + - in: path + name: file_id + required: true + schema: + type: string + responses: + '200': + description: Signed upload URL + content: + application/json: + schema: + $ref: '#/components/schemas/InternalSignedURL' + '400': + description: Validation error + '401': + description: Authentication required + '403': + description: Forbidden + '500': + description: Internal server error + /data/multipart/init: + post: + tags: + - internal-data + operationId: internalMultipartInit + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/InternalMultipartInitRequest' + responses: + '200': + description: Multipart upload initialized + content: + application/json: + schema: + $ref: '#/components/schemas/InternalMultipartInitResponse' + '400': + description: Validation error + '401': + description: Authentication required + '403': + description: Forbidden + '500': + description: Internal server error + /data/multipart/upload: + post: + tags: + - internal-data + operationId: internalMultipartUpload + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/InternalMultipartUploadRequest' + responses: + '200': + description: Part presigned URL + content: + application/json: + schema: + $ref: '#/components/schemas/InternalMultipartUploadResponse' + '400': + description: Validation error + '401': + description: Authentication required + '403': + description: Forbidden + '500': + description: Internal server error + /data/multipart/complete: + post: + tags: + - internal-data + operationId: internalMultipartComplete + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/InternalMultipartCompleteRequest' + responses: + '200': + description: Upload completed + '400': + 
description: Validation error + '401': + description: Authentication required + '403': + description: Forbidden + '500': + description: Internal server error +components: + schemas: + HashInfo: + type: object + additionalProperties: + type: string + description: Hash map, e.g. {"sha256":"..."} + InternalRecord: + type: object + properties: + did: + type: string + hashes: + $ref: '#/components/schemas/HashInfo' + size: + type: integer + format: int64 + urls: + type: array + items: + type: string + authz: + type: array + items: + type: string + file_name: + type: string + organization: + type: string + project: + type: string + InternalRecordResponse: + allOf: + - $ref: '#/components/schemas/InternalRecord' + - type: object + properties: + baseid: + type: string + rev: + type: string + created_date: + type: string + updated_date: + type: string + uploader: + type: string + ListRecordsResponse: + type: object + properties: + records: + type: array + items: + $ref: '#/components/schemas/InternalRecordResponse' + BulkCreateRequest: + type: object + required: + - records + properties: + records: + type: array + items: + $ref: '#/components/schemas/InternalRecord' + BulkHashesRequest: + type: object + required: + - hashes + properties: + hashes: + type: array + items: + type: string + BulkSHA256ValidityRequest: + type: object + properties: + sha256: + type: array + items: + type: string + hashes: + type: array + items: + type: string + BulkDocumentsRequest: + oneOf: + - type: array + items: + type: string + - type: object + properties: + ids: + type: array + items: + type: string + dids: + type: array + items: + type: string + DeleteByQueryResponse: + type: object + properties: + deleted: + type: integer + InternalSignedURL: + type: object + properties: + url: + type: string + InternalUploadBlankRequest: + type: object + properties: + guid: + type: string + authz: + type: array + items: + type: string + InternalUploadBlankResponse: + type: object + properties: + guid: + 
type: string + url: + type: string + InternalMultipartInitRequest: + type: object + properties: + guid: + type: string + file_name: + type: string + bucket: + type: string + InternalMultipartInitResponse: + type: object + properties: + guid: + type: string + uploadId: + type: string + InternalMultipartUploadRequest: + type: object + required: + - key + - uploadId + - partNumber + properties: + key: + type: string + bucket: + type: string + uploadId: + type: string + partNumber: + type: integer + format: int32 + InternalMultipartUploadResponse: + type: object + properties: + presigned_url: + type: string + InternalMultipartPart: + type: object + required: + - PartNumber + - ETag + properties: + PartNumber: + type: integer + format: int32 + ETag: + type: string + InternalMultipartCompleteRequest: + type: object + required: + - key + - uploadId + - parts + properties: + key: + type: string + bucket: + type: string + uploadId: + type: string + parts: + type: array + items: + $ref: '#/components/schemas/InternalMultipartPart' diff --git a/apigen/api/openapi.yaml b/apigen/api/openapi.yaml new file mode 100644 index 0000000..3cb3496 --- /dev/null +++ b/apigen/api/openapi.yaml @@ -0,0 +1,2733 @@ +openapi: 3.0.3 +info: + title: Data Repository Service + version: 1.5.0 + x-logo: + url: https://www.ga4gh.org/wp-content/themes/ga4gh/dist/assets/svg/logos/logo-full-color.svg + termsOfService: https://www.ga4gh.org/terms-and-conditions/ + contact: + name: GA4GH Cloud Work Stream + email: ga4gh-cloud@ga4gh.org + license: + name: Apache 2.0 + url: https://raw.githubusercontent.com/ga4gh/data-repository-service-schemas/master/LICENSE +servers: + - url: https://{serverURL}/ga4gh/drs/v1 + variables: + serverURL: + default: drs.example.org + description: | + DRS server endpoints MUST be prefixed by the '/ga4gh/drs/v1' endpoint path +security: + - {} + - BasicAuth: [] + - BearerAuth: [] +tags: + - name: Introduction + description: | + The Data Repository Service (DRS) API provides a 
generic interface to data repositories so data consumers, including workflow systems, can access data objects in a single, standard way regardless of where they are stored and how they are managed. The primary functionality of DRS is to map a logical ID to a means for physically retrieving the data represented by the ID. The sections below describe the characteristics of those IDs, the types of data supported, how they can be pointed to using URIs, and how clients can use these URIs to ultimately make successful DRS API requests. This document also describes the DRS API in detail and provides information on the specific endpoints, request formats, and responses. This specification is intended for developers of DRS-compatible services and of clients that will call these DRS services. + + The key words MUST, MUST NOT, REQUIRED, SHALL, SHALL NOT, SHOULD, SHOULD NOT, RECOMMENDED, MAY, and OPTIONAL in this document are to be interpreted as described in [RFC 2119](https://datatracker.ietf.org/doc/html/rfc2119). + - name: DRS API Principles + description: | + ## DRS IDs + + Each implementation of DRS can choose its own id scheme, as long as it follows these guidelines: + + * DRS IDs are strings made up of uppercase and lowercase letters, decimal digits, hyphen, period, underscore and tilde [A-Za-z0-9.-_~]. See [RFC 3986 § 2.3](https://datatracker.ietf.org/doc/html/rfc3986#section-2.3). + * DRS IDs can contain other characters, but they MUST be encoded into valid DRS IDs whenever they are used in API calls. This is because non-encoded IDs may interfere with the interpretation of the objects/{id}/access endpoint. To overcome this limitation use percent-encoding of the ID, see [RFC 3986 § 2.4](https://datatracker.ietf.org/doc/html/rfc3986#section-2.4) + * One DRS ID MUST always return the same object data (or, in the case of a collection, the same set of objects). This constraint aids with reproducibility. 
+ * DRS implementations MAY have more than one ID that maps to the same object.
+ * DRS version 1.x does NOT support semantics around multiple versions of an object. (For example, there’s no notion of “get latest version” or “list all versions”.) Individual implementations MAY choose an ID scheme that includes version hints.
+
+ ## DRS URIs
+
+ For convenience, including when passing content references to a [WES server](https://github.com/ga4gh/workflow-execution-service-schemas), we define a [URI scheme](https://en.wikipedia.org/wiki/Uniform_Resource_Identifier#Generic_syntax) for DRS-accessible content. This section documents the syntax of DRS URIs, and the rules clients follow for translating a DRS URI into a URL that they use for making the DRS API calls described in this spec.
+
+ There are two styles of DRS URIs, Hostname-based and Compact Identifier-based, both using the `drs://` URI scheme. DRS servers may choose either style when exposing references to their content; DRS clients MUST support resolving both styles.
+
+ Tip:
+ > See [Appendix: Background Notes on DRS URIs](#tag/Background-Notes-on-DRS-URIs) for more information on our design motivations for DRS URIs.
+
+ ### Hostname-based DRS URIs
+
+ Hostname-based DRS URIs are simpler than compact identifier-based URIs. They contain the DRS server name and the DRS ID only and can be converted directly into a fetchable URL based on a simple rule. They take the form:
+
+ ```
+ drs://<hostname>/<id>
+ ```
+
+ DRS URIs of this form mean *\"you can fetch the content with DRS id \<id\> from the DRS server at \<hostname\>\"*.
+ For example, here are the client resolution steps if the URI is:
+
+ ```
+ drs://drs.example.org/314159
+ ```
+
+ 1. The client parses the string to extract the hostname of “drs.example.org” and the id of “314159”.
+ 2. 
The client makes a GET request to the DRS server, using the standard DRS URL syntax: + + ``` + GET https://drs.example.org/ga4gh/drs/v1/objects/314159 + ``` + + The protocol is always https and the port is always the standard 443 SSL port. It is invalid to include a different port in a DRS hostname-based URI. + + Tip: + > See the [Appendix: Hostname-Based URIs](#tag/Hostname-Based-URIs) for information on how hostname-based DRS URI resolution to URLs is likely to change in the future, when the DRS v2 major release happens. + + ### Compact Identifier-based DRS URIs + + Compact Identifier-based DRS URIs use resolver registry services (specifically, [identifiers.org](https://identifiers.org/) and [n2t.net (Name-To-Thing)](https://n2t.net/)) to provide a layer of indirection between the DRS URI and the DRS server name — the actual DNS name of the DRS server is not present in the URI. This approach is based on the Joint Declaration of Data Citation Principles as detailed by [Wimalaratne et al (2018)](https://www.nature.com/articles/sdata201829). + + For more information, see the document [More Background on Compact Identifiers](./more-background-on-compact-identifiers.html). + + Compact Identifiers take the form: + + ``` + drs://[provider_code/]namespace:accession + ``` + + Together, provider code and the namespace are referred to as the `prefix`. The provider code is optional and is used by identifiers.org/n2t.net for compact identifier resolver mirrors. Both the `provider_code` and `namespace` disallow spaces or punctuation, only lowercase alphanumerical characters, underscores and dots are allowed (e.g. [A-Za-z0-9._]). + + Tip: + > See the [Appendix: Compact Identifier-Based URIs](#tag/Compact-Identifier-Based-URIs) for more background on Compact Identifiers and resolver registry services like identifiers.org/n2t.net (aka meta-resolvers), how to register prefixes, possible caching strategies, and security considerations. 
+ + #### For DRS Servers + + If your DRS implementation will issue DRS URIs based *on your own* compact identifiers, you MUST first register a new prefix with identifiers.org (which is automatically mirrored to n2t.net). You will also need to include a provider resolver resource in this registration which links the prefix to your DRS server, so that DRS clients can get sufficient information to make a successful DRS GET request. For clarity, we recommend you choose a namespace beginning with `drs`. + + #### For DRS Clients + + A DRS client parses the DRS URI compact identifier components to extract the prefix and the accession, and then uses meta-resolver APIs to locate the actual DRS server. For example, here are the client resolution steps if the URI is: + + ``` + drs://drs.42:314159 + ``` + + 1. The client parses the string to extract the prefix of `drs.42` and the accession of `314159`, using the first occurrence of a colon (":") character after the initial `drs://` as a delimiter. (The colon character is not allowed in a Hostname-based DRS URI, making it easy to tell them apart.) + + 2. The client makes API calls to a meta-resolver to look up the URL pattern for the namespace. (See [Calling Meta-Resolver APIs for Compact Identifier-Based DRS URIs](#section/Calling-Meta-Resolver-APIs-for-Compact-Identifier-Based-DRS-URIs) for details.) The URL pattern is a string containing a `{$id}` parameter, such as: + + ``` + https://drs.myexample.org/ga4gh/drs/v1/objects/{$id} + ``` + + 3. The client generates a DRS URL from the URL template by replacing {$id} with the accession it extracted in step 1. It then makes a GET request to the DRS server: + + ``` + GET https://drs.myexample.org/ga4gh/drs/v1/objects/314159 + ``` + + 4. The client follows any HTTP redirects returned in step 3, in case the resolver goes through an extra layer of redirection. + + For performance reasons, DRS clients SHOULD cache the URL pattern returned in step 2, with a suggested 24 hour cache life. 
+ + ### Choosing a URI Style + + DRS servers can choose to issue either hostname-based or compact identifier-based DRS URIs, and can be confident that compliant DRS clients will support both. DRS clients must be able to accommodate both URI types. Tradeoffs that DRS server builders, and third parties who need to cite DRS objects in datasets, workflows or elsewhere, may want to consider include: + + *Table 1: Choosing a URI Style* + + | | Hostname-based | Compact Identifier-based | + |-------------------|----------------|--------------------------| + | URI Durability | URIs are valid for as long as the server operator maintains ownership of the published DNS address. (They can of course point that address at different physical serving infrastructure as often as they would like.) | URIs are valid for as long as the server operator maintains ownership of the published compact identifier resolver namespace. (They also depend on the meta-resolvers like identifiers.org/n2t.net remaining operational, which is intended to be essentially forever.) | + | Client Efficiency | URIs require minimal client logic, and no network requests, to resolve. | URIs require small client logic, and 1-2 cacheable network requests, to resolve. | + | Security | Servers have full control over their own security practices. | Server operators, in addition to maintaining their own security practices, should confirm they are comfortable with the resolver registry security practices, including protection against denial of service and namespace-hijacking attacks. (See the [Appendix: Compact Identifier-Based URIs](#tag/Compact-Identifier-Based-URIs) for more information on resolver registry security.) | + + ## DRS Datatypes + DRS's job is data access, period. Therefore, the DRS API supports a simple flat content model -- every `DrsObject`, like a file, represents a single opaque blob of bytes. DRS has no understanding of the meaning of objects and only provides simple domain-agnostic metadata. 
Understanding the semantics of specific object types is the responsibility of the applications that use DRS to fetch those objects (e.g. samtools for BAM files, DICOM viewers for DICOM objects). + + ### Atomic Objects + DRS can be used to access individual objects of all kinds, simple or complex, large or small, stored in type-specific formats (e.g. BAM files, VCF files, CSV files). At the API level these are all the same; at the application level, DRS clients and servers are expected to agree on object semantics using non-DRS mechanisms, including but not limited to the GA4GH Data Connect API. + + ### Compound Objects + DRS can also be used to access compound objects, consisting of two or more atomic objects related to each other in a well-specified way. See the [Appendix: Compound Objects](#tag/Working-With-Compound-Objects) for suggested best practices for working with compound objects. + + ### [DEPRECATED] Bundles + Previous versions of the DRS API spec included support for a *bundle* content type, which was a folder-like collection of other DRS objects (either blobs or bundles), represented by a `DrsObject` with a `contents` array. As of v1.3, bundles have been deprecated in favor of the best practices documented in the [Appendix: Compound Objects](#tag/Working-With-Compound-Objects). A future version of the API spec may remove bundle support entirely and/or replace bundles with a scalable approach based on the needs of our driver projects. + + ## Read-only + + DRS v1 is a read-only API. We expect that each implementation will define its own mechanisms and interfaces (graphical and/or programmatic) for adding and updating data. + + ## Standards + + The DRS API specification is written in OpenAPI and embodies a RESTful service philosophy. It uses JSON in requests and responses and standard HTTPS on port 443 for information transport. 
Optionally, it + supports authentication and authorization using the [GA4GH Passport](https://github.com/ga4gh-duri/ga4gh-duri.github.io/tree/master/researcher_ids) standard. + - name: Authorization & Authentication + description: "## Making DRS Requests\n\nThe DRS implementation is responsible for defining and enforcing an authorization policy that determines which users are allowed to make which requests. GA4GH recommends that DRS implementations use an OAuth 2.0 [bearer token](https://oauth.net/2/bearer-tokens/) or a [GA4GH Passport](https://github.com/ga4gh-duri/ga4gh-duri.github.io/tree/master/researcher_ids), although they can choose other mechanisms if appropriate.\n\n## Fetching DRS Objects\n\nThe DRS API allows implementers to support a variety of different content access policies, depending on what `AccessMethod` records they return. Implementers have a choice to make the\nGET /objects/{object_id} and GET /objects/{object_id}/access/{access_id} calls open or requiring a Basic, Bearer, or Passport token (Passport requiring a POST). The following describes the\nvarious access approaches following a successful GET/POST /objects/{object_id} request in order to them obtain access to the bytes for a given object ID/access ID:\n\n* public content:\n * server provides an `access_url` with a `url` and no `headers`\n * caller fetches the object bytes without providing any auth info\n* private content that requires the caller to have out-of-band auth knowledge (e.g. service account credentials):\n * server provides an `access_url` with a `url` and no `headers`\n * caller fetches the object bytes, passing the auth info they obtained out-of-band\n* private content that requires the caller to pass an Authorization token:\n * server provides an `access_url` with a `url` and `headers`\n * caller fetches the object bytes, passing auth info via the specified header(s)\n* private content that uses an expensive-to-generate auth mechanism (e.g. 
a signed URL):\n * server provides an `access_id`\n * caller passes the `access_id` to the `/access` endpoint\n * server provides an `access_url` with the generated mechanism (e.g. a signed URL in the `url` field)\n * caller fetches the object bytes from the `url` (passing auth info from the specified headers, if any)\n\nIn the approaches above [GA4GH Passports](https://github.com/ga4gh-duri/ga4gh-duri.github.io/tree/master/researcher_ids) are not mentioned and that is on purpose. A DRS server may return a Bearer token or other platform-specific token in a header in response to a valid Bearer token or GA4GH Passport (Option 3 above). But it is not the responsibility of a DRS server to return a Passport, that is the responsibility of a Passport Broker and outside the scope of DRS.\n\nDRS implementers should ensure their solutions restrict access to targets as much as possible, detect attempts to exploit through log monitoring, and they are prepared to take action if an exploit in their DRS implementation is detected.\n\n## Authentication\n\n### Discovery\n\nThe APIs to fetch [DrsObjects](#tag/DrsObjectModel) and [AccessURLs](#tag/AccessURLModel) may require authorization. The authorization mode may vary between DRS objects hosted by a service. The authorization mode may vary between the APIs to fetch a [DrsObject](#tag/DrsObjectModel) and an associated [AccessURL](#tag/AccessURLModel). Implementers should indicate how to authenticate to fetch a [DrsObject](#tag/DrsObjectModel) by implementing the [OptionsOjbect](#operation/OptionsObject) API. Implementers should indicate how to authenticate to fetch an [AccessURL](#tag/AccessURLModel) within a [DrsObject](#tag/DrsObjectModel). \n\n### Modes\n\n#### BasicAuth\n\nA valid authorization token must be passed in the 'Authorization' header, e.g. 
\"Basic ${token_string}\"\n\n| Security Scheme Type | HTTP |\n|----------------------|------|\n| **HTTP Authorization Scheme** | basic |\n\n#### BearerAuth\n\nA valid authorization token must be passed in the 'Authorization' header, e.g. \"Bearer ${token_string}\"\n\n| Security Scheme Type | HTTP |\n|----------------------|------|\n| **HTTP Authorization Scheme** | bearer |\n\n#### PassportAuth\n\nA valid authorization [GA4GH Passport](https://github.com/ga4gh-duri/ga4gh-duri.github.io/tree/master/researcher_ids) token must be passed in the body of a POST request\n\n| Security Scheme Type | HTTP |\n|----------------------|------|\n| **HTTP POST** | tokens[] |\n" + - name: Objects + - name: Upload Request + - name: Access Method Updates + description: "# Access Method Updates\n\n> **Optional Functionality**: Access method updates are optional extensions to the DRS API. Not all DRS servers are required to implement this functionality. Clients should check `/service-info` for `accessMethodUpdateSupported` before attempting to use these endpoints.\n\nAccess method update endpoints allows authorized clients to modify how existing DRS objects can be accessed without changing the core object metadata (size, checksums, name). 
This is useful for storage migrations, adding mirrors, or updating URLs.\n\nThese endpoints will overwrite existing access methods for an object, if clients want to add access methods in addition to existing ones for objects they should first retrieve the current access methods and include them in the update request along with the new methods.\n\n## Use Cases\n\n- **Storage Migration**: Move data between storage providers while keeping same DRS object\n- **Mirror Addition**: Add CDN or regional access points for better performance \n- **URL Refresh**: Update changed domain names\n- **Access Optimization**: Add or remove access methods based on performance or cost\n\n## Design Principles\n\n- **Optional**: Access method update support is completely optional\n- **Immutable Core**: Only access methods can be updated - size, checksums, name remain unchanged\n- **Atomic Bulk Operations**: All updates succeed or all fail (transactional)\n- **Optional Validation**: Servers MAY validate new access methods point to same data\n- **Flexible Authentication**: Supports GA4GH Passports, Bearer tokens, API keys\n\n## Service Discovery\n\nCheck `/service-info` for access method update capabilities:\n\n```json\n{\n \"drs\": {\n \"accessMethodUpdateSupported\": true,\n \"maxBulkAccessMethodUpdateLength\": 100,\n \"validateAccessMethodUpdates\": false\n }\n}\n```\n\n- **`accessMethodUpdateSupported`**: Whether server supports access method updates\n- **`maxBulkAccessMethodUpdateLength`**: Maximum objects per bulk update request\n- **`validateAccessMethodUpdates`**: Whether server validates new access methods\n\n## Single Object Update\n\nUpdate access methods for a single DRS object:\n\n```bash\ncurl -X POST \"https://drs.example.org/objects/obj_123/access-methods\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"access_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\n \"url\": \"https://new-cdn.example.org/data/file.bam\"\n }\n },\n {\n \"type\": \"s3\",\n 
\"access_id\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://new-bucket/migrated/file.bam\"\n }\n }\n ]\n }'\n```\n\n## Bulk Object Update\n\nUpdate access methods for multiple objects atomically:\n\n```bash\ncurl -X POST \"https://drs.example.org/objects/access-methods\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"updates\": [\n {\n \"object_id\": \"obj_123\",\n \"access_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\"url\": \"https://new-location.com/file1.bam\"}\n }\n ]\n },\n {\n \"object_id\": \"obj_456\", \n \"access_methods\": [\n {\n \"type\": \"s3\",\n \"access_url\": {\"url\": \"s3://new-bucket/file2.vcf\"}\n }\n ]\n }\n ]\n }'\n```\n\n## Authentication\n\n**GA4GH Passports** (in request body):\n```json\n{\n \"access_methods\": [...],\n \"passports\": [\"eyJhbGci...\"]\n}\n```\n\n**Bearer Tokens** (in headers):\n```bash\ncurl -H \"Authorization: Bearer token\" -d '{\"access_methods\": [...]}' ...\n```\n\n## Validation\n\nServers MAY validate that new access methods point to the same data by checking file availability, checksums or file content. 
Validation behavior is advertised in `validateAccessMethodUpdates` service-info field.\n\n\n## Error Responses\n\n- **400**: Invalid access methods or validation failure\n- **401**: Authentication required\n- **403**: Insufficient permissions for object(s)\n- **404**: Object not found or access method updates not supported\n- **413**: Bulk request exceeds `maxBulkAccessMethodUpdateLength` limit\n\n## Examples\n\n**Storage Migration:**\n```bash\n# Check server capabilities\ncurl \"https://drs.example.org/service-info\"\n\n# Update single object after migration\ncurl -X POST \"https://drs.example.org/objects/obj_123/access-methods\" \\\n -d '{\"access_methods\": [{\"type\": \"s3\", \"access_url\": {\"url\": \"s3://new-bucket/file.bam\"}}]}'\n```\n\n**Add CDN Mirror:**\n```bash\n# Add additional access method without removing existing ones\ncurl -X POST \"https://drs.example.org/objects/obj_456/access-methods\" \\\n -d '{\n \"access_methods\": [\n {\"type\": \"https\", \"access_url\": {\"url\": \"https://origin.example.org/file.vcf\"}},\n {\"type\": \"https\", \"access_url\": {\"url\": \"https://cdn.example.org/file.vcf\"}}\n ]\n }'\n```\n\n**Bulk Migration:**\n```bash\n# Migrate multiple objects atomically\ncurl -X POST \"https://drs.example.org/objects/access-methods\" \\\n -d '{\n \"updates\": [\n {\"object_id\": \"obj_1\", \"access_methods\": [...]},\n {\"object_id\": \"obj_2\", \"access_methods\": [...]}\n ]\n }'\n```\n\n## Best Practices\n\n**Clients**: Check service-info first, handle atomic transaction failures, respect bulk limits, verify permissions\n\n**Servers**: Advertise capabilities clearly, implement atomic transactions for bulk operations, validate permissions, consider optional validation for data integrity\n\n## Backward Compatibility\n\nAccess method update functionality is designed to be backward compatible:\n\n- **No Impact on Existing Endpoints**: All existing DRS endpoints remain unchanged\n- **Optional Implementation**: Servers can ignore this 
functionality entirely \n- **Graceful Degradation**: Clients receive 404 responses when not supported\n- **Safe Defaults**: New service-info fields have safe default values" + - name: Service Info + - name: AccessMethodModel + x-displayName: AccessMethod + description: | + + - name: AccessURLModel + x-displayName: AccessURL + description: | + + - name: ChecksumModel + x-displayName: Checksum + description: | + + - name: ContentsObjectModel + x-displayName: ContentsObject + description: | + + - name: DrsObjectModel + x-displayName: DrsObject + description: | + + - name: DrsObjectCandidateModel + x-displayName: DrsObjectCandidate + description: | + + - name: ErrorModel + x-displayName: Error + description: | + + - name: UploadRequestModel + x-displayName: UploadRequest + description: | + + - name: UploadResponseModel + x-displayName: UploadResponse + description: | + + - name: UploadRequestObjectModel + x-displayName: UploadRequestObject + description: | + + - name: UploadResponseObjectModel + x-displayName: UploadResponseObject + description: | + + - name: UploadMethodModel + x-displayName: UploadMethod + description: | + + - name: DeleteRequestModel + x-displayName: DeleteRequest + description: | + + - name: BulkDeleteRequestModel + x-displayName: BulkDeleteRequest + description: | + + - name: DeleteResultModel + x-displayName: DeleteResult + description: | + + - name: BulkDeleteResponseModel + x-displayName: BulkDeleteResponse + description: | + + - name: Motivation + description: | + + + + + +
+ Data sharing requires portable data, consistent with the FAIR data principles (findable, accessible, interoperable, reusable). Today’s researchers and clinicians are surrounded by potentially useful data, but often need bespoke tools and processes to work with each dataset. Today’s data publishers don’t have a reliable way to make their data useful to all (and only) the people they choose. And today’s data controllers are tasked with implementing standard controls of non-standard mechanisms for data access. + + + + Figure 1: there’s an ocean of data, with many different tools to drink from it, but no guarantee that any tool will work with any subset of the data + +
+ + + + + + +
+ We need a standard way for data producers to make their data available to data consumers, that supports the control needs of the former and the access needs of the latter. And we need it to be interoperable, so anyone who builds access tools and systems can be confident they’ll work with all the data out there, and anyone who publishes data can be confident it will work with all the tools out there. + + + + Figure 2: by defining a standard Data Repository API, and adapting tools to use it, every data publisher can now make their data useful to every data consumer + +
+ + + + + + +
+ We envision a world where: +
    +
  • + there are many many data consumers, working in research and in care, who can use the tools of their choice to access any and all data that they have permission to see +
  • +
  • + there are many data access tools and platforms, supporting discovery, visualization, analysis, and collaboration +
  • +
  • + there are many data repositories, each with their own policies and characteristics, which can be accessed by a variety of tools +
  • +
  • + there are many data publishing tools and platforms, supporting a variety of data lifecycles and formats +
  • +
  • + there are many many data producers, generating data of all types, who can use the tools of their choice to make their data as widely available as is appropriate +
  • +
+
+ + + Figure 3: a standard Data Repository API enables an ecosystem of data producers and consumers + +
+ + This spec defines a standard **Data Repository Service (DRS) API** (“the yellow box”), to enable that ecosystem of data producers and consumers. Our goal is that the only thing data consumers need to know about a data repo is *\"here’s the DRS endpoint to access it\"*, and the only thing data publishers need to know to tap into the world of consumption tools is *\"here’s how to tell it where my DRS endpoint lives\"*. + + ## Federation + + The world’s biomedical data is controlled by groups with very different policies and restrictions on where their data lives and how it can be accessed. A primary purpose of DRS is to support unified access to disparate and distributed data. (As opposed to the alternative centralized model of "let’s just bring all the data into one single data repository”, which would be technically easier but is no more realistic than “let’s just bring all the websites into one single web host”.) + + In a DRS-enabled world, tool builders don’t have to worry about where the data their tools operate on lives — they can count on DRS to give them access. And tool users only need to know which DRS server is managing the data they need, and whether they have permission to access it; they don’t have to worry about how to physically get access to, or (worse) make a copy of the data. For example, if I have appropriate permissions, I can run a pooled analysis where I run a single tool across data managed by different DRS servers, potentially in different locations. + - name: Working With Compound Objects + description: | + ## Compound Objects + + The DRS API supports access to data objects, with each `DrsObject` representing a single opaque blob of bytes. Much content (e.g. VCF files) is well represented as a single atomic `DrsObject`. Some content, however (e.g. DICOM images) is best represented as a compound object consisting of a structured collection of atomic `DrsObject`s. 
In both cases, DRS isn't aware of the semantics of the objects it serves -- understanding those semantics is the responsibility of the applications that call DRS. + + Common examples of compound objects in biomedicine include: + * BAM+BAI genomic reads, with a small index (the BAI object) to large data (the BAM object), each object using a well-defined file format. + * DICOM images, with a contents object pointing to one or more raw image objects, each containing pixels from different aspects of a single logical biomedical image (e.g. different z-coordinates) + * studies, with a single table of contents listing multiple objects of various types that were generated together and are meant to be processed together + + ## Best Practice: Manifests + + As with atomic objects, DRS applications and servers are expected to agree on the semantics of compound objects using non-DRS mechanisms. The recommended best practice for representing a particular compound object type is: + 1. Define a manifest file syntax, which contains the DRS IDs of the constituent atomic objects, plus type-specific information about the relationship between those constituents. + * Manifest file syntax isn't prescribed by the spec, but we expect they will often be JSON files. + * For example, for a BAM+BAI pair the manifest file could contain two key-value pairs mapping the type of each constituent file to its DRS ID. + 3. Make manifest objects and their constituent objects available using standard DRS mechanisms -- each object is referenced via its own DRS ID, just like any other atomic object. + * For example, for a BAM+BAI pair, there would be three DRS IDs -- one for the manifest, one for the BAM, and one for the BAI. + 5. Document the expected client logic for processing compound objects of interest. 
This logic typically consists of using standard DRS mechanisms to fetch the manifest, parsing its syntax, extracting the DRS IDs of constituent objects, and using standard DRS mechanisms to fetch the constituents as needed. + * In some cases the application will always want to fetch all of the constituents; in other cases it may want to initially fetch a subset, and only fetch the others on demand. For example, a DICOM image viewer may only want to fetch the layers that are being rendered. + - name: Background Notes on DRS URIs + description: | + ## Design Motivation + + DRS URIs are aligned with the [FAIR data principles](https://www.nature.com/articles/sdata201618) and the [Joint Declaration of Data Citation Principles](https://www.nature.com/articles/sdata20182) — both hostname-based and compact identifier-based URIs provide globally unique, machine-resolvable, persistent identifiers for data. + + * We require all URIs to begin with `drs://` as a signal to humans and systems consuming these URIs that the response they will ultimately receive, after transforming the URI to a fetchable URL, will be a DRS JSON packet. This signal differentiates DRS URIs from the wide variety of other entities (HTML documents, PDFs, ontology notes, etc.) that can be represented by compact identifiers. + * We support hostname-based URIs because of their simplicity and efficiency for server and client implementers. + * We support compact identifier-based URIs, and the meta-resolver services of identifiers.org and n2t.net (Name-to-Thing), because of the wide adoption of compact identifiers in the research community. as detailed by [Wimalaratne et al (2018)](https://www.nature.com/articles/sdata201829) in "Uniform resolution of compact identifiers for biomedical data." 
+ - name: Compact Identifier-Based URIs + description: | + **Note: Identifiers.org/n2t.net API Changes** + + The examples below show the current API interactions with [n2t.net](https://n2t.net/e/compact_ids.html) and [identifiers.org](https://docs.identifiers.org/) which may change over time. Please refer to the documentation from each site for the most up-to-date information. We will make best efforts to keep the DRS specification current but DRS clients MUST maintain their ability to use either the identifiers.org or n2t.net APIs to resolve compact identifier-based DRS URIs. + + ## Registering a DRS Server on a Meta-Resolver + + See the documentation on the [n2t.net](https://n2t.net/e/compact_ids.html) and [identifiers.org](https://docs.identifiers.org/) meta-resolvers for adding your own compact identifier type and registering your DRS server as a resolver. You can register new prefixes (or mirrors by adding resource provider codes) for free using a simple online form. For more information see [More Background on Compact Identifiers](./more-background-on-compact-identifiers.html). + + ## Calling Meta-Resolver APIs for Compact Identifier-Based DRS URIs + + Clients resolving Compact Identifier-based URIs need to convert a prefix (e.g. “drs.42”) into a URL pattern. They can do so by calling either the identifiers.org or the n2t.net API, since the two meta-resolvers keep their mapping databases in sync. + + ### Calling the identifiers.org API as a Client + + It takes two API calls to get the URL pattern. + + 1. The client makes a GET request to identifiers.org to find information about the prefix: + + ``` + GET https://registry.api.identifiers.org/restApi/namespaces/search/findByPrefix?prefix=drs.42 + ``` + + This request returns a JSON structure including various URLs containing an embedded namespace id, such as: + + ``` + "namespace" : { + "href":"https://registry.api.identifiers.org/restApi/namespaces/1234" + } + ``` + + 2. 
The client extracts the namespace id (in this example 1234), and uses it to make a second GET request to identifiers.org to find information about the namespace: + + ``` + GET https://registry.api.identifiers.org/restApi/resources/search/findAllByNamespaceId?id=1234 + ``` + + This request returns a JSON structure including an urlPattern field, whose value is a URL pattern containing a ${id} parameter, such as: + + ``` + "urlPattern" : "https://drs.myexample.org/ga4gh/drs/v1/objects/{$id}" + ``` + + ### Calling the n2t.net API as a Client + + It takes one API call to get the URL pattern. + + The client makes a GET request to n2t.net to find information about the namespace. (Note the trailing colon.) + + ``` + GET https://n2t.net/drs.42: + ``` + + This request returns a text structure including a redirect field, whose value is a URL pattern containing an `$id` parameter, such as: + + ``` + redirect: https://drs.myexample.org/ga4gh/drs/v1/objects/$id + ``` + + ## Caching with Compact Identifiers + + Identifiers.org/n2t.net compact identifier resolver records do not change frequently. This reality is useful for caching resolver records and their URL patterns for performance reasons. Builders of systems that use compact identifier-based DRS URIs should cache prefix resolver records from identifiers.org/n2t.net and occasionally refresh the records (such as every 24 hours). This approach will reduce the burden on these community services since we anticipate many DRS URIs will be regularly resolved in workflow systems. Alternatively, system builders may decide to directly mirror the registries themselves, instructions are provided on the identifiers.org/n2t.net websites. + + ## Security with Compact Identifiers + + As mentioned earlier, identifiers.org/n2t.net performs some basic verification of new prefixes and provider code mirror registrations on their sites. 
However, builders of systems that consume and resolve DRS URIs may have certain security compliance requirements and regulations that prohibit relying on an external site for resolving compact identifiers. In this case, systems under these security and compliance constraints may wish to whitelist certain compact identifier resolvers and/or vet records from identifiers.org/n2t.net before enabling in their systems. + + ## Accession Encoding to Valid DRS IDs + + The compact identifier format used by identifiers.org/n2t.net does not percent-encode reserved URI characters but, instead, relies on the first ":" character to separate prefix from accession. Since these accessions can contain any characters, and characters like "/" will interfere with DRS API calls, you *must* percent encode the accessions extracted from DRS compact identifier-based URIs when using as DRS IDs in subsequent DRS GET requests. An easy way for a DRS client to handle this is to get the initial DRS object JSON response from whatever redirects the compact identifier resolves to, then look for the `self_uri` in the JSON, which will give you the correctly percent-encoded DRS ID for subsequent DRS API calls such as the `access` method. + + ## Additional Examples + + For additional examples, see the document [More Background on Compact Identifiers](./more-background-on-compact-identifiers.html). + - name: Hostname-Based URIs + description: | + ## Encoding DRS IDs + + In hostname-based DRS URIs, the ID is always percent-encoded to ensure special characters do not interfere with subsequent DRS endpoint calls. As such, ":" is not allowed in the URI and is a convenient way of differentiating from a compact identifier-based DRS URI. Also, if a given DRS service implementation uses compact identifier accessions as their DRS IDs, they must be percent encoded before using them as DRS IDs in hostname-based DRS URIs and subsequent GET requests to a DRS service endpoint. 
+ - name: GA4GH Service Registry + description: | + The [GA4GH Service Registry API specification](https://github.com/ga4gh-discovery/ga4gh-service-registry) allows information about GA4GH-compliant web services, including DRS services, to be aggregated into registries and made available via a standard API. The following considerations should be followed when registering DRS services within a service registry. + + * The DRS service attributes returned by `/service-info` (i.e. `id`, `name`, `description`, etc.) should have the same values as the registry entry for that service. + * The value of the `type` object's `artifact` property should be `drs` (i.e. the same as it appears in `service-info`) + * Each entry in a Service Registry must have a `url`, indicating the base URL to the web service. For DRS services, the registered `url` must include everything up to + the standardized `/ga4gh/drs/v1` path. Clients should be able to assume that: + + Adding `/ga4gh/drs/v1/objects/{object_id}` to the registered `url` will hit the `DrsObject` endpoint + + Adding `/ga4gh/drs/v1/service-info` to the registered `url` will hit the Service Info endpoint + + Example listing of a DRS API registration from a service registry's `/services` endpoint: + + ``` + [ + { + "id": "com.example.drs", + "name": "Example DRS API", + "type": { + "group": "org.ga4gh", + "artifact": "drs", + "version": "1.5.0" + }, + "description": "The Data Repository Service (DRS) API ...", + "organization": { + "id": "com.example", + "name": "Example Company" + }, + "contactUrl": "mailto:support@example.com", + "documentationUrl": "https://docs.example.com/docs/drs", + "createdAt": "2021-08-09T00:00:00Z", + "updatedAt": "2021-08-09T12:30:00Z", + "environment": "production", + "version": "1.13.4", + "url": "https://drs-service.example.com" + } + ] + ``` + - name: Upload Requests and Object Registration + description: "# Upload Requests and Object Registration\n\n> **Optional Functionality**: Upload and object 
registration are optional DRS extensions. Clients should check `/service-info` for `uploadRequestSupported` and `objectRegistrationSupported` before attempting to use these endpoints.\n\nThe DRS upload and object registration endpoints allow clients to negotiate with servers on mutually convenient storage backends and then register uploads as DRS objects through a three-phase workflow:\n\n1. **Request Upload URLs**: POST `/upload-request` with file metadata to receive upload methods and credentials\n2. **Upload Files**: Use returned URLs and credentials to upload files to storage using existing upload mechanisms. DRS is not involved in this step at all, DRS simply enables clients and servers to agree on a mutually convenient storage service.\n3. **Register Objects**: POST `/objects/register` to register \"candidate\" DRS objects with the server\n\nThis approach separates storage service and credential negotiation from file transfer and object registration, supporting a vendor-neutral means of sharing data in a DRS network.\n\nThe `/objects/register` endpoint can be used independently to register existing data without using the `/upload-request` endpoint, and servers can choose to only support object registration and not file uploads by setting the `uploadRequestSupported` and `objectRegistrationSupported` flags appropriately in `/service-info`.\n\nUpload requests and object registration endpoints only support bulk requests to simplify implementation and reflect real-world usage patterns. Bioinformatics workflows often involve uploading multiple related files together (e.g., BAM and VCF files with their indices, or analysis result sets), making bulk operations a natural fit. Single files/objects are handled as lists with one element. Implementations of the `/objects/register` endpoint SHOULD implement transaction semantics so that either all of the objects are successfully registered or none of them are, and clients should be robust to this behaviour. 
Transaction semantics for the `/upload-request` are encouraged but not required due to the variety and complexity of data transfer technologies.\n\nThe `/upload-request` endpoint does not require any state to be maintained on the DRS server (intermediate DRS object IDs etc.); it is simply a means for a server to provide details of where a client can upload data, and it should ensure that it trusts the client before providing such details. This means that if uploads fail and there is no later call to `/objects/register` there is no DRS state to manage, simplifying server implementation.\n\nServers SHOULD ensure that any data from unsuccessful uploads (e.g. incomplete multi-part uploads) are cleaned up, for example by using lifecycle configuration in the backend storage. There is _no_ means of requiring that a client ultimately registers a DRS object pointing at data uploaded, and so servers should consider implementing some form of storage \"garbage\" collection (or simply set a short lifecycle policy on the upload location and move uploaded data that is later registered as DRS objects to other locations, updating the `access_method`s accordingly). Servers should also implement some means of constraining upload size (quotas etc.) to protect against accidental or malicious unconstrained uploads.\n\nThe `/upload-request` endpoint can return one or more `upload_method`s of different types for each requested file, and backend specific details such as bucket names, object keys and credentials are supplied in a generic `upload_details` field. A straightforward implementation might return a single time-limited pre-signed POST URL as the `post_url` for an `upload_method` of type `https` which incorporates authentication into the URL, but because DRS is often used for large files such as BAMs and CRAMs we also want to support more sophisticated upload approaches implemented by storage backends such as multi-part uploads, automatic retries etc. 
The `upload_details` field can also be used to include bucket names, keys and temporary credentials that can be used in native clients and SDKs. This offers a natural way to adapt this protocol to new storage technologies. Refer to the examples below for some suggested implementations.\n\n## Service Discovery\n\nCheck `/service-info` for upload capabilities:\n\n```json\n{\n \"drs\": {\n \"uploadRequestSupported\": true,\n \"objectRegistrationSupported\": true,\n \"supportedUploadMethods\": [\"s3\", \"https\", \"gs\"],\n \"maxUploadSize\": 5368709120,\n \"maxUploadRequestLength\": 50,\n \"maxRegisterRequestLength\": 50,\n \"validateUploadChecksums\": true,\n \"validateUploadFileSizes\": false,\n \"relatedFileStorageSupported\": true\n }\n}\n```\n\nUpload related fields:\n\n- `uploadRequestSupported`: Upload request operations available via `/upload-request`\n- `objectRegistrationSupported`: Object registration operations available via `/objects/register`\n- `supportedUploadMethods`: Available storage backends \n- `maxUploadSize`: File size limit (bytes)\n- `maxUploadRequestLength`: Files per request limit for upload requests\n- `maxRegisterRequestLength`: Candidate objects per request limit for registration\n- `validateUploadChecksums`/`validateUploadFileSizes`: Server validation behavior\n- `relatedFileStorageSupported`: Files from same upload request stored under common prefixes\n\n## Upload Methods\n\nUpon receipt of a request for an upload method for a specific file, the server will respond with one or more `upload_method` with associated `type` and corresponding `upload_details` with upload locations, temporary credentials etc. 
These details are specific to backend implementations.\n\nExample storage backends:\n\n- **https**: Presigned POST URLs for HTTP uploads\n- **s3**: Direct S3 upload with temporary AWS credentials\n- **gs**: Google Cloud Storage with OAuth2 tokens\n- **ftp/sftp**: Traditional file transfer protocols using negotiated credentials\n\nServers may return a subset of advertised methods based on file characteristics, for example they may choose to store large objects such as WGS BAM files in different backends than small csv files.\n\n## Related File Storage (Optional)\n\nServers MAY support storing files from the same upload request under common prefixes, enabling bioinformatics workflows that expect co-located files:\n\n- **CRAM + CRAI**: Alignment files with index files\n- **VCF + TBI**: Variant files with tabix indexes \n- **FASTQ.ora + ORADATA.tar.gz**: Compressed files with associated reference data\n\nCheck `relatedFileStorageSupported` in service-info or examine upload URLs for common prefixes.\n\n## Object Registration\n\nAfter upload, clients can register files in bulk as DRS objects using POST `/objects/register`. Registration is all-or-nothing. If any candidate object fails to be registered in the server, the entire request fails and no objects are registered.\n\n**Candidate DRS object requirements**:\n\n- Complete metadata (name, size, checksums, MIME type)\n- Access methods pointing to file locations \n- Valid authorization (if required)\n- Do not include server-generated fields (id, self_uri, timestamps)\n\nUpon receipt of candidate objects for registration the server will create unique object IDs and return complete DRS objects. Note that the server is not obliged to retain the client's supplied `access_method`s and is free to move data to different locations/backends once the object is registered. 
This means that a server can choose to receive uploads in a dedicated \"dropzone\", with hard quotas and additional security, and then move them to more permanent storage once the DRS object is registered. Clients SHOULD NOT cache the response from `/objects/register` as the `access_method`s might change after registration.\n\nThe `/objects/register` endpoint can also be used independently to register existing data that is already stored in accessible locations, without using the `/upload-request` workflow. This is useful for registering pre-existing datasets or files uploaded through other means. Servers may choose only to support registration and not uploads, and should advertise this in `/service-info`\n\n## Authentication & Validation\n\n**Authentication**: Supports GA4GH Passports, Basic auth, and Bearer tokens.\n\n**Checksums**: Required for all files (SHA-256, MD5, or IANA-registered algorithms). Servers MAY validate checksums and file sizes as advertised in service-info flags.\n\n## Error Handling\n\n**Client Errors (4xx)**:\n\n- Invalid metadata (400)\n- Missing auth (401)\n- Insufficient permissions (403)\n\n**Server Errors (5xx)**:\n\n- Storage unavailable (500)\n- Capacity limits (503)\n\n## Best Practices\n\n**Clients**: Check service-info first, calculate checksums, be robust to failed object registration\n**Servers**: Use short-lived tightly scoped credentials, support multiple upload methods, implement rate limiting, ensure unique storage backend names to avoid inadvertent overwrites (e.g. using UUIDs), ensure that quotas are enforced and incomplete or unregistered uploads are deleted\n**Security**: Time-limited credentials, single-use URLs, logging for audit\n\n## Security Considerations\n\n**Credential Scoping**: Implementers SHOULD scope upload credentials to the minimum necessary permissions and duration. Credentials should:\n\n- Allow write access only to the specific upload URL/path provided\n- Have the shortest practical expiration time (e.g. 
15 minutes to 1 hour)\n- Be restricted to the specific file size and content type when possible\n- Not grant broader storage access beyond the intended upload location\n\nThis principle of least privilege reduces security exposure if credentials are compromised or misused.\n\n## Example Workflows\n\n### Simple HTTPS Upload\n\nUpload Request:\n\n```http\nPOST /upload-request\nContent-Type: application/json\n\n{\n \"requests\": [\n {\n \"name\": \"variants.vcf\",\n \"size\": 52428800,\n \"mime_type\": \"text/plain\",\n \"checksums\": [\n {\n \"checksum\": \"5d41402abc4b2a76b9719d911017c592\",\n \"type\": \"md5\"\n }\n ]\n }\n ]\n}\n```\n\nResponse:\n\n```json\n{\n \"responses\": [\n {\n \"name\": \"variants.vcf\",\n \"size\": 52428800,\n \"mime_type\": \"text/plain\",\n \"checksums\": [\n {\n \"checksum\": \"5d41402abc4b2a76b9719d911017c592\",\n \"type\": \"md5\"\n }\n ],\n \"upload_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\n \"url\": \"https://uploads.example.org/variants.vcf\"\n },\n \"upload_details\": {\n \"post_url\": \"https://uploads.example.org/presigned-upload?signature=FAKE_SIG\"\n }\n }\n ]\n }\n ]\n}\n```\n\nUpload via HTTPS:\n\n```bash\n# Simple PUT upload to presigned URL\ncurl -X PUT \"https://uploads.example.org/presigned-upload?signature=FAKE_SIG\" \\\n --data-binary @variants.vcf\n```\n\nRegister DRS Object:\n\n```http\nPOST /objects/register\nContent-Type: application/json\n\n{\n \"candidates\": [\n {\n \"name\": \"variants.vcf\",\n \"size\": 52428800,\n \"mime_type\": \"text/plain\",\n \"checksums\": [\n {\n \"checksum\": \"5d41402abc4b2a76b9719d911017c592\",\n \"type\": \"md5\"\n }\n ],\n \"access_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\n \"url\": \"https://uploads.example.org/variants.vcf\"\n }\n }\n ],\n \"description\": \"Variant calls in VCF format\"\n }\n ]\n}\n```\n\nResponse:\n\n```json\n{\n \"objects\": [\n {\n \"id\": \"drs_obj_f6e5d4c3b2a1\",\n \"self_uri\": 
\"drs://drs.example.org/drs_obj_f6e5d4c3b2a1\",\n \"name\": \"variants.vcf\",\n \"size\": 52428800,\n \"mime_type\": \"text/plain\",\n \"created_time\": \"2024-01-15T10:45:00Z\",\n \"updated_time\": \"2024-01-15T10:45:00Z\",\n \"version\": \"1.0\",\n \"checksums\": [\n {\n \"checksum\": \"5d41402abc4b2a76b9719d911017c592\",\n \"type\": \"md5\"\n }\n ],\n \"access_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\n \"url\": \"https://uploads.example.org/variants.vcf\"\n }\n }\n ],\n \"description\": \"Variant calls in VCF format\"\n }\n ]\n}\n```\n\n### S3 Bulk Upload (BAM + Index)\n\nRequest Upload Methods for Related Files\n\n```http\nPOST /upload-request\nContent-Type: application/json\n\n{\n \"requests\": [\n {\n \"name\": \"sample.bam\",\n \"size\": 1073741824,\n \"mime_type\": \"application/octet-stream\",\n \"checksums\": [\n {\n \"checksum\": \"d41d8cd98f00b204e9800998ecf8427e\",\n \"type\": \"md5\"\n }\n ]\n },\n {\n \"name\": \"sample.bam.bai\",\n \"size\": 2097152,\n \"mime_type\": \"application/octet-stream\",\n \"checksums\": [\n {\n \"checksum\": \"098f6bcd4621d373cade4e832627b4f6\",\n \"type\": \"md5\"\n }\n ]\n }\n ]\n}\n```\n\nResponse:\n\n```json\n{\n \"responses\": [\n {\n \"name\": \"sample.bam\",\n \"size\": 1073741824,\n \"mime_type\": \"application/octet-stream\",\n \"checksums\": [\n {\n \"checksum\": \"d41d8cd98f00b204e9800998ecf8427e\",\n \"type\": \"md5\"\n }\n ],\n \"upload_methods\": [\n {\n \"type\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://genomics-uploads/x7k9m/sample.bam\"\n },\n \"upload_details\": {\n \"bucket\": \"genomics-uploads\",\n \"key\": \"x7k9m/sample.bam\",\n \"access_key_id\": \"FAKE_ACCESS_KEY_123\",\n \"secret_access_key\": \"FAKE_SECRET_KEY_456\",\n \"session_token\": \"FAKE_SESSION_TOKEN_789\",\n \"expires_at\": \"2024-01-15T12:00:00Z\"\n }\n }\n ]\n },\n {\n \"name\": \"sample.bam.bai\",\n \"size\": 2097152,\n \"mime_type\": \"application/octet-stream\",\n \"checksums\": [\n {\n \"checksum\": 
\"098f6bcd4621d373cade4e832627b4f6\",\n \"type\": \"md5\"\n }\n ],\n \"upload_methods\": [\n {\n \"type\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://genomics-uploads/x7k9m/sample.bam.bai\"\n },\n \"upload_details\": {\n \"bucket\": \"genomics-uploads\",\n \"key\": \"x7k9m/sample.bam.bai\",\n \"access_key_id\": \"FAKE_ACCESS_KEY_123\",\n \"secret_access_key\": \"FAKE_SECRET_KEY_456\",\n \"session_token\": \"FAKE_SESSION_TOKEN_789\",\n \"expires_at\": \"2024-01-15T12:00:00Z\"\n }\n }\n ]\n }\n ]\n}\n```\n\nUpload Both Files to S3:\n\n```bash\n# Upload BAM and index files using the supplied credentials (note common prefix)\naws s3 cp sample.bam s3://genomics-uploads/x7k9m/sample.bam\naws s3 cp sample.bam.bai s3://genomics-uploads/x7k9m/sample.bam.bai\n```\n\nRegister Both DRS Objects:\n\n```http\nPOST /objects/register\nContent-Type: application/json\n\n{\n \"candidates\": [\n {\n \"name\": \"sample.bam\",\n \"size\": 1073741824,\n \"mime_type\": \"application/octet-stream\",\n \"checksums\": [\n {\n \"checksum\": \"d41d8cd98f00b204e9800998ecf8427e\",\n \"type\": \"md5\"\n }\n ],\n \"access_methods\": [\n {\n \"type\": \"s3\",\n \"access_id\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://genomics-uploads/x7k9m/sample.bam\"\n }\n }\n ],\n \"description\": \"BAM alignment file\"\n },\n {\n \"name\": \"sample.bam.bai\",\n \"size\": 2097152,\n \"mime_type\": \"application/octet-stream\",\n \"checksums\": [\n {\n \"checksum\": \"098f6bcd4621d373cade4e832627b4f6\",\n \"type\": \"md5\"\n }\n ],\n \"access_methods\": [\n {\n \"type\": \"s3\",\n \"access_id\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://genomics-uploads/x7k9m/sample.bam.bai\"\n }\n }\n ],\n \"description\": \"BAM index file\"\n }\n ]\n}\n```\n\nResponse:\n\n```json\n{\n \"objects\": [\n {\n \"id\": \"drs_obj_a1b2c3d4e5f6\",\n \"self_uri\": \"drs://drs.example.org/drs_obj_a1b2c3d4e5f6\",\n \"name\": \"sample.bam\",\n \"size\": 1073741824,\n \"mime_type\": \"application/octet-stream\",\n \"created_time\": 
\"2024-01-15T10:30:00Z\",\n \"updated_time\": \"2024-01-15T10:30:00Z\",\n \"version\": \"1.0\",\n \"checksums\": [\n {\n \"checksum\": \"d41d8cd98f00b204e9800998ecf8427e\",\n \"type\": \"md5\"\n }\n ],\n \"access_methods\": [\n {\n \"type\": \"s3\",\n \"access_id\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://genomics-uploads/x7k9m/sample.bam\"\n }\n }\n ],\n \"description\": \"BAM alignment file\"\n },\n {\n \"id\": \"drs_obj_b2c3d4e5f6a1\",\n \"self_uri\": \"drs://drs.example.org/drs_obj_b2c3d4e5f6a1\",\n \"name\": \"sample.bam.bai\",\n \"size\": 2097152,\n \"mime_type\": \"application/octet-stream\",\n \"created_time\": \"2024-01-15T10:30:00Z\",\n \"updated_time\": \"2024-01-15T10:30:00Z\",\n \"version\": \"1.0\",\n \"checksums\": [\n {\n \"checksum\": \"098f6bcd4621d373cade4e832627b4f6\",\n \"type\": \"md5\"\n }\n ],\n \"access_methods\": [\n {\n \"type\": \"s3\",\n \"access_id\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://genomics-uploads/x7k9m/sample.bam.bai\"\n }\n }\n ],\n \"description\": \"BAM index file\"\n }\n ]\n}\n```\n" + - name: Object Deletion + description: "# Object Deletion\n\n> **Optional Functionality**: Delete support is an **optional** extension to the DRS API. Not all DRS servers are required to implement delete functionality. Clients should check for the availability of delete endpoints before attempting to use them.\n\nDRS delete functionality allows suitably authenticated clients to request that DRS objects are removed from the server and, optionally, to request that the server attempt to delete the underlying data.\n\nServers should ensure that they trust clients from whom they receive delete requests, and may choose to implement \"soft\" deletes to minimise the risk of accidental or malicious requests. The DRS specification does not currently provide explicit support for soft deletes. 
Because delete support is optional, servers operating in untrusted environments may choose not to support delete operations at all.\n\nIn combination with the `/objects/register` endpoint, metadata only delete requests offer a means for clients to update DRS metadata without affecting the underlying data, and without introducing additional update operations which would complicate server implementation.\n\nClients can express a preference that the underlying data referred to by the deleted DRS object(s) is deleted with the `delete_storage_data` parameter. Servers are free to interpret this as they choose, and can advertise whether they support it at all with the `deleteStorageDataSupported` flag. Servers that choose to attempt to honour the request need not perform this operation synchronously and may, for example, register the file for later deletion. Implementations may also choose to ensure that no other DRS object registered in the server refers to the underlying data before deleting. Servers may not have the necessary permissions to delete the data from the backend even if they would like to do so, or may encounter errors when they attempt deletion. In the case that a DRS object refers to data stored in multiple backends (e.g. 
has multiple `access_method`s) the server may attempt to delete the data from all or only some of the backends.\n\nFor these reasons clients MUST NOT depend on the server deleting the underlying storage data even if the server advertises that `deleteStorageDataSupported` and the client sets the `delete_storage_data` flag.\n\nIn situations where the DRS server controls the storage backend, DRS delete support offers a convenient vendor-neutral way for clients to update and delete DRS objects and corresponding data.\n\nFor bulk deletes using the `/objects/delete` endpoint the server SHOULD implement transaction semantics: if any object fails validation or deletion, the entire request should fail and no objects are deleted and no attempt is made to delete from underlying storage for any object.\n\n## Design principles\n\n- **Optional**: Delete support is completely optional\n- **Safety**: Preserves underlying data in storage unless explicitly requested\n- **Backward compatible**: No impact on existing DRS functionality\n- **Flexible authentication**: Supports GA4GH Passports, Bearer tokens, API keys\n- **Use POST rather than DELETE**: GA4GH Passports require request bodies, which DELETE methods don't reliably support across all HTTP infrastructure. 
POST ensures broad compatibility.\n\n## Service Discovery\n\nCheck `/service-info` for delete capabilities:\n\n```json\n{\n \"drs\": {\n \"uploadRequestSupported\": true,\n \"objectRegistrationSupported\": true,\n \"supportedUploadMethods\": [\"s3\", \"https\"],\n \"relatedFileStorageSupported\": true,\n \"deleteSupported\": true,\n \"maxBulkDeleteLength\": 100,\n \"deleteStorageDataSupported\": true\n }\n}\n```\n\n- **`deleteSupported`**: Whether server supports deletion\n- **`maxBulkDeleteLength`**: Maximum objects per bulk delete request \n- **`deleteStorageDataSupported`**: Whether server can attempt to delete underlying storage files\n\n### Single Object Delete: `POST /objects/{object_id}/delete`\n\n```bash\ncurl -X POST \"https://drs.example.org/objects/drs_object_123456/delete\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\"passports\": [\"...\"], \"delete_storage_data\": false}'\n# Response: 204 No Content (indicates metadata deletion success only)\n```\n\n**Note**: HTTP responses indicate metadata deletion status only. Storage deletion (`delete_storage_data: true`) is a best effort attempt with no guarantee of success.\n\n### Bulk Object Delete: `POST /objects/delete`\n\n```bash\ncurl -X POST \"https://drs.example.org/objects/delete\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"bulk_object_ids\": [\"obj_1\", \"obj_2\", \"obj_3\"],\n \"passports\": [\"...\"],\n \"delete_storage_data\": false\n }'\n# Response: 204 No Content (all metadata deleted) or 4xx error (no objects deleted)\n```\n\n## Authentication\n\n**GA4GH Passports** (in request body):\n\n```json\n{\"passports\": [\"eyJhbGci...\"], \"delete_storage_data\": false}\n```\n\n**Bearer Tokens** (in headers):\n\n```bash\ncurl -H \"Authorization: Bearer token\" -d '{\"delete_storage_data\": false}' ...\n```\n\n## Underlying Storage Data\n\n**Important**: Storage data deletion is never guaranteed. 
Even when `delete_storage_data: true` is requested and the server supports it, the actual deletion may fail due to permissions, network issues, or storage service errors. Clients should not depend on storage deletion success.\n\nClients can request that the server attempts to delete the underlying data referred to by the DRS object using the `delete_storage_data` parameter.\n\n**`delete_storage_data: false`** (default): Removes DRS object metadata only, preserves underlying storage files\n\n**`delete_storage_data: true`**: Removes metadata AND requests server attempt to delete underlying storage files (requires `deleteStorageDataSupported: true`, **success not guaranteed**)\n\n## Update Pattern\n\nRather than introducing additional operations and endpoints for updating DRS objects, servers can allow clients to use the metadata-only deletion and object registration endpoints to create a new DRS object with updated metadata while leaving the underlying data in place.\n\n**Metadata update steps:**\n\n1. Delete metadata only: `POST /objects/{id}/delete` with `delete_storage_data: false`\n2. 
Re-register object: `POST /objects/register` with updated metadata\n\n```bash\n# Delete metadata (preserves storage)\ncurl -X POST \".../objects/obj_123/delete\" -d '{\"delete_storage_data\": false}'\n# Re-register with updates\ncurl -X POST \".../objects/register\" -d '{\"candidates\": [{\"name\": \"updated.txt\", ...}]}'\n```\n\n## Error Responses\n\n- **400**: Unsupported storage deletion or invalid request parameters\n- **403**: Insufficient permissions for any object in the request\n- **404**: Any object not found or delete endpoints not supported by server\n- **413**: Bulk request exceeds `maxBulkDeleteLength` limit\n\n## Examples\n\n**Metadata Update:**\n\n```bash\ncurl \".../service-info\" # Check capabilities\ncurl -X POST \".../objects/obj_123/delete\" -d '{\"delete_storage_data\": false}'\ncurl -X POST \".../objects/register\" -d '{\"candidates\": [{\"name\": \"updated.vcf\", ...}]}'\n```\n\n**Complete Removal:**\n\n```bash\ncurl -X POST \".../objects/obj_456/delete\" -H \"Authorization: Bearer token\" \\\n -d '{\"delete_storage_data\": true}'\n```\n\n**Bulk Delete (Atomic):**\n\n```bash\ncurl -X POST \".../objects/delete\" -d '{\n \"bulk_object_ids\": [\"obj_1\", \"obj_2\"],\n \"passports\": [\"...\"],\n \"delete_storage_data\": false\n}'\n# All objects deleted or none deleted (transactional)\n```\n\n## Best Practices\n\n**Clients:** Check service-info, default to safe deletion, handle transactional failures, respect limits, confirm destructive operations, do not rely on underlying storage deletion\n\n**Servers:** Advertise capabilities, validate permissions, implement atomic transactions, implement limits, use versioning to avoid inadvertent deletion.\n\n## Security Considerations\n\n- **Authentication**: Validate GA4GH Passports and Bearer tokens\n- **HTTPS Required**: Protect credentials in transit\n- **Rate Limiting**: Prevent abuse of delete endpoints\n- **Input Validation**: Sanitize all request parameters\n\n## Backward Compatibility\n\nDelete 
functionality is designed to be backward compatible:\n\n- **No Impact on Existing Endpoints**: All existing DRS endpoints remain unchanged\n- **Optional Implementation**: Servers can ignore delete functionality entirely\n- **Graceful Degradation**: Clients receive 404 responses when delete is not supported\n- **Safe Defaults**: New fields in service-info have safe default values, and requests default to leaving underlying data in place.\n" + - name: Access Method Update + description: "# Access Method Updates\n\n> **Optional Functionality**: Access method updates are optional extensions to the DRS API. Not all DRS servers are required to implement this functionality. Clients should check `/service-info` for `accessMethodUpdateSupported` before attempting to use these endpoints.\n\nAccess method update endpoints allows authorized clients to modify how existing DRS objects can be accessed without changing the core object metadata (size, checksums, name). This is useful for storage migrations, adding mirrors, or updating URLs.\n\nThese endpoints will overwrite existing access methods for an object, if clients want to add access methods in addition to existing ones for objects they should first retrieve the current access methods and include them in the update request along with the new methods.\n\n## Use Cases\n\n- **Storage Migration**: Move data between storage providers while keeping same DRS object\n- **Mirror Addition**: Add CDN or regional access points for better performance \n- **URL Refresh**: Update changed domain names\n- **Access Optimization**: Add or remove access methods based on performance or cost\n\n## Design Principles\n\n- **Optional**: Access method update support is completely optional\n- **Immutable Core**: Only access methods can be updated - size, checksums, name remain unchanged\n- **Atomic Bulk Operations**: All updates succeed or all fail (transactional)\n- **Optional Validation**: Servers MAY validate new access methods point to same 
data\n- **Flexible Authentication**: Supports GA4GH Passports, Bearer tokens, API keys\n\n## Service Discovery\n\nCheck `/service-info` for access method update capabilities:\n\n```json\n{\n \"drs\": {\n \"accessMethodUpdateSupported\": true,\n \"maxBulkAccessMethodUpdateLength\": 100,\n \"validateAccessMethodUpdates\": false\n }\n}\n```\n\n- **`accessMethodUpdateSupported`**: Whether server supports access method updates\n- **`maxBulkAccessMethodUpdateLength`**: Maximum objects per bulk update request\n- **`validateAccessMethodUpdates`**: Whether server validates new access methods\n\n## Single Object Update\n\nUpdate access methods for a single DRS object:\n\n```bash\ncurl -X POST \"https://drs.example.org/objects/obj_123/access-methods\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"access_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\n \"url\": \"https://new-cdn.example.org/data/file.bam\"\n }\n },\n {\n \"type\": \"s3\",\n \"access_id\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://new-bucket/migrated/file.bam\"\n }\n }\n ]\n }'\n```\n\n## Bulk Object Update\n\nUpdate access methods for multiple objects atomically:\n\n```bash\ncurl -X POST \"https://drs.example.org/objects/access-methods\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"updates\": [\n {\n \"object_id\": \"obj_123\",\n \"access_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\"url\": \"https://new-location.com/file1.bam\"}\n }\n ]\n },\n {\n \"object_id\": \"obj_456\", \n \"access_methods\": [\n {\n \"type\": \"s3\",\n \"access_url\": {\"url\": \"s3://new-bucket/file2.vcf\"}\n }\n ]\n }\n ]\n }'\n```\n\n## Authentication\n\n**GA4GH Passports** (in request body):\n```json\n{\n \"access_methods\": [...],\n \"passports\": [\"eyJhbGci...\"]\n}\n```\n\n**Bearer Tokens** (in headers):\n```bash\ncurl -H \"Authorization: Bearer token\" -d '{\"access_methods\": [...]}' ...\n```\n\n## Validation\n\nServers MAY validate that new access methods point to the 
same data by checking file availability, checksums or file content. Validation behavior is advertised in `validateAccessMethodUpdates` service-info field.\n\n\n## Error Responses\n\n- **400**: Invalid access methods or validation failure\n- **401**: Authentication required\n- **403**: Insufficient permissions for object(s)\n- **404**: Object not found or access method updates not supported\n- **413**: Bulk request exceeds `maxBulkAccessMethodUpdateLength` limit\n\n## Examples\n\n**Storage Migration:**\n```bash\n# Check server capabilities\ncurl \"https://drs.example.org/service-info\"\n\n# Update single object after migration\ncurl -X POST \"https://drs.example.org/objects/obj_123/access-methods\" \\\n -d '{\"access_methods\": [{\"type\": \"s3\", \"access_url\": {\"url\": \"s3://new-bucket/file.bam\"}}]}'\n```\n\n**Add CDN Mirror:**\n```bash\n# Add additional access method without removing existing ones\ncurl -X POST \"https://drs.example.org/objects/obj_456/access-methods\" \\\n -d '{\n \"access_methods\": [\n {\"type\": \"https\", \"access_url\": {\"url\": \"https://origin.example.org/file.vcf\"}},\n {\"type\": \"https\", \"access_url\": {\"url\": \"https://cdn.example.org/file.vcf\"}}\n ]\n }'\n```\n\n**Bulk Migration:**\n```bash\n# Migrate multiple objects atomically\ncurl -X POST \"https://drs.example.org/objects/access-methods\" \\\n -d '{\n \"updates\": [\n {\"object_id\": \"obj_1\", \"access_methods\": [...]},\n {\"object_id\": \"obj_2\", \"access_methods\": [...]}\n ]\n }'\n```\n\n## Best Practices\n\n**Clients**: Check service-info first, handle atomic transaction failures, respect bulk limits, verify permissions\n\n**Servers**: Advertise capabilities clearly, implement atomic transactions for bulk operations, validate permissions, consider optional validation for data integrity\n\n## Backward Compatibility\n\nAccess method update functionality is designed to be backward compatible:\n\n- **No Impact on Existing Endpoints**: All existing DRS endpoints 
remain unchanged\n- **Optional Implementation**: Servers can ignore this functionality entirely \n- **Graceful Degradation**: Clients receive 404 responses when not supported\n- **Safe Defaults**: New service-info fields have safe default values" +paths: + /service-info: + get: + summary: Retrieve information about this service + description: "Returns information about the DRS service along with stats pertaning to total object count and cumulative size in bytes.\nAlso indicates whether the server supports optional upload and delete operations and which methods are available.\n\nExtends the\n[v1.0.0 GA4GH Service Info specification](https://github.com/ga4gh-discovery/ga4gh-service-info)\nas the standardized format for GA4GH web services to self-describe.\n\nAccording to the \n[service-info type registry](https://github.com/ga4gh/TASC/blob/master/service-info/ga4gh-service-info.json)\nmaintained by the [Technical Alignment Sub Committee (TASC)](https://github.com/ga4gh/TASC),\na DRS service MUST have:\n * a `type.group` value of `org.ga4gh`\n * a `type.artifact` value of `drs`\n\n**Example 1: Server with upload and delete capabilities**\n```\n{\n \"id\": \"com.example.drs\",\n \"description\": \"Serves data according to DRS specification\",\n ...\n \"type\": {\n \"group\": \"org.ga4gh\",\n \"artifact\": \"drs\",\n \"version\": \"1.5\"\n }\n ...\n \"drs\":{\n \"maxBulkRequestLength\": 200,\n \"objectCount\": 774560,\n \"totalObjectSize\": 4018437188907752,\n \"uploadRequestSupported\": true,\n \"objectRegistrationSupported\": true,\n \"supportedUploadMethods\": [\"s3\", \"https\", \"gs\"],\n \"maxUploadSize\": 5368709120,\n \"maxUploadRequestLength\": 50,\n \"validateUploadChecksums\": true,\n \"validateUploadFileSizes\": false,\n \"relatedFileStorageSupported\": true,\n \"deleteSupported\": true,\n \"maxBulkDeleteLength\": 100,\n \"deleteStorageDataSupported\": true\n }\n}\n```\n\n**Example 2: Read-only server (no upload or delete)**\n```\n{\n \"id\": 
\"com.example.readonly-drs\",\n \"description\": \"Read-only DRS service\",\n ...\n \"type\": {\n \"group\": \"org.ga4gh\",\n \"artifact\": \"drs\",\n \"version\": \"1.5\"\n }\n ...\n \"drs\":{\n \"maxBulkRequestLength\": 500,\n \"objectCount\": 1250000,\n \"totalObjectSize\": 8500000000000000\n }\n}\n```\n\n**Example 3: Server with metadata-only delete capability**\n```\n{\n \"id\": \"com.example.metadata-drs\",\n \"description\": \"DRS service with metadata-only delete\",\n ...\n \"type\": {\n \"group\": \"org.ga4gh\",\n \"artifact\": \"drs\",\n \"version\": \"1.5\"\n }\n ...\n \"drs\":{\n \"maxBulkRequestLength\": 200,\n \"objectCount\": 500000,\n \"totalObjectSize\": 2500000000000000,\n \"deleteSupported\": true,\n \"maxBulkDeleteLength\": 50,\n \"deleteStorageDataSupported\": false\n }\n}\n```\n\nSee the [Service Registry Appendix](#tag/GA4GH-Service-Registry) for more information on how to register a DRS service with a service registry." + operationId: GetServiceInfo + responses: + '200': + $ref: '#/components/responses/200ServiceInfo' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Service Info + /objects/{object_id}: + options: + summary: Get Authorization info about a DrsObject. + security: + - {} + description: Returns a list of `Authorizations` that can be used to determine how to authorize requests to `GetObject` or `PostObject`. + operationId: OptionsObject + parameters: + - $ref: '#/components/parameters/ObjectId' + responses: + '200': + $ref: '#/components/responses/200OkAuthorizations' + '204': + $ref: '#/components/responses/AuthorizationsNotSupported' + '400': + $ref: '#/components/responses/400BadRequest' + '404': + $ref: '#/components/responses/404NotFoundDrsObject' + '405': + $ref: '#/components/responses/AuthorizationsNotSupported' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Objects + x-swagger-router-controller: ga4gh.drs.server + get: + summary: Get info about a DrsObject. 
+ description: Returns object metadata, and a list of access methods that can be used to fetch object bytes. + operationId: GetObject + parameters: + - $ref: '#/components/parameters/ObjectId' + - $ref: '#/components/parameters/Expand' + responses: + '200': + $ref: '#/components/responses/200OkDrsObject' + '202': + $ref: '#/components/responses/202Accepted' + '400': + $ref: '#/components/responses/400BadRequest' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403Forbidden' + '404': + $ref: '#/components/responses/404NotFoundDrsObject' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Objects + x-swagger-router-controller: ga4gh.drs.server + post: + summary: Get info about a DrsObject through POST'ing a Passport. + description: |- + Returns object metadata and a list of access methods that can be used to fetch object bytes. Method is a POST to accommodate a JWT GA4GH Passport sent in the request body in order to authorize access. + **Note**: To upload new files and register them as DRS objects, use the `/upload-request` endpoint to obtain upload methods and temporary credentials, then use POST `/objects/register` endpoint to register multiple objects at once. Note that upload functionality is optional and not all DRS servers implement the upload endpoints. 
+ operationId: PostObject + security: + - PassportAuth: [] + responses: + '200': + $ref: '#/components/responses/200OkDrsObject' + '202': + $ref: '#/components/responses/202Accepted' + '400': + $ref: '#/components/responses/400BadRequest' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403Forbidden' + '404': + $ref: '#/components/responses/404NotFoundAccess' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Objects + x-swagger-router-controller: ga4gh.drs.server + parameters: + - $ref: '#/components/parameters/ObjectId' + requestBody: + $ref: '#/components/requestBodies/PostObjectBody' + /objects/{object_id}/delete: + post: + summary: Delete a DRS object (optional endpoint) + description: |- + **Optional Endpoint**: This endpoint is not required for DRS server implementations. Not all DRS servers support delete functionality. + Deletes a DRS object by ID. This operation removes the DRS object metadata and optionally attempts to delete the underlying storage data based on the delete_storage_data parameter and server capabilities. + By default, only DRS object metadata is deleted while preserving underlying storage data. To attempt storage data deletion, clients must explicitly set delete_storage_data to true and the server must support storage data deletion (advertised via `deleteStorageDataSupported` in service-info). Servers will make a best effort attempt to delete storage data, but success is not guaranteed. + This endpoint uses POST method to accommodate GA4GH Passport authentication in the request body, ensuring compatibility across all HTTP clients and proxies. 
+ **Important**: HTTP responses (204 No Content) indicate metadata deletion success only, not storage deletion success (which are not guaranteed to complete synchronously if they occur at all) + operationId: DeleteObject + security: + - {} + - BasicAuth: [] + - BearerAuth: [] + - PassportAuth: [] + parameters: + - $ref: '#/components/parameters/ObjectId' + requestBody: + $ref: '#/components/requestBodies/DeleteBody' + responses: + '204': + $ref: '#/components/responses/204DeleteSuccess' + '400': + $ref: '#/components/responses/400BadRequestDelete' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403ForbiddenDelete' + '404': + $ref: '#/components/responses/404NotFoundDelete' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Objects + x-swagger-router-controller: ga4gh.drs.server + x-codegen-request-body-name: body + examples: + successful_metadata_delete: + summary: Successful metadata-only deletion + description: Complete example of successfully deleting DRS object metadata while preserving storage data + value: + request: + method: POST + url: /objects/drs_object_123456/delete + headers: + Content-Type: application/json + body: + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + delete_storage_data: false + response: + status: 204 + headers: + Content-Length: '0' + successful_full_delete: + summary: Successful full deletion + description: Complete example of successfully deleting both metadata and storage data + value: + request: + method: POST + url: /objects/drs_object_123456/delete + headers: + Content-Type: application/json + body: + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + delete_storage_data: true + response: + status: 204 + headers: + Content-Length: '0' + object_not_found: + summary: Object not found error + 
description: Complete example when trying to delete a non-existent object + value: + request: + method: POST + url: /objects/nonexistent_object/delete + headers: + Content-Type: application/json + body: + delete_storage_data: false + response: + status: 404 + headers: + Content-Type: application/json + body: + msg: DRS object nonexistent_object does not exist + status_code: 404 + insufficient_permissions: + summary: Insufficient permissions error + description: Complete example when client lacks delete permissions + value: + request: + method: POST + url: /objects/drs_object_123456/delete + headers: + Content-Type: application/json + body: + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpbnZhbGlkX3Bhc3Nwb3J0IjoidHJ1ZSJ9.invalid_signature + delete_storage_data: false + response: + status: 403 + headers: + Content-Type: application/json + body: + msg: Client lacks delete permission for object drs_object_123456 + status_code: 403 + /objects/delete: + post: + summary: Delete multiple DRS objects + description: |- + **Optional Endpoint**: This endpoint is not required for DRS server implementations. Not all DRS servers support delete functionality. + Delete multiple DRS objects in a single atomic transaction. If ANY object fails to be deleted, the ENTIRE request fails and NO objects are deleted. This ensures data consistency and prevents partial deletion scenarios. + **RECOMMENDED - Transactional Behavior**: Deletion operations SHOULD be atomic transactions. If ANY object fails validation or deletion, the ENTIRE request SHOULD fail and NO objects SHOULD be deleted. Servers SHOULD implement this as an all-or-nothing operation to ensure data consistency, but MAY implement partial deletion with appropriate error reporting if transactional behavior is not feasible. + **Authentication**: GA4GH Passports can be provided in the request body for authorization. 
+ **Storage Data Deletion**: The `delete_storage_data` parameter controls whether the server will attempt to delete underlying storage files along with DRS metadata. This defaults to false for safety. Servers will make a best effort attempt to delete storage data, but success is not guaranteed. + **Server Responsibilities**: - SHOULD treat deletion as an atomic transaction (all succeed or all fail) - SHOULD validate ALL object IDs exist and are accessible before deleting ANY - SHOULD roll back any partial changes if any object fails deletion - SHOULD return 400 if any object ID is invalid or inaccessible when using transactional behavior + **Client Responsibilities**: - Provide valid object IDs for all objects to be deleted - Handle potential failure of entire batch if any single object cannot be deleted - Check service-info for `maxBulkDeleteLength` limits before making requests + operationId: bulkDeleteObjects + tags: + - Objects + requestBody: + $ref: '#/components/requestBodies/BulkDeleteBody' + responses: + '204': + $ref: '#/components/responses/204DeleteSuccess' + '400': + $ref: '#/components/responses/400BadRequestDelete' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403ForbiddenDelete' + '404': + $ref: '#/components/responses/404NotFoundDelete' + '413': + $ref: '#/components/responses/413RequestTooLarge' + '500': + $ref: '#/components/responses/500InternalServerError' + security: + - {} + - BasicAuth: [] + - BearerAuth: [] + - PassportAuth: [] + x-codegen-request-body-name: body + examples: + successful_bulk_delete: + summary: Successful bulk deletion + description: Complete example of successfully deleting multiple objects + value: + request: + method: POST + url: /objects/delete + headers: + Content-Type: application/json + body: + bulk_object_ids: + - drs_object_123456 + - drs_object_789012 + - drs_object_345678 + passports: + - 
eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + delete_storage_data: false + response: + status: 204 + headers: + Content-Length: '0' + failed_bulk_delete: + summary: Failed bulk deletion (transactional) + description: Complete example of bulk deletion failing due to one invalid object ID - no objects are deleted + value: + request: + method: POST + url: /objects/delete + headers: + Content-Type: application/json + body: + bulk_object_ids: + - drs_object_123456 + - nonexistent_object + - drs_object_345678 + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + delete_storage_data: false + response: + status: 404 + headers: + Content-Type: application/json + body: + msg: Object 'nonexistent_object' not found. No objects were deleted due to transactional behavior. + status_code: 404 + bulk_limit_exceeded: + summary: Bulk limit exceeded error + description: Complete example when bulk request exceeds server limits + value: + request: + method: POST + url: /objects/delete + headers: + Content-Type: application/json + body: + bulk_object_ids: + - obj1 + - obj2 + - obj3 + - '...150 objects total' + delete_storage_data: false + response: + status: 413 + headers: + Content-Type: application/json + body: + msg: Bulk delete request contains 150 objects but server maximum is 100. Check maxBulkDeleteLength in service-info. 
+ status_code: 413 + unsupported_storage_deletion: + summary: Unsupported storage deletion error + description: Complete example when client requests storage deletion but server doesn't support it + value: + request: + method: POST + url: /objects/delete + headers: + Content-Type: application/json + body: + bulk_object_ids: + - drs_object_123456 + - drs_object_789012 + delete_storage_data: true + response: + status: 400 + headers: + Content-Type: application/json + body: + msg: Server does not support storage data deletion. Set delete_storage_data to false or omit the parameter. + status_code: 400 + /objects: + options: + summary: Get Authorization info about multiple DrsObjects. + security: + - {} + description: Returns a structure that contains for each DrsObjects a list of `Authorizations` that can be used to determine how to authorize requests to `GetObject` or `PostObject` (or bulk equivalents). + operationId: OptionsBulkObject + responses: + '200': + $ref: '#/components/responses/200OkBulkAuthorizations' + '204': + $ref: '#/components/responses/AuthorizationsNotSupported' + '400': + $ref: '#/components/responses/400BadRequest' + '404': + $ref: '#/components/responses/404NotFoundDrsObject' + '405': + $ref: '#/components/responses/AuthorizationsNotSupported' + '413': + $ref: '#/components/responses/413RequestTooLarge' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Objects + x-swagger-router-controller: ga4gh.drs.server + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BulkObjectIdNoPassport' + post: + summary: Get info about multiple DrsObjects with an optional Passport(s). + description: |- + Returns an array of object metadata and access methods for the specified object IDs. + The request is limited to use passports (one or more) or a single bearer token, so make sure your bulk request is for objects that all use the same passports/token. 
+ **Note**: To register new DRS objects, use the dedicated `/objects/register` endpoint. + operationId: GetBulkObjects + security: + - PassportAuth: [] + parameters: + - $ref: '#/components/parameters/Expand' + responses: + '200': + $ref: '#/components/responses/200OkDrsObjects' + '202': + $ref: '#/components/responses/202Accepted' + '400': + $ref: '#/components/responses/400BadRequest' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403Forbidden' + '404': + $ref: '#/components/responses/404NotFoundDrsObject' + '413': + $ref: '#/components/responses/413RequestTooLarge' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Objects + x-swagger-router-controller: ga4gh.drs.server + requestBody: + $ref: '#/components/requestBodies/BulkObjectBody' + /objects/register: + post: + summary: Register DRS objects + description: "**Optional Endpoint**: This endpoint is not required for DRS server implementations. Not all DRS servers support object registration. \nRegisters one or more \"candidate\" DRS objects with the server. If it accepts the request, the server will create unique object IDs for each registered object and return them in fully-formed DRS objects in response.\nThis endpoint can be used after uploading files using methods negotiated with the `/upload-request` endpoint to register the uploaded files as DRS objects, or to register existinf data. The request body should contain candidate DRS objects with all required metadata including access methods that correspond to the upload methods used during file upload.\n**RECOMMENDED - Transactional Behavior**: Registration operations SHOULD be atomic transactions. If ANY candidate object fails validation or registration, the ENTIRE request SHOULD fail and NO objects SHOULD be registered. 
Servers SHOULD implement this as an all-or-nothing operation to ensure data consistency, but MAY implement partial registration with appropriate error reporting if transactional behavior is not feasible.\n**Authentication**: GA4GH Passports can be provided in the request body for authorization. Bearer tokens can be supplied in headers.\n**Server Responsibilities**: - SHOULD treat registration as an atomic transaction (all succeed or all fail) - SHOULD validate ALL candidate objects before registering ANY - Create unique object IDs for each registered object - Add timestamps (created_time, updated_time) - SHOULD roll back any partial changes if any candidate fails validation\n**Client Responsibilities**: - Provide required DRS object metadata for all candidates - Include access methods corresponding to uploaded file locations - Ensure checksums match uploaded file content - Handle potential failure of entire batch if any single object is invalid" + operationId: RegisterObjects + security: + - {} + - BasicAuth: [] + - BearerAuth: [] + - PassportAuth: [] + requestBody: + $ref: '#/components/requestBodies/RegisterObjectsBody' + responses: + '201': + $ref: '#/components/responses/201ObjectsCreated' + '400': + $ref: '#/components/responses/400BadRequest' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403Forbidden' + '413': + $ref: '#/components/responses/413RequestTooLarge' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Objects + x-swagger-router-controller: ga4gh.drs.server + x-codegen-request-body-name: body + /objects/{object_id}/access/{access_id}: + get: + summary: Get a URL for fetching bytes + description: |- + Returns a URL that can be used to fetch the bytes of a `DrsObject`. + This method only needs to be called when using an `AccessMethod` that contains an `access_id` (e.g., for servers that use signed URLs for fetching object bytes). 
+ operationId: GetAccessURL + responses: + '200': + $ref: '#/components/responses/200OkAccess' + '202': + $ref: '#/components/responses/202Accepted' + '400': + $ref: '#/components/responses/400BadRequest' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403Forbidden' + '404': + $ref: '#/components/responses/404NotFoundAccess' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Objects + x-swagger-router-controller: ga4gh.drs.server + parameters: + - $ref: '#/components/parameters/ObjectId' + - $ref: '#/components/parameters/AccessId' + post: + summary: Get a URL for fetching bytes through POST'ing a Passport + description: |- + Returns a URL that can be used to fetch the bytes of a `DrsObject`. + This method only needs to be called when using an `AccessMethod` that contains an `access_id` (e.g., for servers that use signed URLs for fetching object bytes). + Method is a POST to accommodate a JWT GA4GH Passport sent in the formData in order to authorize access. + operationId: PostAccessURL + security: + - PassportAuth: [] + responses: + '200': + $ref: '#/components/responses/200OkAccess' + '202': + $ref: '#/components/responses/202Accepted' + '400': + $ref: '#/components/responses/400BadRequest' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403Forbidden' + '404': + $ref: '#/components/responses/404NotFoundAccess' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Objects + x-swagger-router-controller: ga4gh.drs.server + parameters: + - $ref: '#/components/parameters/ObjectId' + - $ref: '#/components/parameters/AccessId' + requestBody: + $ref: '#/components/requestBodies/Passports' + /objects/access: + post: + summary: Get URLs for fetching bytes from multiple objects with an optional Passport(s). + description: |- + Returns an array of URL objects that can be used to fetch the bytes of multiple `DrsObject`s. 
+ This method only needs to be called when using an `AccessMethod` that contains an `access_id` (e.g., for servers that use signed URLs for fetching object bytes). + Currently this is limited to use passports (one or more) or a single bearer token, so make sure your bulk request is for objects that all use the same passports/token. + operationId: GetBulkAccessURL + security: + - PassportAuth: [] + responses: + '200': + $ref: '#/components/responses/200OkAccesses' + '202': + $ref: '#/components/responses/202Accepted' + '400': + $ref: '#/components/responses/400BadRequest' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403Forbidden' + '404': + $ref: '#/components/responses/404NotFoundAccess' + '413': + $ref: '#/components/responses/413RequestTooLarge' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Objects + x-swagger-router-controller: ga4gh.drs.server + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BulkObjectAccessId' + /objects/{object_id}/access-methods: + post: + summary: Update access methods for a DRS object + description: |- + **Optional Endpoint**: Not all DRS servers support access method updates. + Update the access methods for an existing DRS object. Only access methods are modified - core object metadata (size, checksums, name) remains unchanged. Servers MAY validate that new access methods point to the same data. + Note that existing access methods are overwritten, if clients want to add additional access methods they should first retrieve the current methods and include them along with the new methods in this request. + **Authentication**: GA4GH Passports can be provided in the request body. 
+ operationId: updateObjectAccessMethods + parameters: + - name: object_id + in: path + required: true + schema: + type: string + description: DRS object identifier + requestBody: + $ref: '#/components/requestBodies/AccessMethodUpdateBody' + responses: + '200': + $ref: '#/components/responses/200AccessMethodUpdate' + '400': + $ref: '#/components/responses/400BadRequest' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403Forbidden' + '404': + $ref: '#/components/responses/404NotFoundDrsObject' + '500': + $ref: '#/components/responses/500InternalServerError' + security: + - {} + - BasicAuth: [] + - BearerAuth: [] + - PassportAuth: [] + tags: + - Objects + /objects/checksum/{checksum}: + get: + summary: Get DRS objects that are a match for the checksum. + description: |- + Returns an array of `DRSObjects` that match a given checksum. + The checksum type is not provided, the checksum check is done against all checksum types. + operationId: GetObjectsByChecksum + security: + - PassportAuth: [] + parameters: + - $ref: '#/components/parameters/Checksum' + responses: + '200': + $ref: '#/components/responses/200OkDrsObjects' + '202': + $ref: '#/components/responses/202Accepted' + '400': + $ref: '#/components/responses/400BadRequest' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403Forbidden' + '404': + $ref: '#/components/responses/404NotFoundDrsObject' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Objects + x-swagger-router-controller: ga4gh.drs.server + /objects/access-methods: + post: + summary: Bulk update access methods for multiple DRS objects + description: |- + **Optional Endpoint**: Not all DRS servers support access method updates. + Update access methods for multiple DRS objects in a single atomic transaction. If ANY object fails to update, the ENTIRE request fails and NO objects are updated. 
Only access methods are modified - core object metadata remains unchanged. + Note that existing access methods are overwritten, if clients want to add additional access methods they should first retrieve the current methods and include them along with the new methods in this request. + **Authentication**: GA4GH Passports can be provided in the request body. + operationId: bulkUpdateAccessMethods + requestBody: + $ref: '#/components/requestBodies/BulkAccessMethodUpdateBody' + responses: + '200': + $ref: '#/components/responses/200BulkAccessMethodUpdate' + '400': + $ref: '#/components/responses/400BadRequest' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403Forbidden' + '404': + $ref: '#/components/responses/404NotFoundDrsObject' + '413': + $ref: '#/components/responses/413RequestTooLarge' + '500': + $ref: '#/components/responses/500InternalServerError' + security: + - {} + - BasicAuth: [] + - BearerAuth: [] + - PassportAuth: [] + tags: + - Objects + /upload-request: + post: + summary: Request upload methods for files + description: "**Optional Endpoint**: This endpoint is not required for DRS server implementations. Not all DRS servers support upload functionality. \nRequest upload method details and temporary credentials for uploading one or more files to an underlying storage service. This endpoint allows clients to obtain the necessary information to upload files before they are registered as DRS objects.\n**Discovery**: Before using this endpoint, clients should check the `/service-info` endpoint to determine if upload operations are supported. Look for `drs.uploadRequestSupported: true` and `drs.supportedUploadMethods` to understand which upload methods are available. Also check `drs.maxUploadSize` and `drs.maxUploadRequestLength` for server limits.\n**Usage Flow:**\n1. 
**Discovery**: Client checks `/service-info` endpoint to confirm upload support (`drs.uploadRequestSupported: true`) and available methods (`drs.supportedUploadMethods`)\n2. Client sends an upload request with file metadata (name, size, checksums, MIME type)\n3. Server responds with available upload methods (S3, HTTPS, Google Cloud Storage, etc.) and temporary credentials\n4. Client selects one or more upload methods from the response and uses the corresponding credentials to upload the file to the storage service\n5. Once uploaded, the client registers the files as DRS objects including access methods that correspond to the upload methods used with a POST request to `/objects/register`, the server will return fully formed DRS objects with server minted unique IDs.\n6. The registered DRS object becomes accessible through standard DRS API endpoints\n\n**Authentication:**\nThe endpoint supports multiple authentication methods including GA4GH Passport tokens sent in the request body. Passport tokens enable fine-grained authorization based on data access policies.\n**Upload Methods**: Response may include multiple options (s3, https, gs, ftp/sftp) for flexibility. Note that servers may return a subset of their advertised `supportedUploadMethods` based on file-specific factors such as file type, size, or server policies.\n**File Integrity**: All requests must include at least one checksum per file (SHA-256, MD5, or other IANA-registered algorithms).\n**Server Validation**: Servers MAY validate checksums/sizes but are not required to. Check service-info for validation behavior. Servers do not validate MIME types against actual file content - clients are responsible for providing accurate MIME type information." 
+ operationId: PostUploadRequest + security: + - {} + - BasicAuth: [] + - BearerAuth: [] + - PassportAuth: [] + requestBody: + $ref: '#/components/requestBodies/UploadRequestBody' + responses: + '200': + $ref: '#/components/responses/200UploadRequest' + '400': + $ref: '#/components/responses/400BadRequest' + '401': + $ref: '#/components/responses/401Unauthorized' + '403': + $ref: '#/components/responses/403Forbidden' + '500': + $ref: '#/components/responses/500InternalServerError' + tags: + - Upload Request +components: + securitySchemes: + BasicAuth: + type: http + scheme: basic + description: | + A valid authorization token must be passed in the 'Authorization' header, + e.g. "Basic ${token_string}" + BearerAuth: + type: http + scheme: bearer + description: A valid authorization token must be passed in the 'Authorization' header, e.g. "Bearer ${token_string}" + PassportAuth: + type: http + scheme: bearer + x-in: body + bearerFormat: JWT + description: A valid GA4GH Passport must be passed in the body of an HTTP POST request as a tokens[] array. + schemas: + ServiceType: + description: Type of a GA4GH service + type: object + required: + - group + - artifact + - version + properties: + group: + type: string + description: Namespace in reverse domain name format. Use `org.ga4gh` for implementations compliant with official GA4GH specifications. For services with custom APIs not standardized by GA4GH, or implementations diverging from official GA4GH specifications, use a different namespace (e.g. your organization's reverse domain name). + example: org.ga4gh + artifact: + type: string + description: Name of the API or GA4GH specification implemented. Official GA4GH types should be assigned as part of standards approval process. Custom artifacts are supported. + example: beacon + version: + type: string + description: Version of the API or specification. GA4GH specifications use semantic versioning. 
+ example: 1.0.0 + Service: + description: GA4GH service + type: object + required: + - id + - name + - type + - organization + - version + properties: + id: + type: string + description: Unique ID of this service. Reverse domain name notation is recommended, though not required. The identifier should attempt to be globally unique so it can be used in downstream aggregator services e.g. Service Registry. + example: org.ga4gh.myservice + name: + type: string + description: Name of this service. Should be human readable. + example: My project + type: + $ref: '#/components/schemas/ServiceType' + description: + type: string + description: Description of the service. Should be human readable and provide information about the service. + example: This service provides... + organization: + type: object + description: Organization providing the service + required: + - name + - url + properties: + name: + type: string + description: Name of the organization responsible for the service + example: My organization + url: + type: string + format: uri + description: URL of the website of the organization (RFC 3986 format) + example: https://example.com + contactUrl: + type: string + format: uri + description: URL of the contact for the provider of this service, e.g. a link to a contact form (RFC 3986 format), or an email (RFC 2368 format). + example: mailto:support@example.com + documentationUrl: + type: string + format: uri + description: URL of the documentation of this service (RFC 3986 format). This should help someone learn how to use your service, including any specifics required to access data, e.g. authentication. 
+ example: https://docs.myservice.example.com + createdAt: + type: string + format: date-time + description: Timestamp describing when the service was first deployed and available (RFC 3339 format) + example: '2019-06-04T12:58:19Z' + updatedAt: + type: string + format: date-time + description: Timestamp describing when the service was last updated (RFC 3339 format) + example: '2019-06-04T12:58:19Z' + environment: + type: string + description: Environment the service is running in. Use this to distinguish between production, development and testing/staging deployments. Suggested values are prod, test, dev, staging. However this is advised and not enforced. + example: test + version: + type: string + description: Version of the service being described. Semantic versioning is recommended, but other identifiers, such as dates or commit hashes, are also allowed. The version should be changed whenever the service is updated. + example: 1.0.0 + DrsService: + type: object + required: + - type + - maxBulkRequestLength + properties: + maxBulkRequestLength: + type: integer + description: DEPRECATED - In 2.0 this will move to under the drs section of service info and not at the root level. The max length the bulk request endpoints can handle (>= 1) before generating a 413 error e.g. how long can the arrays bulk_object_ids and bulk_object_access_ids be for this server. + type: + type: object + required: + - artifact + properties: + artifact: + type: string + enum: + - drs + example: drs + drs: + type: object + required: + - maxBulkRequestLength + properties: + maxBulkRequestLength: + type: integer + description: The max length the bulk request endpoints can handle (>= 1) before generating a 413 error e.g. how long can the arrays bulk_object_ids and bulk_object_access_ids be for this server. + objectCount: + type: integer + description: The total number of objects in this DRS service. 
+ totalObjectSize: + type: integer + description: The total size of all objects in this DRS service in bytes. As a general best practice, file bytes are counted for each unique file and not cloud mirrors or other redundant copies. + uploadRequestSupported: + type: boolean + description: Indicates whether this DRS server supports upload request operations via the `/upload-request` endpoint. If true, clients can request upload methods and credentials for uploading files. If false or missing, the server does not support upload request coordination. + default: false + objectRegistrationSupported: + type: boolean + description: Indicates whether this DRS server supports object registration operations via the `/objects/register` endpoint. If true, clients can register uploaded files or existing data as DRS objects. If false or missing, the server does not support object registration. + default: false + supportedUploadMethods: + type: array + items: + type: string + enum: + - s3 + - gs + - https + - ftp + - sftp + description: |- + List of upload methods supported by this DRS server. Only present when uploadRequestSupported is true. Clients can use this information to determine which upload methods are available before making upload requests. + - **s3**: Direct S3 upload with temporary AWS credentials - **gs**: Google Cloud Storage upload with access tokens - **https**: Presigned POST URL for HTTP uploads - **ftp**: File Transfer Protocol uploads - **sftp**: Secure File Transfer Protocol uploads - **gsiftp**: GridFTP secure file transfer - **globus**: Globus transfer service for high-performance data movement + maxUploadSize: + type: integer + format: int64 + description: Maximum file size in bytes that can be uploaded via the upload endpoints. Only present when uploadRequestSupported is true. If not specified, there is no explicit size limit. + maxUploadRequestLength: + type: integer + description: Maximum number of files that can be included in a single upload request. 
Only present when uploadRequestSupported is true. If not specified, defaults to the same value as maxBulkRequestLength. + maxRegisterRequestLength: + type: integer + description: Maximum number of candidate objects that can be included in a single registration request. Only present when objectRegistrationSupported is true. If not specified, defaults to the same value as maxBulkRequestLength. + validateUploadChecksums: + type: boolean + description: Indicates whether this DRS server validates uploaded file checksums against the provided metadata. If true, the server will verify that uploaded files match their declared checksums and may reject uploads with mismatches. If false or missing, the server does not perform checksum validation and relies on client-provided metadata. Only present when uploadRequestSupported or objectRegistrationSupported is true. + default: false + validateUploadFileSizes: + type: boolean + description: Indicates whether this DRS server validates uploaded file sizes against the provided metadata. If true, the server will verify that uploaded files match their declared sizes and may reject uploads with mismatches. If false or missing, the server does not perform file size validation and relies on client-provided metadata. Only present when uploadRequestSupported or objectRegistrationSupported is true. + default: false + relatedFileStorageSupported: + type: boolean + description: Indicates whether this DRS server supports storing files from the same upload request under a common prefix or folder structure. If true, the server will organize related files together in storage, enabling bioinformatics workflows that expect co-located files (e.g., CRAM + CRAI, VCF + TBI). If false or missing, the server may distribute files across different storage locations or prefixes. Only present when uploadRequestSupported is true. This feature is particularly valuable for genomics tools like samtools that expect index files to be co-located with data files. 
+ default: false + deleteSupported: + type: boolean + description: Indicates whether this DRS server supports delete operations via the delete endpoints. If true, clients can delete DRS objects using POST requests to `/objects/{object_id}/delete` and `/objects/delete`. If false or missing, the server does not support delete operations and will return 404 for delete endpoint requests. Like upload functionality, delete support is entirely optional and servers remain DRS compliant without it. + default: false + maxBulkDeleteLength: + type: integer + description: Maximum number of objects that can be deleted in a single bulk delete request via `/objects/delete`. Only present when deleteSupported is true. If not specified when delete is supported, defaults to the same value as maxBulkRequestLength. Servers may enforce lower limits for delete operations compared to other bulk operations for safety reasons. + deleteStorageDataSupported: + type: boolean + description: 'Indicates whether this DRS server supports attempting to delete underlying storage data when clients request it. If true, the server will attempt to delete both metadata and storage files when `delete_storage_data: true` is specified in delete requests. If false or missing, the server only supports metadata deletion regardless of client request, preserving underlying storage data. Only present when deleteSupported is true. This is a capability flag indicating what the server can attempt, not a default behavior setting. Note: Storage deletion attempts may fail due to permissions, network issues, or storage service errors.' + default: false + accessMethodUpdateSupported: + type: boolean + description: Indicates whether this DRS server supports updating access methods for existing objects. If true, clients can update access methods using `/objects/{object_id}/access-methods` and `/objects/access-methods` endpoints. If false or missing, the server does not support access method updates. 
+ default: false + maxBulkAccessMethodUpdateLength: + type: integer + description: Maximum number of objects that can be updated in a single bulk access method update request. Only present when accessMethodUpdateSupported is true. If not specified, defaults to maxBulkRequestLength. + validateAccessMethodUpdates: + type: boolean + description: Indicates whether this DRS server validates new access methods by verifying they point to the same data. If true, the server will attempt to verify checksums/content before updating access methods. If false or missing, the server trusts client-provided access methods without validation. Only present when accessMethodUpdateSupported is true. + default: false + Error: + type: object + description: An object that can optionally include information about the error. + properties: + msg: + type: string + description: A detailed error message. + status_code: + type: integer + description: The integer representing the HTTP status code (e.g. 200, 404). + Checksum: + type: object + required: + - checksum + - type + properties: + checksum: + type: string + description: The hex-string encoded checksum for the data + type: + type: string + description: |- + The digest method used to create the checksum. + The value (e.g. `sha-256`) SHOULD be listed as `Hash Name String` in the https://www.iana.org/assignments/named-information/named-information.xhtml#hash-alg[IANA Named Information Hash Algorithm Registry]. Other values MAY be used, as long as implementors are aware of the issues discussed in https://tools.ietf.org/html/rfc6920#section-9.4[RFC6920]. + GA4GH may provide more explicit guidance for use of non-IANA-registered algorithms in the future. Until then, if implementers do choose such an algorithm (e.g. because it's implemented by their storage provider), they SHOULD use an existing standard `type` value such as `md5`, `etag`, `crc32c`, `trunc512`, or `sha1`. 
+ example: sha-256 + AccessURL: + type: object + required: + - url + properties: + url: + type: string + description: A fully resolvable URL that can be used to fetch the actual object bytes. + headers: + type: array + items: + type: string + description: An optional list of headers to include in the HTTP request to `url`. These headers can be used to provide auth tokens required to fetch the object bytes. + example: 'Authorization: Basic Z2E0Z2g6ZHJz' + Authorizations: + type: object + properties: + drs_object_id: + type: string + supported_types: + type: array + items: + type: string + enum: + - None + - BasicAuth + - BearerAuth + - PassportAuth + description: An Optional list of support authorization types. More than one can be supported and tried in sequence. Defaults to `None` if empty or missing. + passport_auth_issuers: + type: array + items: + type: string + description: If authorizations contain `PassportAuth` this is a required list of visa issuers (as found in a visa's `iss` claim) that may authorize access to this object. The caller must only provide passports that contain visas from this list. It is strongly recommended that the caller validate that it is appropriate to send the requested passport/visa to the DRS server to mitigate attacks by malicious DRS servers requesting credentials they should not have. + bearer_auth_issuers: + type: array + items: + type: string + description: If authorizations contain `BearerAuth` this is an optional list of issuers that may authorize access to this object. The caller must provide a token from one of these issuers. If this is empty or missing it assumed the caller knows which token to send via other means. It is strongly recommended that the caller validate that it is appropriate to send the requested token to the DRS server to mitigate attacks by malicious DRS servers requesting credentials they should not have. 
+ AccessMethod: + type: object + required: + - type + properties: + type: + type: string + enum: + - s3 + - gs + - ftp + - gsiftp + - globus + - htsget + - https + - file + description: Type of the access method. + access_url: + allOf: + - $ref: '#/components/schemas/AccessURL' + - description: An `AccessURL` that can be used to fetch the actual object bytes. Note that at least one of `access_url` and `access_id` must be provided. + type: object + required: [url] + properties: + url: + type: string + description: "A fully resolvable URL that can be used to fetch the actual object bytes." + headers: + type: array + items: + type: string + description: "GA4GH-compatible list of HTTP headers." + access_id: + type: string + description: An arbitrary string to be passed to the `/access` method to get an `AccessURL`. This string must be unique within the scope of a single object. Note that at least one of `access_url` and `access_id` must be provided. + cloud: + type: string + description: Name of the cloud service provider that the object belongs to. If the cloud service is Amazon Web Services, Google Cloud Platform or Azure the values should be `aws`, `gcp`, or `azure` respectively. + example: aws, gcp, or azure + region: + type: string + description: Name of the region in the cloud service provider that the object belongs to. + example: us-east-1 + available: + type: boolean + description: Availability of file in the cloud. This label defines if this file is immediately accessible via DRS. Any delay or requirement of thawing mechanism if the file is in offline/archival storage is classified as false, meaning it is unavailable. + example: true + authorizations: + allOf: + - $ref: '#/components/schemas/Authorizations' + - description: When `access_id` is provided, `authorizations` provides information about how to authorize the `/access` method. 
+ type: object + properties: + drs_object_id: + type: string + supported_types: + type: array + items: + type: string + passport_auth_issuers: + type: array + items: + type: string + bearer_auth_issuers: + type: array + items: + type: string + ContentsObject: + type: object + required: + - name + properties: + name: + type: string + description: A name declared by the bundle author that must be used when materialising this object, overriding any name directly associated with the object itself. The name must be unique within the containing bundle. This string is made up of uppercase and lowercase letters, decimal digits, hyphen, period, and underscore [A-Za-z0-9.-_]. See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282[portable filenames]. + id: + type: string + description: A DRS identifier of a `DrsObject` (either a single blob or a nested bundle). If this ContentsObject is an object within a nested bundle, then the id is optional. Otherwise, the id is required. + drs_uri: + type: array + description: A list of full DRS identifier URI paths that may be used to obtain the object. These URIs may be external to this DRS instance. + example: drs://drs.example.org/314159 + items: + type: string + contents: + type: array + description: If this ContentsObject describes a nested bundle and the caller specified "?expand=true" on the request, then this contents array must be present and describe the objects within the nested bundle. + items: + $ref: '#/components/schemas/ContentsObject' + DrsObject: + type: object + required: + - id + - self_uri + - size + - created_time + - checksums + properties: + id: + type: string + description: An identifier unique to this `DrsObject` + name: + type: string + description: |- + A string that can be used to name a `DrsObject`. + This string is made up of uppercase and lowercase letters, decimal digits, hyphen, period, and underscore [A-Za-z0-9.-_]. 
See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282[portable filenames]. + self_uri: + type: string + description: |- + A drs:// hostname-based URI, as defined in the DRS documentation, that tells clients how to access this object. + The intent of this field is to make DRS objects self-contained, and therefore easier for clients to store and pass around. For example, if you arrive at this DRS JSON by resolving a compact identifier-based DRS URI, the `self_uri` presents you with a hostname and properly encoded DRS ID for use in subsequent `access` endpoint calls. + example: drs://drs.example.org/314159 + size: + type: integer + format: int64 + description: |- + For blobs, the blob size in bytes. + For bundles, the cumulative size, in bytes, of items in the `contents` field. + created_time: + type: string + format: date-time + description: |- + Timestamp of content creation in RFC3339. + (This is the creation time of the underlying content, not of the JSON object.) + updated_time: + type: string + format: date-time + description: Timestamp of content update in RFC3339, identical to `created_time` in systems that do not support updates. (This is the update time of the underlying content, not of the JSON object.) + version: + type: string + description: |- + A string representing a version. + (Some systems may use checksum, a RFC3339 timestamp, or an incrementing version number.) + mime_type: + type: string + description: A string providing the mime-type of the `DrsObject`. + example: application/json + checksums: + type: array + minItems: 1 + items: + $ref: '#/components/schemas/Checksum' + description: |- + The checksum of the `DrsObject`. At least one checksum must be provided. + For blobs, the checksum is computed over the bytes in the blob. + For bundles, the checksum is computed over a sorted concatenation of the checksums of its top-level contained objects (not recursive, names not included). 
The list of checksums is sorted alphabetically (hex-code) before concatenation and a further checksum is performed on the concatenated checksum value. + For example, if a bundle contains blobs with the following checksums: + md5(blob1) = 72794b6d + md5(blob2) = 5e089d29 + Then the checksum of the bundle is: + md5( concat( sort( md5(blob1), md5(blob2) ) ) ) + = md5( concat( sort( 72794b6d, 5e089d29 ) ) ) + = md5( concat( 5e089d29, 72794b6d ) ) + = md5( 5e089d2972794b6d ) + = f7a29a04 + access_methods: + type: array + minItems: 1 + items: + $ref: '#/components/schemas/AccessMethod' + description: |- + The list of access methods that can be used to fetch the `DrsObject`. + Required for single blobs; optional for bundles. + contents: + type: array + description: |- + If not set, this `DrsObject` is a single blob. + If set, this `DrsObject` is a bundle containing the listed `ContentsObject` s (some of which may be further nested). + items: + $ref: '#/components/schemas/ContentsObject' + description: + type: string + description: A human readable description of the `DrsObject`. + aliases: + type: array + items: + type: string + description: A list of strings that can be used to find other metadata about this `DrsObject` from external metadata sources. These aliases can be used to represent secondary accession numbers or external GUIDs. + DeleteRequest: + type: object + description: Request body for single object delete operations + properties: + passports: + type: array + items: + type: string + example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + description: the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. + delete_storage_data: + type: boolean + default: false + description: If true, delete both DRS object metadata and underlying storage data (follows server's deleteStorageDataSupported capability). 
If false (default), only delete DRS object metadata while preserving underlying storage data. Clients must explicitly set this to true to enable storage data deletion, ensuring intentional choice for this potentially destructive operation. + BulkDeleteRequest: + type: object + description: Request body for bulk delete operations + required: + - bulk_object_ids + properties: + bulk_object_ids: + type: array + items: + type: string + description: Array of DRS object IDs to delete + example: + - drs_object_123456 + - drs_object_789012 + - drs_object_345678 + passports: + type: array + items: + type: string + example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + description: the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. + delete_storage_data: + type: boolean + default: false + description: If true, delete both DRS object metadata and underlying storage data (follows server's deleteStorageDataSupported capability). If false (default), only delete DRS object metadata while preserving underlying storage data. Clients must explicitly set this to true to enable storage data deletion, ensuring intentional choice for this potentially destructive operation. + summary: + type: object + description: A summary of what was resolved. + properties: + requested: + type: integer + description: Number of items requested. + resolved: + type: integer + description: Number of objects resolved. + unresolved: + type: integer + description: Number of objects not resolved. + unresolved: + type: array + description: Error codes for each unresolved drs objects. 
+ items: + type: object + properties: + error_code: + type: integer + object_ids: + type: array + items: + type: string + BulkObjectIdNoPassport: + type: object + description: The object that contains the DRS object IDs array + properties: + bulk_object_ids: + type: array + items: + type: string + description: An array of ObjectIDs. + DrsObjectCandidate: + type: object + required: + - size + - checksums + properties: + name: + type: string + description: |- + A string that can be used to name a `DrsObject`. + This string is made up of uppercase and lowercase letters, decimal digits, hyphen, period, and underscore [A-Za-z0-9.-_]. See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282[portable filenames]. + size: + type: integer + format: int64 + description: |- + For blobs, the blob size in bytes. + For bundles, the cumulative size, in bytes, of items in the `contents` field. + version: + type: string + description: |- + A string representing a version. + (Some systems may use checksum, a RFC3339 timestamp, or an incrementing version number.) + mime_type: + type: string + description: A string providing the mime-type of the `DrsObject`. + example: application/json + checksums: + type: array + minItems: 1 + items: + $ref: '#/components/schemas/Checksum' + description: |- + The checksum of the `DrsObject`. At least one checksum must be provided. + For blobs, the checksum is computed over the bytes in the blob. + For bundles, the checksum is computed over a sorted concatenation of the checksums of its top-level contained objects (not recursive, names not included). The list of checksums is sorted alphabetically (hex-code) before concatenation and a further checksum is performed on the concatenated checksum value. 
+ For example, if a bundle contains blobs with the following checksums: + md5(blob1) = 72794b6d + md5(blob2) = 5e089d29 + Then the checksum of the bundle is: + md5( concat( sort( md5(blob1), md5(blob2) ) ) ) + = md5( concat( sort( 72794b6d, 5e089d29 ) ) ) + = md5( concat( 5e089d29, 72794b6d ) ) + = md5( 5e089d2972794b6d ) + = f7a29a04 + access_methods: + type: array + minItems: 1 + items: + $ref: '#/components/schemas/AccessMethod' + description: |- + The list of access methods that can be used to fetch the `DrsObject`. + Required for single blobs; optional for bundles. + contents: + type: array + description: |- + If not set, this `DrsObject` is a single blob. + If set, this `DrsObject` is a bundle containing the listed `ContentsObject` s (some of which may be further nested). + items: + $ref: '#/components/schemas/ContentsObject' + description: + type: string + description: A human readable description of the `DrsObject`. + aliases: + type: array + items: + type: string + description: A list of strings that can be used to find other metadata about this `DrsObject` from external metadata sources. These aliases can be used to represent secondary accession numbers or external GUIDs. + BulkObjectAccessId: + type: object + description: The object that contains object_id/access_id tuples + properties: + passports: + type: array + items: + type: string + bulk_object_access_ids: + type: array + items: + type: object + properties: + bulk_object_id: + type: string + description: DRS object ID + bulk_access_ids: + type: array + description: DRS object access ID + items: + type: string + BulkAccessURL: + type: object + required: + - url + properties: + drs_object_id: + type: string + drs_access_id: + type: string + url: + type: string + description: A fully resolvable URL that can be used to fetch the actual object bytes. + headers: + type: array + items: + type: string + description: An optional list of headers to include in the HTTP request to `url`. 
These headers can be used to provide auth tokens required to fetch the object bytes. + example: 'Authorization: Basic Z2E0Z2g6ZHJz' + AccessMethodUpdateRequest: + type: object + required: + - access_methods + properties: + access_methods: + type: array + items: + $ref: '#/components/schemas/AccessMethod' + minItems: 1 + description: New access methods for the DRS object + passports: + type: array + items: + type: string + description: Optional GA4GH Passport JWTs for authorization + BulkAccessMethodUpdateRequest: + type: object + required: + - updates + properties: + updates: + type: array + items: + type: object + required: + - object_id + - access_methods + properties: + object_id: + type: string + description: DRS object ID to update + access_methods: + type: array + items: + $ref: '#/components/schemas/AccessMethod' + minItems: 1 + description: New access methods for this object + minItems: 1 + description: Array of access method updates to perform + passports: + type: array + items: + type: string + description: Optional GA4GH Passport JWTs for authorization + UploadRequestObject: + type: object + required: + - name + - size + - mime_type + - checksums + properties: + name: + type: string + description: The name of the file to upload + size: + type: integer + format: int64 + description: Size of the file in bytes + mime_type: + type: string + description: MIME type of the file + checksums: + type: array + items: + $ref: '#/components/schemas/Checksum' + minItems: 1 + description: Array of checksums for file integrity verification + description: + type: string + description: Optional description of the file + aliases: + type: array + items: + type: string + description: Optional array of alternative names for the file + UploadRequest: + type: object + required: + - requests + properties: + requests: + type: array + items: + $ref: '#/components/schemas/UploadRequestObject' + minItems: 1 + description: Array of upload requests for files + passports: + type: array 
+ items: + type: string + description: Optional array of GA4GH Passport JWTs for authorization + UploadMethod: + type: object + required: + - type + - access_url + properties: + type: + type: string + enum: + - s3 + - gs + - https + - ftp + - sftp + - gsiftp + - globus + description: |- + Type of upload method. Implementations MAY support any subset of these types. + The 'https' type can be used to return a presigned POST URL and is expected to be the most common implementation for typical file uploads. This method provides a simple HTTP POST interface that works with standard web clients. + The 's3' type is primarily intended to support uploads of large files that want to take advantage of multipart uploads and automatic retries implemented in AWS libraries. This method provides direct access to S3-specific upload capabilities. + Other common implementations include 'gs' for Google Cloud Storage and 'sftp' for secure FTP uploads. + access_url: + allOf: + - $ref: '#/components/schemas/AccessURL' + - description: An `AccessURL` that specifies where the file will be accessible after upload. This URL will be used as the access_url in the eventual DRS object, ensuring consistency between upload and retrieval operations. + type: object + required: [url] + properties: + url: + type: string + description: "Inlined Upload URL context." + headers: + type: array + items: + type: string + description: "Inlined Upload Headers." + region: + type: string + description: Cloud region for the upload location. Optional for non-cloud storage types. + example: us-east-1 + upload_details: + type: object + additionalProperties: true + description: A dictionary of upload-specific configuration details that vary by upload method type. The contents and structure depend on the specific upload method being used. 
+ UploadResponseObject: + type: object + required: + - name + - size + - mime_type + - checksums + properties: + name: + type: string + description: The name of the file + size: + type: integer + format: int64 + description: Size of the file in bytes + mime_type: + type: string + description: MIME type of the file + checksums: + type: array + items: + $ref: '#/components/schemas/Checksum' + minItems: 1 + description: Array of checksums for file integrity verification + description: + type: string + description: Optional description of the file + aliases: + type: array + items: + type: string + description: Optional array of alternative names + upload_methods: + type: array + items: + $ref: '#/components/schemas/UploadMethod' + description: Available methods for uploading this file + UploadResponse: + type: object + required: + - responses + properties: + responses: + type: array + items: + $ref: '#/components/schemas/UploadResponseObject' + description: List of upload responses for the requested files + responses: + 200ServiceInfo: + description: Retrieve info about the DRS service + content: + application/json: + schema: + allOf: + - $ref: '#/components/schemas/Service' + - $ref: '#/components/schemas/DrsService' + 500InternalServerError: + description: An unexpected error occurred. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + 200OkDrsObject: + description: The `DrsObject` was found successfully + content: + application/json: + schema: + $ref: '#/components/schemas/DrsObject' + 202Accepted: + description: | + The operation is delayed and will continue asynchronously. The client should retry this same request after the delay specified by Retry-After header. + headers: + Retry-After: + description: | + Delay in seconds. The client should retry this same request after waiting for this duration. To simplify client response processing, this must be an integral relative time in seconds. 
This value SHOULD represent the minimum duration the client should wait before attempting the operation again with a reasonable expectation of success. When it is not feasible for the server to determine the actual expected delay, the server may return a brief, fixed value instead. + schema: + type: integer + format: int64 + 400BadRequest: + description: The request is malformed. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + 401Unauthorized: + description: The request is unauthorized. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + 403Forbidden: + description: The requester is not authorized to perform this action. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + 404NotFoundDrsObject: + description: The requested `DrsObject` wasn't found. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + 404NotFoundAccess: + description: The requested `AccessURL` wasn't found. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + 200OkAuthorizations: + description: '`Authorizations` were found successfully' + content: + application/json: + schema: + $ref: '#/components/schemas/Authorizations' + AuthorizationsNotSupported: + description: '`Authorizations` are not supported for this object. Default to `None`.' + 204DeleteSuccess: + description: All DRS objects were successfully deleted. For bulk operations, this indicates that the entire atomic transaction completed successfully - all requested objects have been deleted. Storage data deletion (if requested) was attempted but success is not guaranteed. + 400BadRequestDelete: + description: 'The delete request is malformed or contains unsupported parameters (e.g., delete_storage_data: true when server doesn''t support storage data deletion).' 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + unsupported_storage_deletion: + summary: Storage data deletion not supported + description: Client requested storage data deletion but server doesn't support it + value: + msg: Server does not support storage data deletion. Set delete_storage_data to false or omit the parameter. + status_code: 400 + invalid_request_format: + summary: Malformed request body + description: Request body contains invalid JSON or missing required fields + value: + msg: 'Invalid request body: bulk_object_ids is required for bulk delete operations' + status_code: 400 + empty_object_list: + summary: Empty object ID list + description: Bulk delete request with empty object ID array + value: + msg: bulk_object_ids cannot be empty + status_code: 400 + 403ForbiddenDelete: + description: The client is not authorized to delete the requested DRS object. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + insufficient_permissions: + summary: Insufficient delete permissions + description: Client lacks permission to delete the specified object + value: + msg: Client lacks delete permission for object drs_object_123456 + status_code: 403 + invalid_passport: + summary: Invalid GA4GH Passport + description: Provided GA4GH Passport is invalid or expired + value: + msg: Invalid or expired GA4GH Passport provided + status_code: 403 + missing_visa: + summary: Missing required visa + description: GA4GH Passport lacks required visa for delete operation + value: + msg: GA4GH Passport does not contain required visa for delete operation on this object + status_code: 403 + 404NotFoundDelete: + description: The requested DRS object for deletion wasn't found, or delete endpoints are not supported by this server. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + object_not_found: + summary: DRS object not found + description: The specified DRS object does not exist + value: + msg: DRS object drs_object_123456 does not exist + status_code: 404 + delete_not_supported: + summary: Delete operations not supported + description: This server does not support delete operations + value: + msg: Delete operations are not supported by this server + status_code: 404 + endpoint_not_found: + summary: Delete endpoint not available + description: Delete endpoints are not implemented on this server + value: + msg: The requested endpoint /objects/delete is not available on this server + status_code: 404 + 413RequestTooLarge: + description: The bulk request is too large. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + examples: + bulk_limit_exceeded: + summary: Bulk delete limit exceeded + description: Request contains more objects than server's maximum bulk delete limit + value: + msg: Bulk delete request contains 150 objects but server maximum is 100. Check maxBulkDeleteLength in service-info. 
+ status_code: 413 + request_size_too_large: + summary: Request payload too large + description: The overall request payload exceeds server limits + value: + msg: Request payload size exceeds server limit of 1MB + status_code: 413 + 200OkDrsObjects: + description: The `DrsObjects` were found successfully + content: + application/json: + schema: + type: object + properties: + summary: + $ref: '#/components/schemas/summary' + unresolved_drs_objects: + $ref: '#/components/schemas/unresolved' + resolved_drs_object: + type: array + items: + $ref: '#/components/schemas/DrsObject' + 200OkBulkAuthorizations: + description: '`Authorizations` were found successfully' + content: + application/json: + schema: + type: object + properties: + summary: + $ref: '#/components/schemas/summary' + unresolved_drs_objects: + $ref: '#/components/schemas/unresolved' + resolved_drs_object: + type: array + items: + $ref: '#/components/schemas/Authorizations' + 201ObjectsCreated: + description: DRS objects were successfully registered as an atomic transaction. Returns the complete DRS objects with server-minted IDs and timestamps. All candidate objects were validated and registered together - if any had failed, none would have been registered. 
+ content: + application/json: + schema: + type: object + required: + - objects + properties: + objects: + type: array + items: + $ref: '#/components/schemas/DrsObject' + description: Array of registered DRS objects in the same order as the candidates in the request + examples: + single_object_created: + summary: Single object registered + description: Response after registering one DRS object + value: + objects: + - id: drs_obj_a1b2c3d4e5f6 + self_uri: drs://drs.example.org/drs_obj_a1b2c3d4e5f6 + name: sample_data.vcf + size: 1048576 + mime_type: text/plain + created_time: '2024-01-15T10:30:00Z' + updated_time: '2024-01-15T10:30:00Z' + version: '1.0' + checksums: + - checksum: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + type: sha-256 + access_methods: + - type: s3 + access_url: + url: s3://my-bucket/uploads/sample_data.vcf + description: Variant call format file for sample analysis + multiple_objects_created: + summary: Multiple objects registered + description: Response after registering multiple DRS objects + value: + objects: + - id: drs_obj_a1b2c3d4e5f6 + self_uri: drs://drs.example.org/drs_obj_a1b2c3d4e5f6 + name: genome_assembly.fasta + size: 3221225472 + mime_type: text/plain + created_time: '2024-01-15T09:00:00Z' + updated_time: '2024-01-15T09:00:00Z' + version: '1.0' + checksums: + - checksum: a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3 + type: sha-256 + access_methods: + - type: s3 + access_url: + url: s3://genomics-bucket/assemblies/hg38.fasta + description: Human genome reference assembly + - id: drs_obj_f6e5d4c3b2a1 + self_uri: drs://drs.example.org/drs_obj_f6e5d4c3b2a1 + name: annotations.gff3 + size: 524288000 + mime_type: text/plain + created_time: '2024-01-15T09:15:00Z' + updated_time: '2024-01-15T09:15:00Z' + version: '1.0' + checksums: + - checksum: b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9 + type: sha-256 + access_methods: + - type: https + access_url: + url: 
https://data.example.org/files/annotations.gff3 + description: Gene annotations in GFF3 format + 200OkAccess: + description: The `AccessURL` was found successfully + content: + application/json: + schema: + $ref: '#/components/schemas/AccessURL' + 200OkAccesses: + description: The `AccessURL` was found successfully + content: + application/json: + schema: + type: object + properties: + summary: + $ref: '#/components/schemas/summary' + unresolved_drs_objects: + $ref: '#/components/schemas/unresolved' + resolved_drs_object_access_urls: + type: array + items: + $ref: '#/components/schemas/BulkAccessURL' + 200AccessMethodUpdate: + description: Access methods successfully updated. Returns the updated DRS object with new access methods and updated timestamp. + content: + application/json: + schema: + $ref: '#/components/schemas/DrsObject' + 200BulkAccessMethodUpdate: + description: Access methods successfully updated for all objects. Returns updated DRS objects with new access methods and updated timestamps. + content: + application/json: + schema: + type: object + required: + - objects + properties: + objects: + type: array + items: + $ref: '#/components/schemas/DrsObject' + description: Array of updated DRS objects + 200UploadRequest: + description: Upload request processed successfully. Returns upload methods and temporary credentials for the requested files. 
+ content: + application/json: + schema: + $ref: '#/components/schemas/UploadResponse' + examples: + s3_upload: + summary: S3 upload method response + description: Response with S3 upload method and temporary credentials + value: + responses: + - name: sample_data.vcf + size: 1048576 + mime_type: text/plain + checksums: + - checksum: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + type: sha-256 + description: Variant call format file for sample analysis + aliases: + - sample_001_variants + - vcf_batch_2024 + upload_methods: + - type: s3 + access_url: + url: https://my-bucket.s3.amazonaws.com/uploads/drs_object_123456 + region: us-east-1 + upload_details: + bucket: my-bucket + key: uploads/drs_object_123456 + access_key_id: AKIAIOSFODNN7EXAMPLE + secret_access_key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + session_token: AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE + expires_at: '2024-01-01T12:00:00Z' + https_upload: + summary: HTTPS upload method response + description: Response with HTTPS presigned POST URL for direct upload + value: + responses: + - name: genome_assembly.fasta + size: 3221225472 + mime_type: text/plain + checksums: + - checksum: a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3 + type: sha-256 + - checksum: 098f6bcd4621d373cade4e832627b4f6 + type: md5 + description: Human genome reference assembly + aliases: + - hg38_reference + upload_methods: + - type: https + access_url: + url: https://upload.example.org/v1/files/drs_object_789012 + upload_details: + post_url: https://upload.example.org/v1/files/drs_object_789012?signature=abc123 + multiple_methods: + summary: Multiple upload methods response + description: Response offering multiple upload method options for flexibility + value: + responses: + - name: 
annotations.gff3 + size: 524288000 + mime_type: text/plain + checksums: + - checksum: b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9 + type: sha-256 + description: Gene annotations in GFF3 format + upload_methods: + - type: s3 + access_url: + url: https://genomics-bucket.s3.us-west-2.amazonaws.com/uploads/drs_object_345678 + region: us-west-2 + upload_details: + bucket: genomics-bucket + key: uploads/drs_object_345678 + access_key_id: AKIAI44QH8DHBEXAMPLE + secret_access_key: je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY + session_token: temporary_session_token_here + expires_at: '2024-01-01T12:00:00Z' + - type: https + access_url: + url: https://upload-api.example.org/files/drs_object_345678 + upload_details: + post_url: https://upload-api.example.org/files/drs_object_345678?token=upload_token_12345 + - type: gs + access_url: + url: https://storage.googleapis.com/genomics-uploads/drs_object_345678 + region: us-central1 + upload_details: + bucket: genomics-uploads + key: drs_object_345678 + access_token: ya29.AHES6ZRVmB7fkLtd1XTmq6mo0S1wqZZi3-Lh_s-6Uw7p8vtgSwg + expires_at: '2024-01-01T12:00:00Z' + parameters: + ObjectId: + in: path + name: object_id + required: true + description: '`DrsObject` identifier' + schema: + type: string + Expand: + in: query + name: expand + schema: + type: boolean + example: false + description: |- + If false and the object_id refers to a bundle, then the ContentsObject array contains only those objects directly contained in the bundle. That is, if the bundle contains other bundles, those other bundles are not recursively included in the result. + If true and the object_id refers to a bundle, then the entire set of objects in the bundle is expanded. That is, if the bundle contains other bundles, then those other bundles are recursively expanded and included in the result. Recursion continues through the entire sub-tree of the bundle. + If the object_id refers to a blob, then the query parameter is ignored. 
+ AccessId: + in: path + name: access_id + required: true + description: An `access_id` from the `access_methods` list of a `DrsObject` + schema: + type: string + Checksum: + in: path + name: checksum + required: true + description: A `checksum` value from the `checksums` list of a `DrsObject` + schema: + type: string + requestBodies: + PostObjectBody: + required: true + content: + application/json: + schema: + type: object + properties: + expand: + type: boolean + example: false + description: |- + If false and the object_id refers to a bundle, then the ContentsObject array contains only those objects directly contained in the bundle. That is, if the bundle contains other bundles, those other bundles are not recursively included in the result. + If true and the object_id refers to a bundle, then the entire set of objects in the bundle is expanded. That is, if the bundle contains other bundles, then those other bundles are recursively expanded and included in the result. Recursion continues through the entire sub-tree of the bundle. + If the object_id refers to a blob, then the query parameter is ignored. + passports: + type: array + items: + type: string + example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + description: the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. 
+ examples: + retrieve_with_auth: + summary: Retrieve object with authentication + description: Request object metadata with passport authentication + value: + expand: false + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + retrieve_expanded_bundle: + summary: Retrieve expanded bundle with authentication + description: Request expanded bundle contents with passport authentication + value: + expand: true + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + - eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.additional_passport_signature + DeleteBody: + required: false + content: + application/json: + schema: + $ref: '#/components/schemas/DeleteRequest' + examples: + metadata_only_delete: + summary: Delete metadata only (default) + description: Delete DRS object metadata while preserving underlying storage data. This is the default and safest option. 
+ value: + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + delete_storage_data: false + full_delete: + summary: Delete metadata and storage data + description: Delete both DRS object metadata and underlying storage data (requires server support via deleteStorageDataSupported) + value: + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + delete_storage_data: true + no_auth_delete: + summary: Delete without authentication + description: Delete operation without GA4GH Passport authentication (for public objects or when using Bearer token in headers) + value: + delete_storage_data: false + minimal_request: + summary: Minimal delete request + description: Simplest delete request with no authentication and default behavior (metadata only) + value: {} + multiple_passports: + summary: Multiple GA4GH Passports + description: Delete request with multiple GA4GH Passports for complex authorization scenarios + value: + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + - eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.AbCdEfGhIjKlMnOpQrStUvWxYz + delete_storage_data: false + update_workflow: + summary: Safe update workflow + description: Delete metadata only to enable safe update pattern (delete metadata, then re-register with new metadata) + value: + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + delete_storage_data: false + BulkDeleteBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BulkDeleteRequest' + examples: + bulk_metadata_delete: + summary: Bulk delete metadata only + description: Delete multiple DRS objects metadata while preserving underlying storage data (default and safest 
option) + value: + bulk_object_ids: + - drs_object_123456 + - drs_object_789012 + - drs_object_345678 + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + delete_storage_data: false + bulk_full_delete: + summary: Bulk delete metadata and storage data + description: Delete both metadata and storage data for multiple objects (requires server support via deleteStorageDataSupported) + value: + bulk_object_ids: + - drs_object_123456 + - drs_object_789012 + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + delete_storage_data: true + bulk_no_auth_delete: + summary: Bulk delete without authentication + description: Bulk delete operation without GA4GH Passport authentication (for public objects or when using Bearer token in headers) + value: + bulk_object_ids: + - drs_object_123456 + - drs_object_789012 + delete_storage_data: false + large_bulk_delete: + summary: Large bulk delete operation + description: Delete many objects in a single request (check maxBulkDeleteLength in service-info for limits) + value: + bulk_object_ids: + - drs_object_001 + - drs_object_002 + - drs_object_003 + - drs_object_004 + - drs_object_005 + - drs_object_006 + - drs_object_007 + - drs_object_008 + - drs_object_009 + - drs_object_010 + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + delete_storage_data: false + mixed_object_types: + summary: Mixed object types deletion + description: Delete objects with different ID formats and types in a single request + value: + bulk_object_ids: + - drs://example.org/123456 + - local_object_789 + - uuid:550e8400-e29b-41d4-a716-446655440000 + - compact:prefix:identifier + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + delete_storage_data: 
false + minimal_bulk_request: + summary: Minimal bulk delete request + description: Simplest bulk delete request with required fields only + value: + bulk_object_ids: + - drs_object_123456 + - drs_object_789012 + BulkObjectBody: + required: true + content: + application/json: + schema: + type: object + required: + - bulk_object_ids + properties: + passports: + type: array + items: + type: string + example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + description: the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. + bulk_object_ids: + type: array + items: + type: string + minItems: 1 + description: An array of ObjectIDs to retrieve metadata for + examples: + bulk_retrieve: + summary: Bulk retrieve objects + description: Retrieve metadata for multiple existing DRS objects using their IDs + value: + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + bulk_object_ids: + - drs_object_123456 + - drs_object_789012 + - drs_object_345678 + bulk_retrieve_no_auth: + summary: Bulk retrieve without authentication + description: Retrieve metadata for public DRS objects + value: + bulk_object_ids: + - drs_object_public_123 + - drs_object_public_456 + RegisterObjectsBody: + description: Request body for registering DRS objects after upload + required: true + content: + application/json: + schema: + type: object + required: + - candidates + properties: + candidates: + type: array + items: + $ref: '#/components/schemas/DrsObjectCandidate' + minItems: 1 + description: Array of DRS object candidates to register (server will mint IDs and timestamps) + passports: + type: array + items: + type: string + description: Optional array of GA4GH Passport JWTs for authorization + examples: + single_object_registration: + summary: Register a single object + description: 
Register one DRS object after upload + value: + candidates: + - name: sample_data.vcf + size: 1048576 + mime_type: text/plain + checksums: + - checksum: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + type: sha-256 + description: Variant call format file for sample analysis + access_methods: + - type: s3 + access_url: + url: s3://my-bucket/uploads/sample_data.vcf + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9... + bulk_object_registration: + summary: Register multiple objects + description: Register multiple DRS objects in a single request + value: + candidates: + - name: genome_assembly.fasta + size: 3221225472 + mime_type: text/plain + checksums: + - checksum: a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3 + type: sha-256 + description: Human genome reference assembly + access_methods: + - type: s3 + access_url: + url: s3://genomics-bucket/assemblies/hg38.fasta + - name: annotations.gff3 + size: 524288000 + mime_type: text/plain + checksums: + - checksum: b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9 + type: sha-256 + description: Gene annotations in GFF3 format + access_methods: + - type: https + access_url: + url: https://data.example.org/files/annotations.gff3 + Passports: + required: true + content: + application/json: + schema: + type: object + properties: + passports: + type: array + items: + type: string + example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + description: the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. 
+ AccessMethodUpdateBody: + description: Request body for updating access methods of a DRS object + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/AccessMethodUpdateRequest' + BulkAccessMethodUpdateBody: + description: Request body for bulk updating access methods of multiple DRS objects + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/BulkAccessMethodUpdateRequest' + UploadRequestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/UploadRequest' + examples: + single_file: + summary: Single file upload request + description: Request upload methods for a single file + value: + requests: + - name: sample_data.vcf + size: 1048576 + mime_type: text/plain + checksums: + - checksum: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + type: sha-256 + description: Variant call format file for sample analysis + aliases: + - sample_001_variants + - vcf_batch_2024 + passports: + - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM + multiple_files: + summary: Multiple files upload request + description: Request upload methods for multiple files with different types + value: + requests: + - name: genome_assembly.fasta + size: 3221225472 + mime_type: text/plain + checksums: + - checksum: a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3 + type: sha-256 + - checksum: 098f6bcd4621d373cade4e832627b4f6 + type: md5 + description: Human genome reference assembly + aliases: + - hg38_reference + - name: annotations.gff3 + size: 524288000 + mime_type: text/plain + checksums: + - checksum: b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9 + type: sha-256 + description: Gene annotations in GFF3 format + - name: metadata.json + size: 2048 + mime_type: application/json + checksums: + - checksum: c89e4c5c7f2c8c8e8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c + type: 
sha-256 + description: Sample metadata and experimental conditions + no_passports: + summary: Upload request without authentication + description: Request for public upload endpoints that don't require authentication + value: + requests: + - name: public_dataset.csv + size: 10240 + mime_type: text/csv + checksums: + - checksum: d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35 + type: sha-256 + description: Public research dataset +x-tagGroups: + - name: Overview + tags: + - Introduction + - DRS API Principles + - Authorization & Authentication + - name: Operations + tags: + - Objects + - Upload Request + - Service Info + - name: Models + tags: + - AccessMethodModel + - AccessURLModel + - ChecksumModel + - ContentsObjectModel + - DrsObjectModel + - DrsObjectCandidateModel + - ErrorModel + - UploadRequestModel + - UploadResponseModel + - UploadRequestObjectModel + - UploadResponseObjectModel + - UploadMethodModel + - DeleteRequestModel + - BulkDeleteRequestModel + - DeleteResultModel + - BulkDeleteResponseModel + - name: Appendices + tags: + - Motivation + - Working With Compound Objects + - Background Notes on DRS URIs + - Compact Identifier-Based URIs + - Hostname-Based URIs + - GA4GH Service Registry + - Upload Requests and Object Registration + - Object Deletion + - Access Method Update diff --git a/apigen/drs/model_access_method.go b/apigen/drs/model_access_method.go new file mode 100644 index 0000000..3667762 --- /dev/null +++ b/apigen/drs/model_access_method.go @@ -0,0 +1,380 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the AccessMethod type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &AccessMethod{} + +// AccessMethod struct for AccessMethod +type AccessMethod struct { + // Type of the access method. + Type string `json:"type"` + AccessUrl *AccessMethodAccessUrl `json:"access_url,omitempty"` + // An arbitrary string to be passed to the `/access` method to get an `AccessURL`. This string must be unique within the scope of a single object. Note that at least one of `access_url` and `access_id` must be provided. + AccessId *string `json:"access_id,omitempty"` + // Name of the cloud service provider that the object belongs to. If the cloud service is Amazon Web Services, Google Cloud Platform or Azure the values should be `aws`, `gcp`, or `azure` respectively. + Cloud *string `json:"cloud,omitempty"` + // Name of the region in the cloud service provider that the object belongs to. + Region *string `json:"region,omitempty"` + // Availablity of file in the cloud. This label defines if this file is immediately accessible via DRS. Any delay or requirement of thawing mechanism if the file is in offline/archival storage is classified as false, meaning it is unavailable. 
+ Available *bool `json:"available,omitempty"` + Authorizations *AccessMethodAuthorizations `json:"authorizations,omitempty"` +} + +type _AccessMethod AccessMethod + +// NewAccessMethod instantiates a new AccessMethod object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewAccessMethod(type_ string) *AccessMethod { + this := AccessMethod{} + this.Type = type_ + return &this +} + +// NewAccessMethodWithDefaults instantiates a new AccessMethod object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewAccessMethodWithDefaults() *AccessMethod { + this := AccessMethod{} + return &this +} + +// GetType returns the Type field value +func (o *AccessMethod) GetType() string { + if o == nil { + var ret string + return ret + } + + return o.Type +} + +// GetTypeOk returns a tuple with the Type field value +// and a boolean to check if the value has been set. +func (o *AccessMethod) GetTypeOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Type, true +} + +// SetType sets field value +func (o *AccessMethod) SetType(v string) { + o.Type = v +} + +// GetAccessUrl returns the AccessUrl field value if set, zero value otherwise. +func (o *AccessMethod) GetAccessUrl() AccessMethodAccessUrl { + if o == nil || IsNil(o.AccessUrl) { + var ret AccessMethodAccessUrl + return ret + } + return *o.AccessUrl +} + +// GetAccessUrlOk returns a tuple with the AccessUrl field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *AccessMethod) GetAccessUrlOk() (*AccessMethodAccessUrl, bool) { + if o == nil || IsNil(o.AccessUrl) { + return nil, false + } + return o.AccessUrl, true +} + +// HasAccessUrl returns a boolean if a field has been set. +func (o *AccessMethod) HasAccessUrl() bool { + if o != nil && !IsNil(o.AccessUrl) { + return true + } + + return false +} + +// SetAccessUrl gets a reference to the given AccessMethodAccessUrl and assigns it to the AccessUrl field. +func (o *AccessMethod) SetAccessUrl(v AccessMethodAccessUrl) { + o.AccessUrl = &v +} + +// GetAccessId returns the AccessId field value if set, zero value otherwise. +func (o *AccessMethod) GetAccessId() string { + if o == nil || IsNil(o.AccessId) { + var ret string + return ret + } + return *o.AccessId +} + +// GetAccessIdOk returns a tuple with the AccessId field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *AccessMethod) GetAccessIdOk() (*string, bool) { + if o == nil || IsNil(o.AccessId) { + return nil, false + } + return o.AccessId, true +} + +// HasAccessId returns a boolean if a field has been set. +func (o *AccessMethod) HasAccessId() bool { + if o != nil && !IsNil(o.AccessId) { + return true + } + + return false +} + +// SetAccessId gets a reference to the given string and assigns it to the AccessId field. +func (o *AccessMethod) SetAccessId(v string) { + o.AccessId = &v +} + +// GetCloud returns the Cloud field value if set, zero value otherwise. +func (o *AccessMethod) GetCloud() string { + if o == nil || IsNil(o.Cloud) { + var ret string + return ret + } + return *o.Cloud +} + +// GetCloudOk returns a tuple with the Cloud field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *AccessMethod) GetCloudOk() (*string, bool) { + if o == nil || IsNil(o.Cloud) { + return nil, false + } + return o.Cloud, true +} + +// HasCloud returns a boolean if a field has been set. 
+func (o *AccessMethod) HasCloud() bool { + if o != nil && !IsNil(o.Cloud) { + return true + } + + return false +} + +// SetCloud gets a reference to the given string and assigns it to the Cloud field. +func (o *AccessMethod) SetCloud(v string) { + o.Cloud = &v +} + +// GetRegion returns the Region field value if set, zero value otherwise. +func (o *AccessMethod) GetRegion() string { + if o == nil || IsNil(o.Region) { + var ret string + return ret + } + return *o.Region +} + +// GetRegionOk returns a tuple with the Region field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *AccessMethod) GetRegionOk() (*string, bool) { + if o == nil || IsNil(o.Region) { + return nil, false + } + return o.Region, true +} + +// HasRegion returns a boolean if a field has been set. +func (o *AccessMethod) HasRegion() bool { + if o != nil && !IsNil(o.Region) { + return true + } + + return false +} + +// SetRegion gets a reference to the given string and assigns it to the Region field. +func (o *AccessMethod) SetRegion(v string) { + o.Region = &v +} + +// GetAvailable returns the Available field value if set, zero value otherwise. +func (o *AccessMethod) GetAvailable() bool { + if o == nil || IsNil(o.Available) { + var ret bool + return ret + } + return *o.Available +} + +// GetAvailableOk returns a tuple with the Available field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *AccessMethod) GetAvailableOk() (*bool, bool) { + if o == nil || IsNil(o.Available) { + return nil, false + } + return o.Available, true +} + +// HasAvailable returns a boolean if a field has been set. +func (o *AccessMethod) HasAvailable() bool { + if o != nil && !IsNil(o.Available) { + return true + } + + return false +} + +// SetAvailable gets a reference to the given bool and assigns it to the Available field. 
+func (o *AccessMethod) SetAvailable(v bool) { + o.Available = &v +} + +// GetAuthorizations returns the Authorizations field value if set, zero value otherwise. +func (o *AccessMethod) GetAuthorizations() AccessMethodAuthorizations { + if o == nil || IsNil(o.Authorizations) { + var ret AccessMethodAuthorizations + return ret + } + return *o.Authorizations +} + +// GetAuthorizationsOk returns a tuple with the Authorizations field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *AccessMethod) GetAuthorizationsOk() (*AccessMethodAuthorizations, bool) { + if o == nil || IsNil(o.Authorizations) { + return nil, false + } + return o.Authorizations, true +} + +// HasAuthorizations returns a boolean if a field has been set. +func (o *AccessMethod) HasAuthorizations() bool { + if o != nil && !IsNil(o.Authorizations) { + return true + } + + return false +} + +// SetAuthorizations gets a reference to the given AccessMethodAuthorizations and assigns it to the Authorizations field. 
+func (o *AccessMethod) SetAuthorizations(v AccessMethodAuthorizations) { + o.Authorizations = &v +} + +func (o AccessMethod) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o AccessMethod) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["type"] = o.Type + if !IsNil(o.AccessUrl) { + toSerialize["access_url"] = o.AccessUrl + } + if !IsNil(o.AccessId) { + toSerialize["access_id"] = o.AccessId + } + if !IsNil(o.Cloud) { + toSerialize["cloud"] = o.Cloud + } + if !IsNil(o.Region) { + toSerialize["region"] = o.Region + } + if !IsNil(o.Available) { + toSerialize["available"] = o.Available + } + if !IsNil(o.Authorizations) { + toSerialize["authorizations"] = o.Authorizations + } + return toSerialize, nil +} + +func (o *AccessMethod) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "type", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varAccessMethod := _AccessMethod{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varAccessMethod) + + if err != nil { + return err + } + + *o = AccessMethod(varAccessMethod) + + return err +} + +type NullableAccessMethod struct { + value *AccessMethod + isSet bool +} + +func (v NullableAccessMethod) Get() *AccessMethod { + return v.value +} + +func (v *NullableAccessMethod) Set(val *AccessMethod) { + v.value = val + v.isSet = true +} + +func (v NullableAccessMethod) IsSet() bool { + return v.isSet +} + +func (v *NullableAccessMethod) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableAccessMethod(val *AccessMethod) *NullableAccessMethod { + return &NullableAccessMethod{value: val, isSet: true} +} + +func (v NullableAccessMethod) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableAccessMethod) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_access_method_access_url.go b/apigen/drs/model_access_method_access_url.go new file mode 100644 index 0000000..c19a8a9 --- /dev/null +++ b/apigen/drs/model_access_method_access_url.go @@ -0,0 +1,197 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the AccessMethodAccessUrl type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &AccessMethodAccessUrl{} + +// AccessMethodAccessUrl An `AccessURL` that can be used to fetch the actual object bytes. Note that at least one of `access_url` and `access_id` must be provided. +type AccessMethodAccessUrl struct { + // A fully resolvable URL that can be used to fetch the actual object bytes. + Url string `json:"url"` + // GA4GH-compatible list of HTTP headers. + Headers []string `json:"headers,omitempty"` +} + +type _AccessMethodAccessUrl AccessMethodAccessUrl + +// NewAccessMethodAccessUrl instantiates a new AccessMethodAccessUrl object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewAccessMethodAccessUrl(url string) *AccessMethodAccessUrl { + this := AccessMethodAccessUrl{} + this.Url = url + return &this +} + +// NewAccessMethodAccessUrlWithDefaults instantiates a new AccessMethodAccessUrl object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewAccessMethodAccessUrlWithDefaults() *AccessMethodAccessUrl { + this := AccessMethodAccessUrl{} + return &this +} + +// GetUrl returns the Url field value +func (o *AccessMethodAccessUrl) GetUrl() string { + if o == nil { + var ret string + return ret + } + + return o.Url +} + +// GetUrlOk returns a tuple with the Url field value +// and a boolean to check if the value has been set. 
+func (o *AccessMethodAccessUrl) GetUrlOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Url, true +} + +// SetUrl sets field value +func (o *AccessMethodAccessUrl) SetUrl(v string) { + o.Url = v +} + +// GetHeaders returns the Headers field value if set, zero value otherwise. +func (o *AccessMethodAccessUrl) GetHeaders() []string { + if o == nil || IsNil(o.Headers) { + var ret []string + return ret + } + return o.Headers +} + +// GetHeadersOk returns a tuple with the Headers field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *AccessMethodAccessUrl) GetHeadersOk() ([]string, bool) { + if o == nil || IsNil(o.Headers) { + return nil, false + } + return o.Headers, true +} + +// HasHeaders returns a boolean if a field has been set. +func (o *AccessMethodAccessUrl) HasHeaders() bool { + if o != nil && !IsNil(o.Headers) { + return true + } + + return false +} + +// SetHeaders gets a reference to the given []string and assigns it to the Headers field. +func (o *AccessMethodAccessUrl) SetHeaders(v []string) { + o.Headers = v +} + +func (o AccessMethodAccessUrl) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o AccessMethodAccessUrl) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["url"] = o.Url + if !IsNil(o.Headers) { + toSerialize["headers"] = o.Headers + } + return toSerialize, nil +} + +func (o *AccessMethodAccessUrl) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "url", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varAccessMethodAccessUrl := _AccessMethodAccessUrl{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varAccessMethodAccessUrl) + + if err != nil { + return err + } + + *o = AccessMethodAccessUrl(varAccessMethodAccessUrl) + + return err +} + +type NullableAccessMethodAccessUrl struct { + value *AccessMethodAccessUrl + isSet bool +} + +func (v NullableAccessMethodAccessUrl) Get() *AccessMethodAccessUrl { + return v.value +} + +func (v *NullableAccessMethodAccessUrl) Set(val *AccessMethodAccessUrl) { + v.value = val + v.isSet = true +} + +func (v NullableAccessMethodAccessUrl) IsSet() bool { + return v.isSet +} + +func (v *NullableAccessMethodAccessUrl) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableAccessMethodAccessUrl(val *AccessMethodAccessUrl) *NullableAccessMethodAccessUrl { + return &NullableAccessMethodAccessUrl{value: val, isSet: true} +} + +func (v NullableAccessMethodAccessUrl) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableAccessMethodAccessUrl) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_access_method_authorizations.go b/apigen/drs/model_access_method_authorizations.go new file mode 100644 index 0000000..a5bc108 --- /dev/null +++ b/apigen/drs/model_access_method_authorizations.go @@ -0,0 +1,235 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: 
ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" +) + +// checks if the AccessMethodAuthorizations type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &AccessMethodAuthorizations{} + +// AccessMethodAuthorizations When `access_id` is provided, `authorizations` provides information about how to authorize the `/access` method. +type AccessMethodAuthorizations struct { + DrsObjectId *string `json:"drs_object_id,omitempty"` + SupportedTypes []string `json:"supported_types,omitempty"` + PassportAuthIssuers []string `json:"passport_auth_issuers,omitempty"` + BearerAuthIssuers []string `json:"bearer_auth_issuers,omitempty"` +} + +// NewAccessMethodAuthorizations instantiates a new AccessMethodAuthorizations object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewAccessMethodAuthorizations() *AccessMethodAuthorizations { + this := AccessMethodAuthorizations{} + return &this +} + +// NewAccessMethodAuthorizationsWithDefaults instantiates a new AccessMethodAuthorizations object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewAccessMethodAuthorizationsWithDefaults() *AccessMethodAuthorizations { + this := AccessMethodAuthorizations{} + return &this +} + +// GetDrsObjectId returns the DrsObjectId field value if set, zero value otherwise. 
+func (o *AccessMethodAuthorizations) GetDrsObjectId() string { + if o == nil || IsNil(o.DrsObjectId) { + var ret string + return ret + } + return *o.DrsObjectId +} + +// GetDrsObjectIdOk returns a tuple with the DrsObjectId field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *AccessMethodAuthorizations) GetDrsObjectIdOk() (*string, bool) { + if o == nil || IsNil(o.DrsObjectId) { + return nil, false + } + return o.DrsObjectId, true +} + +// HasDrsObjectId returns a boolean if a field has been set. +func (o *AccessMethodAuthorizations) HasDrsObjectId() bool { + if o != nil && !IsNil(o.DrsObjectId) { + return true + } + + return false +} + +// SetDrsObjectId gets a reference to the given string and assigns it to the DrsObjectId field. +func (o *AccessMethodAuthorizations) SetDrsObjectId(v string) { + o.DrsObjectId = &v +} + +// GetSupportedTypes returns the SupportedTypes field value if set, zero value otherwise. +func (o *AccessMethodAuthorizations) GetSupportedTypes() []string { + if o == nil || IsNil(o.SupportedTypes) { + var ret []string + return ret + } + return o.SupportedTypes +} + +// GetSupportedTypesOk returns a tuple with the SupportedTypes field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *AccessMethodAuthorizations) GetSupportedTypesOk() ([]string, bool) { + if o == nil || IsNil(o.SupportedTypes) { + return nil, false + } + return o.SupportedTypes, true +} + +// HasSupportedTypes returns a boolean if a field has been set. +func (o *AccessMethodAuthorizations) HasSupportedTypes() bool { + if o != nil && !IsNil(o.SupportedTypes) { + return true + } + + return false +} + +// SetSupportedTypes gets a reference to the given []string and assigns it to the SupportedTypes field. 
+func (o *AccessMethodAuthorizations) SetSupportedTypes(v []string) { + o.SupportedTypes = v +} + +// GetPassportAuthIssuers returns the PassportAuthIssuers field value if set, zero value otherwise. +func (o *AccessMethodAuthorizations) GetPassportAuthIssuers() []string { + if o == nil || IsNil(o.PassportAuthIssuers) { + var ret []string + return ret + } + return o.PassportAuthIssuers +} + +// GetPassportAuthIssuersOk returns a tuple with the PassportAuthIssuers field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *AccessMethodAuthorizations) GetPassportAuthIssuersOk() ([]string, bool) { + if o == nil || IsNil(o.PassportAuthIssuers) { + return nil, false + } + return o.PassportAuthIssuers, true +} + +// HasPassportAuthIssuers returns a boolean if a field has been set. +func (o *AccessMethodAuthorizations) HasPassportAuthIssuers() bool { + if o != nil && !IsNil(o.PassportAuthIssuers) { + return true + } + + return false +} + +// SetPassportAuthIssuers gets a reference to the given []string and assigns it to the PassportAuthIssuers field. +func (o *AccessMethodAuthorizations) SetPassportAuthIssuers(v []string) { + o.PassportAuthIssuers = v +} + +// GetBearerAuthIssuers returns the BearerAuthIssuers field value if set, zero value otherwise. +func (o *AccessMethodAuthorizations) GetBearerAuthIssuers() []string { + if o == nil || IsNil(o.BearerAuthIssuers) { + var ret []string + return ret + } + return o.BearerAuthIssuers +} + +// GetBearerAuthIssuersOk returns a tuple with the BearerAuthIssuers field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *AccessMethodAuthorizations) GetBearerAuthIssuersOk() ([]string, bool) { + if o == nil || IsNil(o.BearerAuthIssuers) { + return nil, false + } + return o.BearerAuthIssuers, true +} + +// HasBearerAuthIssuers returns a boolean if a field has been set. 
+func (o *AccessMethodAuthorizations) HasBearerAuthIssuers() bool { + if o != nil && !IsNil(o.BearerAuthIssuers) { + return true + } + + return false +} + +// SetBearerAuthIssuers gets a reference to the given []string and assigns it to the BearerAuthIssuers field. +func (o *AccessMethodAuthorizations) SetBearerAuthIssuers(v []string) { + o.BearerAuthIssuers = v +} + +func (o AccessMethodAuthorizations) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o AccessMethodAuthorizations) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.DrsObjectId) { + toSerialize["drs_object_id"] = o.DrsObjectId + } + if !IsNil(o.SupportedTypes) { + toSerialize["supported_types"] = o.SupportedTypes + } + if !IsNil(o.PassportAuthIssuers) { + toSerialize["passport_auth_issuers"] = o.PassportAuthIssuers + } + if !IsNil(o.BearerAuthIssuers) { + toSerialize["bearer_auth_issuers"] = o.BearerAuthIssuers + } + return toSerialize, nil +} + +type NullableAccessMethodAuthorizations struct { + value *AccessMethodAuthorizations + isSet bool +} + +func (v NullableAccessMethodAuthorizations) Get() *AccessMethodAuthorizations { + return v.value +} + +func (v *NullableAccessMethodAuthorizations) Set(val *AccessMethodAuthorizations) { + v.value = val + v.isSet = true +} + +func (v NullableAccessMethodAuthorizations) IsSet() bool { + return v.isSet +} + +func (v *NullableAccessMethodAuthorizations) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableAccessMethodAuthorizations(val *AccessMethodAuthorizations) *NullableAccessMethodAuthorizations { + return &NullableAccessMethodAuthorizations{value: val, isSet: true} +} + +func (v NullableAccessMethodAuthorizations) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableAccessMethodAuthorizations) UnmarshalJSON(src []byte) error { + v.isSet = true + return 
json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_access_method_update_request.go b/apigen/drs/model_access_method_update_request.go new file mode 100644 index 0000000..09244c3 --- /dev/null +++ b/apigen/drs/model_access_method_update_request.go @@ -0,0 +1,197 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the AccessMethodUpdateRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &AccessMethodUpdateRequest{} + +// AccessMethodUpdateRequest struct for AccessMethodUpdateRequest +type AccessMethodUpdateRequest struct { + // New access methods for the DRS object + AccessMethods []AccessMethod `json:"access_methods"` + // Optional GA4GH Passport JWTs for authorization + Passports []string `json:"passports,omitempty"` +} + +type _AccessMethodUpdateRequest AccessMethodUpdateRequest + +// NewAccessMethodUpdateRequest instantiates a new AccessMethodUpdateRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewAccessMethodUpdateRequest(accessMethods []AccessMethod) *AccessMethodUpdateRequest { + this := AccessMethodUpdateRequest{} + this.AccessMethods = accessMethods + return &this +} + +// NewAccessMethodUpdateRequestWithDefaults instantiates a new AccessMethodUpdateRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewAccessMethodUpdateRequestWithDefaults() *AccessMethodUpdateRequest 
{ + this := AccessMethodUpdateRequest{} + return &this +} + +// GetAccessMethods returns the AccessMethods field value +func (o *AccessMethodUpdateRequest) GetAccessMethods() []AccessMethod { + if o == nil { + var ret []AccessMethod + return ret + } + + return o.AccessMethods +} + +// GetAccessMethodsOk returns a tuple with the AccessMethods field value +// and a boolean to check if the value has been set. +func (o *AccessMethodUpdateRequest) GetAccessMethodsOk() ([]AccessMethod, bool) { + if o == nil { + return nil, false + } + return o.AccessMethods, true +} + +// SetAccessMethods sets field value +func (o *AccessMethodUpdateRequest) SetAccessMethods(v []AccessMethod) { + o.AccessMethods = v +} + +// GetPassports returns the Passports field value if set, zero value otherwise. +func (o *AccessMethodUpdateRequest) GetPassports() []string { + if o == nil || IsNil(o.Passports) { + var ret []string + return ret + } + return o.Passports +} + +// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *AccessMethodUpdateRequest) GetPassportsOk() ([]string, bool) { + if o == nil || IsNil(o.Passports) { + return nil, false + } + return o.Passports, true +} + +// HasPassports returns a boolean if a field has been set. +func (o *AccessMethodUpdateRequest) HasPassports() bool { + if o != nil && !IsNil(o.Passports) { + return true + } + + return false +} + +// SetPassports gets a reference to the given []string and assigns it to the Passports field. 
+func (o *AccessMethodUpdateRequest) SetPassports(v []string) { + o.Passports = v +} + +func (o AccessMethodUpdateRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o AccessMethodUpdateRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["access_methods"] = o.AccessMethods + if !IsNil(o.Passports) { + toSerialize["passports"] = o.Passports + } + return toSerialize, nil +} + +func (o *AccessMethodUpdateRequest) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "access_methods", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varAccessMethodUpdateRequest := _AccessMethodUpdateRequest{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varAccessMethodUpdateRequest) + + if err != nil { + return err + } + + *o = AccessMethodUpdateRequest(varAccessMethodUpdateRequest) + + return err +} + +type NullableAccessMethodUpdateRequest struct { + value *AccessMethodUpdateRequest + isSet bool +} + +func (v NullableAccessMethodUpdateRequest) Get() *AccessMethodUpdateRequest { + return v.value +} + +func (v *NullableAccessMethodUpdateRequest) Set(val *AccessMethodUpdateRequest) { + v.value = val + v.isSet = true +} + +func (v NullableAccessMethodUpdateRequest) IsSet() bool { + return v.isSet +} + +func (v 
*NullableAccessMethodUpdateRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableAccessMethodUpdateRequest(val *AccessMethodUpdateRequest) *NullableAccessMethodUpdateRequest { + return &NullableAccessMethodUpdateRequest{value: val, isSet: true} +} + +func (v NullableAccessMethodUpdateRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableAccessMethodUpdateRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_access_url.go b/apigen/drs/model_access_url.go new file mode 100644 index 0000000..ac64dad --- /dev/null +++ b/apigen/drs/model_access_url.go @@ -0,0 +1,197 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the AccessURL type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &AccessURL{} + +// AccessURL struct for AccessURL +type AccessURL struct { + // A fully resolvable URL that can be used to fetch the actual object bytes. + Url string `json:"url"` + // An optional list of headers to include in the HTTP request to `url`. These headers can be used to provide auth tokens required to fetch the object bytes. 
+ Headers []string `json:"headers,omitempty"` +} + +type _AccessURL AccessURL + +// NewAccessURL instantiates a new AccessURL object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewAccessURL(url string) *AccessURL { + this := AccessURL{} + this.Url = url + return &this +} + +// NewAccessURLWithDefaults instantiates a new AccessURL object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewAccessURLWithDefaults() *AccessURL { + this := AccessURL{} + return &this +} + +// GetUrl returns the Url field value +func (o *AccessURL) GetUrl() string { + if o == nil { + var ret string + return ret + } + + return o.Url +} + +// GetUrlOk returns a tuple with the Url field value +// and a boolean to check if the value has been set. +func (o *AccessURL) GetUrlOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Url, true +} + +// SetUrl sets field value +func (o *AccessURL) SetUrl(v string) { + o.Url = v +} + +// GetHeaders returns the Headers field value if set, zero value otherwise. +func (o *AccessURL) GetHeaders() []string { + if o == nil || IsNil(o.Headers) { + var ret []string + return ret + } + return o.Headers +} + +// GetHeadersOk returns a tuple with the Headers field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *AccessURL) GetHeadersOk() ([]string, bool) { + if o == nil || IsNil(o.Headers) { + return nil, false + } + return o.Headers, true +} + +// HasHeaders returns a boolean if a field has been set. 
+func (o *AccessURL) HasHeaders() bool { + if o != nil && !IsNil(o.Headers) { + return true + } + + return false +} + +// SetHeaders gets a reference to the given []string and assigns it to the Headers field. +func (o *AccessURL) SetHeaders(v []string) { + o.Headers = v +} + +func (o AccessURL) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o AccessURL) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["url"] = o.Url + if !IsNil(o.Headers) { + toSerialize["headers"] = o.Headers + } + return toSerialize, nil +} + +func (o *AccessURL) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "url", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varAccessURL := _AccessURL{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varAccessURL) + + if err != nil { + return err + } + + *o = AccessURL(varAccessURL) + + return err +} + +type NullableAccessURL struct { + value *AccessURL + isSet bool +} + +func (v NullableAccessURL) Get() *AccessURL { + return v.value +} + +func (v *NullableAccessURL) Set(val *AccessURL) { + v.value = val + v.isSet = true +} + +func (v NullableAccessURL) IsSet() bool { + return v.isSet +} + +func (v *NullableAccessURL) Unset() { + v.value = nil + v.isSet = false +} + +func 
NewNullableAccessURL(val *AccessURL) *NullableAccessURL { + return &NullableAccessURL{value: val, isSet: true} +} + +func (v NullableAccessURL) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableAccessURL) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_authorizations.go b/apigen/drs/model_authorizations.go new file mode 100644 index 0000000..d761106 --- /dev/null +++ b/apigen/drs/model_authorizations.go @@ -0,0 +1,238 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" +) + +// checks if the Authorizations type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Authorizations{} + +// Authorizations struct for Authorizations +type Authorizations struct { + DrsObjectId *string `json:"drs_object_id,omitempty"` + // An Optional list of support authorization types. More than one can be supported and tried in sequence. Defaults to `None` if empty or missing. + SupportedTypes []string `json:"supported_types,omitempty"` + // If authorizations contain `PassportAuth` this is a required list of visa issuers (as found in a visa's `iss` claim) that may authorize access to this object. The caller must only provide passports that contain visas from this list. It is strongly recommended that the caller validate that it is appropriate to send the requested passport/visa to the DRS server to mitigate attacks by malicious DRS servers requesting credentials they should not have. 
+ PassportAuthIssuers []string `json:"passport_auth_issuers,omitempty"` + // If authorizations contain `BearerAuth` this is an optional list of issuers that may authorize access to this object. The caller must provide a token from one of these issuers. If this is empty or missing it assumed the caller knows which token to send via other means. It is strongly recommended that the caller validate that it is appropriate to send the requested token to the DRS server to mitigate attacks by malicious DRS servers requesting credentials they should not have. + BearerAuthIssuers []string `json:"bearer_auth_issuers,omitempty"` +} + +// NewAuthorizations instantiates a new Authorizations object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewAuthorizations() *Authorizations { + this := Authorizations{} + return &this +} + +// NewAuthorizationsWithDefaults instantiates a new Authorizations object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewAuthorizationsWithDefaults() *Authorizations { + this := Authorizations{} + return &this +} + +// GetDrsObjectId returns the DrsObjectId field value if set, zero value otherwise. +func (o *Authorizations) GetDrsObjectId() string { + if o == nil || IsNil(o.DrsObjectId) { + var ret string + return ret + } + return *o.DrsObjectId +} + +// GetDrsObjectIdOk returns a tuple with the DrsObjectId field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Authorizations) GetDrsObjectIdOk() (*string, bool) { + if o == nil || IsNil(o.DrsObjectId) { + return nil, false + } + return o.DrsObjectId, true +} + +// HasDrsObjectId returns a boolean if a field has been set. 
+func (o *Authorizations) HasDrsObjectId() bool { + if o != nil && !IsNil(o.DrsObjectId) { + return true + } + + return false +} + +// SetDrsObjectId gets a reference to the given string and assigns it to the DrsObjectId field. +func (o *Authorizations) SetDrsObjectId(v string) { + o.DrsObjectId = &v +} + +// GetSupportedTypes returns the SupportedTypes field value if set, zero value otherwise. +func (o *Authorizations) GetSupportedTypes() []string { + if o == nil || IsNil(o.SupportedTypes) { + var ret []string + return ret + } + return o.SupportedTypes +} + +// GetSupportedTypesOk returns a tuple with the SupportedTypes field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Authorizations) GetSupportedTypesOk() ([]string, bool) { + if o == nil || IsNil(o.SupportedTypes) { + return nil, false + } + return o.SupportedTypes, true +} + +// HasSupportedTypes returns a boolean if a field has been set. +func (o *Authorizations) HasSupportedTypes() bool { + if o != nil && !IsNil(o.SupportedTypes) { + return true + } + + return false +} + +// SetSupportedTypes gets a reference to the given []string and assigns it to the SupportedTypes field. +func (o *Authorizations) SetSupportedTypes(v []string) { + o.SupportedTypes = v +} + +// GetPassportAuthIssuers returns the PassportAuthIssuers field value if set, zero value otherwise. +func (o *Authorizations) GetPassportAuthIssuers() []string { + if o == nil || IsNil(o.PassportAuthIssuers) { + var ret []string + return ret + } + return o.PassportAuthIssuers +} + +// GetPassportAuthIssuersOk returns a tuple with the PassportAuthIssuers field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Authorizations) GetPassportAuthIssuersOk() ([]string, bool) { + if o == nil || IsNil(o.PassportAuthIssuers) { + return nil, false + } + return o.PassportAuthIssuers, true +} + +// HasPassportAuthIssuers returns a boolean if a field has been set. 
+func (o *Authorizations) HasPassportAuthIssuers() bool { + if o != nil && !IsNil(o.PassportAuthIssuers) { + return true + } + + return false +} + +// SetPassportAuthIssuers gets a reference to the given []string and assigns it to the PassportAuthIssuers field. +func (o *Authorizations) SetPassportAuthIssuers(v []string) { + o.PassportAuthIssuers = v +} + +// GetBearerAuthIssuers returns the BearerAuthIssuers field value if set, zero value otherwise. +func (o *Authorizations) GetBearerAuthIssuers() []string { + if o == nil || IsNil(o.BearerAuthIssuers) { + var ret []string + return ret + } + return o.BearerAuthIssuers +} + +// GetBearerAuthIssuersOk returns a tuple with the BearerAuthIssuers field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Authorizations) GetBearerAuthIssuersOk() ([]string, bool) { + if o == nil || IsNil(o.BearerAuthIssuers) { + return nil, false + } + return o.BearerAuthIssuers, true +} + +// HasBearerAuthIssuers returns a boolean if a field has been set. +func (o *Authorizations) HasBearerAuthIssuers() bool { + if o != nil && !IsNil(o.BearerAuthIssuers) { + return true + } + + return false +} + +// SetBearerAuthIssuers gets a reference to the given []string and assigns it to the BearerAuthIssuers field. 
+func (o *Authorizations) SetBearerAuthIssuers(v []string) { + o.BearerAuthIssuers = v +} + +func (o Authorizations) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Authorizations) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.DrsObjectId) { + toSerialize["drs_object_id"] = o.DrsObjectId + } + if !IsNil(o.SupportedTypes) { + toSerialize["supported_types"] = o.SupportedTypes + } + if !IsNil(o.PassportAuthIssuers) { + toSerialize["passport_auth_issuers"] = o.PassportAuthIssuers + } + if !IsNil(o.BearerAuthIssuers) { + toSerialize["bearer_auth_issuers"] = o.BearerAuthIssuers + } + return toSerialize, nil +} + +type NullableAuthorizations struct { + value *Authorizations + isSet bool +} + +func (v NullableAuthorizations) Get() *Authorizations { + return v.value +} + +func (v *NullableAuthorizations) Set(val *Authorizations) { + v.value = val + v.isSet = true +} + +func (v NullableAuthorizations) IsSet() bool { + return v.isSet +} + +func (v *NullableAuthorizations) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableAuthorizations(val *Authorizations) *NullableAuthorizations { + return &NullableAuthorizations{value: val, isSet: true} +} + +func (v NullableAuthorizations) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableAuthorizations) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_bulk_access_method_update_request.go b/apigen/drs/model_bulk_access_method_update_request.go new file mode 100644 index 0000000..972e2ea --- /dev/null +++ b/apigen/drs/model_bulk_access_method_update_request.go @@ -0,0 +1,197 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: 
ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the BulkAccessMethodUpdateRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BulkAccessMethodUpdateRequest{} + +// BulkAccessMethodUpdateRequest struct for BulkAccessMethodUpdateRequest +type BulkAccessMethodUpdateRequest struct { + // Array of access method updates to perform + Updates []BulkAccessMethodUpdateRequestUpdatesInner `json:"updates"` + // Optional GA4GH Passport JWTs for authorization + Passports []string `json:"passports,omitempty"` +} + +type _BulkAccessMethodUpdateRequest BulkAccessMethodUpdateRequest + +// NewBulkAccessMethodUpdateRequest instantiates a new BulkAccessMethodUpdateRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBulkAccessMethodUpdateRequest(updates []BulkAccessMethodUpdateRequestUpdatesInner) *BulkAccessMethodUpdateRequest { + this := BulkAccessMethodUpdateRequest{} + this.Updates = updates + return &this +} + +// NewBulkAccessMethodUpdateRequestWithDefaults instantiates a new BulkAccessMethodUpdateRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBulkAccessMethodUpdateRequestWithDefaults() *BulkAccessMethodUpdateRequest { + this := BulkAccessMethodUpdateRequest{} + return &this +} + +// GetUpdates returns the Updates field value +func (o *BulkAccessMethodUpdateRequest) GetUpdates() []BulkAccessMethodUpdateRequestUpdatesInner { + if o == nil { + var ret []BulkAccessMethodUpdateRequestUpdatesInner + return ret + } + + return o.Updates +} + +// GetUpdatesOk returns a tuple 
with the Updates field value +// and a boolean to check if the value has been set. +func (o *BulkAccessMethodUpdateRequest) GetUpdatesOk() ([]BulkAccessMethodUpdateRequestUpdatesInner, bool) { + if o == nil { + return nil, false + } + return o.Updates, true +} + +// SetUpdates sets field value +func (o *BulkAccessMethodUpdateRequest) SetUpdates(v []BulkAccessMethodUpdateRequestUpdatesInner) { + o.Updates = v +} + +// GetPassports returns the Passports field value if set, zero value otherwise. +func (o *BulkAccessMethodUpdateRequest) GetPassports() []string { + if o == nil || IsNil(o.Passports) { + var ret []string + return ret + } + return o.Passports +} + +// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkAccessMethodUpdateRequest) GetPassportsOk() ([]string, bool) { + if o == nil || IsNil(o.Passports) { + return nil, false + } + return o.Passports, true +} + +// HasPassports returns a boolean if a field has been set. +func (o *BulkAccessMethodUpdateRequest) HasPassports() bool { + if o != nil && !IsNil(o.Passports) { + return true + } + + return false +} + +// SetPassports gets a reference to the given []string and assigns it to the Passports field. 
+func (o *BulkAccessMethodUpdateRequest) SetPassports(v []string) { + o.Passports = v +} + +func (o BulkAccessMethodUpdateRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BulkAccessMethodUpdateRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["updates"] = o.Updates + if !IsNil(o.Passports) { + toSerialize["passports"] = o.Passports + } + return toSerialize, nil +} + +func (o *BulkAccessMethodUpdateRequest) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "updates", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varBulkAccessMethodUpdateRequest := _BulkAccessMethodUpdateRequest{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varBulkAccessMethodUpdateRequest) + + if err != nil { + return err + } + + *o = BulkAccessMethodUpdateRequest(varBulkAccessMethodUpdateRequest) + + return err +} + +type NullableBulkAccessMethodUpdateRequest struct { + value *BulkAccessMethodUpdateRequest + isSet bool +} + +func (v NullableBulkAccessMethodUpdateRequest) Get() *BulkAccessMethodUpdateRequest { + return v.value +} + +func (v *NullableBulkAccessMethodUpdateRequest) Set(val *BulkAccessMethodUpdateRequest) { + v.value = val + v.isSet = true +} + +func (v NullableBulkAccessMethodUpdateRequest) 
IsSet() bool { + return v.isSet +} + +func (v *NullableBulkAccessMethodUpdateRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBulkAccessMethodUpdateRequest(val *BulkAccessMethodUpdateRequest) *NullableBulkAccessMethodUpdateRequest { + return &NullableBulkAccessMethodUpdateRequest{value: val, isSet: true} +} + +func (v NullableBulkAccessMethodUpdateRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkAccessMethodUpdateRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_bulk_access_method_update_request_updates_inner.go b/apigen/drs/model_bulk_access_method_update_request_updates_inner.go new file mode 100644 index 0000000..1232659 --- /dev/null +++ b/apigen/drs/model_bulk_access_method_update_request_updates_inner.go @@ -0,0 +1,189 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the BulkAccessMethodUpdateRequestUpdatesInner type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BulkAccessMethodUpdateRequestUpdatesInner{} + +// BulkAccessMethodUpdateRequestUpdatesInner struct for BulkAccessMethodUpdateRequestUpdatesInner +type BulkAccessMethodUpdateRequestUpdatesInner struct { + // DRS object ID to update + ObjectId string `json:"object_id"` + // New access methods for this object + AccessMethods []AccessMethod `json:"access_methods"` +} + +type _BulkAccessMethodUpdateRequestUpdatesInner BulkAccessMethodUpdateRequestUpdatesInner + +// NewBulkAccessMethodUpdateRequestUpdatesInner instantiates a new BulkAccessMethodUpdateRequestUpdatesInner object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBulkAccessMethodUpdateRequestUpdatesInner(objectId string, accessMethods []AccessMethod) *BulkAccessMethodUpdateRequestUpdatesInner { + this := BulkAccessMethodUpdateRequestUpdatesInner{} + this.ObjectId = objectId + this.AccessMethods = accessMethods + return &this +} + +// NewBulkAccessMethodUpdateRequestUpdatesInnerWithDefaults instantiates a new BulkAccessMethodUpdateRequestUpdatesInner object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBulkAccessMethodUpdateRequestUpdatesInnerWithDefaults() *BulkAccessMethodUpdateRequestUpdatesInner { + this := BulkAccessMethodUpdateRequestUpdatesInner{} + return &this +} + +// GetObjectId returns the ObjectId field value +func (o *BulkAccessMethodUpdateRequestUpdatesInner) GetObjectId() string { + if o == nil { + var ret string + return ret + } + + return o.ObjectId +} + +// GetObjectIdOk returns a 
tuple with the ObjectId field value +// and a boolean to check if the value has been set. +func (o *BulkAccessMethodUpdateRequestUpdatesInner) GetObjectIdOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.ObjectId, true +} + +// SetObjectId sets field value +func (o *BulkAccessMethodUpdateRequestUpdatesInner) SetObjectId(v string) { + o.ObjectId = v +} + +// GetAccessMethods returns the AccessMethods field value +func (o *BulkAccessMethodUpdateRequestUpdatesInner) GetAccessMethods() []AccessMethod { + if o == nil { + var ret []AccessMethod + return ret + } + + return o.AccessMethods +} + +// GetAccessMethodsOk returns a tuple with the AccessMethods field value +// and a boolean to check if the value has been set. +func (o *BulkAccessMethodUpdateRequestUpdatesInner) GetAccessMethodsOk() ([]AccessMethod, bool) { + if o == nil { + return nil, false + } + return o.AccessMethods, true +} + +// SetAccessMethods sets field value +func (o *BulkAccessMethodUpdateRequestUpdatesInner) SetAccessMethods(v []AccessMethod) { + o.AccessMethods = v +} + +func (o BulkAccessMethodUpdateRequestUpdatesInner) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BulkAccessMethodUpdateRequestUpdatesInner) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["object_id"] = o.ObjectId + toSerialize["access_methods"] = o.AccessMethods + return toSerialize, nil +} + +func (o *BulkAccessMethodUpdateRequestUpdatesInner) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "object_id", + "access_methods", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varBulkAccessMethodUpdateRequestUpdatesInner := _BulkAccessMethodUpdateRequestUpdatesInner{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varBulkAccessMethodUpdateRequestUpdatesInner) + + if err != nil { + return err + } + + *o = BulkAccessMethodUpdateRequestUpdatesInner(varBulkAccessMethodUpdateRequestUpdatesInner) + + return err +} + +type NullableBulkAccessMethodUpdateRequestUpdatesInner struct { + value *BulkAccessMethodUpdateRequestUpdatesInner + isSet bool +} + +func (v NullableBulkAccessMethodUpdateRequestUpdatesInner) Get() *BulkAccessMethodUpdateRequestUpdatesInner { + return v.value +} + +func (v *NullableBulkAccessMethodUpdateRequestUpdatesInner) Set(val *BulkAccessMethodUpdateRequestUpdatesInner) { + v.value = val + v.isSet = true +} + +func (v NullableBulkAccessMethodUpdateRequestUpdatesInner) IsSet() bool { + return v.isSet +} + +func (v *NullableBulkAccessMethodUpdateRequestUpdatesInner) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBulkAccessMethodUpdateRequestUpdatesInner(val *BulkAccessMethodUpdateRequestUpdatesInner) *NullableBulkAccessMethodUpdateRequestUpdatesInner { + return &NullableBulkAccessMethodUpdateRequestUpdatesInner{value: val, isSet: true} +} + +func (v NullableBulkAccessMethodUpdateRequestUpdatesInner) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkAccessMethodUpdateRequestUpdatesInner) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git 
a/apigen/drs/model_bulk_access_url.go b/apigen/drs/model_bulk_access_url.go new file mode 100644 index 0000000..17e6e39 --- /dev/null +++ b/apigen/drs/model_bulk_access_url.go @@ -0,0 +1,269 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the BulkAccessURL type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BulkAccessURL{} + +// BulkAccessURL struct for BulkAccessURL +type BulkAccessURL struct { + DrsObjectId *string `json:"drs_object_id,omitempty"` + DrsAccessId *string `json:"drs_access_id,omitempty"` + // A fully resolvable URL that can be used to fetch the actual object bytes. + Url string `json:"url"` + // An optional list of headers to include in the HTTP request to `url`. These headers can be used to provide auth tokens required to fetch the object bytes. 
+ Headers []string `json:"headers,omitempty"` +} + +type _BulkAccessURL BulkAccessURL + +// NewBulkAccessURL instantiates a new BulkAccessURL object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBulkAccessURL(url string) *BulkAccessURL { + this := BulkAccessURL{} + this.Url = url + return &this +} + +// NewBulkAccessURLWithDefaults instantiates a new BulkAccessURL object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBulkAccessURLWithDefaults() *BulkAccessURL { + this := BulkAccessURL{} + return &this +} + +// GetDrsObjectId returns the DrsObjectId field value if set, zero value otherwise. +func (o *BulkAccessURL) GetDrsObjectId() string { + if o == nil || IsNil(o.DrsObjectId) { + var ret string + return ret + } + return *o.DrsObjectId +} + +// GetDrsObjectIdOk returns a tuple with the DrsObjectId field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkAccessURL) GetDrsObjectIdOk() (*string, bool) { + if o == nil || IsNil(o.DrsObjectId) { + return nil, false + } + return o.DrsObjectId, true +} + +// HasDrsObjectId returns a boolean if a field has been set. +func (o *BulkAccessURL) HasDrsObjectId() bool { + if o != nil && !IsNil(o.DrsObjectId) { + return true + } + + return false +} + +// SetDrsObjectId gets a reference to the given string and assigns it to the DrsObjectId field. +func (o *BulkAccessURL) SetDrsObjectId(v string) { + o.DrsObjectId = &v +} + +// GetDrsAccessId returns the DrsAccessId field value if set, zero value otherwise. 
+func (o *BulkAccessURL) GetDrsAccessId() string { + if o == nil || IsNil(o.DrsAccessId) { + var ret string + return ret + } + return *o.DrsAccessId +} + +// GetDrsAccessIdOk returns a tuple with the DrsAccessId field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkAccessURL) GetDrsAccessIdOk() (*string, bool) { + if o == nil || IsNil(o.DrsAccessId) { + return nil, false + } + return o.DrsAccessId, true +} + +// HasDrsAccessId returns a boolean if a field has been set. +func (o *BulkAccessURL) HasDrsAccessId() bool { + if o != nil && !IsNil(o.DrsAccessId) { + return true + } + + return false +} + +// SetDrsAccessId gets a reference to the given string and assigns it to the DrsAccessId field. +func (o *BulkAccessURL) SetDrsAccessId(v string) { + o.DrsAccessId = &v +} + +// GetUrl returns the Url field value +func (o *BulkAccessURL) GetUrl() string { + if o == nil { + var ret string + return ret + } + + return o.Url +} + +// GetUrlOk returns a tuple with the Url field value +// and a boolean to check if the value has been set. +func (o *BulkAccessURL) GetUrlOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Url, true +} + +// SetUrl sets field value +func (o *BulkAccessURL) SetUrl(v string) { + o.Url = v +} + +// GetHeaders returns the Headers field value if set, zero value otherwise. +func (o *BulkAccessURL) GetHeaders() []string { + if o == nil || IsNil(o.Headers) { + var ret []string + return ret + } + return o.Headers +} + +// GetHeadersOk returns a tuple with the Headers field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkAccessURL) GetHeadersOk() ([]string, bool) { + if o == nil || IsNil(o.Headers) { + return nil, false + } + return o.Headers, true +} + +// HasHeaders returns a boolean if a field has been set. 
+func (o *BulkAccessURL) HasHeaders() bool { + if o != nil && !IsNil(o.Headers) { + return true + } + + return false +} + +// SetHeaders gets a reference to the given []string and assigns it to the Headers field. +func (o *BulkAccessURL) SetHeaders(v []string) { + o.Headers = v +} + +func (o BulkAccessURL) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BulkAccessURL) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.DrsObjectId) { + toSerialize["drs_object_id"] = o.DrsObjectId + } + if !IsNil(o.DrsAccessId) { + toSerialize["drs_access_id"] = o.DrsAccessId + } + toSerialize["url"] = o.Url + if !IsNil(o.Headers) { + toSerialize["headers"] = o.Headers + } + return toSerialize, nil +} + +func (o *BulkAccessURL) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "url", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varBulkAccessURL := _BulkAccessURL{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varBulkAccessURL) + + if err != nil { + return err + } + + *o = BulkAccessURL(varBulkAccessURL) + + return err +} + +type NullableBulkAccessURL struct { + value *BulkAccessURL + isSet bool +} + +func (v NullableBulkAccessURL) Get() *BulkAccessURL { + return v.value +} + +func (v *NullableBulkAccessURL) Set(val *BulkAccessURL) { + v.value = val + v.isSet = true +} + +func (v NullableBulkAccessURL) IsSet() bool { + return v.isSet +} + +func (v *NullableBulkAccessURL) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBulkAccessURL(val *BulkAccessURL) *NullableBulkAccessURL { + return &NullableBulkAccessURL{value: val, isSet: true} +} + +func (v NullableBulkAccessURL) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkAccessURL) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_bulk_delete_request.go b/apigen/drs/model_bulk_delete_request.go new file mode 100644 index 0000000..7937f5e --- /dev/null +++ b/apigen/drs/model_bulk_delete_request.go @@ -0,0 +1,238 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the BulkDeleteRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BulkDeleteRequest{} + +// BulkDeleteRequest Request body for bulk delete operations +type BulkDeleteRequest struct { + // Array of DRS object IDs to delete + BulkObjectIds []string `json:"bulk_object_ids"` + // the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. + Passports []string `json:"passports,omitempty"` + // If true, delete both DRS object metadata and underlying storage data (follows server's deleteStorageDataSupported capability). If false (default), only delete DRS object metadata while preserving underlying storage data. Clients must explicitly set this to true to enable storage data deletion, ensuring intentional choice for this potentially destructive operation. + DeleteStorageData *bool `json:"delete_storage_data,omitempty"` +} + +type _BulkDeleteRequest BulkDeleteRequest + +// NewBulkDeleteRequest instantiates a new BulkDeleteRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBulkDeleteRequest(bulkObjectIds []string) *BulkDeleteRequest { + this := BulkDeleteRequest{} + this.BulkObjectIds = bulkObjectIds + var deleteStorageData bool = false + this.DeleteStorageData = &deleteStorageData + return &this +} + +// NewBulkDeleteRequestWithDefaults instantiates a new BulkDeleteRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBulkDeleteRequestWithDefaults() *BulkDeleteRequest { + this := BulkDeleteRequest{} + var deleteStorageData bool = false + this.DeleteStorageData = 
&deleteStorageData + return &this +} + +// GetBulkObjectIds returns the BulkObjectIds field value +func (o *BulkDeleteRequest) GetBulkObjectIds() []string { + if o == nil { + var ret []string + return ret + } + + return o.BulkObjectIds +} + +// GetBulkObjectIdsOk returns a tuple with the BulkObjectIds field value +// and a boolean to check if the value has been set. +func (o *BulkDeleteRequest) GetBulkObjectIdsOk() ([]string, bool) { + if o == nil { + return nil, false + } + return o.BulkObjectIds, true +} + +// SetBulkObjectIds sets field value +func (o *BulkDeleteRequest) SetBulkObjectIds(v []string) { + o.BulkObjectIds = v +} + +// GetPassports returns the Passports field value if set, zero value otherwise. +func (o *BulkDeleteRequest) GetPassports() []string { + if o == nil || IsNil(o.Passports) { + var ret []string + return ret + } + return o.Passports +} + +// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkDeleteRequest) GetPassportsOk() ([]string, bool) { + if o == nil || IsNil(o.Passports) { + return nil, false + } + return o.Passports, true +} + +// HasPassports returns a boolean if a field has been set. +func (o *BulkDeleteRequest) HasPassports() bool { + if o != nil && !IsNil(o.Passports) { + return true + } + + return false +} + +// SetPassports gets a reference to the given []string and assigns it to the Passports field. +func (o *BulkDeleteRequest) SetPassports(v []string) { + o.Passports = v +} + +// GetDeleteStorageData returns the DeleteStorageData field value if set, zero value otherwise. +func (o *BulkDeleteRequest) GetDeleteStorageData() bool { + if o == nil || IsNil(o.DeleteStorageData) { + var ret bool + return ret + } + return *o.DeleteStorageData +} + +// GetDeleteStorageDataOk returns a tuple with the DeleteStorageData field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *BulkDeleteRequest) GetDeleteStorageDataOk() (*bool, bool) { + if o == nil || IsNil(o.DeleteStorageData) { + return nil, false + } + return o.DeleteStorageData, true +} + +// HasDeleteStorageData returns a boolean if a field has been set. +func (o *BulkDeleteRequest) HasDeleteStorageData() bool { + if o != nil && !IsNil(o.DeleteStorageData) { + return true + } + + return false +} + +// SetDeleteStorageData gets a reference to the given bool and assigns it to the DeleteStorageData field. +func (o *BulkDeleteRequest) SetDeleteStorageData(v bool) { + o.DeleteStorageData = &v +} + +func (o BulkDeleteRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BulkDeleteRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["bulk_object_ids"] = o.BulkObjectIds + if !IsNil(o.Passports) { + toSerialize["passports"] = o.Passports + } + if !IsNil(o.DeleteStorageData) { + toSerialize["delete_storage_data"] = o.DeleteStorageData + } + return toSerialize, nil +} + +func (o *BulkDeleteRequest) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "bulk_object_ids", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varBulkDeleteRequest := _BulkDeleteRequest{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varBulkDeleteRequest) + + if err != nil { + return err + } + + *o = BulkDeleteRequest(varBulkDeleteRequest) + + return err +} + +type NullableBulkDeleteRequest struct { + value *BulkDeleteRequest + isSet bool +} + +func (v NullableBulkDeleteRequest) Get() *BulkDeleteRequest { + return v.value +} + +func (v *NullableBulkDeleteRequest) Set(val *BulkDeleteRequest) { + v.value = val + v.isSet = true +} + +func (v NullableBulkDeleteRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableBulkDeleteRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBulkDeleteRequest(val *BulkDeleteRequest) *NullableBulkDeleteRequest { + return &NullableBulkDeleteRequest{value: val, isSet: true} +} + +func (v NullableBulkDeleteRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkDeleteRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_bulk_object_access_id.go b/apigen/drs/model_bulk_object_access_id.go new file mode 100644 index 0000000..aa0408f --- /dev/null +++ b/apigen/drs/model_bulk_object_access_id.go @@ -0,0 +1,163 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator 
(https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" +) + +// checks if the BulkObjectAccessId type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BulkObjectAccessId{} + +// BulkObjectAccessId The object that contains object_id/access_id tuples +type BulkObjectAccessId struct { + Passports []string `json:"passports,omitempty"` + BulkObjectAccessIds []BulkObjectAccessIdBulkObjectAccessIdsInner `json:"bulk_object_access_ids,omitempty"` +} + +// NewBulkObjectAccessId instantiates a new BulkObjectAccessId object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBulkObjectAccessId() *BulkObjectAccessId { + this := BulkObjectAccessId{} + return &this +} + +// NewBulkObjectAccessIdWithDefaults instantiates a new BulkObjectAccessId object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBulkObjectAccessIdWithDefaults() *BulkObjectAccessId { + this := BulkObjectAccessId{} + return &this +} + +// GetPassports returns the Passports field value if set, zero value otherwise. +func (o *BulkObjectAccessId) GetPassports() []string { + if o == nil || IsNil(o.Passports) { + var ret []string + return ret + } + return o.Passports +} + +// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkObjectAccessId) GetPassportsOk() ([]string, bool) { + if o == nil || IsNil(o.Passports) { + return nil, false + } + return o.Passports, true +} + +// HasPassports returns a boolean if a field has been set. 
+func (o *BulkObjectAccessId) HasPassports() bool { + if o != nil && !IsNil(o.Passports) { + return true + } + + return false +} + +// SetPassports gets a reference to the given []string and assigns it to the Passports field. +func (o *BulkObjectAccessId) SetPassports(v []string) { + o.Passports = v +} + +// GetBulkObjectAccessIds returns the BulkObjectAccessIds field value if set, zero value otherwise. +func (o *BulkObjectAccessId) GetBulkObjectAccessIds() []BulkObjectAccessIdBulkObjectAccessIdsInner { + if o == nil || IsNil(o.BulkObjectAccessIds) { + var ret []BulkObjectAccessIdBulkObjectAccessIdsInner + return ret + } + return o.BulkObjectAccessIds +} + +// GetBulkObjectAccessIdsOk returns a tuple with the BulkObjectAccessIds field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkObjectAccessId) GetBulkObjectAccessIdsOk() ([]BulkObjectAccessIdBulkObjectAccessIdsInner, bool) { + if o == nil || IsNil(o.BulkObjectAccessIds) { + return nil, false + } + return o.BulkObjectAccessIds, true +} + +// HasBulkObjectAccessIds returns a boolean if a field has been set. +func (o *BulkObjectAccessId) HasBulkObjectAccessIds() bool { + if o != nil && !IsNil(o.BulkObjectAccessIds) { + return true + } + + return false +} + +// SetBulkObjectAccessIds gets a reference to the given []BulkObjectAccessIdBulkObjectAccessIdsInner and assigns it to the BulkObjectAccessIds field. 
+func (o *BulkObjectAccessId) SetBulkObjectAccessIds(v []BulkObjectAccessIdBulkObjectAccessIdsInner) { + o.BulkObjectAccessIds = v +} + +func (o BulkObjectAccessId) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BulkObjectAccessId) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Passports) { + toSerialize["passports"] = o.Passports + } + if !IsNil(o.BulkObjectAccessIds) { + toSerialize["bulk_object_access_ids"] = o.BulkObjectAccessIds + } + return toSerialize, nil +} + +type NullableBulkObjectAccessId struct { + value *BulkObjectAccessId + isSet bool +} + +func (v NullableBulkObjectAccessId) Get() *BulkObjectAccessId { + return v.value +} + +func (v *NullableBulkObjectAccessId) Set(val *BulkObjectAccessId) { + v.value = val + v.isSet = true +} + +func (v NullableBulkObjectAccessId) IsSet() bool { + return v.isSet +} + +func (v *NullableBulkObjectAccessId) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBulkObjectAccessId(val *BulkObjectAccessId) *NullableBulkObjectAccessId { + return &NullableBulkObjectAccessId{value: val, isSet: true} +} + +func (v NullableBulkObjectAccessId) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkObjectAccessId) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_bulk_object_access_id_bulk_object_access_ids_inner.go b/apigen/drs/model_bulk_object_access_id_bulk_object_access_ids_inner.go new file mode 100644 index 0000000..32fe2b0 --- /dev/null +++ b/apigen/drs/model_bulk_object_access_id_bulk_object_access_ids_inner.go @@ -0,0 +1,165 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code 
generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" +) + +// checks if the BulkObjectAccessIdBulkObjectAccessIdsInner type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BulkObjectAccessIdBulkObjectAccessIdsInner{} + +// BulkObjectAccessIdBulkObjectAccessIdsInner struct for BulkObjectAccessIdBulkObjectAccessIdsInner +type BulkObjectAccessIdBulkObjectAccessIdsInner struct { + // DRS object ID + BulkObjectId *string `json:"bulk_object_id,omitempty"` + // DRS object access ID + BulkAccessIds []string `json:"bulk_access_ids,omitempty"` +} + +// NewBulkObjectAccessIdBulkObjectAccessIdsInner instantiates a new BulkObjectAccessIdBulkObjectAccessIdsInner object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBulkObjectAccessIdBulkObjectAccessIdsInner() *BulkObjectAccessIdBulkObjectAccessIdsInner { + this := BulkObjectAccessIdBulkObjectAccessIdsInner{} + return &this +} + +// NewBulkObjectAccessIdBulkObjectAccessIdsInnerWithDefaults instantiates a new BulkObjectAccessIdBulkObjectAccessIdsInner object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBulkObjectAccessIdBulkObjectAccessIdsInnerWithDefaults() *BulkObjectAccessIdBulkObjectAccessIdsInner { + this := BulkObjectAccessIdBulkObjectAccessIdsInner{} + return &this +} + +// GetBulkObjectId returns the BulkObjectId field value if set, zero value otherwise. 
+func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) GetBulkObjectId() string { + if o == nil || IsNil(o.BulkObjectId) { + var ret string + return ret + } + return *o.BulkObjectId +} + +// GetBulkObjectIdOk returns a tuple with the BulkObjectId field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) GetBulkObjectIdOk() (*string, bool) { + if o == nil || IsNil(o.BulkObjectId) { + return nil, false + } + return o.BulkObjectId, true +} + +// HasBulkObjectId returns a boolean if a field has been set. +func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) HasBulkObjectId() bool { + if o != nil && !IsNil(o.BulkObjectId) { + return true + } + + return false +} + +// SetBulkObjectId gets a reference to the given string and assigns it to the BulkObjectId field. +func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) SetBulkObjectId(v string) { + o.BulkObjectId = &v +} + +// GetBulkAccessIds returns the BulkAccessIds field value if set, zero value otherwise. +func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) GetBulkAccessIds() []string { + if o == nil || IsNil(o.BulkAccessIds) { + var ret []string + return ret + } + return o.BulkAccessIds +} + +// GetBulkAccessIdsOk returns a tuple with the BulkAccessIds field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) GetBulkAccessIdsOk() ([]string, bool) { + if o == nil || IsNil(o.BulkAccessIds) { + return nil, false + } + return o.BulkAccessIds, true +} + +// HasBulkAccessIds returns a boolean if a field has been set. +func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) HasBulkAccessIds() bool { + if o != nil && !IsNil(o.BulkAccessIds) { + return true + } + + return false +} + +// SetBulkAccessIds gets a reference to the given []string and assigns it to the BulkAccessIds field. 
+func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) SetBulkAccessIds(v []string) { + o.BulkAccessIds = v +} + +func (o BulkObjectAccessIdBulkObjectAccessIdsInner) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BulkObjectAccessIdBulkObjectAccessIdsInner) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.BulkObjectId) { + toSerialize["bulk_object_id"] = o.BulkObjectId + } + if !IsNil(o.BulkAccessIds) { + toSerialize["bulk_access_ids"] = o.BulkAccessIds + } + return toSerialize, nil +} + +type NullableBulkObjectAccessIdBulkObjectAccessIdsInner struct { + value *BulkObjectAccessIdBulkObjectAccessIdsInner + isSet bool +} + +func (v NullableBulkObjectAccessIdBulkObjectAccessIdsInner) Get() *BulkObjectAccessIdBulkObjectAccessIdsInner { + return v.value +} + +func (v *NullableBulkObjectAccessIdBulkObjectAccessIdsInner) Set(val *BulkObjectAccessIdBulkObjectAccessIdsInner) { + v.value = val + v.isSet = true +} + +func (v NullableBulkObjectAccessIdBulkObjectAccessIdsInner) IsSet() bool { + return v.isSet +} + +func (v *NullableBulkObjectAccessIdBulkObjectAccessIdsInner) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBulkObjectAccessIdBulkObjectAccessIdsInner(val *BulkObjectAccessIdBulkObjectAccessIdsInner) *NullableBulkObjectAccessIdBulkObjectAccessIdsInner { + return &NullableBulkObjectAccessIdBulkObjectAccessIdsInner{value: val, isSet: true} +} + +func (v NullableBulkObjectAccessIdBulkObjectAccessIdsInner) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkObjectAccessIdBulkObjectAccessIdsInner) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_bulk_object_id_no_passport.go b/apigen/drs/model_bulk_object_id_no_passport.go new file mode 100644 index 0000000..f050e6c --- /dev/null +++ 
b/apigen/drs/model_bulk_object_id_no_passport.go @@ -0,0 +1,128 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" +) + +// checks if the BulkObjectIdNoPassport type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BulkObjectIdNoPassport{} + +// BulkObjectIdNoPassport The object that contains the DRS object IDs array +type BulkObjectIdNoPassport struct { + // An array of ObjectIDs. + BulkObjectIds []string `json:"bulk_object_ids,omitempty"` +} + +// NewBulkObjectIdNoPassport instantiates a new BulkObjectIdNoPassport object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBulkObjectIdNoPassport() *BulkObjectIdNoPassport { + this := BulkObjectIdNoPassport{} + return &this +} + +// NewBulkObjectIdNoPassportWithDefaults instantiates a new BulkObjectIdNoPassport object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBulkObjectIdNoPassportWithDefaults() *BulkObjectIdNoPassport { + this := BulkObjectIdNoPassport{} + return &this +} + +// GetBulkObjectIds returns the BulkObjectIds field value if set, zero value otherwise. +func (o *BulkObjectIdNoPassport) GetBulkObjectIds() []string { + if o == nil || IsNil(o.BulkObjectIds) { + var ret []string + return ret + } + return o.BulkObjectIds +} + +// GetBulkObjectIdsOk returns a tuple with the BulkObjectIds field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *BulkObjectIdNoPassport) GetBulkObjectIdsOk() ([]string, bool) { + if o == nil || IsNil(o.BulkObjectIds) { + return nil, false + } + return o.BulkObjectIds, true +} + +// HasBulkObjectIds returns a boolean if a field has been set. +func (o *BulkObjectIdNoPassport) HasBulkObjectIds() bool { + if o != nil && !IsNil(o.BulkObjectIds) { + return true + } + + return false +} + +// SetBulkObjectIds gets a reference to the given []string and assigns it to the BulkObjectIds field. +func (o *BulkObjectIdNoPassport) SetBulkObjectIds(v []string) { + o.BulkObjectIds = v +} + +func (o BulkObjectIdNoPassport) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BulkObjectIdNoPassport) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.BulkObjectIds) { + toSerialize["bulk_object_ids"] = o.BulkObjectIds + } + return toSerialize, nil +} + +type NullableBulkObjectIdNoPassport struct { + value *BulkObjectIdNoPassport + isSet bool +} + +func (v NullableBulkObjectIdNoPassport) Get() *BulkObjectIdNoPassport { + return v.value +} + +func (v *NullableBulkObjectIdNoPassport) Set(val *BulkObjectIdNoPassport) { + v.value = val + v.isSet = true +} + +func (v NullableBulkObjectIdNoPassport) IsSet() bool { + return v.isSet +} + +func (v *NullableBulkObjectIdNoPassport) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBulkObjectIdNoPassport(val *BulkObjectIdNoPassport) *NullableBulkObjectIdNoPassport { + return &NullableBulkObjectIdNoPassport{value: val, isSet: true} +} + +func (v NullableBulkObjectIdNoPassport) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkObjectIdNoPassport) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_bulk_update_access_methods_200_response.go 
b/apigen/drs/model_bulk_update_access_methods_200_response.go new file mode 100644 index 0000000..1075bc6 --- /dev/null +++ b/apigen/drs/model_bulk_update_access_methods_200_response.go @@ -0,0 +1,160 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the BulkUpdateAccessMethods200Response type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BulkUpdateAccessMethods200Response{} + +// BulkUpdateAccessMethods200Response struct for BulkUpdateAccessMethods200Response +type BulkUpdateAccessMethods200Response struct { + // Array of updated DRS objects + Objects []DrsObject `json:"objects"` +} + +type _BulkUpdateAccessMethods200Response BulkUpdateAccessMethods200Response + +// NewBulkUpdateAccessMethods200Response instantiates a new BulkUpdateAccessMethods200Response object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBulkUpdateAccessMethods200Response(objects []DrsObject) *BulkUpdateAccessMethods200Response { + this := BulkUpdateAccessMethods200Response{} + this.Objects = objects + return &this +} + +// NewBulkUpdateAccessMethods200ResponseWithDefaults instantiates a new BulkUpdateAccessMethods200Response object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBulkUpdateAccessMethods200ResponseWithDefaults() *BulkUpdateAccessMethods200Response { + this := BulkUpdateAccessMethods200Response{} + return &this +} + +// 
GetObjects returns the Objects field value +func (o *BulkUpdateAccessMethods200Response) GetObjects() []DrsObject { + if o == nil { + var ret []DrsObject + return ret + } + + return o.Objects +} + +// GetObjectsOk returns a tuple with the Objects field value +// and a boolean to check if the value has been set. +func (o *BulkUpdateAccessMethods200Response) GetObjectsOk() ([]DrsObject, bool) { + if o == nil { + return nil, false + } + return o.Objects, true +} + +// SetObjects sets field value +func (o *BulkUpdateAccessMethods200Response) SetObjects(v []DrsObject) { + o.Objects = v +} + +func (o BulkUpdateAccessMethods200Response) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BulkUpdateAccessMethods200Response) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["objects"] = o.Objects + return toSerialize, nil +} + +func (o *BulkUpdateAccessMethods200Response) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "objects", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varBulkUpdateAccessMethods200Response := _BulkUpdateAccessMethods200Response{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varBulkUpdateAccessMethods200Response) + + if err != nil { + return err + } + + *o = BulkUpdateAccessMethods200Response(varBulkUpdateAccessMethods200Response) + + return err +} + +type NullableBulkUpdateAccessMethods200Response struct { + value *BulkUpdateAccessMethods200Response + isSet bool +} + +func (v NullableBulkUpdateAccessMethods200Response) Get() *BulkUpdateAccessMethods200Response { + return v.value +} + +func (v *NullableBulkUpdateAccessMethods200Response) Set(val *BulkUpdateAccessMethods200Response) { + v.value = val + v.isSet = true +} + +func (v NullableBulkUpdateAccessMethods200Response) IsSet() bool { + return v.isSet +} + +func (v *NullableBulkUpdateAccessMethods200Response) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBulkUpdateAccessMethods200Response(val *BulkUpdateAccessMethods200Response) *NullableBulkUpdateAccessMethods200Response { + return &NullableBulkUpdateAccessMethods200Response{value: val, isSet: true} +} + +func (v NullableBulkUpdateAccessMethods200Response) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkUpdateAccessMethods200Response) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_checksum.go b/apigen/drs/model_checksum.go new file mode 100644 index 0000000..629d14b --- /dev/null +++ b/apigen/drs/model_checksum.go 
@@ -0,0 +1,189 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the Checksum type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Checksum{} + +// Checksum struct for Checksum +type Checksum struct { + // The hex-string encoded checksum for the data + Checksum string `json:"checksum"` + // The digest method used to create the checksum. The value (e.g. `sha-256`) SHOULD be listed as `Hash Name String` in the https://www.iana.org/assignments/named-information/named-information.xhtml#hash-alg[IANA Named Information Hash Algorithm Registry]. Other values MAY be used, as long as implementors are aware of the issues discussed in https://tools.ietf.org/html/rfc6920#section-9.4[RFC6920]. GA4GH may provide more explicit guidance for use of non-IANA-registered algorithms in the future. Until then, if implementers do choose such an algorithm (e.g. because it's implemented by their storage provider), they SHOULD use an existing standard `type` value such as `md5`, `etag`, `crc32c`, `trunc512`, or `sha1`. 
+ Type string `json:"type"` +} + +type _Checksum Checksum + +// NewChecksum instantiates a new Checksum object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewChecksum(checksum string, type_ string) *Checksum { + this := Checksum{} + this.Checksum = checksum + this.Type = type_ + return &this +} + +// NewChecksumWithDefaults instantiates a new Checksum object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewChecksumWithDefaults() *Checksum { + this := Checksum{} + return &this +} + +// GetChecksum returns the Checksum field value +func (o *Checksum) GetChecksum() string { + if o == nil { + var ret string + return ret + } + + return o.Checksum +} + +// GetChecksumOk returns a tuple with the Checksum field value +// and a boolean to check if the value has been set. +func (o *Checksum) GetChecksumOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Checksum, true +} + +// SetChecksum sets field value +func (o *Checksum) SetChecksum(v string) { + o.Checksum = v +} + +// GetType returns the Type field value +func (o *Checksum) GetType() string { + if o == nil { + var ret string + return ret + } + + return o.Type +} + +// GetTypeOk returns a tuple with the Type field value +// and a boolean to check if the value has been set. 
+func (o *Checksum) GetTypeOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Type, true +} + +// SetType sets field value +func (o *Checksum) SetType(v string) { + o.Type = v +} + +func (o Checksum) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Checksum) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["checksum"] = o.Checksum + toSerialize["type"] = o.Type + return toSerialize, nil +} + +func (o *Checksum) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "checksum", + "type", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varChecksum := _Checksum{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varChecksum) + + if err != nil { + return err + } + + *o = Checksum(varChecksum) + + return err +} + +type NullableChecksum struct { + value *Checksum + isSet bool +} + +func (v NullableChecksum) Get() *Checksum { + return v.value +} + +func (v *NullableChecksum) Set(val *Checksum) { + v.value = val + v.isSet = true +} + +func (v NullableChecksum) IsSet() bool { + return v.isSet +} + +func (v *NullableChecksum) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableChecksum(val *Checksum) *NullableChecksum { + return &NullableChecksum{value: val, isSet: true} +} + 
+func (v NullableChecksum) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableChecksum) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_contents_object.go b/apigen/drs/model_contents_object.go new file mode 100644 index 0000000..8059a1e --- /dev/null +++ b/apigen/drs/model_contents_object.go @@ -0,0 +1,271 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the ContentsObject type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &ContentsObject{} + +// ContentsObject struct for ContentsObject +type ContentsObject struct { + // A name declared by the bundle author that must be used when materialising this object, overriding any name directly associated with the object itself. The name must be unique within the containing bundle. This string is made up of uppercase and lowercase letters, decimal digits, hyphen, period, and underscore [A-Za-z0-9.-_]. See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282[portable filenames]. + Name string `json:"name"` + // A DRS identifier of a `DrsObject` (either a single blob or a nested bundle). If this ContentsObject is an object within a nested bundle, then the id is optional. Otherwise, the id is required. + Id *string `json:"id,omitempty"` + // A list of full DRS identifier URI paths that may be used to obtain the object. These URIs may be external to this DRS instance. 
+ DrsUri []string `json:"drs_uri,omitempty"` + // If this ContentsObject describes a nested bundle and the caller specified \"?expand=true\" on the request, then this contents array must be present and describe the objects within the nested bundle. + Contents []ContentsObject `json:"contents,omitempty"` +} + +type _ContentsObject ContentsObject + +// NewContentsObject instantiates a new ContentsObject object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewContentsObject(name string) *ContentsObject { + this := ContentsObject{} + this.Name = name + return &this +} + +// NewContentsObjectWithDefaults instantiates a new ContentsObject object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewContentsObjectWithDefaults() *ContentsObject { + this := ContentsObject{} + return &this +} + +// GetName returns the Name field value +func (o *ContentsObject) GetName() string { + if o == nil { + var ret string + return ret + } + + return o.Name +} + +// GetNameOk returns a tuple with the Name field value +// and a boolean to check if the value has been set. +func (o *ContentsObject) GetNameOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Name, true +} + +// SetName sets field value +func (o *ContentsObject) SetName(v string) { + o.Name = v +} + +// GetId returns the Id field value if set, zero value otherwise. +func (o *ContentsObject) GetId() string { + if o == nil || IsNil(o.Id) { + var ret string + return ret + } + return *o.Id +} + +// GetIdOk returns a tuple with the Id field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *ContentsObject) GetIdOk() (*string, bool) { + if o == nil || IsNil(o.Id) { + return nil, false + } + return o.Id, true +} + +// HasId returns a boolean if a field has been set. +func (o *ContentsObject) HasId() bool { + if o != nil && !IsNil(o.Id) { + return true + } + + return false +} + +// SetId gets a reference to the given string and assigns it to the Id field. +func (o *ContentsObject) SetId(v string) { + o.Id = &v +} + +// GetDrsUri returns the DrsUri field value if set, zero value otherwise. +func (o *ContentsObject) GetDrsUri() []string { + if o == nil || IsNil(o.DrsUri) { + var ret []string + return ret + } + return o.DrsUri +} + +// GetDrsUriOk returns a tuple with the DrsUri field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ContentsObject) GetDrsUriOk() ([]string, bool) { + if o == nil || IsNil(o.DrsUri) { + return nil, false + } + return o.DrsUri, true +} + +// HasDrsUri returns a boolean if a field has been set. +func (o *ContentsObject) HasDrsUri() bool { + if o != nil && !IsNil(o.DrsUri) { + return true + } + + return false +} + +// SetDrsUri gets a reference to the given []string and assigns it to the DrsUri field. +func (o *ContentsObject) SetDrsUri(v []string) { + o.DrsUri = v +} + +// GetContents returns the Contents field value if set, zero value otherwise. +func (o *ContentsObject) GetContents() []ContentsObject { + if o == nil || IsNil(o.Contents) { + var ret []ContentsObject + return ret + } + return o.Contents +} + +// GetContentsOk returns a tuple with the Contents field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ContentsObject) GetContentsOk() ([]ContentsObject, bool) { + if o == nil || IsNil(o.Contents) { + return nil, false + } + return o.Contents, true +} + +// HasContents returns a boolean if a field has been set. 
+func (o *ContentsObject) HasContents() bool { + if o != nil && !IsNil(o.Contents) { + return true + } + + return false +} + +// SetContents gets a reference to the given []ContentsObject and assigns it to the Contents field. +func (o *ContentsObject) SetContents(v []ContentsObject) { + o.Contents = v +} + +func (o ContentsObject) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o ContentsObject) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["name"] = o.Name + if !IsNil(o.Id) { + toSerialize["id"] = o.Id + } + if !IsNil(o.DrsUri) { + toSerialize["drs_uri"] = o.DrsUri + } + if !IsNil(o.Contents) { + toSerialize["contents"] = o.Contents + } + return toSerialize, nil +} + +func (o *ContentsObject) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "name", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varContentsObject := _ContentsObject{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varContentsObject) + + if err != nil { + return err + } + + *o = ContentsObject(varContentsObject) + + return err +} + +type NullableContentsObject struct { + value *ContentsObject + isSet bool +} + +func (v NullableContentsObject) Get() *ContentsObject { + return v.value +} + +func (v *NullableContentsObject) Set(val *ContentsObject) { + v.value = val + v.isSet = true +} + +func (v NullableContentsObject) IsSet() bool { + return v.isSet +} + +func (v *NullableContentsObject) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableContentsObject(val *ContentsObject) *NullableContentsObject { + return &NullableContentsObject{value: val, isSet: true} +} + +func (v NullableContentsObject) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableContentsObject) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_delete_request.go b/apigen/drs/model_delete_request.go new file mode 100644 index 0000000..42cd290 --- /dev/null +++ b/apigen/drs/model_delete_request.go @@ -0,0 +1,169 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" +) + +// checks if the DeleteRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &DeleteRequest{} + +// DeleteRequest Request body for single object delete operations +type DeleteRequest struct { + // the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. + Passports []string `json:"passports,omitempty"` + // If true, delete both DRS object metadata and underlying storage data (follows server's deleteStorageDataSupported capability). If false (default), only delete DRS object metadata while preserving underlying storage data. Clients must explicitly set this to true to enable storage data deletion, ensuring intentional choice for this potentially destructive operation. + DeleteStorageData *bool `json:"delete_storage_data,omitempty"` +} + +// NewDeleteRequest instantiates a new DeleteRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewDeleteRequest() *DeleteRequest { + this := DeleteRequest{} + var deleteStorageData bool = false + this.DeleteStorageData = &deleteStorageData + return &this +} + +// NewDeleteRequestWithDefaults instantiates a new DeleteRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewDeleteRequestWithDefaults() *DeleteRequest { + this := DeleteRequest{} + var deleteStorageData bool = false + this.DeleteStorageData = &deleteStorageData + return &this +} + +// GetPassports returns the Passports field value if set, zero value otherwise. 
+func (o *DeleteRequest) GetPassports() []string { + if o == nil || IsNil(o.Passports) { + var ret []string + return ret + } + return o.Passports +} + +// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DeleteRequest) GetPassportsOk() ([]string, bool) { + if o == nil || IsNil(o.Passports) { + return nil, false + } + return o.Passports, true +} + +// HasPassports returns a boolean if a field has been set. +func (o *DeleteRequest) HasPassports() bool { + if o != nil && !IsNil(o.Passports) { + return true + } + + return false +} + +// SetPassports gets a reference to the given []string and assigns it to the Passports field. +func (o *DeleteRequest) SetPassports(v []string) { + o.Passports = v +} + +// GetDeleteStorageData returns the DeleteStorageData field value if set, zero value otherwise. +func (o *DeleteRequest) GetDeleteStorageData() bool { + if o == nil || IsNil(o.DeleteStorageData) { + var ret bool + return ret + } + return *o.DeleteStorageData +} + +// GetDeleteStorageDataOk returns a tuple with the DeleteStorageData field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DeleteRequest) GetDeleteStorageDataOk() (*bool, bool) { + if o == nil || IsNil(o.DeleteStorageData) { + return nil, false + } + return o.DeleteStorageData, true +} + +// HasDeleteStorageData returns a boolean if a field has been set. +func (o *DeleteRequest) HasDeleteStorageData() bool { + if o != nil && !IsNil(o.DeleteStorageData) { + return true + } + + return false +} + +// SetDeleteStorageData gets a reference to the given bool and assigns it to the DeleteStorageData field. 
+func (o *DeleteRequest) SetDeleteStorageData(v bool) { + o.DeleteStorageData = &v +} + +func (o DeleteRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o DeleteRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Passports) { + toSerialize["passports"] = o.Passports + } + if !IsNil(o.DeleteStorageData) { + toSerialize["delete_storage_data"] = o.DeleteStorageData + } + return toSerialize, nil +} + +type NullableDeleteRequest struct { + value *DeleteRequest + isSet bool +} + +func (v NullableDeleteRequest) Get() *DeleteRequest { + return v.value +} + +func (v *NullableDeleteRequest) Set(val *DeleteRequest) { + v.value = val + v.isSet = true +} + +func (v NullableDeleteRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableDeleteRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableDeleteRequest(val *DeleteRequest) *NullableDeleteRequest { + return &NullableDeleteRequest{value: val, isSet: true} +} + +func (v NullableDeleteRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableDeleteRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_drs_object.go b/apigen/drs/model_drs_object.go new file mode 100644 index 0000000..14a06f4 --- /dev/null +++ b/apigen/drs/model_drs_object.go @@ -0,0 +1,573 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" + "time" + "bytes" + "fmt" +) + +// checks if the DrsObject type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &DrsObject{} + +// DrsObject struct for DrsObject +type DrsObject struct { + // An identifier unique to this `DrsObject` + Id string `json:"id"` + // A string that can be used to name a `DrsObject`. This string is made up of uppercase and lowercase letters, decimal digits, hyphen, period, and underscore [A-Za-z0-9.-_]. See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282[portable filenames]. + Name *string `json:"name,omitempty"` + // A drs:// hostname-based URI, as defined in the DRS documentation, that tells clients how to access this object. The intent of this field is to make DRS objects self-contained, and therefore easier for clients to store and pass around. For example, if you arrive at this DRS JSON by resolving a compact identifier-based DRS URI, the `self_uri` presents you with a hostname and properly encoded DRS ID for use in subsequent `access` endpoint calls. + SelfUri string `json:"self_uri"` + // For blobs, the blob size in bytes. For bundles, the cumulative size, in bytes, of items in the `contents` field. + Size int64 `json:"size"` + // Timestamp of content creation in RFC3339. (This is the creation time of the underlying content, not of the JSON object.) + CreatedTime time.Time `json:"created_time"` + // Timestamp of content update in RFC3339, identical to `created_time` in systems that do not support updates. (This is the update time of the underlying content, not of the JSON object.) + UpdatedTime *time.Time `json:"updated_time,omitempty"` + // A string representing a version. (Some systems may use checksum, a RFC3339 timestamp, or an incrementing version number.) + Version *string `json:"version,omitempty"` + // A string providing the mime-type of the `DrsObject`. 
+ MimeType *string `json:"mime_type,omitempty"` + // The checksum of the `DrsObject`. At least one checksum must be provided. For blobs, the checksum is computed over the bytes in the blob. For bundles, the checksum is computed over a sorted concatenation of the checksums of its top-level contained objects (not recursive, names not included). The list of checksums is sorted alphabetically (hex-code) before concatenation and a further checksum is performed on the concatenated checksum value. For example, if a bundle contains blobs with the following checksums: md5(blob1) = 72794b6d md5(blob2) = 5e089d29 Then the checksum of the bundle is: md5( concat( sort( md5(blob1), md5(blob2) ) ) ) = md5( concat( sort( 72794b6d, 5e089d29 ) ) ) = md5( concat( 5e089d29, 72794b6d ) ) = md5( 5e089d2972794b6d ) = f7a29a04 + Checksums []Checksum `json:"checksums"` + // The list of access methods that can be used to fetch the `DrsObject`. Required for single blobs; optional for bundles. + AccessMethods []AccessMethod `json:"access_methods,omitempty"` + // If not set, this `DrsObject` is a single blob. If set, this `DrsObject` is a bundle containing the listed `ContentsObject` s (some of which may be further nested). + Contents []ContentsObject `json:"contents,omitempty"` + // A human readable description of the `DrsObject`. + Description *string `json:"description,omitempty"` + // A list of strings that can be used to find other metadata about this `DrsObject` from external metadata sources. These aliases can be used to represent secondary accession numbers or external GUIDs. 
+ Aliases []string `json:"aliases,omitempty"` +} + +type _DrsObject DrsObject + +// NewDrsObject instantiates a new DrsObject object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewDrsObject(id string, selfUri string, size int64, createdTime time.Time, checksums []Checksum) *DrsObject { + this := DrsObject{} + this.Id = id + this.SelfUri = selfUri + this.Size = size + this.CreatedTime = createdTime + this.Checksums = checksums + return &this +} + +// NewDrsObjectWithDefaults instantiates a new DrsObject object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewDrsObjectWithDefaults() *DrsObject { + this := DrsObject{} + return &this +} + +// GetId returns the Id field value +func (o *DrsObject) GetId() string { + if o == nil { + var ret string + return ret + } + + return o.Id +} + +// GetIdOk returns a tuple with the Id field value +// and a boolean to check if the value has been set. +func (o *DrsObject) GetIdOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Id, true +} + +// SetId sets field value +func (o *DrsObject) SetId(v string) { + o.Id = v +} + +// GetName returns the Name field value if set, zero value otherwise. +func (o *DrsObject) GetName() string { + if o == nil || IsNil(o.Name) { + var ret string + return ret + } + return *o.Name +} + +// GetNameOk returns a tuple with the Name field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsObject) GetNameOk() (*string, bool) { + if o == nil || IsNil(o.Name) { + return nil, false + } + return o.Name, true +} + +// HasName returns a boolean if a field has been set. 
+func (o *DrsObject) HasName() bool { + if o != nil && !IsNil(o.Name) { + return true + } + + return false +} + +// SetName gets a reference to the given string and assigns it to the Name field. +func (o *DrsObject) SetName(v string) { + o.Name = &v +} + +// GetSelfUri returns the SelfUri field value +func (o *DrsObject) GetSelfUri() string { + if o == nil { + var ret string + return ret + } + + return o.SelfUri +} + +// GetSelfUriOk returns a tuple with the SelfUri field value +// and a boolean to check if the value has been set. +func (o *DrsObject) GetSelfUriOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.SelfUri, true +} + +// SetSelfUri sets field value +func (o *DrsObject) SetSelfUri(v string) { + o.SelfUri = v +} + +// GetSize returns the Size field value +func (o *DrsObject) GetSize() int64 { + if o == nil { + var ret int64 + return ret + } + + return o.Size +} + +// GetSizeOk returns a tuple with the Size field value +// and a boolean to check if the value has been set. +func (o *DrsObject) GetSizeOk() (*int64, bool) { + if o == nil { + return nil, false + } + return &o.Size, true +} + +// SetSize sets field value +func (o *DrsObject) SetSize(v int64) { + o.Size = v +} + +// GetCreatedTime returns the CreatedTime field value +func (o *DrsObject) GetCreatedTime() time.Time { + if o == nil { + var ret time.Time + return ret + } + + return o.CreatedTime +} + +// GetCreatedTimeOk returns a tuple with the CreatedTime field value +// and a boolean to check if the value has been set. +func (o *DrsObject) GetCreatedTimeOk() (*time.Time, bool) { + if o == nil { + return nil, false + } + return &o.CreatedTime, true +} + +// SetCreatedTime sets field value +func (o *DrsObject) SetCreatedTime(v time.Time) { + o.CreatedTime = v +} + +// GetUpdatedTime returns the UpdatedTime field value if set, zero value otherwise. 
+func (o *DrsObject) GetUpdatedTime() time.Time { + if o == nil || IsNil(o.UpdatedTime) { + var ret time.Time + return ret + } + return *o.UpdatedTime +} + +// GetUpdatedTimeOk returns a tuple with the UpdatedTime field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsObject) GetUpdatedTimeOk() (*time.Time, bool) { + if o == nil || IsNil(o.UpdatedTime) { + return nil, false + } + return o.UpdatedTime, true +} + +// HasUpdatedTime returns a boolean if a field has been set. +func (o *DrsObject) HasUpdatedTime() bool { + if o != nil && !IsNil(o.UpdatedTime) { + return true + } + + return false +} + +// SetUpdatedTime gets a reference to the given time.Time and assigns it to the UpdatedTime field. +func (o *DrsObject) SetUpdatedTime(v time.Time) { + o.UpdatedTime = &v +} + +// GetVersion returns the Version field value if set, zero value otherwise. +func (o *DrsObject) GetVersion() string { + if o == nil || IsNil(o.Version) { + var ret string + return ret + } + return *o.Version +} + +// GetVersionOk returns a tuple with the Version field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsObject) GetVersionOk() (*string, bool) { + if o == nil || IsNil(o.Version) { + return nil, false + } + return o.Version, true +} + +// HasVersion returns a boolean if a field has been set. +func (o *DrsObject) HasVersion() bool { + if o != nil && !IsNil(o.Version) { + return true + } + + return false +} + +// SetVersion gets a reference to the given string and assigns it to the Version field. +func (o *DrsObject) SetVersion(v string) { + o.Version = &v +} + +// GetMimeType returns the MimeType field value if set, zero value otherwise. 
+func (o *DrsObject) GetMimeType() string { + if o == nil || IsNil(o.MimeType) { + var ret string + return ret + } + return *o.MimeType +} + +// GetMimeTypeOk returns a tuple with the MimeType field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsObject) GetMimeTypeOk() (*string, bool) { + if o == nil || IsNil(o.MimeType) { + return nil, false + } + return o.MimeType, true +} + +// HasMimeType returns a boolean if a field has been set. +func (o *DrsObject) HasMimeType() bool { + if o != nil && !IsNil(o.MimeType) { + return true + } + + return false +} + +// SetMimeType gets a reference to the given string and assigns it to the MimeType field. +func (o *DrsObject) SetMimeType(v string) { + o.MimeType = &v +} + +// GetChecksums returns the Checksums field value +func (o *DrsObject) GetChecksums() []Checksum { + if o == nil { + var ret []Checksum + return ret + } + + return o.Checksums +} + +// GetChecksumsOk returns a tuple with the Checksums field value +// and a boolean to check if the value has been set. +func (o *DrsObject) GetChecksumsOk() ([]Checksum, bool) { + if o == nil { + return nil, false + } + return o.Checksums, true +} + +// SetChecksums sets field value +func (o *DrsObject) SetChecksums(v []Checksum) { + o.Checksums = v +} + +// GetAccessMethods returns the AccessMethods field value if set, zero value otherwise. +func (o *DrsObject) GetAccessMethods() []AccessMethod { + if o == nil || IsNil(o.AccessMethods) { + var ret []AccessMethod + return ret + } + return o.AccessMethods +} + +// GetAccessMethodsOk returns a tuple with the AccessMethods field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsObject) GetAccessMethodsOk() ([]AccessMethod, bool) { + if o == nil || IsNil(o.AccessMethods) { + return nil, false + } + return o.AccessMethods, true +} + +// HasAccessMethods returns a boolean if a field has been set. 
+func (o *DrsObject) HasAccessMethods() bool { + if o != nil && !IsNil(o.AccessMethods) { + return true + } + + return false +} + +// SetAccessMethods gets a reference to the given []AccessMethod and assigns it to the AccessMethods field. +func (o *DrsObject) SetAccessMethods(v []AccessMethod) { + o.AccessMethods = v +} + +// GetContents returns the Contents field value if set, zero value otherwise. +func (o *DrsObject) GetContents() []ContentsObject { + if o == nil || IsNil(o.Contents) { + var ret []ContentsObject + return ret + } + return o.Contents +} + +// GetContentsOk returns a tuple with the Contents field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsObject) GetContentsOk() ([]ContentsObject, bool) { + if o == nil || IsNil(o.Contents) { + return nil, false + } + return o.Contents, true +} + +// HasContents returns a boolean if a field has been set. +func (o *DrsObject) HasContents() bool { + if o != nil && !IsNil(o.Contents) { + return true + } + + return false +} + +// SetContents gets a reference to the given []ContentsObject and assigns it to the Contents field. +func (o *DrsObject) SetContents(v []ContentsObject) { + o.Contents = v +} + +// GetDescription returns the Description field value if set, zero value otherwise. +func (o *DrsObject) GetDescription() string { + if o == nil || IsNil(o.Description) { + var ret string + return ret + } + return *o.Description +} + +// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsObject) GetDescriptionOk() (*string, bool) { + if o == nil || IsNil(o.Description) { + return nil, false + } + return o.Description, true +} + +// HasDescription returns a boolean if a field has been set. 
+func (o *DrsObject) HasDescription() bool { + if o != nil && !IsNil(o.Description) { + return true + } + + return false +} + +// SetDescription gets a reference to the given string and assigns it to the Description field. +func (o *DrsObject) SetDescription(v string) { + o.Description = &v +} + +// GetAliases returns the Aliases field value if set, zero value otherwise. +func (o *DrsObject) GetAliases() []string { + if o == nil || IsNil(o.Aliases) { + var ret []string + return ret + } + return o.Aliases +} + +// GetAliasesOk returns a tuple with the Aliases field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsObject) GetAliasesOk() ([]string, bool) { + if o == nil || IsNil(o.Aliases) { + return nil, false + } + return o.Aliases, true +} + +// HasAliases returns a boolean if a field has been set. +func (o *DrsObject) HasAliases() bool { + if o != nil && !IsNil(o.Aliases) { + return true + } + + return false +} + +// SetAliases gets a reference to the given []string and assigns it to the Aliases field. 
+func (o *DrsObject) SetAliases(v []string) { + o.Aliases = v +} + +func (o DrsObject) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o DrsObject) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["id"] = o.Id + if !IsNil(o.Name) { + toSerialize["name"] = o.Name + } + toSerialize["self_uri"] = o.SelfUri + toSerialize["size"] = o.Size + toSerialize["created_time"] = o.CreatedTime + if !IsNil(o.UpdatedTime) { + toSerialize["updated_time"] = o.UpdatedTime + } + if !IsNil(o.Version) { + toSerialize["version"] = o.Version + } + if !IsNil(o.MimeType) { + toSerialize["mime_type"] = o.MimeType + } + toSerialize["checksums"] = o.Checksums + if !IsNil(o.AccessMethods) { + toSerialize["access_methods"] = o.AccessMethods + } + if !IsNil(o.Contents) { + toSerialize["contents"] = o.Contents + } + if !IsNil(o.Description) { + toSerialize["description"] = o.Description + } + if !IsNil(o.Aliases) { + toSerialize["aliases"] = o.Aliases + } + return toSerialize, nil +} + +func (o *DrsObject) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "id", + "self_uri", + "size", + "created_time", + "checksums", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varDrsObject := _DrsObject{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varDrsObject) + + if err != nil { + return err + } + + *o = DrsObject(varDrsObject) + + return err +} + +type NullableDrsObject struct { + value *DrsObject + isSet bool +} + +func (v NullableDrsObject) Get() *DrsObject { + return v.value +} + +func (v *NullableDrsObject) Set(val *DrsObject) { + v.value = val + v.isSet = true +} + +func (v NullableDrsObject) IsSet() bool { + return v.isSet +} + +func (v *NullableDrsObject) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableDrsObject(val *DrsObject) *NullableDrsObject { + return &NullableDrsObject{value: val, isSet: true} +} + +func (v NullableDrsObject) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableDrsObject) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_drs_object_candidate.go b/apigen/drs/model_drs_object_candidate.go new file mode 100644 index 0000000..8bf6ee2 --- /dev/null +++ b/apigen/drs/model_drs_object_candidate.go @@ -0,0 +1,448 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the DrsObjectCandidate type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &DrsObjectCandidate{} + +// DrsObjectCandidate struct for DrsObjectCandidate +type DrsObjectCandidate struct { + // A string that can be used to name a `DrsObject`. This string is made up of uppercase and lowercase letters, decimal digits, hyphen, period, and underscore [A-Za-z0-9.-_]. See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282[portable filenames]. + Name *string `json:"name,omitempty"` + // For blobs, the blob size in bytes. For bundles, the cumulative size, in bytes, of items in the `contents` field. + Size int64 `json:"size"` + // A string representing a version. (Some systems may use checksum, a RFC3339 timestamp, or an incrementing version number.) + Version *string `json:"version,omitempty"` + // A string providing the mime-type of the `DrsObject`. + MimeType *string `json:"mime_type,omitempty"` + // The checksum of the `DrsObject`. At least one checksum must be provided. For blobs, the checksum is computed over the bytes in the blob. For bundles, the checksum is computed over a sorted concatenation of the checksums of its top-level contained objects (not recursive, names not included). The list of checksums is sorted alphabetically (hex-code) before concatenation and a further checksum is performed on the concatenated checksum value. For example, if a bundle contains blobs with the following checksums: md5(blob1) = 72794b6d md5(blob2) = 5e089d29 Then the checksum of the bundle is: md5( concat( sort( md5(blob1), md5(blob2) ) ) ) = md5( concat( sort( 72794b6d, 5e089d29 ) ) ) = md5( concat( 5e089d29, 72794b6d ) ) = md5( 5e089d2972794b6d ) = f7a29a04 + Checksums []Checksum `json:"checksums"` + // The list of access methods that can be used to fetch the `DrsObject`. Required for single blobs; optional for bundles. 
+ AccessMethods []AccessMethod `json:"access_methods,omitempty"` + // If not set, this `DrsObject` is a single blob. If set, this `DrsObject` is a bundle containing the listed `ContentsObject` s (some of which may be further nested). + Contents []ContentsObject `json:"contents,omitempty"` + // A human readable description of the `DrsObject`. + Description *string `json:"description,omitempty"` + // A list of strings that can be used to find other metadata about this `DrsObject` from external metadata sources. These aliases can be used to represent secondary accession numbers or external GUIDs. + Aliases []string `json:"aliases,omitempty"` +} + +type _DrsObjectCandidate DrsObjectCandidate + +// NewDrsObjectCandidate instantiates a new DrsObjectCandidate object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewDrsObjectCandidate(size int64, checksums []Checksum) *DrsObjectCandidate { + this := DrsObjectCandidate{} + this.Size = size + this.Checksums = checksums + return &this +} + +// NewDrsObjectCandidateWithDefaults instantiates a new DrsObjectCandidate object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewDrsObjectCandidateWithDefaults() *DrsObjectCandidate { + this := DrsObjectCandidate{} + return &this +} + +// GetName returns the Name field value if set, zero value otherwise. +func (o *DrsObjectCandidate) GetName() string { + if o == nil || IsNil(o.Name) { + var ret string + return ret + } + return *o.Name +} + +// GetNameOk returns a tuple with the Name field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *DrsObjectCandidate) GetNameOk() (*string, bool) { + if o == nil || IsNil(o.Name) { + return nil, false + } + return o.Name, true +} + +// HasName returns a boolean if a field has been set. +func (o *DrsObjectCandidate) HasName() bool { + if o != nil && !IsNil(o.Name) { + return true + } + + return false +} + +// SetName gets a reference to the given string and assigns it to the Name field. +func (o *DrsObjectCandidate) SetName(v string) { + o.Name = &v +} + +// GetSize returns the Size field value +func (o *DrsObjectCandidate) GetSize() int64 { + if o == nil { + var ret int64 + return ret + } + + return o.Size +} + +// GetSizeOk returns a tuple with the Size field value +// and a boolean to check if the value has been set. +func (o *DrsObjectCandidate) GetSizeOk() (*int64, bool) { + if o == nil { + return nil, false + } + return &o.Size, true +} + +// SetSize sets field value +func (o *DrsObjectCandidate) SetSize(v int64) { + o.Size = v +} + +// GetVersion returns the Version field value if set, zero value otherwise. +func (o *DrsObjectCandidate) GetVersion() string { + if o == nil || IsNil(o.Version) { + var ret string + return ret + } + return *o.Version +} + +// GetVersionOk returns a tuple with the Version field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsObjectCandidate) GetVersionOk() (*string, bool) { + if o == nil || IsNil(o.Version) { + return nil, false + } + return o.Version, true +} + +// HasVersion returns a boolean if a field has been set. +func (o *DrsObjectCandidate) HasVersion() bool { + if o != nil && !IsNil(o.Version) { + return true + } + + return false +} + +// SetVersion gets a reference to the given string and assigns it to the Version field. +func (o *DrsObjectCandidate) SetVersion(v string) { + o.Version = &v +} + +// GetMimeType returns the MimeType field value if set, zero value otherwise. 
+func (o *DrsObjectCandidate) GetMimeType() string { + if o == nil || IsNil(o.MimeType) { + var ret string + return ret + } + return *o.MimeType +} + +// GetMimeTypeOk returns a tuple with the MimeType field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsObjectCandidate) GetMimeTypeOk() (*string, bool) { + if o == nil || IsNil(o.MimeType) { + return nil, false + } + return o.MimeType, true +} + +// HasMimeType returns a boolean if a field has been set. +func (o *DrsObjectCandidate) HasMimeType() bool { + if o != nil && !IsNil(o.MimeType) { + return true + } + + return false +} + +// SetMimeType gets a reference to the given string and assigns it to the MimeType field. +func (o *DrsObjectCandidate) SetMimeType(v string) { + o.MimeType = &v +} + +// GetChecksums returns the Checksums field value +func (o *DrsObjectCandidate) GetChecksums() []Checksum { + if o == nil { + var ret []Checksum + return ret + } + + return o.Checksums +} + +// GetChecksumsOk returns a tuple with the Checksums field value +// and a boolean to check if the value has been set. +func (o *DrsObjectCandidate) GetChecksumsOk() ([]Checksum, bool) { + if o == nil { + return nil, false + } + return o.Checksums, true +} + +// SetChecksums sets field value +func (o *DrsObjectCandidate) SetChecksums(v []Checksum) { + o.Checksums = v +} + +// GetAccessMethods returns the AccessMethods field value if set, zero value otherwise. +func (o *DrsObjectCandidate) GetAccessMethods() []AccessMethod { + if o == nil || IsNil(o.AccessMethods) { + var ret []AccessMethod + return ret + } + return o.AccessMethods +} + +// GetAccessMethodsOk returns a tuple with the AccessMethods field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *DrsObjectCandidate) GetAccessMethodsOk() ([]AccessMethod, bool) { + if o == nil || IsNil(o.AccessMethods) { + return nil, false + } + return o.AccessMethods, true +} + +// HasAccessMethods returns a boolean if a field has been set. +func (o *DrsObjectCandidate) HasAccessMethods() bool { + if o != nil && !IsNil(o.AccessMethods) { + return true + } + + return false +} + +// SetAccessMethods gets a reference to the given []AccessMethod and assigns it to the AccessMethods field. +func (o *DrsObjectCandidate) SetAccessMethods(v []AccessMethod) { + o.AccessMethods = v +} + +// GetContents returns the Contents field value if set, zero value otherwise. +func (o *DrsObjectCandidate) GetContents() []ContentsObject { + if o == nil || IsNil(o.Contents) { + var ret []ContentsObject + return ret + } + return o.Contents +} + +// GetContentsOk returns a tuple with the Contents field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsObjectCandidate) GetContentsOk() ([]ContentsObject, bool) { + if o == nil || IsNil(o.Contents) { + return nil, false + } + return o.Contents, true +} + +// HasContents returns a boolean if a field has been set. +func (o *DrsObjectCandidate) HasContents() bool { + if o != nil && !IsNil(o.Contents) { + return true + } + + return false +} + +// SetContents gets a reference to the given []ContentsObject and assigns it to the Contents field. +func (o *DrsObjectCandidate) SetContents(v []ContentsObject) { + o.Contents = v +} + +// GetDescription returns the Description field value if set, zero value otherwise. +func (o *DrsObjectCandidate) GetDescription() string { + if o == nil || IsNil(o.Description) { + var ret string + return ret + } + return *o.Description +} + +// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *DrsObjectCandidate) GetDescriptionOk() (*string, bool) { + if o == nil || IsNil(o.Description) { + return nil, false + } + return o.Description, true +} + +// HasDescription returns a boolean if a field has been set. +func (o *DrsObjectCandidate) HasDescription() bool { + if o != nil && !IsNil(o.Description) { + return true + } + + return false +} + +// SetDescription gets a reference to the given string and assigns it to the Description field. +func (o *DrsObjectCandidate) SetDescription(v string) { + o.Description = &v +} + +// GetAliases returns the Aliases field value if set, zero value otherwise. +func (o *DrsObjectCandidate) GetAliases() []string { + if o == nil || IsNil(o.Aliases) { + var ret []string + return ret + } + return o.Aliases +} + +// GetAliasesOk returns a tuple with the Aliases field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsObjectCandidate) GetAliasesOk() ([]string, bool) { + if o == nil || IsNil(o.Aliases) { + return nil, false + } + return o.Aliases, true +} + +// HasAliases returns a boolean if a field has been set. +func (o *DrsObjectCandidate) HasAliases() bool { + if o != nil && !IsNil(o.Aliases) { + return true + } + + return false +} + +// SetAliases gets a reference to the given []string and assigns it to the Aliases field. 
+func (o *DrsObjectCandidate) SetAliases(v []string) { + o.Aliases = v +} + +func (o DrsObjectCandidate) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o DrsObjectCandidate) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Name) { + toSerialize["name"] = o.Name + } + toSerialize["size"] = o.Size + if !IsNil(o.Version) { + toSerialize["version"] = o.Version + } + if !IsNil(o.MimeType) { + toSerialize["mime_type"] = o.MimeType + } + toSerialize["checksums"] = o.Checksums + if !IsNil(o.AccessMethods) { + toSerialize["access_methods"] = o.AccessMethods + } + if !IsNil(o.Contents) { + toSerialize["contents"] = o.Contents + } + if !IsNil(o.Description) { + toSerialize["description"] = o.Description + } + if !IsNil(o.Aliases) { + toSerialize["aliases"] = o.Aliases + } + return toSerialize, nil +} + +func (o *DrsObjectCandidate) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "size", + "checksums", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varDrsObjectCandidate := _DrsObjectCandidate{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varDrsObjectCandidate) + + if err != nil { + return err + } + + *o = DrsObjectCandidate(varDrsObjectCandidate) + + return err +} + +type NullableDrsObjectCandidate struct { + value *DrsObjectCandidate + isSet bool +} + +func (v NullableDrsObjectCandidate) Get() *DrsObjectCandidate { + return v.value +} + +func (v *NullableDrsObjectCandidate) Set(val *DrsObjectCandidate) { + v.value = val + v.isSet = true +} + +func (v NullableDrsObjectCandidate) IsSet() bool { + return v.isSet +} + +func (v *NullableDrsObjectCandidate) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableDrsObjectCandidate(val *DrsObjectCandidate) *NullableDrsObjectCandidate { + return &NullableDrsObjectCandidate{value: val, isSet: true} +} + +func (v NullableDrsObjectCandidate) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableDrsObjectCandidate) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_drs_service.go b/apigen/drs/model_drs_service.go new file mode 100644 index 0000000..f87786a --- /dev/null +++ b/apigen/drs/model_drs_service.go @@ -0,0 +1,224 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator 
(https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the DrsService type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &DrsService{} + +// DrsService struct for DrsService +type DrsService struct { + // DEPRECATED - In 2.0 this will move to under the drs section of service info and not at the root level. The max length the bulk request endpoints can handle (>= 1) before generating a 413 error e.g. how long can the arrays bulk_object_ids and bulk_object_access_ids be for this server. + MaxBulkRequestLength int32 `json:"maxBulkRequestLength"` + Type DrsServiceType `json:"type"` + Drs *DrsServiceDrs `json:"drs,omitempty"` +} + +type _DrsService DrsService + +// NewDrsService instantiates a new DrsService object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewDrsService(maxBulkRequestLength int32, type_ DrsServiceType) *DrsService { + this := DrsService{} + this.MaxBulkRequestLength = maxBulkRequestLength + this.Type = type_ + return &this +} + +// NewDrsServiceWithDefaults instantiates a new DrsService object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewDrsServiceWithDefaults() *DrsService { + this := DrsService{} + return &this +} + +// GetMaxBulkRequestLength returns the MaxBulkRequestLength field value +func (o *DrsService) GetMaxBulkRequestLength() int32 { + if o == nil { + var ret int32 + return ret + } + + return o.MaxBulkRequestLength +} + +// GetMaxBulkRequestLengthOk returns a tuple with the MaxBulkRequestLength field value +// and a boolean to check if the value has been set. 
+func (o *DrsService) GetMaxBulkRequestLengthOk() (*int32, bool) { + if o == nil { + return nil, false + } + return &o.MaxBulkRequestLength, true +} + +// SetMaxBulkRequestLength sets field value +func (o *DrsService) SetMaxBulkRequestLength(v int32) { + o.MaxBulkRequestLength = v +} + +// GetType returns the Type field value +func (o *DrsService) GetType() DrsServiceType { + if o == nil { + var ret DrsServiceType + return ret + } + + return o.Type +} + +// GetTypeOk returns a tuple with the Type field value +// and a boolean to check if the value has been set. +func (o *DrsService) GetTypeOk() (*DrsServiceType, bool) { + if o == nil { + return nil, false + } + return &o.Type, true +} + +// SetType sets field value +func (o *DrsService) SetType(v DrsServiceType) { + o.Type = v +} + +// GetDrs returns the Drs field value if set, zero value otherwise. +func (o *DrsService) GetDrs() DrsServiceDrs { + if o == nil || IsNil(o.Drs) { + var ret DrsServiceDrs + return ret + } + return *o.Drs +} + +// GetDrsOk returns a tuple with the Drs field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsService) GetDrsOk() (*DrsServiceDrs, bool) { + if o == nil || IsNil(o.Drs) { + return nil, false + } + return o.Drs, true +} + +// HasDrs returns a boolean if a field has been set. +func (o *DrsService) HasDrs() bool { + if o != nil && !IsNil(o.Drs) { + return true + } + + return false +} + +// SetDrs gets a reference to the given DrsServiceDrs and assigns it to the Drs field. 
+func (o *DrsService) SetDrs(v DrsServiceDrs) { + o.Drs = &v +} + +func (o DrsService) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o DrsService) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["maxBulkRequestLength"] = o.MaxBulkRequestLength + toSerialize["type"] = o.Type + if !IsNil(o.Drs) { + toSerialize["drs"] = o.Drs + } + return toSerialize, nil +} + +func (o *DrsService) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "maxBulkRequestLength", + "type", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varDrsService := _DrsService{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varDrsService) + + if err != nil { + return err + } + + *o = DrsService(varDrsService) + + return err +} + +type NullableDrsService struct { + value *DrsService + isSet bool +} + +func (v NullableDrsService) Get() *DrsService { + return v.value +} + +func (v *NullableDrsService) Set(val *DrsService) { + v.value = val + v.isSet = true +} + +func (v NullableDrsService) IsSet() bool { + return v.isSet +} + +func (v *NullableDrsService) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableDrsService(val *DrsService) *NullableDrsService { + return &NullableDrsService{value: val, isSet: true} +} + +func (v 
NullableDrsService) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableDrsService) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_drs_service_drs.go b/apigen/drs/model_drs_service_drs.go new file mode 100644 index 0000000..fe7ba14 --- /dev/null +++ b/apigen/drs/model_drs_service_drs.go @@ -0,0 +1,825 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the DrsServiceDrs type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &DrsServiceDrs{} + +// DrsServiceDrs struct for DrsServiceDrs +type DrsServiceDrs struct { + // The max length the bulk request endpoints can handle (>= 1) before generating a 413 error e.g. how long can the arrays bulk_object_ids and bulk_object_access_ids be for this server. + MaxBulkRequestLength int32 `json:"maxBulkRequestLength"` + // The total number of objects in this DRS service. + ObjectCount *int32 `json:"objectCount,omitempty"` + // The total size of all objects in this DRS service in bytes. As a general best practice, file bytes are counted for each unique file and not cloud mirrors or other redundant copies. + TotalObjectSize *int32 `json:"totalObjectSize,omitempty"` + // Indicates whether this DRS server supports upload request operations via the `/upload-request` endpoint. If true, clients can request upload methods and credentials for uploading files. If false or missing, the server does not support upload request coordination. 
+ UploadRequestSupported *bool `json:"uploadRequestSupported,omitempty"` + // Indicates whether this DRS server supports object registration operations via the `/objects/register` endpoint. If true, clients can register uploaded files or existing data as DRS objects. If false or missing, the server does not support object registration. + ObjectRegistrationSupported *bool `json:"objectRegistrationSupported,omitempty"` + // List of upload methods supported by this DRS server. Only present when uploadRequestSupported is true. Clients can use this information to determine which upload methods are available before making upload requests. - **s3**: Direct S3 upload with temporary AWS credentials - **gs**: Google Cloud Storage upload with access tokens - **https**: Presigned POST URL for HTTP uploads - **ftp**: File Transfer Protocol uploads - **sftp**: Secure File Transfer Protocol uploads - **gsiftp**: GridFTP secure file transfer - **globus**: Globus transfer service for high-performance data movement + SupportedUploadMethods []string `json:"supportedUploadMethods,omitempty"` + // Maximum file size in bytes that can be uploaded via the upload endpoints. Only present when uploadRequestSupported is true. If not specified, there is no explicit size limit. + MaxUploadSize *int64 `json:"maxUploadSize,omitempty"` + // Maximum number of files that can be included in a single upload request. Only present when uploadRequestSupported is true. If not specified, defaults to the same value as maxBulkRequestLength. + MaxUploadRequestLength *int32 `json:"maxUploadRequestLength,omitempty"` + // Maximum number of candidate objects that can be included in a single registration request. Only present when objectRegistrationSupported is true. If not specified, defaults to the same value as maxBulkRequestLength. + MaxRegisterRequestLength *int32 `json:"maxRegisterRequestLength,omitempty"` + // Indicates whether this DRS server validates uploaded file checksums against the provided metadata. 
If true, the server will verify that uploaded files match their declared checksums and may reject uploads with mismatches. If false or missing, the server does not perform checksum validation and relies on client-provided metadata. Only present when uploadRequestSupported or objectRegistrationSupported is true. + ValidateUploadChecksums *bool `json:"validateUploadChecksums,omitempty"` + // Indicates whether this DRS server validates uploaded file sizes against the provided metadata. If true, the server will verify that uploaded files match their declared sizes and may reject uploads with mismatches. If false or missing, the server does not perform file size validation and relies on client-provided metadata. Only present when uploadRequestSupported or objectRegistrationSupported is true. + ValidateUploadFileSizes *bool `json:"validateUploadFileSizes,omitempty"` + // Indicates whether this DRS server supports storing files from the same upload request under a common prefix or folder structure. If true, the server will organize related files together in storage, enabling bioinformatics workflows that expect co-located files (e.g., CRAM + CRAI, VCF + TBI). If false or missing, the server may distribute files across different storage locations or prefixes. Only present when uploadRequestSupported is true. This feature is particularly valuable for genomics tools like samtools that expect index files to be co-located with data files. + RelatedFileStorageSupported *bool `json:"relatedFileStorageSupported,omitempty"` + // Indicates whether this DRS server supports delete operations via the delete endpoints. If true, clients can delete DRS objects using POST requests to `/objects/{object_id}/delete` and `/objects/delete`. If false or missing, the server does not support delete operations and will return 404 for delete endpoint requests. Like upload functionality, delete support is entirely optional and servers remain DRS compliant without it. 
+ DeleteSupported *bool `json:"deleteSupported,omitempty"` + // Maximum number of objects that can be deleted in a single bulk delete request via `/objects/delete`. Only present when deleteSupported is true. If not specified when delete is supported, defaults to the same value as maxBulkRequestLength. Servers may enforce lower limits for delete operations compared to other bulk operations for safety reasons. + MaxBulkDeleteLength *int32 `json:"maxBulkDeleteLength,omitempty"` + // Indicates whether this DRS server supports attempting to delete underlying storage data when clients request it. If true, the server will attempt to delete both metadata and storage files when `delete_storage_data: true` is specified in delete requests. If false or missing, the server only supports metadata deletion regardless of client request, preserving underlying storage data. Only present when deleteSupported is true. This is a capability flag indicating what the server can attempt, not a default behavior setting. Note: Storage deletion attempts may fail due to permissions, network issues, or storage service errors. + DeleteStorageDataSupported *bool `json:"deleteStorageDataSupported,omitempty"` + // Indicates whether this DRS server supports updating access methods for existing objects. If true, clients can update access methods using `/objects/{object_id}/access-methods` and `/objects/access-methods` endpoints. If false or missing, the server does not support access method updates. + AccessMethodUpdateSupported *bool `json:"accessMethodUpdateSupported,omitempty"` + // Maximum number of objects that can be updated in a single bulk access method update request. Only present when accessMethodUpdateSupported is true. If not specified, defaults to maxBulkRequestLength. + MaxBulkAccessMethodUpdateLength *int32 `json:"maxBulkAccessMethodUpdateLength,omitempty"` + // Indicates whether this DRS server validates new access methods by verifying they point to the same data. 
If true, the server will attempt to verify checksums/content before updating access methods. If false or missing, the server trusts client-provided access methods without validation. Only present when accessMethodUpdateSupported is true. + ValidateAccessMethodUpdates *bool `json:"validateAccessMethodUpdates,omitempty"` +} + +type _DrsServiceDrs DrsServiceDrs + +// NewDrsServiceDrs instantiates a new DrsServiceDrs object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewDrsServiceDrs(maxBulkRequestLength int32) *DrsServiceDrs { + this := DrsServiceDrs{} + this.MaxBulkRequestLength = maxBulkRequestLength + var uploadRequestSupported bool = false + this.UploadRequestSupported = &uploadRequestSupported + var objectRegistrationSupported bool = false + this.ObjectRegistrationSupported = &objectRegistrationSupported + var validateUploadChecksums bool = false + this.ValidateUploadChecksums = &validateUploadChecksums + var validateUploadFileSizes bool = false + this.ValidateUploadFileSizes = &validateUploadFileSizes + var relatedFileStorageSupported bool = false + this.RelatedFileStorageSupported = &relatedFileStorageSupported + var deleteSupported bool = false + this.DeleteSupported = &deleteSupported + var deleteStorageDataSupported bool = false + this.DeleteStorageDataSupported = &deleteStorageDataSupported + var accessMethodUpdateSupported bool = false + this.AccessMethodUpdateSupported = &accessMethodUpdateSupported + var validateAccessMethodUpdates bool = false + this.ValidateAccessMethodUpdates = &validateAccessMethodUpdates + return &this +} + +// NewDrsServiceDrsWithDefaults instantiates a new DrsServiceDrs object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func 
NewDrsServiceDrsWithDefaults() *DrsServiceDrs { + this := DrsServiceDrs{} + var uploadRequestSupported bool = false + this.UploadRequestSupported = &uploadRequestSupported + var objectRegistrationSupported bool = false + this.ObjectRegistrationSupported = &objectRegistrationSupported + var validateUploadChecksums bool = false + this.ValidateUploadChecksums = &validateUploadChecksums + var validateUploadFileSizes bool = false + this.ValidateUploadFileSizes = &validateUploadFileSizes + var relatedFileStorageSupported bool = false + this.RelatedFileStorageSupported = &relatedFileStorageSupported + var deleteSupported bool = false + this.DeleteSupported = &deleteSupported + var deleteStorageDataSupported bool = false + this.DeleteStorageDataSupported = &deleteStorageDataSupported + var accessMethodUpdateSupported bool = false + this.AccessMethodUpdateSupported = &accessMethodUpdateSupported + var validateAccessMethodUpdates bool = false + this.ValidateAccessMethodUpdates = &validateAccessMethodUpdates + return &this +} + +// GetMaxBulkRequestLength returns the MaxBulkRequestLength field value +func (o *DrsServiceDrs) GetMaxBulkRequestLength() int32 { + if o == nil { + var ret int32 + return ret + } + + return o.MaxBulkRequestLength +} + +// GetMaxBulkRequestLengthOk returns a tuple with the MaxBulkRequestLength field value +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetMaxBulkRequestLengthOk() (*int32, bool) { + if o == nil { + return nil, false + } + return &o.MaxBulkRequestLength, true +} + +// SetMaxBulkRequestLength sets field value +func (o *DrsServiceDrs) SetMaxBulkRequestLength(v int32) { + o.MaxBulkRequestLength = v +} + +// GetObjectCount returns the ObjectCount field value if set, zero value otherwise. 
+func (o *DrsServiceDrs) GetObjectCount() int32 { + if o == nil || IsNil(o.ObjectCount) { + var ret int32 + return ret + } + return *o.ObjectCount +} + +// GetObjectCountOk returns a tuple with the ObjectCount field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetObjectCountOk() (*int32, bool) { + if o == nil || IsNil(o.ObjectCount) { + return nil, false + } + return o.ObjectCount, true +} + +// HasObjectCount returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasObjectCount() bool { + if o != nil && !IsNil(o.ObjectCount) { + return true + } + + return false +} + +// SetObjectCount gets a reference to the given int32 and assigns it to the ObjectCount field. +func (o *DrsServiceDrs) SetObjectCount(v int32) { + o.ObjectCount = &v +} + +// GetTotalObjectSize returns the TotalObjectSize field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetTotalObjectSize() int32 { + if o == nil || IsNil(o.TotalObjectSize) { + var ret int32 + return ret + } + return *o.TotalObjectSize +} + +// GetTotalObjectSizeOk returns a tuple with the TotalObjectSize field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetTotalObjectSizeOk() (*int32, bool) { + if o == nil || IsNil(o.TotalObjectSize) { + return nil, false + } + return o.TotalObjectSize, true +} + +// HasTotalObjectSize returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasTotalObjectSize() bool { + if o != nil && !IsNil(o.TotalObjectSize) { + return true + } + + return false +} + +// SetTotalObjectSize gets a reference to the given int32 and assigns it to the TotalObjectSize field. +func (o *DrsServiceDrs) SetTotalObjectSize(v int32) { + o.TotalObjectSize = &v +} + +// GetUploadRequestSupported returns the UploadRequestSupported field value if set, zero value otherwise. 
+func (o *DrsServiceDrs) GetUploadRequestSupported() bool { + if o == nil || IsNil(o.UploadRequestSupported) { + var ret bool + return ret + } + return *o.UploadRequestSupported +} + +// GetUploadRequestSupportedOk returns a tuple with the UploadRequestSupported field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetUploadRequestSupportedOk() (*bool, bool) { + if o == nil || IsNil(o.UploadRequestSupported) { + return nil, false + } + return o.UploadRequestSupported, true +} + +// HasUploadRequestSupported returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasUploadRequestSupported() bool { + if o != nil && !IsNil(o.UploadRequestSupported) { + return true + } + + return false +} + +// SetUploadRequestSupported gets a reference to the given bool and assigns it to the UploadRequestSupported field. +func (o *DrsServiceDrs) SetUploadRequestSupported(v bool) { + o.UploadRequestSupported = &v +} + +// GetObjectRegistrationSupported returns the ObjectRegistrationSupported field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetObjectRegistrationSupported() bool { + if o == nil || IsNil(o.ObjectRegistrationSupported) { + var ret bool + return ret + } + return *o.ObjectRegistrationSupported +} + +// GetObjectRegistrationSupportedOk returns a tuple with the ObjectRegistrationSupported field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetObjectRegistrationSupportedOk() (*bool, bool) { + if o == nil || IsNil(o.ObjectRegistrationSupported) { + return nil, false + } + return o.ObjectRegistrationSupported, true +} + +// HasObjectRegistrationSupported returns a boolean if a field has been set. 
+func (o *DrsServiceDrs) HasObjectRegistrationSupported() bool { + if o != nil && !IsNil(o.ObjectRegistrationSupported) { + return true + } + + return false +} + +// SetObjectRegistrationSupported gets a reference to the given bool and assigns it to the ObjectRegistrationSupported field. +func (o *DrsServiceDrs) SetObjectRegistrationSupported(v bool) { + o.ObjectRegistrationSupported = &v +} + +// GetSupportedUploadMethods returns the SupportedUploadMethods field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetSupportedUploadMethods() []string { + if o == nil || IsNil(o.SupportedUploadMethods) { + var ret []string + return ret + } + return o.SupportedUploadMethods +} + +// GetSupportedUploadMethodsOk returns a tuple with the SupportedUploadMethods field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetSupportedUploadMethodsOk() ([]string, bool) { + if o == nil || IsNil(o.SupportedUploadMethods) { + return nil, false + } + return o.SupportedUploadMethods, true +} + +// HasSupportedUploadMethods returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasSupportedUploadMethods() bool { + if o != nil && !IsNil(o.SupportedUploadMethods) { + return true + } + + return false +} + +// SetSupportedUploadMethods gets a reference to the given []string and assigns it to the SupportedUploadMethods field. +func (o *DrsServiceDrs) SetSupportedUploadMethods(v []string) { + o.SupportedUploadMethods = v +} + +// GetMaxUploadSize returns the MaxUploadSize field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetMaxUploadSize() int64 { + if o == nil || IsNil(o.MaxUploadSize) { + var ret int64 + return ret + } + return *o.MaxUploadSize +} + +// GetMaxUploadSizeOk returns a tuple with the MaxUploadSize field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *DrsServiceDrs) GetMaxUploadSizeOk() (*int64, bool) { + if o == nil || IsNil(o.MaxUploadSize) { + return nil, false + } + return o.MaxUploadSize, true +} + +// HasMaxUploadSize returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasMaxUploadSize() bool { + if o != nil && !IsNil(o.MaxUploadSize) { + return true + } + + return false +} + +// SetMaxUploadSize gets a reference to the given int64 and assigns it to the MaxUploadSize field. +func (o *DrsServiceDrs) SetMaxUploadSize(v int64) { + o.MaxUploadSize = &v +} + +// GetMaxUploadRequestLength returns the MaxUploadRequestLength field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetMaxUploadRequestLength() int32 { + if o == nil || IsNil(o.MaxUploadRequestLength) { + var ret int32 + return ret + } + return *o.MaxUploadRequestLength +} + +// GetMaxUploadRequestLengthOk returns a tuple with the MaxUploadRequestLength field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetMaxUploadRequestLengthOk() (*int32, bool) { + if o == nil || IsNil(o.MaxUploadRequestLength) { + return nil, false + } + return o.MaxUploadRequestLength, true +} + +// HasMaxUploadRequestLength returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasMaxUploadRequestLength() bool { + if o != nil && !IsNil(o.MaxUploadRequestLength) { + return true + } + + return false +} + +// SetMaxUploadRequestLength gets a reference to the given int32 and assigns it to the MaxUploadRequestLength field. +func (o *DrsServiceDrs) SetMaxUploadRequestLength(v int32) { + o.MaxUploadRequestLength = &v +} + +// GetMaxRegisterRequestLength returns the MaxRegisterRequestLength field value if set, zero value otherwise. 
+func (o *DrsServiceDrs) GetMaxRegisterRequestLength() int32 { + if o == nil || IsNil(o.MaxRegisterRequestLength) { + var ret int32 + return ret + } + return *o.MaxRegisterRequestLength +} + +// GetMaxRegisterRequestLengthOk returns a tuple with the MaxRegisterRequestLength field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetMaxRegisterRequestLengthOk() (*int32, bool) { + if o == nil || IsNil(o.MaxRegisterRequestLength) { + return nil, false + } + return o.MaxRegisterRequestLength, true +} + +// HasMaxRegisterRequestLength returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasMaxRegisterRequestLength() bool { + if o != nil && !IsNil(o.MaxRegisterRequestLength) { + return true + } + + return false +} + +// SetMaxRegisterRequestLength gets a reference to the given int32 and assigns it to the MaxRegisterRequestLength field. +func (o *DrsServiceDrs) SetMaxRegisterRequestLength(v int32) { + o.MaxRegisterRequestLength = &v +} + +// GetValidateUploadChecksums returns the ValidateUploadChecksums field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetValidateUploadChecksums() bool { + if o == nil || IsNil(o.ValidateUploadChecksums) { + var ret bool + return ret + } + return *o.ValidateUploadChecksums +} + +// GetValidateUploadChecksumsOk returns a tuple with the ValidateUploadChecksums field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetValidateUploadChecksumsOk() (*bool, bool) { + if o == nil || IsNil(o.ValidateUploadChecksums) { + return nil, false + } + return o.ValidateUploadChecksums, true +} + +// HasValidateUploadChecksums returns a boolean if a field has been set. 
+func (o *DrsServiceDrs) HasValidateUploadChecksums() bool { + if o != nil && !IsNil(o.ValidateUploadChecksums) { + return true + } + + return false +} + +// SetValidateUploadChecksums gets a reference to the given bool and assigns it to the ValidateUploadChecksums field. +func (o *DrsServiceDrs) SetValidateUploadChecksums(v bool) { + o.ValidateUploadChecksums = &v +} + +// GetValidateUploadFileSizes returns the ValidateUploadFileSizes field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetValidateUploadFileSizes() bool { + if o == nil || IsNil(o.ValidateUploadFileSizes) { + var ret bool + return ret + } + return *o.ValidateUploadFileSizes +} + +// GetValidateUploadFileSizesOk returns a tuple with the ValidateUploadFileSizes field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetValidateUploadFileSizesOk() (*bool, bool) { + if o == nil || IsNil(o.ValidateUploadFileSizes) { + return nil, false + } + return o.ValidateUploadFileSizes, true +} + +// HasValidateUploadFileSizes returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasValidateUploadFileSizes() bool { + if o != nil && !IsNil(o.ValidateUploadFileSizes) { + return true + } + + return false +} + +// SetValidateUploadFileSizes gets a reference to the given bool and assigns it to the ValidateUploadFileSizes field. +func (o *DrsServiceDrs) SetValidateUploadFileSizes(v bool) { + o.ValidateUploadFileSizes = &v +} + +// GetRelatedFileStorageSupported returns the RelatedFileStorageSupported field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetRelatedFileStorageSupported() bool { + if o == nil || IsNil(o.RelatedFileStorageSupported) { + var ret bool + return ret + } + return *o.RelatedFileStorageSupported +} + +// GetRelatedFileStorageSupportedOk returns a tuple with the RelatedFileStorageSupported field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *DrsServiceDrs) GetRelatedFileStorageSupportedOk() (*bool, bool) { + if o == nil || IsNil(o.RelatedFileStorageSupported) { + return nil, false + } + return o.RelatedFileStorageSupported, true +} + +// HasRelatedFileStorageSupported returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasRelatedFileStorageSupported() bool { + if o != nil && !IsNil(o.RelatedFileStorageSupported) { + return true + } + + return false +} + +// SetRelatedFileStorageSupported gets a reference to the given bool and assigns it to the RelatedFileStorageSupported field. +func (o *DrsServiceDrs) SetRelatedFileStorageSupported(v bool) { + o.RelatedFileStorageSupported = &v +} + +// GetDeleteSupported returns the DeleteSupported field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetDeleteSupported() bool { + if o == nil || IsNil(o.DeleteSupported) { + var ret bool + return ret + } + return *o.DeleteSupported +} + +// GetDeleteSupportedOk returns a tuple with the DeleteSupported field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetDeleteSupportedOk() (*bool, bool) { + if o == nil || IsNil(o.DeleteSupported) { + return nil, false + } + return o.DeleteSupported, true +} + +// HasDeleteSupported returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasDeleteSupported() bool { + if o != nil && !IsNil(o.DeleteSupported) { + return true + } + + return false +} + +// SetDeleteSupported gets a reference to the given bool and assigns it to the DeleteSupported field. +func (o *DrsServiceDrs) SetDeleteSupported(v bool) { + o.DeleteSupported = &v +} + +// GetMaxBulkDeleteLength returns the MaxBulkDeleteLength field value if set, zero value otherwise. 
+func (o *DrsServiceDrs) GetMaxBulkDeleteLength() int32 { + if o == nil || IsNil(o.MaxBulkDeleteLength) { + var ret int32 + return ret + } + return *o.MaxBulkDeleteLength +} + +// GetMaxBulkDeleteLengthOk returns a tuple with the MaxBulkDeleteLength field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetMaxBulkDeleteLengthOk() (*int32, bool) { + if o == nil || IsNil(o.MaxBulkDeleteLength) { + return nil, false + } + return o.MaxBulkDeleteLength, true +} + +// HasMaxBulkDeleteLength returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasMaxBulkDeleteLength() bool { + if o != nil && !IsNil(o.MaxBulkDeleteLength) { + return true + } + + return false +} + +// SetMaxBulkDeleteLength gets a reference to the given int32 and assigns it to the MaxBulkDeleteLength field. +func (o *DrsServiceDrs) SetMaxBulkDeleteLength(v int32) { + o.MaxBulkDeleteLength = &v +} + +// GetDeleteStorageDataSupported returns the DeleteStorageDataSupported field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetDeleteStorageDataSupported() bool { + if o == nil || IsNil(o.DeleteStorageDataSupported) { + var ret bool + return ret + } + return *o.DeleteStorageDataSupported +} + +// GetDeleteStorageDataSupportedOk returns a tuple with the DeleteStorageDataSupported field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetDeleteStorageDataSupportedOk() (*bool, bool) { + if o == nil || IsNil(o.DeleteStorageDataSupported) { + return nil, false + } + return o.DeleteStorageDataSupported, true +} + +// HasDeleteStorageDataSupported returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasDeleteStorageDataSupported() bool { + if o != nil && !IsNil(o.DeleteStorageDataSupported) { + return true + } + + return false +} + +// SetDeleteStorageDataSupported gets a reference to the given bool and assigns it to the DeleteStorageDataSupported field. 
+func (o *DrsServiceDrs) SetDeleteStorageDataSupported(v bool) { + o.DeleteStorageDataSupported = &v +} + +// GetAccessMethodUpdateSupported returns the AccessMethodUpdateSupported field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetAccessMethodUpdateSupported() bool { + if o == nil || IsNil(o.AccessMethodUpdateSupported) { + var ret bool + return ret + } + return *o.AccessMethodUpdateSupported +} + +// GetAccessMethodUpdateSupportedOk returns a tuple with the AccessMethodUpdateSupported field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetAccessMethodUpdateSupportedOk() (*bool, bool) { + if o == nil || IsNil(o.AccessMethodUpdateSupported) { + return nil, false + } + return o.AccessMethodUpdateSupported, true +} + +// HasAccessMethodUpdateSupported returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasAccessMethodUpdateSupported() bool { + if o != nil && !IsNil(o.AccessMethodUpdateSupported) { + return true + } + + return false +} + +// SetAccessMethodUpdateSupported gets a reference to the given bool and assigns it to the AccessMethodUpdateSupported field. +func (o *DrsServiceDrs) SetAccessMethodUpdateSupported(v bool) { + o.AccessMethodUpdateSupported = &v +} + +// GetMaxBulkAccessMethodUpdateLength returns the MaxBulkAccessMethodUpdateLength field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetMaxBulkAccessMethodUpdateLength() int32 { + if o == nil || IsNil(o.MaxBulkAccessMethodUpdateLength) { + var ret int32 + return ret + } + return *o.MaxBulkAccessMethodUpdateLength +} + +// GetMaxBulkAccessMethodUpdateLengthOk returns a tuple with the MaxBulkAccessMethodUpdateLength field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *DrsServiceDrs) GetMaxBulkAccessMethodUpdateLengthOk() (*int32, bool) { + if o == nil || IsNil(o.MaxBulkAccessMethodUpdateLength) { + return nil, false + } + return o.MaxBulkAccessMethodUpdateLength, true +} + +// HasMaxBulkAccessMethodUpdateLength returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasMaxBulkAccessMethodUpdateLength() bool { + if o != nil && !IsNil(o.MaxBulkAccessMethodUpdateLength) { + return true + } + + return false +} + +// SetMaxBulkAccessMethodUpdateLength gets a reference to the given int32 and assigns it to the MaxBulkAccessMethodUpdateLength field. +func (o *DrsServiceDrs) SetMaxBulkAccessMethodUpdateLength(v int32) { + o.MaxBulkAccessMethodUpdateLength = &v +} + +// GetValidateAccessMethodUpdates returns the ValidateAccessMethodUpdates field value if set, zero value otherwise. +func (o *DrsServiceDrs) GetValidateAccessMethodUpdates() bool { + if o == nil || IsNil(o.ValidateAccessMethodUpdates) { + var ret bool + return ret + } + return *o.ValidateAccessMethodUpdates +} + +// GetValidateAccessMethodUpdatesOk returns a tuple with the ValidateAccessMethodUpdates field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DrsServiceDrs) GetValidateAccessMethodUpdatesOk() (*bool, bool) { + if o == nil || IsNil(o.ValidateAccessMethodUpdates) { + return nil, false + } + return o.ValidateAccessMethodUpdates, true +} + +// HasValidateAccessMethodUpdates returns a boolean if a field has been set. +func (o *DrsServiceDrs) HasValidateAccessMethodUpdates() bool { + if o != nil && !IsNil(o.ValidateAccessMethodUpdates) { + return true + } + + return false +} + +// SetValidateAccessMethodUpdates gets a reference to the given bool and assigns it to the ValidateAccessMethodUpdates field. 
+func (o *DrsServiceDrs) SetValidateAccessMethodUpdates(v bool) { + o.ValidateAccessMethodUpdates = &v +} + +func (o DrsServiceDrs) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o DrsServiceDrs) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["maxBulkRequestLength"] = o.MaxBulkRequestLength + if !IsNil(o.ObjectCount) { + toSerialize["objectCount"] = o.ObjectCount + } + if !IsNil(o.TotalObjectSize) { + toSerialize["totalObjectSize"] = o.TotalObjectSize + } + if !IsNil(o.UploadRequestSupported) { + toSerialize["uploadRequestSupported"] = o.UploadRequestSupported + } + if !IsNil(o.ObjectRegistrationSupported) { + toSerialize["objectRegistrationSupported"] = o.ObjectRegistrationSupported + } + if !IsNil(o.SupportedUploadMethods) { + toSerialize["supportedUploadMethods"] = o.SupportedUploadMethods + } + if !IsNil(o.MaxUploadSize) { + toSerialize["maxUploadSize"] = o.MaxUploadSize + } + if !IsNil(o.MaxUploadRequestLength) { + toSerialize["maxUploadRequestLength"] = o.MaxUploadRequestLength + } + if !IsNil(o.MaxRegisterRequestLength) { + toSerialize["maxRegisterRequestLength"] = o.MaxRegisterRequestLength + } + if !IsNil(o.ValidateUploadChecksums) { + toSerialize["validateUploadChecksums"] = o.ValidateUploadChecksums + } + if !IsNil(o.ValidateUploadFileSizes) { + toSerialize["validateUploadFileSizes"] = o.ValidateUploadFileSizes + } + if !IsNil(o.RelatedFileStorageSupported) { + toSerialize["relatedFileStorageSupported"] = o.RelatedFileStorageSupported + } + if !IsNil(o.DeleteSupported) { + toSerialize["deleteSupported"] = o.DeleteSupported + } + if !IsNil(o.MaxBulkDeleteLength) { + toSerialize["maxBulkDeleteLength"] = o.MaxBulkDeleteLength + } + if !IsNil(o.DeleteStorageDataSupported) { + toSerialize["deleteStorageDataSupported"] = o.DeleteStorageDataSupported + } + if !IsNil(o.AccessMethodUpdateSupported) { + 
toSerialize["accessMethodUpdateSupported"] = o.AccessMethodUpdateSupported + } + if !IsNil(o.MaxBulkAccessMethodUpdateLength) { + toSerialize["maxBulkAccessMethodUpdateLength"] = o.MaxBulkAccessMethodUpdateLength + } + if !IsNil(o.ValidateAccessMethodUpdates) { + toSerialize["validateAccessMethodUpdates"] = o.ValidateAccessMethodUpdates + } + return toSerialize, nil +} + +func (o *DrsServiceDrs) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "maxBulkRequestLength", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varDrsServiceDrs := _DrsServiceDrs{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varDrsServiceDrs) + + if err != nil { + return err + } + + *o = DrsServiceDrs(varDrsServiceDrs) + + return err +} + +type NullableDrsServiceDrs struct { + value *DrsServiceDrs + isSet bool +} + +func (v NullableDrsServiceDrs) Get() *DrsServiceDrs { + return v.value +} + +func (v *NullableDrsServiceDrs) Set(val *DrsServiceDrs) { + v.value = val + v.isSet = true +} + +func (v NullableDrsServiceDrs) IsSet() bool { + return v.isSet +} + +func (v *NullableDrsServiceDrs) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableDrsServiceDrs(val *DrsServiceDrs) *NullableDrsServiceDrs { + return &NullableDrsServiceDrs{value: val, isSet: true} +} + +func (v NullableDrsServiceDrs) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v 
*NullableDrsServiceDrs) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_drs_service_type.go b/apigen/drs/model_drs_service_type.go new file mode 100644 index 0000000..cecf0ad --- /dev/null +++ b/apigen/drs/model_drs_service_type.go @@ -0,0 +1,159 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the DrsServiceType type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &DrsServiceType{} + +// DrsServiceType struct for DrsServiceType +type DrsServiceType struct { + Artifact string `json:"artifact"` +} + +type _DrsServiceType DrsServiceType + +// NewDrsServiceType instantiates a new DrsServiceType object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewDrsServiceType(artifact string) *DrsServiceType { + this := DrsServiceType{} + this.Artifact = artifact + return &this +} + +// NewDrsServiceTypeWithDefaults instantiates a new DrsServiceType object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewDrsServiceTypeWithDefaults() *DrsServiceType { + this := DrsServiceType{} + return &this +} + +// GetArtifact returns the Artifact field value +func (o *DrsServiceType) GetArtifact() string { + if o == nil { + var ret string + return ret + } + + return o.Artifact +} + +// GetArtifactOk returns a tuple with the Artifact field value +// and a boolean to check 
if the value has been set. +func (o *DrsServiceType) GetArtifactOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Artifact, true +} + +// SetArtifact sets field value +func (o *DrsServiceType) SetArtifact(v string) { + o.Artifact = v +} + +func (o DrsServiceType) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o DrsServiceType) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["artifact"] = o.Artifact + return toSerialize, nil +} + +func (o *DrsServiceType) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "artifact", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varDrsServiceType := _DrsServiceType{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varDrsServiceType) + + if err != nil { + return err + } + + *o = DrsServiceType(varDrsServiceType) + + return err +} + +type NullableDrsServiceType struct { + value *DrsServiceType + isSet bool +} + +func (v NullableDrsServiceType) Get() *DrsServiceType { + return v.value +} + +func (v *NullableDrsServiceType) Set(val *DrsServiceType) { + v.value = val + v.isSet = true +} + +func (v NullableDrsServiceType) IsSet() bool { + return v.isSet +} + +func (v *NullableDrsServiceType) Unset() { + v.value = nil + v.isSet = false +} + +func 
NewNullableDrsServiceType(val *DrsServiceType) *NullableDrsServiceType { + return &NullableDrsServiceType{value: val, isSet: true} +} + +func (v NullableDrsServiceType) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableDrsServiceType) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_error.go b/apigen/drs/model_error.go new file mode 100644 index 0000000..5bc566e --- /dev/null +++ b/apigen/drs/model_error.go @@ -0,0 +1,165 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" +) + +// checks if the Error type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Error{} + +// Error An object that can optionally include information about the error. +type Error struct { + // A detailed error message. + Msg *string `json:"msg,omitempty"` + // The integer representing the HTTP status code (e.g. 200, 404). + StatusCode *int32 `json:"status_code,omitempty"` +} + +// NewError instantiates a new Error object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewError() *Error { + this := Error{} + return &this +} + +// NewErrorWithDefaults instantiates a new Error object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewErrorWithDefaults() *Error { + this := Error{} + return &this +} + +// GetMsg returns the Msg field value if set, zero value otherwise. 
+func (o *Error) GetMsg() string { + if o == nil || IsNil(o.Msg) { + var ret string + return ret + } + return *o.Msg +} + +// GetMsgOk returns a tuple with the Msg field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Error) GetMsgOk() (*string, bool) { + if o == nil || IsNil(o.Msg) { + return nil, false + } + return o.Msg, true +} + +// HasMsg returns a boolean if a field has been set. +func (o *Error) HasMsg() bool { + if o != nil && !IsNil(o.Msg) { + return true + } + + return false +} + +// SetMsg gets a reference to the given string and assigns it to the Msg field. +func (o *Error) SetMsg(v string) { + o.Msg = &v +} + +// GetStatusCode returns the StatusCode field value if set, zero value otherwise. +func (o *Error) GetStatusCode() int32 { + if o == nil || IsNil(o.StatusCode) { + var ret int32 + return ret + } + return *o.StatusCode +} + +// GetStatusCodeOk returns a tuple with the StatusCode field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Error) GetStatusCodeOk() (*int32, bool) { + if o == nil || IsNil(o.StatusCode) { + return nil, false + } + return o.StatusCode, true +} + +// HasStatusCode returns a boolean if a field has been set. +func (o *Error) HasStatusCode() bool { + if o != nil && !IsNil(o.StatusCode) { + return true + } + + return false +} + +// SetStatusCode gets a reference to the given int32 and assigns it to the StatusCode field. 
+func (o *Error) SetStatusCode(v int32) { + o.StatusCode = &v +} + +func (o Error) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Error) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Msg) { + toSerialize["msg"] = o.Msg + } + if !IsNil(o.StatusCode) { + toSerialize["status_code"] = o.StatusCode + } + return toSerialize, nil +} + +type NullableError struct { + value *Error + isSet bool +} + +func (v NullableError) Get() *Error { + return v.value +} + +func (v *NullableError) Set(val *Error) { + v.value = val + v.isSet = true +} + +func (v NullableError) IsSet() bool { + return v.isSet +} + +func (v *NullableError) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableError(val *Error) *NullableError { + return &NullableError{value: val, isSet: true} +} + +func (v NullableError) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableError) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_get_bulk_access_url_200_response.go b/apigen/drs/model_get_bulk_access_url_200_response.go new file mode 100644 index 0000000..b3db2cb --- /dev/null +++ b/apigen/drs/model_get_bulk_access_url_200_response.go @@ -0,0 +1,200 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" +) + +// checks if the GetBulkAccessURL200Response type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &GetBulkAccessURL200Response{} + +// GetBulkAccessURL200Response struct for GetBulkAccessURL200Response +type GetBulkAccessURL200Response struct { + Summary *Summary `json:"summary,omitempty"` + // Error codes for each unresolved drs objects. + UnresolvedDrsObjects []UnresolvedInner `json:"unresolved_drs_objects,omitempty"` + ResolvedDrsObjectAccessUrls []BulkAccessURL `json:"resolved_drs_object_access_urls,omitempty"` +} + +// NewGetBulkAccessURL200Response instantiates a new GetBulkAccessURL200Response object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewGetBulkAccessURL200Response() *GetBulkAccessURL200Response { + this := GetBulkAccessURL200Response{} + return &this +} + +// NewGetBulkAccessURL200ResponseWithDefaults instantiates a new GetBulkAccessURL200Response object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewGetBulkAccessURL200ResponseWithDefaults() *GetBulkAccessURL200Response { + this := GetBulkAccessURL200Response{} + return &this +} + +// GetSummary returns the Summary field value if set, zero value otherwise. +func (o *GetBulkAccessURL200Response) GetSummary() Summary { + if o == nil || IsNil(o.Summary) { + var ret Summary + return ret + } + return *o.Summary +} + +// GetSummaryOk returns a tuple with the Summary field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *GetBulkAccessURL200Response) GetSummaryOk() (*Summary, bool) { + if o == nil || IsNil(o.Summary) { + return nil, false + } + return o.Summary, true +} + +// HasSummary returns a boolean if a field has been set. +func (o *GetBulkAccessURL200Response) HasSummary() bool { + if o != nil && !IsNil(o.Summary) { + return true + } + + return false +} + +// SetSummary gets a reference to the given Summary and assigns it to the Summary field. +func (o *GetBulkAccessURL200Response) SetSummary(v Summary) { + o.Summary = &v +} + +// GetUnresolvedDrsObjects returns the UnresolvedDrsObjects field value if set, zero value otherwise. +func (o *GetBulkAccessURL200Response) GetUnresolvedDrsObjects() []UnresolvedInner { + if o == nil || IsNil(o.UnresolvedDrsObjects) { + var ret []UnresolvedInner + return ret + } + return o.UnresolvedDrsObjects +} + +// GetUnresolvedDrsObjectsOk returns a tuple with the UnresolvedDrsObjects field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *GetBulkAccessURL200Response) GetUnresolvedDrsObjectsOk() ([]UnresolvedInner, bool) { + if o == nil || IsNil(o.UnresolvedDrsObjects) { + return nil, false + } + return o.UnresolvedDrsObjects, true +} + +// HasUnresolvedDrsObjects returns a boolean if a field has been set. +func (o *GetBulkAccessURL200Response) HasUnresolvedDrsObjects() bool { + if o != nil && !IsNil(o.UnresolvedDrsObjects) { + return true + } + + return false +} + +// SetUnresolvedDrsObjects gets a reference to the given []UnresolvedInner and assigns it to the UnresolvedDrsObjects field. +func (o *GetBulkAccessURL200Response) SetUnresolvedDrsObjects(v []UnresolvedInner) { + o.UnresolvedDrsObjects = v +} + +// GetResolvedDrsObjectAccessUrls returns the ResolvedDrsObjectAccessUrls field value if set, zero value otherwise. 
+func (o *GetBulkAccessURL200Response) GetResolvedDrsObjectAccessUrls() []BulkAccessURL { + if o == nil || IsNil(o.ResolvedDrsObjectAccessUrls) { + var ret []BulkAccessURL + return ret + } + return o.ResolvedDrsObjectAccessUrls +} + +// GetResolvedDrsObjectAccessUrlsOk returns a tuple with the ResolvedDrsObjectAccessUrls field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *GetBulkAccessURL200Response) GetResolvedDrsObjectAccessUrlsOk() ([]BulkAccessURL, bool) { + if o == nil || IsNil(o.ResolvedDrsObjectAccessUrls) { + return nil, false + } + return o.ResolvedDrsObjectAccessUrls, true +} + +// HasResolvedDrsObjectAccessUrls returns a boolean if a field has been set. +func (o *GetBulkAccessURL200Response) HasResolvedDrsObjectAccessUrls() bool { + if o != nil && !IsNil(o.ResolvedDrsObjectAccessUrls) { + return true + } + + return false +} + +// SetResolvedDrsObjectAccessUrls gets a reference to the given []BulkAccessURL and assigns it to the ResolvedDrsObjectAccessUrls field. 
+func (o *GetBulkAccessURL200Response) SetResolvedDrsObjectAccessUrls(v []BulkAccessURL) { + o.ResolvedDrsObjectAccessUrls = v +} + +func (o GetBulkAccessURL200Response) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o GetBulkAccessURL200Response) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Summary) { + toSerialize["summary"] = o.Summary + } + if !IsNil(o.UnresolvedDrsObjects) { + toSerialize["unresolved_drs_objects"] = o.UnresolvedDrsObjects + } + if !IsNil(o.ResolvedDrsObjectAccessUrls) { + toSerialize["resolved_drs_object_access_urls"] = o.ResolvedDrsObjectAccessUrls + } + return toSerialize, nil +} + +type NullableGetBulkAccessURL200Response struct { + value *GetBulkAccessURL200Response + isSet bool +} + +func (v NullableGetBulkAccessURL200Response) Get() *GetBulkAccessURL200Response { + return v.value +} + +func (v *NullableGetBulkAccessURL200Response) Set(val *GetBulkAccessURL200Response) { + v.value = val + v.isSet = true +} + +func (v NullableGetBulkAccessURL200Response) IsSet() bool { + return v.isSet +} + +func (v *NullableGetBulkAccessURL200Response) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableGetBulkAccessURL200Response(val *GetBulkAccessURL200Response) *NullableGetBulkAccessURL200Response { + return &NullableGetBulkAccessURL200Response{value: val, isSet: true} +} + +func (v NullableGetBulkAccessURL200Response) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableGetBulkAccessURL200Response) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_get_bulk_objects_200_response.go b/apigen/drs/model_get_bulk_objects_200_response.go new file mode 100644 index 0000000..d92607a --- /dev/null +++ b/apigen/drs/model_get_bulk_objects_200_response.go @@ -0,0 +1,200 @@ +/* +Data 
Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" +) + +// checks if the GetBulkObjects200Response type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &GetBulkObjects200Response{} + +// GetBulkObjects200Response struct for GetBulkObjects200Response +type GetBulkObjects200Response struct { + Summary *Summary `json:"summary,omitempty"` + // Error codes for each unresolved drs objects. + UnresolvedDrsObjects []UnresolvedInner `json:"unresolved_drs_objects,omitempty"` + ResolvedDrsObject []DrsObject `json:"resolved_drs_object,omitempty"` +} + +// NewGetBulkObjects200Response instantiates a new GetBulkObjects200Response object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewGetBulkObjects200Response() *GetBulkObjects200Response { + this := GetBulkObjects200Response{} + return &this +} + +// NewGetBulkObjects200ResponseWithDefaults instantiates a new GetBulkObjects200Response object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewGetBulkObjects200ResponseWithDefaults() *GetBulkObjects200Response { + this := GetBulkObjects200Response{} + return &this +} + +// GetSummary returns the Summary field value if set, zero value otherwise. 
+func (o *GetBulkObjects200Response) GetSummary() Summary { + if o == nil || IsNil(o.Summary) { + var ret Summary + return ret + } + return *o.Summary +} + +// GetSummaryOk returns a tuple with the Summary field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *GetBulkObjects200Response) GetSummaryOk() (*Summary, bool) { + if o == nil || IsNil(o.Summary) { + return nil, false + } + return o.Summary, true +} + +// HasSummary returns a boolean if a field has been set. +func (o *GetBulkObjects200Response) HasSummary() bool { + if o != nil && !IsNil(o.Summary) { + return true + } + + return false +} + +// SetSummary gets a reference to the given Summary and assigns it to the Summary field. +func (o *GetBulkObjects200Response) SetSummary(v Summary) { + o.Summary = &v +} + +// GetUnresolvedDrsObjects returns the UnresolvedDrsObjects field value if set, zero value otherwise. +func (o *GetBulkObjects200Response) GetUnresolvedDrsObjects() []UnresolvedInner { + if o == nil || IsNil(o.UnresolvedDrsObjects) { + var ret []UnresolvedInner + return ret + } + return o.UnresolvedDrsObjects +} + +// GetUnresolvedDrsObjectsOk returns a tuple with the UnresolvedDrsObjects field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *GetBulkObjects200Response) GetUnresolvedDrsObjectsOk() ([]UnresolvedInner, bool) { + if o == nil || IsNil(o.UnresolvedDrsObjects) { + return nil, false + } + return o.UnresolvedDrsObjects, true +} + +// HasUnresolvedDrsObjects returns a boolean if a field has been set. +func (o *GetBulkObjects200Response) HasUnresolvedDrsObjects() bool { + if o != nil && !IsNil(o.UnresolvedDrsObjects) { + return true + } + + return false +} + +// SetUnresolvedDrsObjects gets a reference to the given []UnresolvedInner and assigns it to the UnresolvedDrsObjects field. 
+func (o *GetBulkObjects200Response) SetUnresolvedDrsObjects(v []UnresolvedInner) { + o.UnresolvedDrsObjects = v +} + +// GetResolvedDrsObject returns the ResolvedDrsObject field value if set, zero value otherwise. +func (o *GetBulkObjects200Response) GetResolvedDrsObject() []DrsObject { + if o == nil || IsNil(o.ResolvedDrsObject) { + var ret []DrsObject + return ret + } + return o.ResolvedDrsObject +} + +// GetResolvedDrsObjectOk returns a tuple with the ResolvedDrsObject field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *GetBulkObjects200Response) GetResolvedDrsObjectOk() ([]DrsObject, bool) { + if o == nil || IsNil(o.ResolvedDrsObject) { + return nil, false + } + return o.ResolvedDrsObject, true +} + +// HasResolvedDrsObject returns a boolean if a field has been set. +func (o *GetBulkObjects200Response) HasResolvedDrsObject() bool { + if o != nil && !IsNil(o.ResolvedDrsObject) { + return true + } + + return false +} + +// SetResolvedDrsObject gets a reference to the given []DrsObject and assigns it to the ResolvedDrsObject field. 
+func (o *GetBulkObjects200Response) SetResolvedDrsObject(v []DrsObject) { + o.ResolvedDrsObject = v +} + +func (o GetBulkObjects200Response) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o GetBulkObjects200Response) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Summary) { + toSerialize["summary"] = o.Summary + } + if !IsNil(o.UnresolvedDrsObjects) { + toSerialize["unresolved_drs_objects"] = o.UnresolvedDrsObjects + } + if !IsNil(o.ResolvedDrsObject) { + toSerialize["resolved_drs_object"] = o.ResolvedDrsObject + } + return toSerialize, nil +} + +type NullableGetBulkObjects200Response struct { + value *GetBulkObjects200Response + isSet bool +} + +func (v NullableGetBulkObjects200Response) Get() *GetBulkObjects200Response { + return v.value +} + +func (v *NullableGetBulkObjects200Response) Set(val *GetBulkObjects200Response) { + v.value = val + v.isSet = true +} + +func (v NullableGetBulkObjects200Response) IsSet() bool { + return v.isSet +} + +func (v *NullableGetBulkObjects200Response) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableGetBulkObjects200Response(val *GetBulkObjects200Response) *NullableGetBulkObjects200Response { + return &NullableGetBulkObjects200Response{value: val, isSet: true} +} + +func (v NullableGetBulkObjects200Response) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableGetBulkObjects200Response) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_get_bulk_objects_request.go b/apigen/drs/model_get_bulk_objects_request.go new file mode 100644 index 0000000..5656fec --- /dev/null +++ b/apigen/drs/model_get_bulk_objects_request.go @@ -0,0 +1,197 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the GetBulkObjectsRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &GetBulkObjectsRequest{} + +// GetBulkObjectsRequest struct for GetBulkObjectsRequest +type GetBulkObjectsRequest struct { + // the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. + Passports []string `json:"passports,omitempty"` + // An array of ObjectIDs to retrieve metadata for + BulkObjectIds []string `json:"bulk_object_ids"` +} + +type _GetBulkObjectsRequest GetBulkObjectsRequest + +// NewGetBulkObjectsRequest instantiates a new GetBulkObjectsRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewGetBulkObjectsRequest(bulkObjectIds []string) *GetBulkObjectsRequest { + this := GetBulkObjectsRequest{} + this.BulkObjectIds = bulkObjectIds + return &this +} + +// NewGetBulkObjectsRequestWithDefaults instantiates a new GetBulkObjectsRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewGetBulkObjectsRequestWithDefaults() *GetBulkObjectsRequest { + this := GetBulkObjectsRequest{} + return &this +} + +// GetPassports returns the Passports field value if set, zero value otherwise. 
+func (o *GetBulkObjectsRequest) GetPassports() []string { + if o == nil || IsNil(o.Passports) { + var ret []string + return ret + } + return o.Passports +} + +// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *GetBulkObjectsRequest) GetPassportsOk() ([]string, bool) { + if o == nil || IsNil(o.Passports) { + return nil, false + } + return o.Passports, true +} + +// HasPassports returns a boolean if a field has been set. +func (o *GetBulkObjectsRequest) HasPassports() bool { + if o != nil && !IsNil(o.Passports) { + return true + } + + return false +} + +// SetPassports gets a reference to the given []string and assigns it to the Passports field. +func (o *GetBulkObjectsRequest) SetPassports(v []string) { + o.Passports = v +} + +// GetBulkObjectIds returns the BulkObjectIds field value +func (o *GetBulkObjectsRequest) GetBulkObjectIds() []string { + if o == nil { + var ret []string + return ret + } + + return o.BulkObjectIds +} + +// GetBulkObjectIdsOk returns a tuple with the BulkObjectIds field value +// and a boolean to check if the value has been set. 
+func (o *GetBulkObjectsRequest) GetBulkObjectIdsOk() ([]string, bool) { + if o == nil { + return nil, false + } + return o.BulkObjectIds, true +} + +// SetBulkObjectIds sets field value +func (o *GetBulkObjectsRequest) SetBulkObjectIds(v []string) { + o.BulkObjectIds = v +} + +func (o GetBulkObjectsRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o GetBulkObjectsRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Passports) { + toSerialize["passports"] = o.Passports + } + toSerialize["bulk_object_ids"] = o.BulkObjectIds + return toSerialize, nil +} + +func (o *GetBulkObjectsRequest) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "bulk_object_ids", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varGetBulkObjectsRequest := _GetBulkObjectsRequest{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varGetBulkObjectsRequest) + + if err != nil { + return err + } + + *o = GetBulkObjectsRequest(varGetBulkObjectsRequest) + + return err +} + +type NullableGetBulkObjectsRequest struct { + value *GetBulkObjectsRequest + isSet bool +} + +func (v NullableGetBulkObjectsRequest) Get() *GetBulkObjectsRequest { + return v.value +} + +func (v *NullableGetBulkObjectsRequest) Set(val *GetBulkObjectsRequest) { + v.value = val + v.isSet = true +} + +func (v NullableGetBulkObjectsRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableGetBulkObjectsRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableGetBulkObjectsRequest(val *GetBulkObjectsRequest) *NullableGetBulkObjectsRequest { + return &NullableGetBulkObjectsRequest{value: val, isSet: true} +} + +func (v NullableGetBulkObjectsRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableGetBulkObjectsRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_get_service_info_200_response.go b/apigen/drs/model_get_service_info_200_response.go new file mode 100644 index 0000000..5bdc486 --- /dev/null +++ b/apigen/drs/model_get_service_info_200_response.go @@ -0,0 +1,562 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 
1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "time" + "bytes" + "fmt" +) + +// checks if the GetServiceInfo200Response type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &GetServiceInfo200Response{} + +// GetServiceInfo200Response struct for GetServiceInfo200Response +type GetServiceInfo200Response struct { + // Unique ID of this service. Reverse domain name notation is recommended, though not required. The identifier should attempt to be globally unique so it can be used in downstream aggregator services e.g. Service Registry. + Id string `json:"id"` + // Name of this service. Should be human readable. + Name string `json:"name"` + Type DrsServiceType `json:"type"` + // Description of the service. Should be human readable and provide information about the service. + Description *string `json:"description,omitempty"` + Organization ServiceOrganization `json:"organization"` + // URL of the contact for the provider of this service, e.g. a link to a contact form (RFC 3986 format), or an email (RFC 2368 format). + ContactUrl *string `json:"contactUrl,omitempty"` + // URL of the documentation of this service (RFC 3986 format). This should help someone learn how to use your service, including any specifics required to access data, e.g. authentication. + DocumentationUrl *string `json:"documentationUrl,omitempty"` + // Timestamp describing when the service was first deployed and available (RFC 3339 format) + CreatedAt *time.Time `json:"createdAt,omitempty"` + // Timestamp describing when the service was last updated (RFC 3339 format) + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + // Environment the service is running in. Use this to distinguish between production, development and testing/staging deployments. Suggested values are prod, test, dev, staging. However this is advised and not enforced. 
+ Environment *string `json:"environment,omitempty"` + // Version of the service being described. Semantic versioning is recommended, but other identifiers, such as dates or commit hashes, are also allowed. The version should be changed whenever the service is updated. + Version string `json:"version"` + // DEPRECATED - In 2.0 this will move to under the drs section of service info and not at the root level. The max length the bulk request endpoints can handle (>= 1) before generating a 413 error e.g. how long can the arrays bulk_object_ids and bulk_object_access_ids be for this server. + MaxBulkRequestLength int32 `json:"maxBulkRequestLength"` + Drs *DrsServiceDrs `json:"drs,omitempty"` +} + +type _GetServiceInfo200Response GetServiceInfo200Response + +// NewGetServiceInfo200Response instantiates a new GetServiceInfo200Response object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewGetServiceInfo200Response(id string, name string, type_ DrsServiceType, organization ServiceOrganization, version string, maxBulkRequestLength int32) *GetServiceInfo200Response { + this := GetServiceInfo200Response{} + this.Id = id + this.Name = name + this.Type = type_ + this.Organization = organization + this.Version = version + this.MaxBulkRequestLength = maxBulkRequestLength + return &this +} + +// NewGetServiceInfo200ResponseWithDefaults instantiates a new GetServiceInfo200Response object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewGetServiceInfo200ResponseWithDefaults() *GetServiceInfo200Response { + this := GetServiceInfo200Response{} + return &this +} + +// GetId returns the Id field value +func (o *GetServiceInfo200Response) GetId() string { + if o == nil { + var ret 
string + return ret + } + + return o.Id +} + +// GetIdOk returns a tuple with the Id field value +// and a boolean to check if the value has been set. +func (o *GetServiceInfo200Response) GetIdOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Id, true +} + +// SetId sets field value +func (o *GetServiceInfo200Response) SetId(v string) { + o.Id = v +} + +// GetName returns the Name field value +func (o *GetServiceInfo200Response) GetName() string { + if o == nil { + var ret string + return ret + } + + return o.Name +} + +// GetNameOk returns a tuple with the Name field value +// and a boolean to check if the value has been set. +func (o *GetServiceInfo200Response) GetNameOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Name, true +} + +// SetName sets field value +func (o *GetServiceInfo200Response) SetName(v string) { + o.Name = v +} + +// GetType returns the Type field value +func (o *GetServiceInfo200Response) GetType() DrsServiceType { + if o == nil { + var ret DrsServiceType + return ret + } + + return o.Type +} + +// GetTypeOk returns a tuple with the Type field value +// and a boolean to check if the value has been set. +func (o *GetServiceInfo200Response) GetTypeOk() (*DrsServiceType, bool) { + if o == nil { + return nil, false + } + return &o.Type, true +} + +// SetType sets field value +func (o *GetServiceInfo200Response) SetType(v DrsServiceType) { + o.Type = v +} + +// GetDescription returns the Description field value if set, zero value otherwise. +func (o *GetServiceInfo200Response) GetDescription() string { + if o == nil || IsNil(o.Description) { + var ret string + return ret + } + return *o.Description +} + +// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *GetServiceInfo200Response) GetDescriptionOk() (*string, bool) { + if o == nil || IsNil(o.Description) { + return nil, false + } + return o.Description, true +} + +// HasDescription returns a boolean if a field has been set. +func (o *GetServiceInfo200Response) HasDescription() bool { + if o != nil && !IsNil(o.Description) { + return true + } + + return false +} + +// SetDescription gets a reference to the given string and assigns it to the Description field. +func (o *GetServiceInfo200Response) SetDescription(v string) { + o.Description = &v +} + +// GetOrganization returns the Organization field value +func (o *GetServiceInfo200Response) GetOrganization() ServiceOrganization { + if o == nil { + var ret ServiceOrganization + return ret + } + + return o.Organization +} + +// GetOrganizationOk returns a tuple with the Organization field value +// and a boolean to check if the value has been set. +func (o *GetServiceInfo200Response) GetOrganizationOk() (*ServiceOrganization, bool) { + if o == nil { + return nil, false + } + return &o.Organization, true +} + +// SetOrganization sets field value +func (o *GetServiceInfo200Response) SetOrganization(v ServiceOrganization) { + o.Organization = v +} + +// GetContactUrl returns the ContactUrl field value if set, zero value otherwise. +func (o *GetServiceInfo200Response) GetContactUrl() string { + if o == nil || IsNil(o.ContactUrl) { + var ret string + return ret + } + return *o.ContactUrl +} + +// GetContactUrlOk returns a tuple with the ContactUrl field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *GetServiceInfo200Response) GetContactUrlOk() (*string, bool) { + if o == nil || IsNil(o.ContactUrl) { + return nil, false + } + return o.ContactUrl, true +} + +// HasContactUrl returns a boolean if a field has been set. 
+func (o *GetServiceInfo200Response) HasContactUrl() bool { + if o != nil && !IsNil(o.ContactUrl) { + return true + } + + return false +} + +// SetContactUrl gets a reference to the given string and assigns it to the ContactUrl field. +func (o *GetServiceInfo200Response) SetContactUrl(v string) { + o.ContactUrl = &v +} + +// GetDocumentationUrl returns the DocumentationUrl field value if set, zero value otherwise. +func (o *GetServiceInfo200Response) GetDocumentationUrl() string { + if o == nil || IsNil(o.DocumentationUrl) { + var ret string + return ret + } + return *o.DocumentationUrl +} + +// GetDocumentationUrlOk returns a tuple with the DocumentationUrl field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *GetServiceInfo200Response) GetDocumentationUrlOk() (*string, bool) { + if o == nil || IsNil(o.DocumentationUrl) { + return nil, false + } + return o.DocumentationUrl, true +} + +// HasDocumentationUrl returns a boolean if a field has been set. +func (o *GetServiceInfo200Response) HasDocumentationUrl() bool { + if o != nil && !IsNil(o.DocumentationUrl) { + return true + } + + return false +} + +// SetDocumentationUrl gets a reference to the given string and assigns it to the DocumentationUrl field. +func (o *GetServiceInfo200Response) SetDocumentationUrl(v string) { + o.DocumentationUrl = &v +} + +// GetCreatedAt returns the CreatedAt field value if set, zero value otherwise. +func (o *GetServiceInfo200Response) GetCreatedAt() time.Time { + if o == nil || IsNil(o.CreatedAt) { + var ret time.Time + return ret + } + return *o.CreatedAt +} + +// GetCreatedAtOk returns a tuple with the CreatedAt field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *GetServiceInfo200Response) GetCreatedAtOk() (*time.Time, bool) { + if o == nil || IsNil(o.CreatedAt) { + return nil, false + } + return o.CreatedAt, true +} + +// HasCreatedAt returns a boolean if a field has been set. 
+func (o *GetServiceInfo200Response) HasCreatedAt() bool { + if o != nil && !IsNil(o.CreatedAt) { + return true + } + + return false +} + +// SetCreatedAt gets a reference to the given time.Time and assigns it to the CreatedAt field. +func (o *GetServiceInfo200Response) SetCreatedAt(v time.Time) { + o.CreatedAt = &v +} + +// GetUpdatedAt returns the UpdatedAt field value if set, zero value otherwise. +func (o *GetServiceInfo200Response) GetUpdatedAt() time.Time { + if o == nil || IsNil(o.UpdatedAt) { + var ret time.Time + return ret + } + return *o.UpdatedAt +} + +// GetUpdatedAtOk returns a tuple with the UpdatedAt field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *GetServiceInfo200Response) GetUpdatedAtOk() (*time.Time, bool) { + if o == nil || IsNil(o.UpdatedAt) { + return nil, false + } + return o.UpdatedAt, true +} + +// HasUpdatedAt returns a boolean if a field has been set. +func (o *GetServiceInfo200Response) HasUpdatedAt() bool { + if o != nil && !IsNil(o.UpdatedAt) { + return true + } + + return false +} + +// SetUpdatedAt gets a reference to the given time.Time and assigns it to the UpdatedAt field. +func (o *GetServiceInfo200Response) SetUpdatedAt(v time.Time) { + o.UpdatedAt = &v +} + +// GetEnvironment returns the Environment field value if set, zero value otherwise. +func (o *GetServiceInfo200Response) GetEnvironment() string { + if o == nil || IsNil(o.Environment) { + var ret string + return ret + } + return *o.Environment +} + +// GetEnvironmentOk returns a tuple with the Environment field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *GetServiceInfo200Response) GetEnvironmentOk() (*string, bool) { + if o == nil || IsNil(o.Environment) { + return nil, false + } + return o.Environment, true +} + +// HasEnvironment returns a boolean if a field has been set. 
+func (o *GetServiceInfo200Response) HasEnvironment() bool { + if o != nil && !IsNil(o.Environment) { + return true + } + + return false +} + +// SetEnvironment gets a reference to the given string and assigns it to the Environment field. +func (o *GetServiceInfo200Response) SetEnvironment(v string) { + o.Environment = &v +} + +// GetVersion returns the Version field value +func (o *GetServiceInfo200Response) GetVersion() string { + if o == nil { + var ret string + return ret + } + + return o.Version +} + +// GetVersionOk returns a tuple with the Version field value +// and a boolean to check if the value has been set. +func (o *GetServiceInfo200Response) GetVersionOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Version, true +} + +// SetVersion sets field value +func (o *GetServiceInfo200Response) SetVersion(v string) { + o.Version = v +} + +// GetMaxBulkRequestLength returns the MaxBulkRequestLength field value +func (o *GetServiceInfo200Response) GetMaxBulkRequestLength() int32 { + if o == nil { + var ret int32 + return ret + } + + return o.MaxBulkRequestLength +} + +// GetMaxBulkRequestLengthOk returns a tuple with the MaxBulkRequestLength field value +// and a boolean to check if the value has been set. +func (o *GetServiceInfo200Response) GetMaxBulkRequestLengthOk() (*int32, bool) { + if o == nil { + return nil, false + } + return &o.MaxBulkRequestLength, true +} + +// SetMaxBulkRequestLength sets field value +func (o *GetServiceInfo200Response) SetMaxBulkRequestLength(v int32) { + o.MaxBulkRequestLength = v +} + +// GetDrs returns the Drs field value if set, zero value otherwise. +func (o *GetServiceInfo200Response) GetDrs() DrsServiceDrs { + if o == nil || IsNil(o.Drs) { + var ret DrsServiceDrs + return ret + } + return *o.Drs +} + +// GetDrsOk returns a tuple with the Drs field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *GetServiceInfo200Response) GetDrsOk() (*DrsServiceDrs, bool) { + if o == nil || IsNil(o.Drs) { + return nil, false + } + return o.Drs, true +} + +// HasDrs returns a boolean if a field has been set. +func (o *GetServiceInfo200Response) HasDrs() bool { + if o != nil && !IsNil(o.Drs) { + return true + } + + return false +} + +// SetDrs gets a reference to the given DrsServiceDrs and assigns it to the Drs field. +func (o *GetServiceInfo200Response) SetDrs(v DrsServiceDrs) { + o.Drs = &v +} + +func (o GetServiceInfo200Response) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o GetServiceInfo200Response) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["id"] = o.Id + toSerialize["name"] = o.Name + toSerialize["type"] = o.Type + if !IsNil(o.Description) { + toSerialize["description"] = o.Description + } + toSerialize["organization"] = o.Organization + if !IsNil(o.ContactUrl) { + toSerialize["contactUrl"] = o.ContactUrl + } + if !IsNil(o.DocumentationUrl) { + toSerialize["documentationUrl"] = o.DocumentationUrl + } + if !IsNil(o.CreatedAt) { + toSerialize["createdAt"] = o.CreatedAt + } + if !IsNil(o.UpdatedAt) { + toSerialize["updatedAt"] = o.UpdatedAt + } + if !IsNil(o.Environment) { + toSerialize["environment"] = o.Environment + } + toSerialize["version"] = o.Version + toSerialize["maxBulkRequestLength"] = o.MaxBulkRequestLength + if !IsNil(o.Drs) { + toSerialize["drs"] = o.Drs + } + return toSerialize, nil +} + +func (o *GetServiceInfo200Response) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "id", + "name", + "type", + "organization", + "version", + "maxBulkRequestLength", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varGetServiceInfo200Response := _GetServiceInfo200Response{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varGetServiceInfo200Response) + + if err != nil { + return err + } + + *o = GetServiceInfo200Response(varGetServiceInfo200Response) + + return err +} + +type NullableGetServiceInfo200Response struct { + value *GetServiceInfo200Response + isSet bool +} + +func (v NullableGetServiceInfo200Response) Get() *GetServiceInfo200Response { + return v.value +} + +func (v *NullableGetServiceInfo200Response) Set(val *GetServiceInfo200Response) { + v.value = val + v.isSet = true +} + +func (v NullableGetServiceInfo200Response) IsSet() bool { + return v.isSet +} + +func (v *NullableGetServiceInfo200Response) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableGetServiceInfo200Response(val *GetServiceInfo200Response) *NullableGetServiceInfo200Response { + return &NullableGetServiceInfo200Response{value: val, isSet: true} +} + +func (v NullableGetServiceInfo200Response) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableGetServiceInfo200Response) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_options_bulk_object_200_response.go b/apigen/drs/model_options_bulk_object_200_response.go new file mode 100644 index 0000000..67df1c7 --- /dev/null +++ b/apigen/drs/model_options_bulk_object_200_response.go @@ -0,0 +1,200 @@ +/* 
+Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" +) + +// checks if the OptionsBulkObject200Response type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &OptionsBulkObject200Response{} + +// OptionsBulkObject200Response struct for OptionsBulkObject200Response +type OptionsBulkObject200Response struct { + Summary *Summary `json:"summary,omitempty"` + // Error codes for each unresolved drs objects. + UnresolvedDrsObjects []UnresolvedInner `json:"unresolved_drs_objects,omitempty"` + ResolvedDrsObject []Authorizations `json:"resolved_drs_object,omitempty"` +} + +// NewOptionsBulkObject200Response instantiates a new OptionsBulkObject200Response object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewOptionsBulkObject200Response() *OptionsBulkObject200Response { + this := OptionsBulkObject200Response{} + return &this +} + +// NewOptionsBulkObject200ResponseWithDefaults instantiates a new OptionsBulkObject200Response object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewOptionsBulkObject200ResponseWithDefaults() *OptionsBulkObject200Response { + this := OptionsBulkObject200Response{} + return &this +} + +// GetSummary returns the Summary field value if set, zero value otherwise. 
+func (o *OptionsBulkObject200Response) GetSummary() Summary { + if o == nil || IsNil(o.Summary) { + var ret Summary + return ret + } + return *o.Summary +} + +// GetSummaryOk returns a tuple with the Summary field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *OptionsBulkObject200Response) GetSummaryOk() (*Summary, bool) { + if o == nil || IsNil(o.Summary) { + return nil, false + } + return o.Summary, true +} + +// HasSummary returns a boolean if a field has been set. +func (o *OptionsBulkObject200Response) HasSummary() bool { + if o != nil && !IsNil(o.Summary) { + return true + } + + return false +} + +// SetSummary gets a reference to the given Summary and assigns it to the Summary field. +func (o *OptionsBulkObject200Response) SetSummary(v Summary) { + o.Summary = &v +} + +// GetUnresolvedDrsObjects returns the UnresolvedDrsObjects field value if set, zero value otherwise. +func (o *OptionsBulkObject200Response) GetUnresolvedDrsObjects() []UnresolvedInner { + if o == nil || IsNil(o.UnresolvedDrsObjects) { + var ret []UnresolvedInner + return ret + } + return o.UnresolvedDrsObjects +} + +// GetUnresolvedDrsObjectsOk returns a tuple with the UnresolvedDrsObjects field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *OptionsBulkObject200Response) GetUnresolvedDrsObjectsOk() ([]UnresolvedInner, bool) { + if o == nil || IsNil(o.UnresolvedDrsObjects) { + return nil, false + } + return o.UnresolvedDrsObjects, true +} + +// HasUnresolvedDrsObjects returns a boolean if a field has been set. +func (o *OptionsBulkObject200Response) HasUnresolvedDrsObjects() bool { + if o != nil && !IsNil(o.UnresolvedDrsObjects) { + return true + } + + return false +} + +// SetUnresolvedDrsObjects gets a reference to the given []UnresolvedInner and assigns it to the UnresolvedDrsObjects field. 
+func (o *OptionsBulkObject200Response) SetUnresolvedDrsObjects(v []UnresolvedInner) { + o.UnresolvedDrsObjects = v +} + +// GetResolvedDrsObject returns the ResolvedDrsObject field value if set, zero value otherwise. +func (o *OptionsBulkObject200Response) GetResolvedDrsObject() []Authorizations { + if o == nil || IsNil(o.ResolvedDrsObject) { + var ret []Authorizations + return ret + } + return o.ResolvedDrsObject +} + +// GetResolvedDrsObjectOk returns a tuple with the ResolvedDrsObject field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *OptionsBulkObject200Response) GetResolvedDrsObjectOk() ([]Authorizations, bool) { + if o == nil || IsNil(o.ResolvedDrsObject) { + return nil, false + } + return o.ResolvedDrsObject, true +} + +// HasResolvedDrsObject returns a boolean if a field has been set. +func (o *OptionsBulkObject200Response) HasResolvedDrsObject() bool { + if o != nil && !IsNil(o.ResolvedDrsObject) { + return true + } + + return false +} + +// SetResolvedDrsObject gets a reference to the given []Authorizations and assigns it to the ResolvedDrsObject field. 
+func (o *OptionsBulkObject200Response) SetResolvedDrsObject(v []Authorizations) { + o.ResolvedDrsObject = v +} + +func (o OptionsBulkObject200Response) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o OptionsBulkObject200Response) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Summary) { + toSerialize["summary"] = o.Summary + } + if !IsNil(o.UnresolvedDrsObjects) { + toSerialize["unresolved_drs_objects"] = o.UnresolvedDrsObjects + } + if !IsNil(o.ResolvedDrsObject) { + toSerialize["resolved_drs_object"] = o.ResolvedDrsObject + } + return toSerialize, nil +} + +type NullableOptionsBulkObject200Response struct { + value *OptionsBulkObject200Response + isSet bool +} + +func (v NullableOptionsBulkObject200Response) Get() *OptionsBulkObject200Response { + return v.value +} + +func (v *NullableOptionsBulkObject200Response) Set(val *OptionsBulkObject200Response) { + v.value = val + v.isSet = true +} + +func (v NullableOptionsBulkObject200Response) IsSet() bool { + return v.isSet +} + +func (v *NullableOptionsBulkObject200Response) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableOptionsBulkObject200Response(val *OptionsBulkObject200Response) *NullableOptionsBulkObject200Response { + return &NullableOptionsBulkObject200Response{value: val, isSet: true} +} + +func (v NullableOptionsBulkObject200Response) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableOptionsBulkObject200Response) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_post_access_url_request.go b/apigen/drs/model_post_access_url_request.go new file mode 100644 index 0000000..805a44c --- /dev/null +++ b/apigen/drs/model_post_access_url_request.go @@ -0,0 +1,128 @@ +/* +Data Repository Service + +No description provided 
(generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" +) + +// checks if the PostAccessURLRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &PostAccessURLRequest{} + +// PostAccessURLRequest struct for PostAccessURLRequest +type PostAccessURLRequest struct { + // the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. + Passports []string `json:"passports,omitempty"` +} + +// NewPostAccessURLRequest instantiates a new PostAccessURLRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewPostAccessURLRequest() *PostAccessURLRequest { + this := PostAccessURLRequest{} + return &this +} + +// NewPostAccessURLRequestWithDefaults instantiates a new PostAccessURLRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewPostAccessURLRequestWithDefaults() *PostAccessURLRequest { + this := PostAccessURLRequest{} + return &this +} + +// GetPassports returns the Passports field value if set, zero value otherwise. +func (o *PostAccessURLRequest) GetPassports() []string { + if o == nil || IsNil(o.Passports) { + var ret []string + return ret + } + return o.Passports +} + +// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *PostAccessURLRequest) GetPassportsOk() ([]string, bool) { + if o == nil || IsNil(o.Passports) { + return nil, false + } + return o.Passports, true +} + +// HasPassports returns a boolean if a field has been set. +func (o *PostAccessURLRequest) HasPassports() bool { + if o != nil && !IsNil(o.Passports) { + return true + } + + return false +} + +// SetPassports gets a reference to the given []string and assigns it to the Passports field. +func (o *PostAccessURLRequest) SetPassports(v []string) { + o.Passports = v +} + +func (o PostAccessURLRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o PostAccessURLRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Passports) { + toSerialize["passports"] = o.Passports + } + return toSerialize, nil +} + +type NullablePostAccessURLRequest struct { + value *PostAccessURLRequest + isSet bool +} + +func (v NullablePostAccessURLRequest) Get() *PostAccessURLRequest { + return v.value +} + +func (v *NullablePostAccessURLRequest) Set(val *PostAccessURLRequest) { + v.value = val + v.isSet = true +} + +func (v NullablePostAccessURLRequest) IsSet() bool { + return v.isSet +} + +func (v *NullablePostAccessURLRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullablePostAccessURLRequest(val *PostAccessURLRequest) *NullablePostAccessURLRequest { + return &NullablePostAccessURLRequest{value: val, isSet: true} +} + +func (v NullablePostAccessURLRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullablePostAccessURLRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_post_object_request.go b/apigen/drs/model_post_object_request.go new file mode 100644 index 0000000..bf21e0e --- /dev/null +++ b/apigen/drs/model_post_object_request.go @@ 
-0,0 +1,165 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" +) + +// checks if the PostObjectRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &PostObjectRequest{} + +// PostObjectRequest struct for PostObjectRequest +type PostObjectRequest struct { + // If false and the object_id refers to a bundle, then the ContentsObject array contains only those objects directly contained in the bundle. That is, if the bundle contains other bundles, those other bundles are not recursively included in the result. If true and the object_id refers to a bundle, then the entire set of objects in the bundle is expanded. That is, if the bundle contains other bundles, then those other bundles are recursively expanded and included in the result. Recursion continues through the entire sub-tree of the bundle. If the object_id refers to a blob, then the query parameter is ignored. + Expand *bool `json:"expand,omitempty"` + // the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. 
+ Passports []string `json:"passports,omitempty"` +} + +// NewPostObjectRequest instantiates a new PostObjectRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewPostObjectRequest() *PostObjectRequest { + this := PostObjectRequest{} + return &this +} + +// NewPostObjectRequestWithDefaults instantiates a new PostObjectRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewPostObjectRequestWithDefaults() *PostObjectRequest { + this := PostObjectRequest{} + return &this +} + +// GetExpand returns the Expand field value if set, zero value otherwise. +func (o *PostObjectRequest) GetExpand() bool { + if o == nil || IsNil(o.Expand) { + var ret bool + return ret + } + return *o.Expand +} + +// GetExpandOk returns a tuple with the Expand field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PostObjectRequest) GetExpandOk() (*bool, bool) { + if o == nil || IsNil(o.Expand) { + return nil, false + } + return o.Expand, true +} + +// HasExpand returns a boolean if a field has been set. +func (o *PostObjectRequest) HasExpand() bool { + if o != nil && !IsNil(o.Expand) { + return true + } + + return false +} + +// SetExpand gets a reference to the given bool and assigns it to the Expand field. +func (o *PostObjectRequest) SetExpand(v bool) { + o.Expand = &v +} + +// GetPassports returns the Passports field value if set, zero value otherwise. 
+func (o *PostObjectRequest) GetPassports() []string { + if o == nil || IsNil(o.Passports) { + var ret []string + return ret + } + return o.Passports +} + +// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *PostObjectRequest) GetPassportsOk() ([]string, bool) { + if o == nil || IsNil(o.Passports) { + return nil, false + } + return o.Passports, true +} + +// HasPassports returns a boolean if a field has been set. +func (o *PostObjectRequest) HasPassports() bool { + if o != nil && !IsNil(o.Passports) { + return true + } + + return false +} + +// SetPassports gets a reference to the given []string and assigns it to the Passports field. +func (o *PostObjectRequest) SetPassports(v []string) { + o.Passports = v +} + +func (o PostObjectRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o PostObjectRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Expand) { + toSerialize["expand"] = o.Expand + } + if !IsNil(o.Passports) { + toSerialize["passports"] = o.Passports + } + return toSerialize, nil +} + +type NullablePostObjectRequest struct { + value *PostObjectRequest + isSet bool +} + +func (v NullablePostObjectRequest) Get() *PostObjectRequest { + return v.value +} + +func (v *NullablePostObjectRequest) Set(val *PostObjectRequest) { + v.value = val + v.isSet = true +} + +func (v NullablePostObjectRequest) IsSet() bool { + return v.isSet +} + +func (v *NullablePostObjectRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullablePostObjectRequest(val *PostObjectRequest) *NullablePostObjectRequest { + return &NullablePostObjectRequest{value: val, isSet: true} +} + +func (v NullablePostObjectRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v 
*NullablePostObjectRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_register_objects_201_response.go b/apigen/drs/model_register_objects_201_response.go new file mode 100644 index 0000000..eeb7b27 --- /dev/null +++ b/apigen/drs/model_register_objects_201_response.go @@ -0,0 +1,160 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the RegisterObjects201Response type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &RegisterObjects201Response{} + +// RegisterObjects201Response struct for RegisterObjects201Response +type RegisterObjects201Response struct { + // Array of registered DRS objects in the same order as the candidates in the request + Objects []DrsObject `json:"objects"` +} + +type _RegisterObjects201Response RegisterObjects201Response + +// NewRegisterObjects201Response instantiates a new RegisterObjects201Response object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewRegisterObjects201Response(objects []DrsObject) *RegisterObjects201Response { + this := RegisterObjects201Response{} + this.Objects = objects + return &this +} + +// NewRegisterObjects201ResponseWithDefaults instantiates a new RegisterObjects201Response object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewRegisterObjects201ResponseWithDefaults() 
*RegisterObjects201Response { + this := RegisterObjects201Response{} + return &this +} + +// GetObjects returns the Objects field value +func (o *RegisterObjects201Response) GetObjects() []DrsObject { + if o == nil { + var ret []DrsObject + return ret + } + + return o.Objects +} + +// GetObjectsOk returns a tuple with the Objects field value +// and a boolean to check if the value has been set. +func (o *RegisterObjects201Response) GetObjectsOk() ([]DrsObject, bool) { + if o == nil { + return nil, false + } + return o.Objects, true +} + +// SetObjects sets field value +func (o *RegisterObjects201Response) SetObjects(v []DrsObject) { + o.Objects = v +} + +func (o RegisterObjects201Response) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o RegisterObjects201Response) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["objects"] = o.Objects + return toSerialize, nil +} + +func (o *RegisterObjects201Response) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "objects", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varRegisterObjects201Response := _RegisterObjects201Response{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varRegisterObjects201Response) + + if err != nil { + return err + } + + *o = RegisterObjects201Response(varRegisterObjects201Response) + + return err +} + +type NullableRegisterObjects201Response struct { + value *RegisterObjects201Response + isSet bool +} + +func (v NullableRegisterObjects201Response) Get() *RegisterObjects201Response { + return v.value +} + +func (v *NullableRegisterObjects201Response) Set(val *RegisterObjects201Response) { + v.value = val + v.isSet = true +} + +func (v NullableRegisterObjects201Response) IsSet() bool { + return v.isSet +} + +func (v *NullableRegisterObjects201Response) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableRegisterObjects201Response(val *RegisterObjects201Response) *NullableRegisterObjects201Response { + return &NullableRegisterObjects201Response{value: val, isSet: true} +} + +func (v NullableRegisterObjects201Response) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableRegisterObjects201Response) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_register_objects_request.go b/apigen/drs/model_register_objects_request.go new file mode 100644 index 0000000..85d332d --- /dev/null +++ b/apigen/drs/model_register_objects_request.go @@ -0,0 +1,197 @@ +/* +Data Repository Service + +No description provided (generated by Openapi 
Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the RegisterObjectsRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &RegisterObjectsRequest{} + +// RegisterObjectsRequest struct for RegisterObjectsRequest +type RegisterObjectsRequest struct { + // Array of DRS object candidates to register (server will mint IDs and timestamps) + Candidates []DrsObjectCandidate `json:"candidates"` + // Optional array of GA4GH Passport JWTs for authorization + Passports []string `json:"passports,omitempty"` +} + +type _RegisterObjectsRequest RegisterObjectsRequest + +// NewRegisterObjectsRequest instantiates a new RegisterObjectsRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewRegisterObjectsRequest(candidates []DrsObjectCandidate) *RegisterObjectsRequest { + this := RegisterObjectsRequest{} + this.Candidates = candidates + return &this +} + +// NewRegisterObjectsRequestWithDefaults instantiates a new RegisterObjectsRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewRegisterObjectsRequestWithDefaults() *RegisterObjectsRequest { + this := RegisterObjectsRequest{} + return &this +} + +// GetCandidates returns the Candidates field value +func (o *RegisterObjectsRequest) GetCandidates() []DrsObjectCandidate { + if o == nil { + var ret []DrsObjectCandidate + return ret + } + + return o.Candidates +} + +// GetCandidatesOk returns a tuple with the Candidates field value +// and a 
boolean to check if the value has been set. +func (o *RegisterObjectsRequest) GetCandidatesOk() ([]DrsObjectCandidate, bool) { + if o == nil { + return nil, false + } + return o.Candidates, true +} + +// SetCandidates sets field value +func (o *RegisterObjectsRequest) SetCandidates(v []DrsObjectCandidate) { + o.Candidates = v +} + +// GetPassports returns the Passports field value if set, zero value otherwise. +func (o *RegisterObjectsRequest) GetPassports() []string { + if o == nil || IsNil(o.Passports) { + var ret []string + return ret + } + return o.Passports +} + +// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *RegisterObjectsRequest) GetPassportsOk() ([]string, bool) { + if o == nil || IsNil(o.Passports) { + return nil, false + } + return o.Passports, true +} + +// HasPassports returns a boolean if a field has been set. +func (o *RegisterObjectsRequest) HasPassports() bool { + if o != nil && !IsNil(o.Passports) { + return true + } + + return false +} + +// SetPassports gets a reference to the given []string and assigns it to the Passports field. 
+func (o *RegisterObjectsRequest) SetPassports(v []string) { + o.Passports = v +} + +func (o RegisterObjectsRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o RegisterObjectsRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["candidates"] = o.Candidates + if !IsNil(o.Passports) { + toSerialize["passports"] = o.Passports + } + return toSerialize, nil +} + +func (o *RegisterObjectsRequest) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "candidates", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varRegisterObjectsRequest := _RegisterObjectsRequest{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varRegisterObjectsRequest) + + if err != nil { + return err + } + + *o = RegisterObjectsRequest(varRegisterObjectsRequest) + + return err +} + +type NullableRegisterObjectsRequest struct { + value *RegisterObjectsRequest + isSet bool +} + +func (v NullableRegisterObjectsRequest) Get() *RegisterObjectsRequest { + return v.value +} + +func (v *NullableRegisterObjectsRequest) Set(val *RegisterObjectsRequest) { + v.value = val + v.isSet = true +} + +func (v NullableRegisterObjectsRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableRegisterObjectsRequest) Unset() { + v.value = nil + 
v.isSet = false +} + +func NewNullableRegisterObjectsRequest(val *RegisterObjectsRequest) *NullableRegisterObjectsRequest { + return &NullableRegisterObjectsRequest{value: val, isSet: true} +} + +func (v NullableRegisterObjectsRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableRegisterObjectsRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_service.go b/apigen/drs/model_service.go new file mode 100644 index 0000000..5b8a3fc --- /dev/null +++ b/apigen/drs/model_service.go @@ -0,0 +1,497 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "time" + "bytes" + "fmt" +) + +// checks if the Service type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Service{} + +// Service GA4GH service +type Service struct { + // Unique ID of this service. Reverse domain name notation is recommended, though not required. The identifier should attempt to be globally unique so it can be used in downstream aggregator services e.g. Service Registry. + Id string `json:"id"` + // Name of this service. Should be human readable. + Name string `json:"name"` + Type ServiceType `json:"type"` + // Description of the service. Should be human readable and provide information about the service. + Description *string `json:"description,omitempty"` + Organization ServiceOrganization `json:"organization"` + // URL of the contact for the provider of this service, e.g. a link to a contact form (RFC 3986 format), or an email (RFC 2368 format). + ContactUrl *string `json:"contactUrl,omitempty"` + // URL of the documentation of this service (RFC 3986 format). 
This should help someone learn how to use your service, including any specifics required to access data, e.g. authentication. + DocumentationUrl *string `json:"documentationUrl,omitempty"` + // Timestamp describing when the service was first deployed and available (RFC 3339 format) + CreatedAt *time.Time `json:"createdAt,omitempty"` + // Timestamp describing when the service was last updated (RFC 3339 format) + UpdatedAt *time.Time `json:"updatedAt,omitempty"` + // Environment the service is running in. Use this to distinguish between production, development and testing/staging deployments. Suggested values are prod, test, dev, staging. However this is advised and not enforced. + Environment *string `json:"environment,omitempty"` + // Version of the service being described. Semantic versioning is recommended, but other identifiers, such as dates or commit hashes, are also allowed. The version should be changed whenever the service is updated. + Version string `json:"version"` +} + +type _Service Service + +// NewService instantiates a new Service object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewService(id string, name string, type_ ServiceType, organization ServiceOrganization, version string) *Service { + this := Service{} + this.Id = id + this.Name = name + this.Type = type_ + this.Organization = organization + this.Version = version + return &this +} + +// NewServiceWithDefaults instantiates a new Service object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewServiceWithDefaults() *Service { + this := Service{} + return &this +} + +// GetId returns the Id field value +func (o *Service) GetId() string { + if o == nil { + var ret string + return ret + } + + 
return o.Id +} + +// GetIdOk returns a tuple with the Id field value +// and a boolean to check if the value has been set. +func (o *Service) GetIdOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Id, true +} + +// SetId sets field value +func (o *Service) SetId(v string) { + o.Id = v +} + +// GetName returns the Name field value +func (o *Service) GetName() string { + if o == nil { + var ret string + return ret + } + + return o.Name +} + +// GetNameOk returns a tuple with the Name field value +// and a boolean to check if the value has been set. +func (o *Service) GetNameOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Name, true +} + +// SetName sets field value +func (o *Service) SetName(v string) { + o.Name = v +} + +// GetType returns the Type field value +func (o *Service) GetType() ServiceType { + if o == nil { + var ret ServiceType + return ret + } + + return o.Type +} + +// GetTypeOk returns a tuple with the Type field value +// and a boolean to check if the value has been set. +func (o *Service) GetTypeOk() (*ServiceType, bool) { + if o == nil { + return nil, false + } + return &o.Type, true +} + +// SetType sets field value +func (o *Service) SetType(v ServiceType) { + o.Type = v +} + +// GetDescription returns the Description field value if set, zero value otherwise. +func (o *Service) GetDescription() string { + if o == nil || IsNil(o.Description) { + var ret string + return ret + } + return *o.Description +} + +// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Service) GetDescriptionOk() (*string, bool) { + if o == nil || IsNil(o.Description) { + return nil, false + } + return o.Description, true +} + +// HasDescription returns a boolean if a field has been set. 
+func (o *Service) HasDescription() bool { + if o != nil && !IsNil(o.Description) { + return true + } + + return false +} + +// SetDescription gets a reference to the given string and assigns it to the Description field. +func (o *Service) SetDescription(v string) { + o.Description = &v +} + +// GetOrganization returns the Organization field value +func (o *Service) GetOrganization() ServiceOrganization { + if o == nil { + var ret ServiceOrganization + return ret + } + + return o.Organization +} + +// GetOrganizationOk returns a tuple with the Organization field value +// and a boolean to check if the value has been set. +func (o *Service) GetOrganizationOk() (*ServiceOrganization, bool) { + if o == nil { + return nil, false + } + return &o.Organization, true +} + +// SetOrganization sets field value +func (o *Service) SetOrganization(v ServiceOrganization) { + o.Organization = v +} + +// GetContactUrl returns the ContactUrl field value if set, zero value otherwise. +func (o *Service) GetContactUrl() string { + if o == nil || IsNil(o.ContactUrl) { + var ret string + return ret + } + return *o.ContactUrl +} + +// GetContactUrlOk returns a tuple with the ContactUrl field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Service) GetContactUrlOk() (*string, bool) { + if o == nil || IsNil(o.ContactUrl) { + return nil, false + } + return o.ContactUrl, true +} + +// HasContactUrl returns a boolean if a field has been set. +func (o *Service) HasContactUrl() bool { + if o != nil && !IsNil(o.ContactUrl) { + return true + } + + return false +} + +// SetContactUrl gets a reference to the given string and assigns it to the ContactUrl field. +func (o *Service) SetContactUrl(v string) { + o.ContactUrl = &v +} + +// GetDocumentationUrl returns the DocumentationUrl field value if set, zero value otherwise. 
+func (o *Service) GetDocumentationUrl() string { + if o == nil || IsNil(o.DocumentationUrl) { + var ret string + return ret + } + return *o.DocumentationUrl +} + +// GetDocumentationUrlOk returns a tuple with the DocumentationUrl field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Service) GetDocumentationUrlOk() (*string, bool) { + if o == nil || IsNil(o.DocumentationUrl) { + return nil, false + } + return o.DocumentationUrl, true +} + +// HasDocumentationUrl returns a boolean if a field has been set. +func (o *Service) HasDocumentationUrl() bool { + if o != nil && !IsNil(o.DocumentationUrl) { + return true + } + + return false +} + +// SetDocumentationUrl gets a reference to the given string and assigns it to the DocumentationUrl field. +func (o *Service) SetDocumentationUrl(v string) { + o.DocumentationUrl = &v +} + +// GetCreatedAt returns the CreatedAt field value if set, zero value otherwise. +func (o *Service) GetCreatedAt() time.Time { + if o == nil || IsNil(o.CreatedAt) { + var ret time.Time + return ret + } + return *o.CreatedAt +} + +// GetCreatedAtOk returns a tuple with the CreatedAt field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Service) GetCreatedAtOk() (*time.Time, bool) { + if o == nil || IsNil(o.CreatedAt) { + return nil, false + } + return o.CreatedAt, true +} + +// HasCreatedAt returns a boolean if a field has been set. +func (o *Service) HasCreatedAt() bool { + if o != nil && !IsNil(o.CreatedAt) { + return true + } + + return false +} + +// SetCreatedAt gets a reference to the given time.Time and assigns it to the CreatedAt field. +func (o *Service) SetCreatedAt(v time.Time) { + o.CreatedAt = &v +} + +// GetUpdatedAt returns the UpdatedAt field value if set, zero value otherwise. 
+func (o *Service) GetUpdatedAt() time.Time { + if o == nil || IsNil(o.UpdatedAt) { + var ret time.Time + return ret + } + return *o.UpdatedAt +} + +// GetUpdatedAtOk returns a tuple with the UpdatedAt field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Service) GetUpdatedAtOk() (*time.Time, bool) { + if o == nil || IsNil(o.UpdatedAt) { + return nil, false + } + return o.UpdatedAt, true +} + +// HasUpdatedAt returns a boolean if a field has been set. +func (o *Service) HasUpdatedAt() bool { + if o != nil && !IsNil(o.UpdatedAt) { + return true + } + + return false +} + +// SetUpdatedAt gets a reference to the given time.Time and assigns it to the UpdatedAt field. +func (o *Service) SetUpdatedAt(v time.Time) { + o.UpdatedAt = &v +} + +// GetEnvironment returns the Environment field value if set, zero value otherwise. +func (o *Service) GetEnvironment() string { + if o == nil || IsNil(o.Environment) { + var ret string + return ret + } + return *o.Environment +} + +// GetEnvironmentOk returns a tuple with the Environment field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Service) GetEnvironmentOk() (*string, bool) { + if o == nil || IsNil(o.Environment) { + return nil, false + } + return o.Environment, true +} + +// HasEnvironment returns a boolean if a field has been set. +func (o *Service) HasEnvironment() bool { + if o != nil && !IsNil(o.Environment) { + return true + } + + return false +} + +// SetEnvironment gets a reference to the given string and assigns it to the Environment field. +func (o *Service) SetEnvironment(v string) { + o.Environment = &v +} + +// GetVersion returns the Version field value +func (o *Service) GetVersion() string { + if o == nil { + var ret string + return ret + } + + return o.Version +} + +// GetVersionOk returns a tuple with the Version field value +// and a boolean to check if the value has been set. 
+func (o *Service) GetVersionOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Version, true +} + +// SetVersion sets field value +func (o *Service) SetVersion(v string) { + o.Version = v +} + +func (o Service) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Service) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["id"] = o.Id + toSerialize["name"] = o.Name + toSerialize["type"] = o.Type + if !IsNil(o.Description) { + toSerialize["description"] = o.Description + } + toSerialize["organization"] = o.Organization + if !IsNil(o.ContactUrl) { + toSerialize["contactUrl"] = o.ContactUrl + } + if !IsNil(o.DocumentationUrl) { + toSerialize["documentationUrl"] = o.DocumentationUrl + } + if !IsNil(o.CreatedAt) { + toSerialize["createdAt"] = o.CreatedAt + } + if !IsNil(o.UpdatedAt) { + toSerialize["updatedAt"] = o.UpdatedAt + } + if !IsNil(o.Environment) { + toSerialize["environment"] = o.Environment + } + toSerialize["version"] = o.Version + return toSerialize, nil +} + +func (o *Service) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "id", + "name", + "type", + "organization", + "version", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varService := _Service{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varService) + + if err != nil { + return err + } + + *o = Service(varService) + + return err +} + +type NullableService struct { + value *Service + isSet bool +} + +func (v NullableService) Get() *Service { + return v.value +} + +func (v *NullableService) Set(val *Service) { + v.value = val + v.isSet = true +} + +func (v NullableService) IsSet() bool { + return v.isSet +} + +func (v *NullableService) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableService(val *Service) *NullableService { + return &NullableService{value: val, isSet: true} +} + +func (v NullableService) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableService) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_service_organization.go b/apigen/drs/model_service_organization.go new file mode 100644 index 0000000..bc6eab9 --- /dev/null +++ b/apigen/drs/model_service_organization.go @@ -0,0 +1,189 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the ServiceOrganization type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &ServiceOrganization{} + +// ServiceOrganization Organization providing the service +type ServiceOrganization struct { + // Name of the organization responsible for the service + Name string `json:"name"` + // URL of the website of the organization (RFC 3986 format) + Url string `json:"url"` +} + +type _ServiceOrganization ServiceOrganization + +// NewServiceOrganization instantiates a new ServiceOrganization object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewServiceOrganization(name string, url string) *ServiceOrganization { + this := ServiceOrganization{} + this.Name = name + this.Url = url + return &this +} + +// NewServiceOrganizationWithDefaults instantiates a new ServiceOrganization object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewServiceOrganizationWithDefaults() *ServiceOrganization { + this := ServiceOrganization{} + return &this +} + +// GetName returns the Name field value +func (o *ServiceOrganization) GetName() string { + if o == nil { + var ret string + return ret + } + + return o.Name +} + +// GetNameOk returns a tuple with the Name field value +// and a boolean to check if the value has been set. 
+func (o *ServiceOrganization) GetNameOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Name, true +} + +// SetName sets field value +func (o *ServiceOrganization) SetName(v string) { + o.Name = v +} + +// GetUrl returns the Url field value +func (o *ServiceOrganization) GetUrl() string { + if o == nil { + var ret string + return ret + } + + return o.Url +} + +// GetUrlOk returns a tuple with the Url field value +// and a boolean to check if the value has been set. +func (o *ServiceOrganization) GetUrlOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Url, true +} + +// SetUrl sets field value +func (o *ServiceOrganization) SetUrl(v string) { + o.Url = v +} + +func (o ServiceOrganization) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o ServiceOrganization) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["name"] = o.Name + toSerialize["url"] = o.Url + return toSerialize, nil +} + +func (o *ServiceOrganization) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "name", + "url", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varServiceOrganization := _ServiceOrganization{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varServiceOrganization) + + if err != nil { + return err + } + + *o = ServiceOrganization(varServiceOrganization) + + return err +} + +type NullableServiceOrganization struct { + value *ServiceOrganization + isSet bool +} + +func (v NullableServiceOrganization) Get() *ServiceOrganization { + return v.value +} + +func (v *NullableServiceOrganization) Set(val *ServiceOrganization) { + v.value = val + v.isSet = true +} + +func (v NullableServiceOrganization) IsSet() bool { + return v.isSet +} + +func (v *NullableServiceOrganization) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableServiceOrganization(val *ServiceOrganization) *NullableServiceOrganization { + return &NullableServiceOrganization{value: val, isSet: true} +} + +func (v NullableServiceOrganization) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableServiceOrganization) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_service_type.go b/apigen/drs/model_service_type.go new file mode 100644 index 0000000..2efc328 --- /dev/null +++ b/apigen/drs/model_service_type.go @@ -0,0 +1,218 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator 
(https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the ServiceType type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &ServiceType{} + +// ServiceType Type of a GA4GH service +type ServiceType struct { + // Namespace in reverse domain name format. Use `org.ga4gh` for implementations compliant with official GA4GH specifications. For services with custom APIs not standardized by GA4GH, or implementations diverging from official GA4GH specifications, use a different namespace (e.g. your organization's reverse domain name). + Group string `json:"group"` + // Name of the API or GA4GH specification implemented. Official GA4GH types should be assigned as part of standards approval process. Custom artifacts are supported. + Artifact string `json:"artifact"` + // Version of the API or specification. GA4GH specifications use semantic versioning. + Version string `json:"version"` +} + +type _ServiceType ServiceType + +// NewServiceType instantiates a new ServiceType object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewServiceType(group string, artifact string, version string) *ServiceType { + this := ServiceType{} + this.Group = group + this.Artifact = artifact + this.Version = version + return &this +} + +// NewServiceTypeWithDefaults instantiates a new ServiceType object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewServiceTypeWithDefaults() *ServiceType { + this := ServiceType{} + return &this +} + +// GetGroup returns the Group field value +func (o *ServiceType) GetGroup() string { + if o == nil { + var ret string + return ret + } + + return o.Group +} + +// 
GetGroupOk returns a tuple with the Group field value +// and a boolean to check if the value has been set. +func (o *ServiceType) GetGroupOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Group, true +} + +// SetGroup sets field value +func (o *ServiceType) SetGroup(v string) { + o.Group = v +} + +// GetArtifact returns the Artifact field value +func (o *ServiceType) GetArtifact() string { + if o == nil { + var ret string + return ret + } + + return o.Artifact +} + +// GetArtifactOk returns a tuple with the Artifact field value +// and a boolean to check if the value has been set. +func (o *ServiceType) GetArtifactOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Artifact, true +} + +// SetArtifact sets field value +func (o *ServiceType) SetArtifact(v string) { + o.Artifact = v +} + +// GetVersion returns the Version field value +func (o *ServiceType) GetVersion() string { + if o == nil { + var ret string + return ret + } + + return o.Version +} + +// GetVersionOk returns a tuple with the Version field value +// and a boolean to check if the value has been set. 
+func (o *ServiceType) GetVersionOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Version, true +} + +// SetVersion sets field value +func (o *ServiceType) SetVersion(v string) { + o.Version = v +} + +func (o ServiceType) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o ServiceType) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["group"] = o.Group + toSerialize["artifact"] = o.Artifact + toSerialize["version"] = o.Version + return toSerialize, nil +} + +func (o *ServiceType) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "group", + "artifact", + "version", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varServiceType := _ServiceType{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varServiceType) + + if err != nil { + return err + } + + *o = ServiceType(varServiceType) + + return err +} + +type NullableServiceType struct { + value *ServiceType + isSet bool +} + +func (v NullableServiceType) Get() *ServiceType { + return v.value +} + +func (v *NullableServiceType) Set(val *ServiceType) { + v.value = val + v.isSet = true +} + +func (v NullableServiceType) IsSet() bool { + return v.isSet +} + +func (v *NullableServiceType) Unset() { + v.value = nil + v.isSet = false +} 
+ +func NewNullableServiceType(val *ServiceType) *NullableServiceType { + return &NullableServiceType{value: val, isSet: true} +} + +func (v NullableServiceType) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableServiceType) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_summary.go b/apigen/drs/model_summary.go new file mode 100644 index 0000000..235923a --- /dev/null +++ b/apigen/drs/model_summary.go @@ -0,0 +1,202 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" +) + +// checks if the Summary type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &Summary{} + +// Summary A summary of what was resolved. +type Summary struct { + // Number of items requested. + Requested *int32 `json:"requested,omitempty"` + // Number of objects resolved. + Resolved *int32 `json:"resolved,omitempty"` + // Number of objects not resolved. 
+ Unresolved *int32 `json:"unresolved,omitempty"` +} + +// NewSummary instantiates a new Summary object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewSummary() *Summary { + this := Summary{} + return &this +} + +// NewSummaryWithDefaults instantiates a new Summary object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewSummaryWithDefaults() *Summary { + this := Summary{} + return &this +} + +// GetRequested returns the Requested field value if set, zero value otherwise. +func (o *Summary) GetRequested() int32 { + if o == nil || IsNil(o.Requested) { + var ret int32 + return ret + } + return *o.Requested +} + +// GetRequestedOk returns a tuple with the Requested field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Summary) GetRequestedOk() (*int32, bool) { + if o == nil || IsNil(o.Requested) { + return nil, false + } + return o.Requested, true +} + +// HasRequested returns a boolean if a field has been set. +func (o *Summary) HasRequested() bool { + if o != nil && !IsNil(o.Requested) { + return true + } + + return false +} + +// SetRequested gets a reference to the given int32 and assigns it to the Requested field. +func (o *Summary) SetRequested(v int32) { + o.Requested = &v +} + +// GetResolved returns the Resolved field value if set, zero value otherwise. +func (o *Summary) GetResolved() int32 { + if o == nil || IsNil(o.Resolved) { + var ret int32 + return ret + } + return *o.Resolved +} + +// GetResolvedOk returns a tuple with the Resolved field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *Summary) GetResolvedOk() (*int32, bool) { + if o == nil || IsNil(o.Resolved) { + return nil, false + } + return o.Resolved, true +} + +// HasResolved returns a boolean if a field has been set. +func (o *Summary) HasResolved() bool { + if o != nil && !IsNil(o.Resolved) { + return true + } + + return false +} + +// SetResolved gets a reference to the given int32 and assigns it to the Resolved field. +func (o *Summary) SetResolved(v int32) { + o.Resolved = &v +} + +// GetUnresolved returns the Unresolved field value if set, zero value otherwise. +func (o *Summary) GetUnresolved() int32 { + if o == nil || IsNil(o.Unresolved) { + var ret int32 + return ret + } + return *o.Unresolved +} + +// GetUnresolvedOk returns a tuple with the Unresolved field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *Summary) GetUnresolvedOk() (*int32, bool) { + if o == nil || IsNil(o.Unresolved) { + return nil, false + } + return o.Unresolved, true +} + +// HasUnresolved returns a boolean if a field has been set. +func (o *Summary) HasUnresolved() bool { + if o != nil && !IsNil(o.Unresolved) { + return true + } + + return false +} + +// SetUnresolved gets a reference to the given int32 and assigns it to the Unresolved field. 
+func (o *Summary) SetUnresolved(v int32) { + o.Unresolved = &v +} + +func (o Summary) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o Summary) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Requested) { + toSerialize["requested"] = o.Requested + } + if !IsNil(o.Resolved) { + toSerialize["resolved"] = o.Resolved + } + if !IsNil(o.Unresolved) { + toSerialize["unresolved"] = o.Unresolved + } + return toSerialize, nil +} + +type NullableSummary struct { + value *Summary + isSet bool +} + +func (v NullableSummary) Get() *Summary { + return v.value +} + +func (v *NullableSummary) Set(val *Summary) { + v.value = val + v.isSet = true +} + +func (v NullableSummary) IsSet() bool { + return v.isSet +} + +func (v *NullableSummary) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableSummary(val *Summary) *NullableSummary { + return &NullableSummary{value: val, isSet: true} +} + +func (v NullableSummary) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableSummary) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_unresolved_inner.go b/apigen/drs/model_unresolved_inner.go new file mode 100644 index 0000000..564f4c3 --- /dev/null +++ b/apigen/drs/model_unresolved_inner.go @@ -0,0 +1,163 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" +) + +// checks if the UnresolvedInner type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &UnresolvedInner{} + +// UnresolvedInner struct for UnresolvedInner +type UnresolvedInner struct { + ErrorCode *int32 `json:"error_code,omitempty"` + ObjectIds []string `json:"object_ids,omitempty"` +} + +// NewUnresolvedInner instantiates a new UnresolvedInner object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewUnresolvedInner() *UnresolvedInner { + this := UnresolvedInner{} + return &this +} + +// NewUnresolvedInnerWithDefaults instantiates a new UnresolvedInner object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewUnresolvedInnerWithDefaults() *UnresolvedInner { + this := UnresolvedInner{} + return &this +} + +// GetErrorCode returns the ErrorCode field value if set, zero value otherwise. +func (o *UnresolvedInner) GetErrorCode() int32 { + if o == nil || IsNil(o.ErrorCode) { + var ret int32 + return ret + } + return *o.ErrorCode +} + +// GetErrorCodeOk returns a tuple with the ErrorCode field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *UnresolvedInner) GetErrorCodeOk() (*int32, bool) { + if o == nil || IsNil(o.ErrorCode) { + return nil, false + } + return o.ErrorCode, true +} + +// HasErrorCode returns a boolean if a field has been set. +func (o *UnresolvedInner) HasErrorCode() bool { + if o != nil && !IsNil(o.ErrorCode) { + return true + } + + return false +} + +// SetErrorCode gets a reference to the given int32 and assigns it to the ErrorCode field. 
+func (o *UnresolvedInner) SetErrorCode(v int32) { + o.ErrorCode = &v +} + +// GetObjectIds returns the ObjectIds field value if set, zero value otherwise. +func (o *UnresolvedInner) GetObjectIds() []string { + if o == nil || IsNil(o.ObjectIds) { + var ret []string + return ret + } + return o.ObjectIds +} + +// GetObjectIdsOk returns a tuple with the ObjectIds field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *UnresolvedInner) GetObjectIdsOk() ([]string, bool) { + if o == nil || IsNil(o.ObjectIds) { + return nil, false + } + return o.ObjectIds, true +} + +// HasObjectIds returns a boolean if a field has been set. +func (o *UnresolvedInner) HasObjectIds() bool { + if o != nil && !IsNil(o.ObjectIds) { + return true + } + + return false +} + +// SetObjectIds gets a reference to the given []string and assigns it to the ObjectIds field. +func (o *UnresolvedInner) SetObjectIds(v []string) { + o.ObjectIds = v +} + +func (o UnresolvedInner) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o UnresolvedInner) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.ErrorCode) { + toSerialize["error_code"] = o.ErrorCode + } + if !IsNil(o.ObjectIds) { + toSerialize["object_ids"] = o.ObjectIds + } + return toSerialize, nil +} + +type NullableUnresolvedInner struct { + value *UnresolvedInner + isSet bool +} + +func (v NullableUnresolvedInner) Get() *UnresolvedInner { + return v.value +} + +func (v *NullableUnresolvedInner) Set(val *UnresolvedInner) { + v.value = val + v.isSet = true +} + +func (v NullableUnresolvedInner) IsSet() bool { + return v.isSet +} + +func (v *NullableUnresolvedInner) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableUnresolvedInner(val *UnresolvedInner) *NullableUnresolvedInner { + return &NullableUnresolvedInner{value: val, isSet: true} +} + +func 
(v NullableUnresolvedInner) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableUnresolvedInner) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_upload_method.go b/apigen/drs/model_upload_method.go new file mode 100644 index 0000000..2bbdeea --- /dev/null +++ b/apigen/drs/model_upload_method.go @@ -0,0 +1,262 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the UploadMethod type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &UploadMethod{} + +// UploadMethod struct for UploadMethod +type UploadMethod struct { + // Type of upload method. Implementations MAY support any subset of these types. The 'https' type can be used to return a presigned POST URL and is expected to be the most common implementation for typical file uploads. This method provides a simple HTTP POST interface that works with standard web clients. The 's3' type is primarily intended to support uploads of large files that want to take advantage of multipart uploads and automatic retries implemented in AWS libraries. This method provides direct access to S3-specific upload capabilities. Other common implementations include 'gs' for Google Cloud Storage and 'sftp' for secure FTP uploads. + Type string `json:"type"` + AccessUrl UploadMethodAccessUrl `json:"access_url"` + // Cloud region for the upload location. Optional for non-cloud storage types. + Region *string `json:"region,omitempty"` + // A dictionary of upload-specific configuration details that vary by upload method type. 
The contents and structure depend on the specific upload method being used. + UploadDetails map[string]interface{} `json:"upload_details,omitempty"` +} + +type _UploadMethod UploadMethod + +// NewUploadMethod instantiates a new UploadMethod object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewUploadMethod(type_ string, accessUrl UploadMethodAccessUrl) *UploadMethod { + this := UploadMethod{} + this.Type = type_ + this.AccessUrl = accessUrl + return &this +} + +// NewUploadMethodWithDefaults instantiates a new UploadMethod object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewUploadMethodWithDefaults() *UploadMethod { + this := UploadMethod{} + return &this +} + +// GetType returns the Type field value +func (o *UploadMethod) GetType() string { + if o == nil { + var ret string + return ret + } + + return o.Type +} + +// GetTypeOk returns a tuple with the Type field value +// and a boolean to check if the value has been set. +func (o *UploadMethod) GetTypeOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Type, true +} + +// SetType sets field value +func (o *UploadMethod) SetType(v string) { + o.Type = v +} + +// GetAccessUrl returns the AccessUrl field value +func (o *UploadMethod) GetAccessUrl() UploadMethodAccessUrl { + if o == nil { + var ret UploadMethodAccessUrl + return ret + } + + return o.AccessUrl +} + +// GetAccessUrlOk returns a tuple with the AccessUrl field value +// and a boolean to check if the value has been set. 
+func (o *UploadMethod) GetAccessUrlOk() (*UploadMethodAccessUrl, bool) { + if o == nil { + return nil, false + } + return &o.AccessUrl, true +} + +// SetAccessUrl sets field value +func (o *UploadMethod) SetAccessUrl(v UploadMethodAccessUrl) { + o.AccessUrl = v +} + +// GetRegion returns the Region field value if set, zero value otherwise. +func (o *UploadMethod) GetRegion() string { + if o == nil || IsNil(o.Region) { + var ret string + return ret + } + return *o.Region +} + +// GetRegionOk returns a tuple with the Region field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *UploadMethod) GetRegionOk() (*string, bool) { + if o == nil || IsNil(o.Region) { + return nil, false + } + return o.Region, true +} + +// HasRegion returns a boolean if a field has been set. +func (o *UploadMethod) HasRegion() bool { + if o != nil && !IsNil(o.Region) { + return true + } + + return false +} + +// SetRegion gets a reference to the given string and assigns it to the Region field. +func (o *UploadMethod) SetRegion(v string) { + o.Region = &v +} + +// GetUploadDetails returns the UploadDetails field value if set, zero value otherwise. +func (o *UploadMethod) GetUploadDetails() map[string]interface{} { + if o == nil || IsNil(o.UploadDetails) { + var ret map[string]interface{} + return ret + } + return o.UploadDetails +} + +// GetUploadDetailsOk returns a tuple with the UploadDetails field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *UploadMethod) GetUploadDetailsOk() (map[string]interface{}, bool) { + if o == nil || IsNil(o.UploadDetails) { + return map[string]interface{}{}, false + } + return o.UploadDetails, true +} + +// HasUploadDetails returns a boolean if a field has been set. 
+func (o *UploadMethod) HasUploadDetails() bool { + if o != nil && !IsNil(o.UploadDetails) { + return true + } + + return false +} + +// SetUploadDetails gets a reference to the given map[string]interface{} and assigns it to the UploadDetails field. +func (o *UploadMethod) SetUploadDetails(v map[string]interface{}) { + o.UploadDetails = v +} + +func (o UploadMethod) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o UploadMethod) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["type"] = o.Type + toSerialize["access_url"] = o.AccessUrl + if !IsNil(o.Region) { + toSerialize["region"] = o.Region + } + if !IsNil(o.UploadDetails) { + toSerialize["upload_details"] = o.UploadDetails + } + return toSerialize, nil +} + +func (o *UploadMethod) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "type", + "access_url", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varUploadMethod := _UploadMethod{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varUploadMethod) + + if err != nil { + return err + } + + *o = UploadMethod(varUploadMethod) + + return err +} + +type NullableUploadMethod struct { + value *UploadMethod + isSet bool +} + +func (v NullableUploadMethod) Get() *UploadMethod { + return v.value +} + +func (v *NullableUploadMethod) Set(val *UploadMethod) { + v.value = val + v.isSet = true +} + +func (v NullableUploadMethod) IsSet() bool { + return v.isSet +} + +func (v *NullableUploadMethod) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableUploadMethod(val *UploadMethod) *NullableUploadMethod { + return &NullableUploadMethod{value: val, isSet: true} +} + +func (v NullableUploadMethod) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableUploadMethod) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_upload_method_access_url.go b/apigen/drs/model_upload_method_access_url.go new file mode 100644 index 0000000..ef097f6 --- /dev/null +++ b/apigen/drs/model_upload_method_access_url.go @@ -0,0 +1,197 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the UploadMethodAccessUrl type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &UploadMethodAccessUrl{} + +// UploadMethodAccessUrl An `AccessURL` that specifies where the file will be accessible after upload. This URL will be used as the access_url in the eventual DRS object, ensuring consistency between upload and retrieval operations. +type UploadMethodAccessUrl struct { + // Inlined Upload URL context. + Url string `json:"url"` + // Inlined Upload Headers. + Headers []string `json:"headers,omitempty"` +} + +type _UploadMethodAccessUrl UploadMethodAccessUrl + +// NewUploadMethodAccessUrl instantiates a new UploadMethodAccessUrl object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewUploadMethodAccessUrl(url string) *UploadMethodAccessUrl { + this := UploadMethodAccessUrl{} + this.Url = url + return &this +} + +// NewUploadMethodAccessUrlWithDefaults instantiates a new UploadMethodAccessUrl object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewUploadMethodAccessUrlWithDefaults() *UploadMethodAccessUrl { + this := UploadMethodAccessUrl{} + return &this +} + +// GetUrl returns the Url field value +func (o *UploadMethodAccessUrl) GetUrl() string { + if o == nil { + var ret string + return ret + } + + return o.Url +} + +// GetUrlOk returns a tuple with the Url field value +// and a boolean to check if the value has been set. 
+func (o *UploadMethodAccessUrl) GetUrlOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Url, true +} + +// SetUrl sets field value +func (o *UploadMethodAccessUrl) SetUrl(v string) { + o.Url = v +} + +// GetHeaders returns the Headers field value if set, zero value otherwise. +func (o *UploadMethodAccessUrl) GetHeaders() []string { + if o == nil || IsNil(o.Headers) { + var ret []string + return ret + } + return o.Headers +} + +// GetHeadersOk returns a tuple with the Headers field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *UploadMethodAccessUrl) GetHeadersOk() ([]string, bool) { + if o == nil || IsNil(o.Headers) { + return nil, false + } + return o.Headers, true +} + +// HasHeaders returns a boolean if a field has been set. +func (o *UploadMethodAccessUrl) HasHeaders() bool { + if o != nil && !IsNil(o.Headers) { + return true + } + + return false +} + +// SetHeaders gets a reference to the given []string and assigns it to the Headers field. +func (o *UploadMethodAccessUrl) SetHeaders(v []string) { + o.Headers = v +} + +func (o UploadMethodAccessUrl) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o UploadMethodAccessUrl) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["url"] = o.Url + if !IsNil(o.Headers) { + toSerialize["headers"] = o.Headers + } + return toSerialize, nil +} + +func (o *UploadMethodAccessUrl) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "url", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varUploadMethodAccessUrl := _UploadMethodAccessUrl{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varUploadMethodAccessUrl) + + if err != nil { + return err + } + + *o = UploadMethodAccessUrl(varUploadMethodAccessUrl) + + return err +} + +type NullableUploadMethodAccessUrl struct { + value *UploadMethodAccessUrl + isSet bool +} + +func (v NullableUploadMethodAccessUrl) Get() *UploadMethodAccessUrl { + return v.value +} + +func (v *NullableUploadMethodAccessUrl) Set(val *UploadMethodAccessUrl) { + v.value = val + v.isSet = true +} + +func (v NullableUploadMethodAccessUrl) IsSet() bool { + return v.isSet +} + +func (v *NullableUploadMethodAccessUrl) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableUploadMethodAccessUrl(val *UploadMethodAccessUrl) *NullableUploadMethodAccessUrl { + return &NullableUploadMethodAccessUrl{value: val, isSet: true} +} + +func (v NullableUploadMethodAccessUrl) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableUploadMethodAccessUrl) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_upload_request.go b/apigen/drs/model_upload_request.go new file mode 100644 index 0000000..25e7dfc --- /dev/null +++ b/apigen/drs/model_upload_request.go @@ -0,0 +1,197 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code 
generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the UploadRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &UploadRequest{} + +// UploadRequest struct for UploadRequest +type UploadRequest struct { + // Array of upload requests for files + Requests []UploadRequestObject `json:"requests"` + // Optional array of GA4GH Passport JWTs for authorization + Passports []string `json:"passports,omitempty"` +} + +type _UploadRequest UploadRequest + +// NewUploadRequest instantiates a new UploadRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewUploadRequest(requests []UploadRequestObject) *UploadRequest { + this := UploadRequest{} + this.Requests = requests + return &this +} + +// NewUploadRequestWithDefaults instantiates a new UploadRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewUploadRequestWithDefaults() *UploadRequest { + this := UploadRequest{} + return &this +} + +// GetRequests returns the Requests field value +func (o *UploadRequest) GetRequests() []UploadRequestObject { + if o == nil { + var ret []UploadRequestObject + return ret + } + + return o.Requests +} + +// GetRequestsOk returns a tuple with the Requests field value +// and a boolean to check if the value has been set. 
+func (o *UploadRequest) GetRequestsOk() ([]UploadRequestObject, bool) { + if o == nil { + return nil, false + } + return o.Requests, true +} + +// SetRequests sets field value +func (o *UploadRequest) SetRequests(v []UploadRequestObject) { + o.Requests = v +} + +// GetPassports returns the Passports field value if set, zero value otherwise. +func (o *UploadRequest) GetPassports() []string { + if o == nil || IsNil(o.Passports) { + var ret []string + return ret + } + return o.Passports +} + +// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *UploadRequest) GetPassportsOk() ([]string, bool) { + if o == nil || IsNil(o.Passports) { + return nil, false + } + return o.Passports, true +} + +// HasPassports returns a boolean if a field has been set. +func (o *UploadRequest) HasPassports() bool { + if o != nil && !IsNil(o.Passports) { + return true + } + + return false +} + +// SetPassports gets a reference to the given []string and assigns it to the Passports field. +func (o *UploadRequest) SetPassports(v []string) { + o.Passports = v +} + +func (o UploadRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o UploadRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["requests"] = o.Requests + if !IsNil(o.Passports) { + toSerialize["passports"] = o.Passports + } + return toSerialize, nil +} + +func (o *UploadRequest) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "requests", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varUploadRequest := _UploadRequest{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varUploadRequest) + + if err != nil { + return err + } + + *o = UploadRequest(varUploadRequest) + + return err +} + +type NullableUploadRequest struct { + value *UploadRequest + isSet bool +} + +func (v NullableUploadRequest) Get() *UploadRequest { + return v.value +} + +func (v *NullableUploadRequest) Set(val *UploadRequest) { + v.value = val + v.isSet = true +} + +func (v NullableUploadRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableUploadRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableUploadRequest(val *UploadRequest) *NullableUploadRequest { + return &NullableUploadRequest{value: val, isSet: true} +} + +func (v NullableUploadRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableUploadRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_upload_request_object.go b/apigen/drs/model_upload_request_object.go new file mode 100644 index 0000000..807ce4d --- /dev/null +++ b/apigen/drs/model_upload_request_object.go @@ -0,0 +1,321 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the UploadRequestObject type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &UploadRequestObject{} + +// UploadRequestObject struct for UploadRequestObject +type UploadRequestObject struct { + // The name of the file to upload + Name string `json:"name"` + // Size of the file in bytes + Size int64 `json:"size"` + // MIME type of the file + MimeType string `json:"mime_type"` + // Array of checksums for file integrity verification + Checksums []Checksum `json:"checksums"` + // Optional description of the file + Description *string `json:"description,omitempty"` + // Optional array of alternative names for the file + Aliases []string `json:"aliases,omitempty"` +} + +type _UploadRequestObject UploadRequestObject + +// NewUploadRequestObject instantiates a new UploadRequestObject object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewUploadRequestObject(name string, size int64, mimeType string, checksums []Checksum) *UploadRequestObject { + this := UploadRequestObject{} + this.Name = name + this.Size = size + this.MimeType = mimeType + this.Checksums = checksums + return &this +} + +// NewUploadRequestObjectWithDefaults instantiates a new UploadRequestObject object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewUploadRequestObjectWithDefaults() *UploadRequestObject { + this := UploadRequestObject{} + return &this +} + +// GetName returns the Name field value +func (o *UploadRequestObject) GetName() string { + if o == nil { + var ret string + return ret + } + + return o.Name +} + +// GetNameOk returns a tuple with the Name field value +// and a boolean to 
check if the value has been set. +func (o *UploadRequestObject) GetNameOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Name, true +} + +// SetName sets field value +func (o *UploadRequestObject) SetName(v string) { + o.Name = v +} + +// GetSize returns the Size field value +func (o *UploadRequestObject) GetSize() int64 { + if o == nil { + var ret int64 + return ret + } + + return o.Size +} + +// GetSizeOk returns a tuple with the Size field value +// and a boolean to check if the value has been set. +func (o *UploadRequestObject) GetSizeOk() (*int64, bool) { + if o == nil { + return nil, false + } + return &o.Size, true +} + +// SetSize sets field value +func (o *UploadRequestObject) SetSize(v int64) { + o.Size = v +} + +// GetMimeType returns the MimeType field value +func (o *UploadRequestObject) GetMimeType() string { + if o == nil { + var ret string + return ret + } + + return o.MimeType +} + +// GetMimeTypeOk returns a tuple with the MimeType field value +// and a boolean to check if the value has been set. +func (o *UploadRequestObject) GetMimeTypeOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.MimeType, true +} + +// SetMimeType sets field value +func (o *UploadRequestObject) SetMimeType(v string) { + o.MimeType = v +} + +// GetChecksums returns the Checksums field value +func (o *UploadRequestObject) GetChecksums() []Checksum { + if o == nil { + var ret []Checksum + return ret + } + + return o.Checksums +} + +// GetChecksumsOk returns a tuple with the Checksums field value +// and a boolean to check if the value has been set. +func (o *UploadRequestObject) GetChecksumsOk() ([]Checksum, bool) { + if o == nil { + return nil, false + } + return o.Checksums, true +} + +// SetChecksums sets field value +func (o *UploadRequestObject) SetChecksums(v []Checksum) { + o.Checksums = v +} + +// GetDescription returns the Description field value if set, zero value otherwise. 
+func (o *UploadRequestObject) GetDescription() string { + if o == nil || IsNil(o.Description) { + var ret string + return ret + } + return *o.Description +} + +// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *UploadRequestObject) GetDescriptionOk() (*string, bool) { + if o == nil || IsNil(o.Description) { + return nil, false + } + return o.Description, true +} + +// HasDescription returns a boolean if a field has been set. +func (o *UploadRequestObject) HasDescription() bool { + if o != nil && !IsNil(o.Description) { + return true + } + + return false +} + +// SetDescription gets a reference to the given string and assigns it to the Description field. +func (o *UploadRequestObject) SetDescription(v string) { + o.Description = &v +} + +// GetAliases returns the Aliases field value if set, zero value otherwise. +func (o *UploadRequestObject) GetAliases() []string { + if o == nil || IsNil(o.Aliases) { + var ret []string + return ret + } + return o.Aliases +} + +// GetAliasesOk returns a tuple with the Aliases field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *UploadRequestObject) GetAliasesOk() ([]string, bool) { + if o == nil || IsNil(o.Aliases) { + return nil, false + } + return o.Aliases, true +} + +// HasAliases returns a boolean if a field has been set. +func (o *UploadRequestObject) HasAliases() bool { + if o != nil && !IsNil(o.Aliases) { + return true + } + + return false +} + +// SetAliases gets a reference to the given []string and assigns it to the Aliases field. 
+func (o *UploadRequestObject) SetAliases(v []string) { + o.Aliases = v +} + +func (o UploadRequestObject) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o UploadRequestObject) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["name"] = o.Name + toSerialize["size"] = o.Size + toSerialize["mime_type"] = o.MimeType + toSerialize["checksums"] = o.Checksums + if !IsNil(o.Description) { + toSerialize["description"] = o.Description + } + if !IsNil(o.Aliases) { + toSerialize["aliases"] = o.Aliases + } + return toSerialize, nil +} + +func (o *UploadRequestObject) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "name", + "size", + "mime_type", + "checksums", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varUploadRequestObject := _UploadRequestObject{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varUploadRequestObject) + + if err != nil { + return err + } + + *o = UploadRequestObject(varUploadRequestObject) + + return err +} + +type NullableUploadRequestObject struct { + value *UploadRequestObject + isSet bool +} + +func (v NullableUploadRequestObject) Get() *UploadRequestObject { + return v.value +} + +func (v *NullableUploadRequestObject) Set(val *UploadRequestObject) { + v.value = val + v.isSet = 
true +} + +func (v NullableUploadRequestObject) IsSet() bool { + return v.isSet +} + +func (v *NullableUploadRequestObject) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableUploadRequestObject(val *UploadRequestObject) *NullableUploadRequestObject { + return &NullableUploadRequestObject{value: val, isSet: true} +} + +func (v NullableUploadRequestObject) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableUploadRequestObject) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_upload_response.go b/apigen/drs/model_upload_response.go new file mode 100644 index 0000000..10f39af --- /dev/null +++ b/apigen/drs/model_upload_response.go @@ -0,0 +1,160 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the UploadResponse type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &UploadResponse{} + +// UploadResponse struct for UploadResponse +type UploadResponse struct { + // List of upload responses for the requested files + Responses []UploadResponseObject `json:"responses"` +} + +type _UploadResponse UploadResponse + +// NewUploadResponse instantiates a new UploadResponse object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewUploadResponse(responses []UploadResponseObject) *UploadResponse { + this := UploadResponse{} + this.Responses = responses + return &this +} + +// NewUploadResponseWithDefaults instantiates a new UploadResponse object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewUploadResponseWithDefaults() *UploadResponse { + this := UploadResponse{} + return &this +} + +// GetResponses returns the Responses field value +func (o *UploadResponse) GetResponses() []UploadResponseObject { + if o == nil { + var ret []UploadResponseObject + return ret + } + + return o.Responses +} + +// GetResponsesOk returns a tuple with the Responses field value +// and a boolean to check if the value has been set. 
+func (o *UploadResponse) GetResponsesOk() ([]UploadResponseObject, bool) { + if o == nil { + return nil, false + } + return o.Responses, true +} + +// SetResponses sets field value +func (o *UploadResponse) SetResponses(v []UploadResponseObject) { + o.Responses = v +} + +func (o UploadResponse) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o UploadResponse) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["responses"] = o.Responses + return toSerialize, nil +} + +func (o *UploadResponse) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "responses", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varUploadResponse := _UploadResponse{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varUploadResponse) + + if err != nil { + return err + } + + *o = UploadResponse(varUploadResponse) + + return err +} + +type NullableUploadResponse struct { + value *UploadResponse + isSet bool +} + +func (v NullableUploadResponse) Get() *UploadResponse { + return v.value +} + +func (v *NullableUploadResponse) Set(val *UploadResponse) { + v.value = val + v.isSet = true +} + +func (v NullableUploadResponse) IsSet() bool { + return v.isSet +} + +func (v *NullableUploadResponse) Unset() { + v.value = nil + v.isSet = false 
+} + +func NewNullableUploadResponse(val *UploadResponse) *NullableUploadResponse { + return &NullableUploadResponse{value: val, isSet: true} +} + +func (v NullableUploadResponse) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableUploadResponse) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/model_upload_response_object.go b/apigen/drs/model_upload_response_object.go new file mode 100644 index 0000000..c63169e --- /dev/null +++ b/apigen/drs/model_upload_response_object.go @@ -0,0 +1,358 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the UploadResponseObject type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &UploadResponseObject{} + +// UploadResponseObject struct for UploadResponseObject +type UploadResponseObject struct { + // The name of the file + Name string `json:"name"` + // Size of the file in bytes + Size int64 `json:"size"` + // MIME type of the file + MimeType string `json:"mime_type"` + // Array of checksums for file integrity verification + Checksums []Checksum `json:"checksums"` + // Optional description of the file + Description *string `json:"description,omitempty"` + // Optional array of alternative names + Aliases []string `json:"aliases,omitempty"` + // Available methods for uploading this file + UploadMethods []UploadMethod `json:"upload_methods,omitempty"` +} + +type _UploadResponseObject UploadResponseObject + +// NewUploadResponseObject instantiates a new UploadResponseObject object +// This constructor will assign default values to properties that have it defined, +// and makes sure 
properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewUploadResponseObject(name string, size int64, mimeType string, checksums []Checksum) *UploadResponseObject { + this := UploadResponseObject{} + this.Name = name + this.Size = size + this.MimeType = mimeType + this.Checksums = checksums + return &this +} + +// NewUploadResponseObjectWithDefaults instantiates a new UploadResponseObject object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewUploadResponseObjectWithDefaults() *UploadResponseObject { + this := UploadResponseObject{} + return &this +} + +// GetName returns the Name field value +func (o *UploadResponseObject) GetName() string { + if o == nil { + var ret string + return ret + } + + return o.Name +} + +// GetNameOk returns a tuple with the Name field value +// and a boolean to check if the value has been set. +func (o *UploadResponseObject) GetNameOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Name, true +} + +// SetName sets field value +func (o *UploadResponseObject) SetName(v string) { + o.Name = v +} + +// GetSize returns the Size field value +func (o *UploadResponseObject) GetSize() int64 { + if o == nil { + var ret int64 + return ret + } + + return o.Size +} + +// GetSizeOk returns a tuple with the Size field value +// and a boolean to check if the value has been set. 
+func (o *UploadResponseObject) GetSizeOk() (*int64, bool) { + if o == nil { + return nil, false + } + return &o.Size, true +} + +// SetSize sets field value +func (o *UploadResponseObject) SetSize(v int64) { + o.Size = v +} + +// GetMimeType returns the MimeType field value +func (o *UploadResponseObject) GetMimeType() string { + if o == nil { + var ret string + return ret + } + + return o.MimeType +} + +// GetMimeTypeOk returns a tuple with the MimeType field value +// and a boolean to check if the value has been set. +func (o *UploadResponseObject) GetMimeTypeOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.MimeType, true +} + +// SetMimeType sets field value +func (o *UploadResponseObject) SetMimeType(v string) { + o.MimeType = v +} + +// GetChecksums returns the Checksums field value +func (o *UploadResponseObject) GetChecksums() []Checksum { + if o == nil { + var ret []Checksum + return ret + } + + return o.Checksums +} + +// GetChecksumsOk returns a tuple with the Checksums field value +// and a boolean to check if the value has been set. +func (o *UploadResponseObject) GetChecksumsOk() ([]Checksum, bool) { + if o == nil { + return nil, false + } + return o.Checksums, true +} + +// SetChecksums sets field value +func (o *UploadResponseObject) SetChecksums(v []Checksum) { + o.Checksums = v +} + +// GetDescription returns the Description field value if set, zero value otherwise. +func (o *UploadResponseObject) GetDescription() string { + if o == nil || IsNil(o.Description) { + var ret string + return ret + } + return *o.Description +} + +// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *UploadResponseObject) GetDescriptionOk() (*string, bool) { + if o == nil || IsNil(o.Description) { + return nil, false + } + return o.Description, true +} + +// HasDescription returns a boolean if a field has been set. 
+func (o *UploadResponseObject) HasDescription() bool { + if o != nil && !IsNil(o.Description) { + return true + } + + return false +} + +// SetDescription gets a reference to the given string and assigns it to the Description field. +func (o *UploadResponseObject) SetDescription(v string) { + o.Description = &v +} + +// GetAliases returns the Aliases field value if set, zero value otherwise. +func (o *UploadResponseObject) GetAliases() []string { + if o == nil || IsNil(o.Aliases) { + var ret []string + return ret + } + return o.Aliases +} + +// GetAliasesOk returns a tuple with the Aliases field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *UploadResponseObject) GetAliasesOk() ([]string, bool) { + if o == nil || IsNil(o.Aliases) { + return nil, false + } + return o.Aliases, true +} + +// HasAliases returns a boolean if a field has been set. +func (o *UploadResponseObject) HasAliases() bool { + if o != nil && !IsNil(o.Aliases) { + return true + } + + return false +} + +// SetAliases gets a reference to the given []string and assigns it to the Aliases field. +func (o *UploadResponseObject) SetAliases(v []string) { + o.Aliases = v +} + +// GetUploadMethods returns the UploadMethods field value if set, zero value otherwise. +func (o *UploadResponseObject) GetUploadMethods() []UploadMethod { + if o == nil || IsNil(o.UploadMethods) { + var ret []UploadMethod + return ret + } + return o.UploadMethods +} + +// GetUploadMethodsOk returns a tuple with the UploadMethods field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *UploadResponseObject) GetUploadMethodsOk() ([]UploadMethod, bool) { + if o == nil || IsNil(o.UploadMethods) { + return nil, false + } + return o.UploadMethods, true +} + +// HasUploadMethods returns a boolean if a field has been set. 
+func (o *UploadResponseObject) HasUploadMethods() bool { + if o != nil && !IsNil(o.UploadMethods) { + return true + } + + return false +} + +// SetUploadMethods gets a reference to the given []UploadMethod and assigns it to the UploadMethods field. +func (o *UploadResponseObject) SetUploadMethods(v []UploadMethod) { + o.UploadMethods = v +} + +func (o UploadResponseObject) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o UploadResponseObject) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["name"] = o.Name + toSerialize["size"] = o.Size + toSerialize["mime_type"] = o.MimeType + toSerialize["checksums"] = o.Checksums + if !IsNil(o.Description) { + toSerialize["description"] = o.Description + } + if !IsNil(o.Aliases) { + toSerialize["aliases"] = o.Aliases + } + if !IsNil(o.UploadMethods) { + toSerialize["upload_methods"] = o.UploadMethods + } + return toSerialize, nil +} + +func (o *UploadResponseObject) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "name", + "size", + "mime_type", + "checksums", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varUploadResponseObject := _UploadResponseObject{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varUploadResponseObject) + + if err != nil { + return err + } + + *o = UploadResponseObject(varUploadResponseObject) + + return err +} + +type NullableUploadResponseObject struct { + value *UploadResponseObject + isSet bool +} + +func (v NullableUploadResponseObject) Get() *UploadResponseObject { + return v.value +} + +func (v *NullableUploadResponseObject) Set(val *UploadResponseObject) { + v.value = val + v.isSet = true +} + +func (v NullableUploadResponseObject) IsSet() bool { + return v.isSet +} + +func (v *NullableUploadResponseObject) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableUploadResponseObject(val *UploadResponseObject) *NullableUploadResponseObject { + return &NullableUploadResponseObject{value: val, isSet: true} +} + +func (v NullableUploadResponseObject) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableUploadResponseObject) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/drs/utils.go b/apigen/drs/utils.go new file mode 100644 index 0000000..4b16c12 --- /dev/null +++ b/apigen/drs/utils.go @@ -0,0 +1,362 @@ +/* +Data Repository Service + +No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + +API version: 1.5.0 +Contact: ga4gh-cloud@ga4gh.org +*/ + +// Code generated by OpenAPI 
Generator (https://openapi-generator.tech); DO NOT EDIT. + +package drs + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "time" +) + +// PtrBool is a helper routine that returns a pointer to given boolean value. +func PtrBool(v bool) *bool { return &v } + +// PtrInt is a helper routine that returns a pointer to given integer value. +func PtrInt(v int) *int { return &v } + +// PtrInt32 is a helper routine that returns a pointer to given integer value. +func PtrInt32(v int32) *int32 { return &v } + +// PtrInt64 is a helper routine that returns a pointer to given integer value. +func PtrInt64(v int64) *int64 { return &v } + +// PtrFloat32 is a helper routine that returns a pointer to given float value. +func PtrFloat32(v float32) *float32 { return &v } + +// PtrFloat64 is a helper routine that returns a pointer to given float value. +func PtrFloat64(v float64) *float64 { return &v } + +// PtrString is a helper routine that returns a pointer to given string value. +func PtrString(v string) *string { return &v } + +// PtrTime is helper routine that returns a pointer to given Time value. 
+func PtrTime(v time.Time) *time.Time { return &v } + +type NullableBool struct { + value *bool + isSet bool +} + +func (v NullableBool) Get() *bool { + return v.value +} + +func (v *NullableBool) Set(val *bool) { + v.value = val + v.isSet = true +} + +func (v NullableBool) IsSet() bool { + return v.isSet +} + +func (v *NullableBool) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBool(val *bool) *NullableBool { + return &NullableBool{value: val, isSet: true} +} + +func (v NullableBool) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBool) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableInt struct { + value *int + isSet bool +} + +func (v NullableInt) Get() *int { + return v.value +} + +func (v *NullableInt) Set(val *int) { + v.value = val + v.isSet = true +} + +func (v NullableInt) IsSet() bool { + return v.isSet +} + +func (v *NullableInt) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt(val *int) *NullableInt { + return &NullableInt{value: val, isSet: true} +} + +func (v NullableInt) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableInt32 struct { + value *int32 + isSet bool +} + +func (v NullableInt32) Get() *int32 { + return v.value +} + +func (v *NullableInt32) Set(val *int32) { + v.value = val + v.isSet = true +} + +func (v NullableInt32) IsSet() bool { + return v.isSet +} + +func (v *NullableInt32) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt32(val *int32) *NullableInt32 { + return &NullableInt32{value: val, isSet: true} +} + +func (v NullableInt32) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt32) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type 
NullableInt64 struct { + value *int64 + isSet bool +} + +func (v NullableInt64) Get() *int64 { + return v.value +} + +func (v *NullableInt64) Set(val *int64) { + v.value = val + v.isSet = true +} + +func (v NullableInt64) IsSet() bool { + return v.isSet +} + +func (v *NullableInt64) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt64(val *int64) *NullableInt64 { + return &NullableInt64{value: val, isSet: true} +} + +func (v NullableInt64) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt64) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableFloat32 struct { + value *float32 + isSet bool +} + +func (v NullableFloat32) Get() *float32 { + return v.value +} + +func (v *NullableFloat32) Set(val *float32) { + v.value = val + v.isSet = true +} + +func (v NullableFloat32) IsSet() bool { + return v.isSet +} + +func (v *NullableFloat32) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableFloat32(val *float32) *NullableFloat32 { + return &NullableFloat32{value: val, isSet: true} +} + +func (v NullableFloat32) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableFloat32) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableFloat64 struct { + value *float64 + isSet bool +} + +func (v NullableFloat64) Get() *float64 { + return v.value +} + +func (v *NullableFloat64) Set(val *float64) { + v.value = val + v.isSet = true +} + +func (v NullableFloat64) IsSet() bool { + return v.isSet +} + +func (v *NullableFloat64) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableFloat64(val *float64) *NullableFloat64 { + return &NullableFloat64{value: val, isSet: true} +} + +func (v NullableFloat64) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableFloat64) UnmarshalJSON(src []byte) error { + v.isSet = true + return 
json.Unmarshal(src, &v.value) +} + +type NullableString struct { + value *string + isSet bool +} + +func (v NullableString) Get() *string { + return v.value +} + +func (v *NullableString) Set(val *string) { + v.value = val + v.isSet = true +} + +func (v NullableString) IsSet() bool { + return v.isSet +} + +func (v *NullableString) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableString(val *string) *NullableString { + return &NullableString{value: val, isSet: true} +} + +func (v NullableString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableString) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableTime struct { + value *time.Time + isSet bool +} + +func (v NullableTime) Get() *time.Time { + return v.value +} + +func (v *NullableTime) Set(val *time.Time) { + v.value = val + v.isSet = true +} + +func (v NullableTime) IsSet() bool { + return v.isSet +} + +func (v *NullableTime) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableTime(val *time.Time) *NullableTime { + return &NullableTime{value: val, isSet: true} +} + +func (v NullableTime) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableTime) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +// IsNil checks if an input is nil +func IsNil(i interface{}) bool { + if i == nil { + return true + } + switch reflect.TypeOf(i).Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: + return reflect.ValueOf(i).IsNil() + case reflect.Array: + return reflect.ValueOf(i).IsZero() + } + return false +} + +type MappedNullable interface { + ToMap() (map[string]interface{}, error) +} + +// A wrapper for strict JSON decoding +func newStrictDecoder(data []byte) *json.Decoder { + dec := json.NewDecoder(bytes.NewBuffer(data)) + dec.DisallowUnknownFields() + 
return dec +} + +// Prevent trying to import "fmt" +func reportError(format string, a ...interface{}) error { + return fmt.Errorf(format, a...) +} \ No newline at end of file diff --git a/apigen/internalapi/model_bulk_create_request.go b/apigen/internalapi/model_bulk_create_request.go new file mode 100644 index 0000000..f405e25 --- /dev/null +++ b/apigen/internalapi/model_bulk_create_request.go @@ -0,0 +1,158 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the BulkCreateRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BulkCreateRequest{} + +// BulkCreateRequest struct for BulkCreateRequest +type BulkCreateRequest struct { + Records []InternalRecord `json:"records"` +} + +type _BulkCreateRequest BulkCreateRequest + +// NewBulkCreateRequest instantiates a new BulkCreateRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBulkCreateRequest(records []InternalRecord) *BulkCreateRequest { + this := BulkCreateRequest{} + this.Records = records + return &this +} + +// NewBulkCreateRequestWithDefaults instantiates a new BulkCreateRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBulkCreateRequestWithDefaults() *BulkCreateRequest { + this := BulkCreateRequest{} + return &this +} + +// GetRecords returns the Records field value +func (o *BulkCreateRequest) GetRecords() []InternalRecord { + if o 
== nil { + var ret []InternalRecord + return ret + } + + return o.Records +} + +// GetRecordsOk returns a tuple with the Records field value +// and a boolean to check if the value has been set. +func (o *BulkCreateRequest) GetRecordsOk() ([]InternalRecord, bool) { + if o == nil { + return nil, false + } + return o.Records, true +} + +// SetRecords sets field value +func (o *BulkCreateRequest) SetRecords(v []InternalRecord) { + o.Records = v +} + +func (o BulkCreateRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BulkCreateRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["records"] = o.Records + return toSerialize, nil +} + +func (o *BulkCreateRequest) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "records", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varBulkCreateRequest := _BulkCreateRequest{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varBulkCreateRequest) + + if err != nil { + return err + } + + *o = BulkCreateRequest(varBulkCreateRequest) + + return err +} + +type NullableBulkCreateRequest struct { + value *BulkCreateRequest + isSet bool +} + +func (v NullableBulkCreateRequest) Get() *BulkCreateRequest { + return v.value +} + +func (v *NullableBulkCreateRequest) Set(val *BulkCreateRequest) { + v.value = val + v.isSet = true +} + +func (v NullableBulkCreateRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableBulkCreateRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBulkCreateRequest(val *BulkCreateRequest) *NullableBulkCreateRequest { + return &NullableBulkCreateRequest{value: val, isSet: true} +} + +func (v NullableBulkCreateRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkCreateRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_bulk_documents_request.go b/apigen/internalapi/model_bulk_documents_request.go new file mode 100644 index 0000000..219beb4 --- /dev/null +++ b/apigen/internalapi/model_bulk_documents_request.go @@ -0,0 +1,171 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. 
+ +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" + "fmt" + "gopkg.in/validator.v2" +) + +// BulkDocumentsRequest - struct for BulkDocumentsRequest +type BulkDocumentsRequest struct { + BulkDocumentsRequestOneOf *BulkDocumentsRequestOneOf + ArrayOfString *[]string +} + +// BulkDocumentsRequestOneOfAsBulkDocumentsRequest is a convenience function that returns BulkDocumentsRequestOneOf wrapped in BulkDocumentsRequest +func BulkDocumentsRequestOneOfAsBulkDocumentsRequest(v *BulkDocumentsRequestOneOf) BulkDocumentsRequest { + return BulkDocumentsRequest{ + BulkDocumentsRequestOneOf: v, + } +} + +// []stringAsBulkDocumentsRequest is a convenience function that returns []string wrapped in BulkDocumentsRequest +func ArrayOfStringAsBulkDocumentsRequest(v *[]string) BulkDocumentsRequest { + return BulkDocumentsRequest{ + ArrayOfString: v, + } +} + + +// Unmarshal JSON data into one of the pointers in the struct +func (dst *BulkDocumentsRequest) UnmarshalJSON(data []byte) error { + var err error + match := 0 + // try to unmarshal data into BulkDocumentsRequestOneOf + err = newStrictDecoder(data).Decode(&dst.BulkDocumentsRequestOneOf) + if err == nil { + jsonBulkDocumentsRequestOneOf, _ := json.Marshal(dst.BulkDocumentsRequestOneOf) + if string(jsonBulkDocumentsRequestOneOf) == "{}" { // empty struct + dst.BulkDocumentsRequestOneOf = nil + } else { + if err = validator.Validate(dst.BulkDocumentsRequestOneOf); err != nil { + dst.BulkDocumentsRequestOneOf = nil + } else { + match++ + } + } + } else { + dst.BulkDocumentsRequestOneOf = nil + } + + // try to unmarshal data into ArrayOfString + err = newStrictDecoder(data).Decode(&dst.ArrayOfString) + if err == nil { + jsonArrayOfString, _ := json.Marshal(dst.ArrayOfString) + if string(jsonArrayOfString) == "{}" { // empty struct + dst.ArrayOfString = nil + } else { + if err = validator.Validate(dst.ArrayOfString); 
err != nil { + dst.ArrayOfString = nil + } else { + match++ + } + } + } else { + dst.ArrayOfString = nil + } + + if match > 1 { // more than 1 match + // reset to nil + dst.BulkDocumentsRequestOneOf = nil + dst.ArrayOfString = nil + + return fmt.Errorf("data matches more than one schema in oneOf(BulkDocumentsRequest)") + } else if match == 1 { + return nil // exactly one match + } else { // no match + return fmt.Errorf("data failed to match schemas in oneOf(BulkDocumentsRequest)") + } +} + +// Marshal data from the first non-nil pointers in the struct to JSON +func (src BulkDocumentsRequest) MarshalJSON() ([]byte, error) { + if src.BulkDocumentsRequestOneOf != nil { + return json.Marshal(&src.BulkDocumentsRequestOneOf) + } + + if src.ArrayOfString != nil { + return json.Marshal(&src.ArrayOfString) + } + + return nil, nil // no data in oneOf schemas +} + +// Get the actual instance +func (obj *BulkDocumentsRequest) GetActualInstance() (interface{}) { + if obj == nil { + return nil + } + if obj.BulkDocumentsRequestOneOf != nil { + return obj.BulkDocumentsRequestOneOf + } + + if obj.ArrayOfString != nil { + return obj.ArrayOfString + } + + // all schemas are nil + return nil +} + +// Get the actual instance value +func (obj BulkDocumentsRequest) GetActualInstanceValue() (interface{}) { + if obj.BulkDocumentsRequestOneOf != nil { + return *obj.BulkDocumentsRequestOneOf + } + + if obj.ArrayOfString != nil { + return *obj.ArrayOfString + } + + // all schemas are nil + return nil +} + +type NullableBulkDocumentsRequest struct { + value *BulkDocumentsRequest + isSet bool +} + +func (v NullableBulkDocumentsRequest) Get() *BulkDocumentsRequest { + return v.value +} + +func (v *NullableBulkDocumentsRequest) Set(val *BulkDocumentsRequest) { + v.value = val + v.isSet = true +} + +func (v NullableBulkDocumentsRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableBulkDocumentsRequest) Unset() { + v.value = nil + v.isSet = false +} + +func 
NewNullableBulkDocumentsRequest(val *BulkDocumentsRequest) *NullableBulkDocumentsRequest { + return &NullableBulkDocumentsRequest{value: val, isSet: true} +} + +func (v NullableBulkDocumentsRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkDocumentsRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_bulk_documents_request_one_of.go b/apigen/internalapi/model_bulk_documents_request_one_of.go new file mode 100644 index 0000000..7c9327a --- /dev/null +++ b/apigen/internalapi/model_bulk_documents_request_one_of.go @@ -0,0 +1,162 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" +) + +// checks if the BulkDocumentsRequestOneOf type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BulkDocumentsRequestOneOf{} + +// BulkDocumentsRequestOneOf struct for BulkDocumentsRequestOneOf +type BulkDocumentsRequestOneOf struct { + Ids []string `json:"ids,omitempty"` + Dids []string `json:"dids,omitempty"` +} + +// NewBulkDocumentsRequestOneOf instantiates a new BulkDocumentsRequestOneOf object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBulkDocumentsRequestOneOf() *BulkDocumentsRequestOneOf { + this := BulkDocumentsRequestOneOf{} + return &this +} + +// NewBulkDocumentsRequestOneOfWithDefaults instantiates a new BulkDocumentsRequestOneOf object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't 
guarantee that properties required by API are set +func NewBulkDocumentsRequestOneOfWithDefaults() *BulkDocumentsRequestOneOf { + this := BulkDocumentsRequestOneOf{} + return &this +} + +// GetIds returns the Ids field value if set, zero value otherwise. +func (o *BulkDocumentsRequestOneOf) GetIds() []string { + if o == nil || IsNil(o.Ids) { + var ret []string + return ret + } + return o.Ids +} + +// GetIdsOk returns a tuple with the Ids field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkDocumentsRequestOneOf) GetIdsOk() ([]string, bool) { + if o == nil || IsNil(o.Ids) { + return nil, false + } + return o.Ids, true +} + +// HasIds returns a boolean if a field has been set. +func (o *BulkDocumentsRequestOneOf) HasIds() bool { + if o != nil && !IsNil(o.Ids) { + return true + } + + return false +} + +// SetIds gets a reference to the given []string and assigns it to the Ids field. +func (o *BulkDocumentsRequestOneOf) SetIds(v []string) { + o.Ids = v +} + +// GetDids returns the Dids field value if set, zero value otherwise. +func (o *BulkDocumentsRequestOneOf) GetDids() []string { + if o == nil || IsNil(o.Dids) { + var ret []string + return ret + } + return o.Dids +} + +// GetDidsOk returns a tuple with the Dids field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkDocumentsRequestOneOf) GetDidsOk() ([]string, bool) { + if o == nil || IsNil(o.Dids) { + return nil, false + } + return o.Dids, true +} + +// HasDids returns a boolean if a field has been set. +func (o *BulkDocumentsRequestOneOf) HasDids() bool { + if o != nil && !IsNil(o.Dids) { + return true + } + + return false +} + +// SetDids gets a reference to the given []string and assigns it to the Dids field. 
+func (o *BulkDocumentsRequestOneOf) SetDids(v []string) { + o.Dids = v +} + +func (o BulkDocumentsRequestOneOf) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BulkDocumentsRequestOneOf) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Ids) { + toSerialize["ids"] = o.Ids + } + if !IsNil(o.Dids) { + toSerialize["dids"] = o.Dids + } + return toSerialize, nil +} + +type NullableBulkDocumentsRequestOneOf struct { + value *BulkDocumentsRequestOneOf + isSet bool +} + +func (v NullableBulkDocumentsRequestOneOf) Get() *BulkDocumentsRequestOneOf { + return v.value +} + +func (v *NullableBulkDocumentsRequestOneOf) Set(val *BulkDocumentsRequestOneOf) { + v.value = val + v.isSet = true +} + +func (v NullableBulkDocumentsRequestOneOf) IsSet() bool { + return v.isSet +} + +func (v *NullableBulkDocumentsRequestOneOf) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBulkDocumentsRequestOneOf(val *BulkDocumentsRequestOneOf) *NullableBulkDocumentsRequestOneOf { + return &NullableBulkDocumentsRequestOneOf{value: val, isSet: true} +} + +func (v NullableBulkDocumentsRequestOneOf) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkDocumentsRequestOneOf) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_bulk_hashes_request.go b/apigen/internalapi/model_bulk_hashes_request.go new file mode 100644 index 0000000..f987377 --- /dev/null +++ b/apigen/internalapi/model_bulk_hashes_request.go @@ -0,0 +1,158 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
+ +package internalapi + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the BulkHashesRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BulkHashesRequest{} + +// BulkHashesRequest struct for BulkHashesRequest +type BulkHashesRequest struct { + Hashes []string `json:"hashes"` +} + +type _BulkHashesRequest BulkHashesRequest + +// NewBulkHashesRequest instantiates a new BulkHashesRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBulkHashesRequest(hashes []string) *BulkHashesRequest { + this := BulkHashesRequest{} + this.Hashes = hashes + return &this +} + +// NewBulkHashesRequestWithDefaults instantiates a new BulkHashesRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewBulkHashesRequestWithDefaults() *BulkHashesRequest { + this := BulkHashesRequest{} + return &this +} + +// GetHashes returns the Hashes field value +func (o *BulkHashesRequest) GetHashes() []string { + if o == nil { + var ret []string + return ret + } + + return o.Hashes +} + +// GetHashesOk returns a tuple with the Hashes field value +// and a boolean to check if the value has been set. 
+func (o *BulkHashesRequest) GetHashesOk() ([]string, bool) { + if o == nil { + return nil, false + } + return o.Hashes, true +} + +// SetHashes sets field value +func (o *BulkHashesRequest) SetHashes(v []string) { + o.Hashes = v +} + +func (o BulkHashesRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BulkHashesRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["hashes"] = o.Hashes + return toSerialize, nil +} + +func (o *BulkHashesRequest) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. + requiredProperties := []string{ + "hashes", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varBulkHashesRequest := _BulkHashesRequest{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varBulkHashesRequest) + + if err != nil { + return err + } + + *o = BulkHashesRequest(varBulkHashesRequest) + + return err +} + +type NullableBulkHashesRequest struct { + value *BulkHashesRequest + isSet bool +} + +func (v NullableBulkHashesRequest) Get() *BulkHashesRequest { + return v.value +} + +func (v *NullableBulkHashesRequest) Set(val *BulkHashesRequest) { + v.value = val + v.isSet = true +} + +func (v NullableBulkHashesRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableBulkHashesRequest) Unset() { + v.value = nil + v.isSet = 
false +} + +func NewNullableBulkHashesRequest(val *BulkHashesRequest) *NullableBulkHashesRequest { + return &NullableBulkHashesRequest{value: val, isSet: true} +} + +func (v NullableBulkHashesRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkHashesRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_bulk_sha256_validity_request.go b/apigen/internalapi/model_bulk_sha256_validity_request.go new file mode 100644 index 0000000..72275ae --- /dev/null +++ b/apigen/internalapi/model_bulk_sha256_validity_request.go @@ -0,0 +1,162 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" +) + +// checks if the BulkSHA256ValidityRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &BulkSHA256ValidityRequest{} + +// BulkSHA256ValidityRequest struct for BulkSHA256ValidityRequest +type BulkSHA256ValidityRequest struct { + Sha256 []string `json:"sha256,omitempty"` + Hashes []string `json:"hashes,omitempty"` +} + +// NewBulkSHA256ValidityRequest instantiates a new BulkSHA256ValidityRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewBulkSHA256ValidityRequest() *BulkSHA256ValidityRequest { + this := BulkSHA256ValidityRequest{} + return &this +} + +// NewBulkSHA256ValidityRequestWithDefaults instantiates a new BulkSHA256ValidityRequest object +// This constructor will only assign default values to properties that have it defined, +// but it 
doesn't guarantee that properties required by API are set +func NewBulkSHA256ValidityRequestWithDefaults() *BulkSHA256ValidityRequest { + this := BulkSHA256ValidityRequest{} + return &this +} + +// GetSha256 returns the Sha256 field value if set, zero value otherwise. +func (o *BulkSHA256ValidityRequest) GetSha256() []string { + if o == nil || IsNil(o.Sha256) { + var ret []string + return ret + } + return o.Sha256 +} + +// GetSha256Ok returns a tuple with the Sha256 field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkSHA256ValidityRequest) GetSha256Ok() ([]string, bool) { + if o == nil || IsNil(o.Sha256) { + return nil, false + } + return o.Sha256, true +} + +// HasSha256 returns a boolean if a field has been set. +func (o *BulkSHA256ValidityRequest) HasSha256() bool { + if o != nil && !IsNil(o.Sha256) { + return true + } + + return false +} + +// SetSha256 gets a reference to the given []string and assigns it to the Sha256 field. +func (o *BulkSHA256ValidityRequest) SetSha256(v []string) { + o.Sha256 = v +} + +// GetHashes returns the Hashes field value if set, zero value otherwise. +func (o *BulkSHA256ValidityRequest) GetHashes() []string { + if o == nil || IsNil(o.Hashes) { + var ret []string + return ret + } + return o.Hashes +} + +// GetHashesOk returns a tuple with the Hashes field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *BulkSHA256ValidityRequest) GetHashesOk() ([]string, bool) { + if o == nil || IsNil(o.Hashes) { + return nil, false + } + return o.Hashes, true +} + +// HasHashes returns a boolean if a field has been set. +func (o *BulkSHA256ValidityRequest) HasHashes() bool { + if o != nil && !IsNil(o.Hashes) { + return true + } + + return false +} + +// SetHashes gets a reference to the given []string and assigns it to the Hashes field. 
+func (o *BulkSHA256ValidityRequest) SetHashes(v []string) { + o.Hashes = v +} + +func (o BulkSHA256ValidityRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o BulkSHA256ValidityRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Sha256) { + toSerialize["sha256"] = o.Sha256 + } + if !IsNil(o.Hashes) { + toSerialize["hashes"] = o.Hashes + } + return toSerialize, nil +} + +type NullableBulkSHA256ValidityRequest struct { + value *BulkSHA256ValidityRequest + isSet bool +} + +func (v NullableBulkSHA256ValidityRequest) Get() *BulkSHA256ValidityRequest { + return v.value +} + +func (v *NullableBulkSHA256ValidityRequest) Set(val *BulkSHA256ValidityRequest) { + v.value = val + v.isSet = true +} + +func (v NullableBulkSHA256ValidityRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableBulkSHA256ValidityRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBulkSHA256ValidityRequest(val *BulkSHA256ValidityRequest) *NullableBulkSHA256ValidityRequest { + return &NullableBulkSHA256ValidityRequest{value: val, isSet: true} +} + +func (v NullableBulkSHA256ValidityRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBulkSHA256ValidityRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_delete_by_query_response.go b/apigen/internalapi/model_delete_by_query_response.go new file mode 100644 index 0000000..ec34fbc --- /dev/null +++ b/apigen/internalapi/model_delete_by_query_response.go @@ -0,0 +1,126 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. 
+ +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" +) + +// checks if the DeleteByQueryResponse type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &DeleteByQueryResponse{} + +// DeleteByQueryResponse struct for DeleteByQueryResponse +type DeleteByQueryResponse struct { + Deleted *int32 `json:"deleted,omitempty"` +} + +// NewDeleteByQueryResponse instantiates a new DeleteByQueryResponse object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewDeleteByQueryResponse() *DeleteByQueryResponse { + this := DeleteByQueryResponse{} + return &this +} + +// NewDeleteByQueryResponseWithDefaults instantiates a new DeleteByQueryResponse object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewDeleteByQueryResponseWithDefaults() *DeleteByQueryResponse { + this := DeleteByQueryResponse{} + return &this +} + +// GetDeleted returns the Deleted field value if set, zero value otherwise. +func (o *DeleteByQueryResponse) GetDeleted() int32 { + if o == nil || IsNil(o.Deleted) { + var ret int32 + return ret + } + return *o.Deleted +} + +// GetDeletedOk returns a tuple with the Deleted field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *DeleteByQueryResponse) GetDeletedOk() (*int32, bool) { + if o == nil || IsNil(o.Deleted) { + return nil, false + } + return o.Deleted, true +} + +// HasDeleted returns a boolean if a field has been set. 
+func (o *DeleteByQueryResponse) HasDeleted() bool { + if o != nil && !IsNil(o.Deleted) { + return true + } + + return false +} + +// SetDeleted gets a reference to the given int32 and assigns it to the Deleted field. +func (o *DeleteByQueryResponse) SetDeleted(v int32) { + o.Deleted = &v +} + +func (o DeleteByQueryResponse) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o DeleteByQueryResponse) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Deleted) { + toSerialize["deleted"] = o.Deleted + } + return toSerialize, nil +} + +type NullableDeleteByQueryResponse struct { + value *DeleteByQueryResponse + isSet bool +} + +func (v NullableDeleteByQueryResponse) Get() *DeleteByQueryResponse { + return v.value +} + +func (v *NullableDeleteByQueryResponse) Set(val *DeleteByQueryResponse) { + v.value = val + v.isSet = true +} + +func (v NullableDeleteByQueryResponse) IsSet() bool { + return v.isSet +} + +func (v *NullableDeleteByQueryResponse) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableDeleteByQueryResponse(val *DeleteByQueryResponse) *NullableDeleteByQueryResponse { + return &NullableDeleteByQueryResponse{value: val, isSet: true} +} + +func (v NullableDeleteByQueryResponse) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableDeleteByQueryResponse) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_internal_multipart_complete_request.go b/apigen/internalapi/model_internal_multipart_complete_request.go new file mode 100644 index 0000000..1646e30 --- /dev/null +++ b/apigen/internalapi/model_internal_multipart_complete_request.go @@ -0,0 +1,250 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. 
This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the InternalMultipartCompleteRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &InternalMultipartCompleteRequest{} + +// InternalMultipartCompleteRequest struct for InternalMultipartCompleteRequest +type InternalMultipartCompleteRequest struct { + Key string `json:"key"` + Bucket *string `json:"bucket,omitempty"` + UploadId string `json:"uploadId"` + Parts []InternalMultipartPart `json:"parts"` +} + +type _InternalMultipartCompleteRequest InternalMultipartCompleteRequest + +// NewInternalMultipartCompleteRequest instantiates a new InternalMultipartCompleteRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewInternalMultipartCompleteRequest(key string, uploadId string, parts []InternalMultipartPart) *InternalMultipartCompleteRequest { + this := InternalMultipartCompleteRequest{} + this.Key = key + this.UploadId = uploadId + this.Parts = parts + return &this +} + +// NewInternalMultipartCompleteRequestWithDefaults instantiates a new InternalMultipartCompleteRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewInternalMultipartCompleteRequestWithDefaults() *InternalMultipartCompleteRequest { + this := InternalMultipartCompleteRequest{} + return &this +} + +// GetKey returns the Key field value +func (o *InternalMultipartCompleteRequest) GetKey() string { + if o == nil { + var ret string + return ret + } + + return o.Key +} + +// GetKeyOk 
returns a tuple with the Key field value +// and a boolean to check if the value has been set. +func (o *InternalMultipartCompleteRequest) GetKeyOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Key, true +} + +// SetKey sets field value +func (o *InternalMultipartCompleteRequest) SetKey(v string) { + o.Key = v +} + +// GetBucket returns the Bucket field value if set, zero value otherwise. +func (o *InternalMultipartCompleteRequest) GetBucket() string { + if o == nil || IsNil(o.Bucket) { + var ret string + return ret + } + return *o.Bucket +} + +// GetBucketOk returns a tuple with the Bucket field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalMultipartCompleteRequest) GetBucketOk() (*string, bool) { + if o == nil || IsNil(o.Bucket) { + return nil, false + } + return o.Bucket, true +} + +// HasBucket returns a boolean if a field has been set. +func (o *InternalMultipartCompleteRequest) HasBucket() bool { + if o != nil && !IsNil(o.Bucket) { + return true + } + + return false +} + +// SetBucket gets a reference to the given string and assigns it to the Bucket field. +func (o *InternalMultipartCompleteRequest) SetBucket(v string) { + o.Bucket = &v +} + +// GetUploadId returns the UploadId field value +func (o *InternalMultipartCompleteRequest) GetUploadId() string { + if o == nil { + var ret string + return ret + } + + return o.UploadId +} + +// GetUploadIdOk returns a tuple with the UploadId field value +// and a boolean to check if the value has been set. 
+func (o *InternalMultipartCompleteRequest) GetUploadIdOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.UploadId, true +} + +// SetUploadId sets field value +func (o *InternalMultipartCompleteRequest) SetUploadId(v string) { + o.UploadId = v +} + +// GetParts returns the Parts field value +func (o *InternalMultipartCompleteRequest) GetParts() []InternalMultipartPart { + if o == nil { + var ret []InternalMultipartPart + return ret + } + + return o.Parts +} + +// GetPartsOk returns a tuple with the Parts field value +// and a boolean to check if the value has been set. +func (o *InternalMultipartCompleteRequest) GetPartsOk() ([]InternalMultipartPart, bool) { + if o == nil { + return nil, false + } + return o.Parts, true +} + +// SetParts sets field value +func (o *InternalMultipartCompleteRequest) SetParts(v []InternalMultipartPart) { + o.Parts = v +} + +func (o InternalMultipartCompleteRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o InternalMultipartCompleteRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["key"] = o.Key + if !IsNil(o.Bucket) { + toSerialize["bucket"] = o.Bucket + } + toSerialize["uploadId"] = o.UploadId + toSerialize["parts"] = o.Parts + return toSerialize, nil +} + +func (o *InternalMultipartCompleteRequest) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "key", + "uploadId", + "parts", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varInternalMultipartCompleteRequest := _InternalMultipartCompleteRequest{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varInternalMultipartCompleteRequest) + + if err != nil { + return err + } + + *o = InternalMultipartCompleteRequest(varInternalMultipartCompleteRequest) + + return err +} + +type NullableInternalMultipartCompleteRequest struct { + value *InternalMultipartCompleteRequest + isSet bool +} + +func (v NullableInternalMultipartCompleteRequest) Get() *InternalMultipartCompleteRequest { + return v.value +} + +func (v *NullableInternalMultipartCompleteRequest) Set(val *InternalMultipartCompleteRequest) { + v.value = val + v.isSet = true +} + +func (v NullableInternalMultipartCompleteRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableInternalMultipartCompleteRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInternalMultipartCompleteRequest(val *InternalMultipartCompleteRequest) *NullableInternalMultipartCompleteRequest { + return &NullableInternalMultipartCompleteRequest{value: val, isSet: true} +} + +func (v NullableInternalMultipartCompleteRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInternalMultipartCompleteRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_internal_multipart_init_request.go b/apigen/internalapi/model_internal_multipart_init_request.go new file mode 100644 index 0000000..ca7fba3 --- 
/dev/null +++ b/apigen/internalapi/model_internal_multipart_init_request.go @@ -0,0 +1,198 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" +) + +// checks if the InternalMultipartInitRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &InternalMultipartInitRequest{} + +// InternalMultipartInitRequest struct for InternalMultipartInitRequest +type InternalMultipartInitRequest struct { + Guid *string `json:"guid,omitempty"` + FileName *string `json:"file_name,omitempty"` + Bucket *string `json:"bucket,omitempty"` +} + +// NewInternalMultipartInitRequest instantiates a new InternalMultipartInitRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewInternalMultipartInitRequest() *InternalMultipartInitRequest { + this := InternalMultipartInitRequest{} + return &this +} + +// NewInternalMultipartInitRequestWithDefaults instantiates a new InternalMultipartInitRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewInternalMultipartInitRequestWithDefaults() *InternalMultipartInitRequest { + this := InternalMultipartInitRequest{} + return &this +} + +// GetGuid returns the Guid field value if set, zero value otherwise. 
+func (o *InternalMultipartInitRequest) GetGuid() string { + if o == nil || IsNil(o.Guid) { + var ret string + return ret + } + return *o.Guid +} + +// GetGuidOk returns a tuple with the Guid field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalMultipartInitRequest) GetGuidOk() (*string, bool) { + if o == nil || IsNil(o.Guid) { + return nil, false + } + return o.Guid, true +} + +// HasGuid returns a boolean if a field has been set. +func (o *InternalMultipartInitRequest) HasGuid() bool { + if o != nil && !IsNil(o.Guid) { + return true + } + + return false +} + +// SetGuid gets a reference to the given string and assigns it to the Guid field. +func (o *InternalMultipartInitRequest) SetGuid(v string) { + o.Guid = &v +} + +// GetFileName returns the FileName field value if set, zero value otherwise. +func (o *InternalMultipartInitRequest) GetFileName() string { + if o == nil || IsNil(o.FileName) { + var ret string + return ret + } + return *o.FileName +} + +// GetFileNameOk returns a tuple with the FileName field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalMultipartInitRequest) GetFileNameOk() (*string, bool) { + if o == nil || IsNil(o.FileName) { + return nil, false + } + return o.FileName, true +} + +// HasFileName returns a boolean if a field has been set. +func (o *InternalMultipartInitRequest) HasFileName() bool { + if o != nil && !IsNil(o.FileName) { + return true + } + + return false +} + +// SetFileName gets a reference to the given string and assigns it to the FileName field. +func (o *InternalMultipartInitRequest) SetFileName(v string) { + o.FileName = &v +} + +// GetBucket returns the Bucket field value if set, zero value otherwise. 
+func (o *InternalMultipartInitRequest) GetBucket() string { + if o == nil || IsNil(o.Bucket) { + var ret string + return ret + } + return *o.Bucket +} + +// GetBucketOk returns a tuple with the Bucket field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalMultipartInitRequest) GetBucketOk() (*string, bool) { + if o == nil || IsNil(o.Bucket) { + return nil, false + } + return o.Bucket, true +} + +// HasBucket returns a boolean if a field has been set. +func (o *InternalMultipartInitRequest) HasBucket() bool { + if o != nil && !IsNil(o.Bucket) { + return true + } + + return false +} + +// SetBucket gets a reference to the given string and assigns it to the Bucket field. +func (o *InternalMultipartInitRequest) SetBucket(v string) { + o.Bucket = &v +} + +func (o InternalMultipartInitRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o InternalMultipartInitRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Guid) { + toSerialize["guid"] = o.Guid + } + if !IsNil(o.FileName) { + toSerialize["file_name"] = o.FileName + } + if !IsNil(o.Bucket) { + toSerialize["bucket"] = o.Bucket + } + return toSerialize, nil +} + +type NullableInternalMultipartInitRequest struct { + value *InternalMultipartInitRequest + isSet bool +} + +func (v NullableInternalMultipartInitRequest) Get() *InternalMultipartInitRequest { + return v.value +} + +func (v *NullableInternalMultipartInitRequest) Set(val *InternalMultipartInitRequest) { + v.value = val + v.isSet = true +} + +func (v NullableInternalMultipartInitRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableInternalMultipartInitRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInternalMultipartInitRequest(val *InternalMultipartInitRequest) *NullableInternalMultipartInitRequest { + return 
&NullableInternalMultipartInitRequest{value: val, isSet: true} +} + +func (v NullableInternalMultipartInitRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInternalMultipartInitRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_internal_multipart_init_response.go b/apigen/internalapi/model_internal_multipart_init_response.go new file mode 100644 index 0000000..c2decac --- /dev/null +++ b/apigen/internalapi/model_internal_multipart_init_response.go @@ -0,0 +1,162 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" +) + +// checks if the InternalMultipartInitResponse type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &InternalMultipartInitResponse{} + +// InternalMultipartInitResponse struct for InternalMultipartInitResponse +type InternalMultipartInitResponse struct { + Guid *string `json:"guid,omitempty"` + UploadId *string `json:"uploadId,omitempty"` +} + +// NewInternalMultipartInitResponse instantiates a new InternalMultipartInitResponse object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewInternalMultipartInitResponse() *InternalMultipartInitResponse { + this := InternalMultipartInitResponse{} + return &this +} + +// NewInternalMultipartInitResponseWithDefaults instantiates a new InternalMultipartInitResponse object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee 
that properties required by API are set +func NewInternalMultipartInitResponseWithDefaults() *InternalMultipartInitResponse { + this := InternalMultipartInitResponse{} + return &this +} + +// GetGuid returns the Guid field value if set, zero value otherwise. +func (o *InternalMultipartInitResponse) GetGuid() string { + if o == nil || IsNil(o.Guid) { + var ret string + return ret + } + return *o.Guid +} + +// GetGuidOk returns a tuple with the Guid field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalMultipartInitResponse) GetGuidOk() (*string, bool) { + if o == nil || IsNil(o.Guid) { + return nil, false + } + return o.Guid, true +} + +// HasGuid returns a boolean if a field has been set. +func (o *InternalMultipartInitResponse) HasGuid() bool { + if o != nil && !IsNil(o.Guid) { + return true + } + + return false +} + +// SetGuid gets a reference to the given string and assigns it to the Guid field. +func (o *InternalMultipartInitResponse) SetGuid(v string) { + o.Guid = &v +} + +// GetUploadId returns the UploadId field value if set, zero value otherwise. +func (o *InternalMultipartInitResponse) GetUploadId() string { + if o == nil || IsNil(o.UploadId) { + var ret string + return ret + } + return *o.UploadId +} + +// GetUploadIdOk returns a tuple with the UploadId field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalMultipartInitResponse) GetUploadIdOk() (*string, bool) { + if o == nil || IsNil(o.UploadId) { + return nil, false + } + return o.UploadId, true +} + +// HasUploadId returns a boolean if a field has been set. +func (o *InternalMultipartInitResponse) HasUploadId() bool { + if o != nil && !IsNil(o.UploadId) { + return true + } + + return false +} + +// SetUploadId gets a reference to the given string and assigns it to the UploadId field. 
+func (o *InternalMultipartInitResponse) SetUploadId(v string) { + o.UploadId = &v +} + +func (o InternalMultipartInitResponse) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o InternalMultipartInitResponse) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Guid) { + toSerialize["guid"] = o.Guid + } + if !IsNil(o.UploadId) { + toSerialize["uploadId"] = o.UploadId + } + return toSerialize, nil +} + +type NullableInternalMultipartInitResponse struct { + value *InternalMultipartInitResponse + isSet bool +} + +func (v NullableInternalMultipartInitResponse) Get() *InternalMultipartInitResponse { + return v.value +} + +func (v *NullableInternalMultipartInitResponse) Set(val *InternalMultipartInitResponse) { + v.value = val + v.isSet = true +} + +func (v NullableInternalMultipartInitResponse) IsSet() bool { + return v.isSet +} + +func (v *NullableInternalMultipartInitResponse) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInternalMultipartInitResponse(val *InternalMultipartInitResponse) *NullableInternalMultipartInitResponse { + return &NullableInternalMultipartInitResponse{value: val, isSet: true} +} + +func (v NullableInternalMultipartInitResponse) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInternalMultipartInitResponse) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_internal_multipart_part.go b/apigen/internalapi/model_internal_multipart_part.go new file mode 100644 index 0000000..6bc1c95 --- /dev/null +++ b/apigen/internalapi/model_internal_multipart_part.go @@ -0,0 +1,186 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. 
+ +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the InternalMultipartPart type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &InternalMultipartPart{} + +// InternalMultipartPart struct for InternalMultipartPart +type InternalMultipartPart struct { + PartNumber int32 `json:"PartNumber"` + ETag string `json:"ETag"` +} + +type _InternalMultipartPart InternalMultipartPart + +// NewInternalMultipartPart instantiates a new InternalMultipartPart object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewInternalMultipartPart(partNumber int32, eTag string) *InternalMultipartPart { + this := InternalMultipartPart{} + this.PartNumber = partNumber + this.ETag = eTag + return &this +} + +// NewInternalMultipartPartWithDefaults instantiates a new InternalMultipartPart object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewInternalMultipartPartWithDefaults() *InternalMultipartPart { + this := InternalMultipartPart{} + return &this +} + +// GetPartNumber returns the PartNumber field value +func (o *InternalMultipartPart) GetPartNumber() int32 { + if o == nil { + var ret int32 + return ret + } + + return o.PartNumber +} + +// GetPartNumberOk returns a tuple with the PartNumber field value +// and a boolean to check if the value has been set. 
+func (o *InternalMultipartPart) GetPartNumberOk() (*int32, bool) { + if o == nil { + return nil, false + } + return &o.PartNumber, true +} + +// SetPartNumber sets field value +func (o *InternalMultipartPart) SetPartNumber(v int32) { + o.PartNumber = v +} + +// GetETag returns the ETag field value +func (o *InternalMultipartPart) GetETag() string { + if o == nil { + var ret string + return ret + } + + return o.ETag +} + +// GetETagOk returns a tuple with the ETag field value +// and a boolean to check if the value has been set. +func (o *InternalMultipartPart) GetETagOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.ETag, true +} + +// SetETag sets field value +func (o *InternalMultipartPart) SetETag(v string) { + o.ETag = v +} + +func (o InternalMultipartPart) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o InternalMultipartPart) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["PartNumber"] = o.PartNumber + toSerialize["ETag"] = o.ETag + return toSerialize, nil +} + +func (o *InternalMultipartPart) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "PartNumber", + "ETag", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varInternalMultipartPart := _InternalMultipartPart{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varInternalMultipartPart) + + if err != nil { + return err + } + + *o = InternalMultipartPart(varInternalMultipartPart) + + return err +} + +type NullableInternalMultipartPart struct { + value *InternalMultipartPart + isSet bool +} + +func (v NullableInternalMultipartPart) Get() *InternalMultipartPart { + return v.value +} + +func (v *NullableInternalMultipartPart) Set(val *InternalMultipartPart) { + v.value = val + v.isSet = true +} + +func (v NullableInternalMultipartPart) IsSet() bool { + return v.isSet +} + +func (v *NullableInternalMultipartPart) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInternalMultipartPart(val *InternalMultipartPart) *NullableInternalMultipartPart { + return &NullableInternalMultipartPart{value: val, isSet: true} +} + +func (v NullableInternalMultipartPart) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInternalMultipartPart) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_internal_multipart_upload_request.go b/apigen/internalapi/model_internal_multipart_upload_request.go new file mode 100644 index 0000000..00a446b --- /dev/null +++ b/apigen/internalapi/model_internal_multipart_upload_request.go @@ -0,0 +1,250 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. 
This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" + "bytes" + "fmt" +) + +// checks if the InternalMultipartUploadRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &InternalMultipartUploadRequest{} + +// InternalMultipartUploadRequest struct for InternalMultipartUploadRequest +type InternalMultipartUploadRequest struct { + Key string `json:"key"` + Bucket *string `json:"bucket,omitempty"` + UploadId string `json:"uploadId"` + PartNumber int32 `json:"partNumber"` +} + +type _InternalMultipartUploadRequest InternalMultipartUploadRequest + +// NewInternalMultipartUploadRequest instantiates a new InternalMultipartUploadRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewInternalMultipartUploadRequest(key string, uploadId string, partNumber int32) *InternalMultipartUploadRequest { + this := InternalMultipartUploadRequest{} + this.Key = key + this.UploadId = uploadId + this.PartNumber = partNumber + return &this +} + +// NewInternalMultipartUploadRequestWithDefaults instantiates a new InternalMultipartUploadRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewInternalMultipartUploadRequestWithDefaults() *InternalMultipartUploadRequest { + this := InternalMultipartUploadRequest{} + return &this +} + +// GetKey returns the Key field value +func (o *InternalMultipartUploadRequest) GetKey() string { + if o == nil { + var ret string + return ret + } + + return o.Key +} + +// GetKeyOk returns a tuple with the Key field value +// and a 
boolean to check if the value has been set. +func (o *InternalMultipartUploadRequest) GetKeyOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.Key, true +} + +// SetKey sets field value +func (o *InternalMultipartUploadRequest) SetKey(v string) { + o.Key = v +} + +// GetBucket returns the Bucket field value if set, zero value otherwise. +func (o *InternalMultipartUploadRequest) GetBucket() string { + if o == nil || IsNil(o.Bucket) { + var ret string + return ret + } + return *o.Bucket +} + +// GetBucketOk returns a tuple with the Bucket field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalMultipartUploadRequest) GetBucketOk() (*string, bool) { + if o == nil || IsNil(o.Bucket) { + return nil, false + } + return o.Bucket, true +} + +// HasBucket returns a boolean if a field has been set. +func (o *InternalMultipartUploadRequest) HasBucket() bool { + if o != nil && !IsNil(o.Bucket) { + return true + } + + return false +} + +// SetBucket gets a reference to the given string and assigns it to the Bucket field. +func (o *InternalMultipartUploadRequest) SetBucket(v string) { + o.Bucket = &v +} + +// GetUploadId returns the UploadId field value +func (o *InternalMultipartUploadRequest) GetUploadId() string { + if o == nil { + var ret string + return ret + } + + return o.UploadId +} + +// GetUploadIdOk returns a tuple with the UploadId field value +// and a boolean to check if the value has been set. 
+func (o *InternalMultipartUploadRequest) GetUploadIdOk() (*string, bool) { + if o == nil { + return nil, false + } + return &o.UploadId, true +} + +// SetUploadId sets field value +func (o *InternalMultipartUploadRequest) SetUploadId(v string) { + o.UploadId = v +} + +// GetPartNumber returns the PartNumber field value +func (o *InternalMultipartUploadRequest) GetPartNumber() int32 { + if o == nil { + var ret int32 + return ret + } + + return o.PartNumber +} + +// GetPartNumberOk returns a tuple with the PartNumber field value +// and a boolean to check if the value has been set. +func (o *InternalMultipartUploadRequest) GetPartNumberOk() (*int32, bool) { + if o == nil { + return nil, false + } + return &o.PartNumber, true +} + +// SetPartNumber sets field value +func (o *InternalMultipartUploadRequest) SetPartNumber(v int32) { + o.PartNumber = v +} + +func (o InternalMultipartUploadRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o InternalMultipartUploadRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + toSerialize["key"] = o.Key + if !IsNil(o.Bucket) { + toSerialize["bucket"] = o.Bucket + } + toSerialize["uploadId"] = o.UploadId + toSerialize["partNumber"] = o.PartNumber + return toSerialize, nil +} + +func (o *InternalMultipartUploadRequest) UnmarshalJSON(data []byte) (err error) { + // This validates that all required properties are included in the JSON object + // by unmarshalling the object into a generic map with string keys and checking + // that every required field exists as a key in the generic map. 
+ requiredProperties := []string{ + "key", + "uploadId", + "partNumber", + } + + allProperties := make(map[string]interface{}) + + err = json.Unmarshal(data, &allProperties) + + if err != nil { + return err; + } + + for _, requiredProperty := range(requiredProperties) { + if _, exists := allProperties[requiredProperty]; !exists { + return fmt.Errorf("no value given for required property %v", requiredProperty) + } + } + + varInternalMultipartUploadRequest := _InternalMultipartUploadRequest{} + + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.DisallowUnknownFields() + err = decoder.Decode(&varInternalMultipartUploadRequest) + + if err != nil { + return err + } + + *o = InternalMultipartUploadRequest(varInternalMultipartUploadRequest) + + return err +} + +type NullableInternalMultipartUploadRequest struct { + value *InternalMultipartUploadRequest + isSet bool +} + +func (v NullableInternalMultipartUploadRequest) Get() *InternalMultipartUploadRequest { + return v.value +} + +func (v *NullableInternalMultipartUploadRequest) Set(val *InternalMultipartUploadRequest) { + v.value = val + v.isSet = true +} + +func (v NullableInternalMultipartUploadRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableInternalMultipartUploadRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInternalMultipartUploadRequest(val *InternalMultipartUploadRequest) *NullableInternalMultipartUploadRequest { + return &NullableInternalMultipartUploadRequest{value: val, isSet: true} +} + +func (v NullableInternalMultipartUploadRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInternalMultipartUploadRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_internal_multipart_upload_response.go b/apigen/internalapi/model_internal_multipart_upload_response.go new file mode 100644 index 0000000..29b4d65 --- /dev/null +++ 
b/apigen/internalapi/model_internal_multipart_upload_response.go @@ -0,0 +1,126 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" +) + +// checks if the InternalMultipartUploadResponse type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &InternalMultipartUploadResponse{} + +// InternalMultipartUploadResponse struct for InternalMultipartUploadResponse +type InternalMultipartUploadResponse struct { + PresignedUrl *string `json:"presigned_url,omitempty"` +} + +// NewInternalMultipartUploadResponse instantiates a new InternalMultipartUploadResponse object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewInternalMultipartUploadResponse() *InternalMultipartUploadResponse { + this := InternalMultipartUploadResponse{} + return &this +} + +// NewInternalMultipartUploadResponseWithDefaults instantiates a new InternalMultipartUploadResponse object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewInternalMultipartUploadResponseWithDefaults() *InternalMultipartUploadResponse { + this := InternalMultipartUploadResponse{} + return &this +} + +// GetPresignedUrl returns the PresignedUrl field value if set, zero value otherwise. 
+func (o *InternalMultipartUploadResponse) GetPresignedUrl() string { + if o == nil || IsNil(o.PresignedUrl) { + var ret string + return ret + } + return *o.PresignedUrl +} + +// GetPresignedUrlOk returns a tuple with the PresignedUrl field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalMultipartUploadResponse) GetPresignedUrlOk() (*string, bool) { + if o == nil || IsNil(o.PresignedUrl) { + return nil, false + } + return o.PresignedUrl, true +} + +// HasPresignedUrl returns a boolean if a field has been set. +func (o *InternalMultipartUploadResponse) HasPresignedUrl() bool { + if o != nil && !IsNil(o.PresignedUrl) { + return true + } + + return false +} + +// SetPresignedUrl gets a reference to the given string and assigns it to the PresignedUrl field. +func (o *InternalMultipartUploadResponse) SetPresignedUrl(v string) { + o.PresignedUrl = &v +} + +func (o InternalMultipartUploadResponse) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o InternalMultipartUploadResponse) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.PresignedUrl) { + toSerialize["presigned_url"] = o.PresignedUrl + } + return toSerialize, nil +} + +type NullableInternalMultipartUploadResponse struct { + value *InternalMultipartUploadResponse + isSet bool +} + +func (v NullableInternalMultipartUploadResponse) Get() *InternalMultipartUploadResponse { + return v.value +} + +func (v *NullableInternalMultipartUploadResponse) Set(val *InternalMultipartUploadResponse) { + v.value = val + v.isSet = true +} + +func (v NullableInternalMultipartUploadResponse) IsSet() bool { + return v.isSet +} + +func (v *NullableInternalMultipartUploadResponse) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInternalMultipartUploadResponse(val *InternalMultipartUploadResponse) 
*NullableInternalMultipartUploadResponse { + return &NullableInternalMultipartUploadResponse{value: val, isSet: true} +} + +func (v NullableInternalMultipartUploadResponse) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInternalMultipartUploadResponse) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_internal_record.go b/apigen/internalapi/model_internal_record.go new file mode 100644 index 0000000..9794159 --- /dev/null +++ b/apigen/internalapi/model_internal_record.go @@ -0,0 +1,379 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" +) + +// checks if the InternalRecord type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &InternalRecord{} + +// InternalRecord struct for InternalRecord +type InternalRecord struct { + Did *string `json:"did,omitempty"` + // Hash map, e.g. 
{\"sha256\":\"...\"} + Hashes *map[string]string `json:"hashes,omitempty"` + Size *int64 `json:"size,omitempty"` + Urls []string `json:"urls,omitempty"` + Authz []string `json:"authz,omitempty"` + FileName *string `json:"file_name,omitempty"` + Organization *string `json:"organization,omitempty"` + Project *string `json:"project,omitempty"` +} + +// NewInternalRecord instantiates a new InternalRecord object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewInternalRecord() *InternalRecord { + this := InternalRecord{} + return &this +} + +// NewInternalRecordWithDefaults instantiates a new InternalRecord object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewInternalRecordWithDefaults() *InternalRecord { + this := InternalRecord{} + return &this +} + +// GetDid returns the Did field value if set, zero value otherwise. +func (o *InternalRecord) GetDid() string { + if o == nil || IsNil(o.Did) { + var ret string + return ret + } + return *o.Did +} + +// GetDidOk returns a tuple with the Did field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecord) GetDidOk() (*string, bool) { + if o == nil || IsNil(o.Did) { + return nil, false + } + return o.Did, true +} + +// HasDid returns a boolean if a field has been set. +func (o *InternalRecord) HasDid() bool { + if o != nil && !IsNil(o.Did) { + return true + } + + return false +} + +// SetDid gets a reference to the given string and assigns it to the Did field. +func (o *InternalRecord) SetDid(v string) { + o.Did = &v +} + +// GetHashes returns the Hashes field value if set, zero value otherwise. 
+func (o *InternalRecord) GetHashes() map[string]string { + if o == nil || IsNil(o.Hashes) { + var ret map[string]string + return ret + } + return *o.Hashes +} + +// GetHashesOk returns a tuple with the Hashes field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecord) GetHashesOk() (*map[string]string, bool) { + if o == nil || IsNil(o.Hashes) { + return nil, false + } + return o.Hashes, true +} + +// HasHashes returns a boolean if a field has been set. +func (o *InternalRecord) HasHashes() bool { + if o != nil && !IsNil(o.Hashes) { + return true + } + + return false +} + +// SetHashes gets a reference to the given map[string]string and assigns it to the Hashes field. +func (o *InternalRecord) SetHashes(v map[string]string) { + o.Hashes = &v +} + +// GetSize returns the Size field value if set, zero value otherwise. +func (o *InternalRecord) GetSize() int64 { + if o == nil || IsNil(o.Size) { + var ret int64 + return ret + } + return *o.Size +} + +// GetSizeOk returns a tuple with the Size field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecord) GetSizeOk() (*int64, bool) { + if o == nil || IsNil(o.Size) { + return nil, false + } + return o.Size, true +} + +// HasSize returns a boolean if a field has been set. +func (o *InternalRecord) HasSize() bool { + if o != nil && !IsNil(o.Size) { + return true + } + + return false +} + +// SetSize gets a reference to the given int64 and assigns it to the Size field. +func (o *InternalRecord) SetSize(v int64) { + o.Size = &v +} + +// GetUrls returns the Urls field value if set, zero value otherwise. +func (o *InternalRecord) GetUrls() []string { + if o == nil || IsNil(o.Urls) { + var ret []string + return ret + } + return o.Urls +} + +// GetUrlsOk returns a tuple with the Urls field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *InternalRecord) GetUrlsOk() ([]string, bool) { + if o == nil || IsNil(o.Urls) { + return nil, false + } + return o.Urls, true +} + +// HasUrls returns a boolean if a field has been set. +func (o *InternalRecord) HasUrls() bool { + if o != nil && !IsNil(o.Urls) { + return true + } + + return false +} + +// SetUrls gets a reference to the given []string and assigns it to the Urls field. +func (o *InternalRecord) SetUrls(v []string) { + o.Urls = v +} + +// GetAuthz returns the Authz field value if set, zero value otherwise. +func (o *InternalRecord) GetAuthz() []string { + if o == nil || IsNil(o.Authz) { + var ret []string + return ret + } + return o.Authz +} + +// GetAuthzOk returns a tuple with the Authz field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecord) GetAuthzOk() ([]string, bool) { + if o == nil || IsNil(o.Authz) { + return nil, false + } + return o.Authz, true +} + +// HasAuthz returns a boolean if a field has been set. +func (o *InternalRecord) HasAuthz() bool { + if o != nil && !IsNil(o.Authz) { + return true + } + + return false +} + +// SetAuthz gets a reference to the given []string and assigns it to the Authz field. +func (o *InternalRecord) SetAuthz(v []string) { + o.Authz = v +} + +// GetFileName returns the FileName field value if set, zero value otherwise. +func (o *InternalRecord) GetFileName() string { + if o == nil || IsNil(o.FileName) { + var ret string + return ret + } + return *o.FileName +} + +// GetFileNameOk returns a tuple with the FileName field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecord) GetFileNameOk() (*string, bool) { + if o == nil || IsNil(o.FileName) { + return nil, false + } + return o.FileName, true +} + +// HasFileName returns a boolean if a field has been set. 
+func (o *InternalRecord) HasFileName() bool { + if o != nil && !IsNil(o.FileName) { + return true + } + + return false +} + +// SetFileName gets a reference to the given string and assigns it to the FileName field. +func (o *InternalRecord) SetFileName(v string) { + o.FileName = &v +} + +// GetOrganization returns the Organization field value if set, zero value otherwise. +func (o *InternalRecord) GetOrganization() string { + if o == nil || IsNil(o.Organization) { + var ret string + return ret + } + return *o.Organization +} + +// GetOrganizationOk returns a tuple with the Organization field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecord) GetOrganizationOk() (*string, bool) { + if o == nil || IsNil(o.Organization) { + return nil, false + } + return o.Organization, true +} + +// HasOrganization returns a boolean if a field has been set. +func (o *InternalRecord) HasOrganization() bool { + if o != nil && !IsNil(o.Organization) { + return true + } + + return false +} + +// SetOrganization gets a reference to the given string and assigns it to the Organization field. +func (o *InternalRecord) SetOrganization(v string) { + o.Organization = &v +} + +// GetProject returns the Project field value if set, zero value otherwise. +func (o *InternalRecord) GetProject() string { + if o == nil || IsNil(o.Project) { + var ret string + return ret + } + return *o.Project +} + +// GetProjectOk returns a tuple with the Project field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecord) GetProjectOk() (*string, bool) { + if o == nil || IsNil(o.Project) { + return nil, false + } + return o.Project, true +} + +// HasProject returns a boolean if a field has been set. +func (o *InternalRecord) HasProject() bool { + if o != nil && !IsNil(o.Project) { + return true + } + + return false +} + +// SetProject gets a reference to the given string and assigns it to the Project field. 
+func (o *InternalRecord) SetProject(v string) { + o.Project = &v +} + +func (o InternalRecord) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o InternalRecord) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Did) { + toSerialize["did"] = o.Did + } + if !IsNil(o.Hashes) { + toSerialize["hashes"] = o.Hashes + } + if !IsNil(o.Size) { + toSerialize["size"] = o.Size + } + if !IsNil(o.Urls) { + toSerialize["urls"] = o.Urls + } + if !IsNil(o.Authz) { + toSerialize["authz"] = o.Authz + } + if !IsNil(o.FileName) { + toSerialize["file_name"] = o.FileName + } + if !IsNil(o.Organization) { + toSerialize["organization"] = o.Organization + } + if !IsNil(o.Project) { + toSerialize["project"] = o.Project + } + return toSerialize, nil +} + +type NullableInternalRecord struct { + value *InternalRecord + isSet bool +} + +func (v NullableInternalRecord) Get() *InternalRecord { + return v.value +} + +func (v *NullableInternalRecord) Set(val *InternalRecord) { + v.value = val + v.isSet = true +} + +func (v NullableInternalRecord) IsSet() bool { + return v.isSet +} + +func (v *NullableInternalRecord) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInternalRecord(val *InternalRecord) *NullableInternalRecord { + return &NullableInternalRecord{value: val, isSet: true} +} + +func (v NullableInternalRecord) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInternalRecord) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_internal_record_response.go b/apigen/internalapi/model_internal_record_response.go new file mode 100644 index 0000000..cb62021 --- /dev/null +++ b/apigen/internalapi/model_internal_record_response.go @@ -0,0 +1,559 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated 
internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" +) + +// checks if the InternalRecordResponse type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &InternalRecordResponse{} + +// InternalRecordResponse struct for InternalRecordResponse +type InternalRecordResponse struct { + Did *string `json:"did,omitempty"` + // Hash map, e.g. {\"sha256\":\"...\"} + Hashes *map[string]string `json:"hashes,omitempty"` + Size *int64 `json:"size,omitempty"` + Urls []string `json:"urls,omitempty"` + Authz []string `json:"authz,omitempty"` + FileName *string `json:"file_name,omitempty"` + Organization *string `json:"organization,omitempty"` + Project *string `json:"project,omitempty"` + Baseid *string `json:"baseid,omitempty"` + Rev *string `json:"rev,omitempty"` + CreatedDate *string `json:"created_date,omitempty"` + UpdatedDate *string `json:"updated_date,omitempty"` + Uploader *string `json:"uploader,omitempty"` +} + +// NewInternalRecordResponse instantiates a new InternalRecordResponse object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewInternalRecordResponse() *InternalRecordResponse { + this := InternalRecordResponse{} + return &this +} + +// NewInternalRecordResponseWithDefaults instantiates a new InternalRecordResponse object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewInternalRecordResponseWithDefaults() *InternalRecordResponse { + this := InternalRecordResponse{} + return &this +} + +// GetDid returns the Did field value 
if set, zero value otherwise. +func (o *InternalRecordResponse) GetDid() string { + if o == nil || IsNil(o.Did) { + var ret string + return ret + } + return *o.Did +} + +// GetDidOk returns a tuple with the Did field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecordResponse) GetDidOk() (*string, bool) { + if o == nil || IsNil(o.Did) { + return nil, false + } + return o.Did, true +} + +// HasDid returns a boolean if a field has been set. +func (o *InternalRecordResponse) HasDid() bool { + if o != nil && !IsNil(o.Did) { + return true + } + + return false +} + +// SetDid gets a reference to the given string and assigns it to the Did field. +func (o *InternalRecordResponse) SetDid(v string) { + o.Did = &v +} + +// GetHashes returns the Hashes field value if set, zero value otherwise. +func (o *InternalRecordResponse) GetHashes() map[string]string { + if o == nil || IsNil(o.Hashes) { + var ret map[string]string + return ret + } + return *o.Hashes +} + +// GetHashesOk returns a tuple with the Hashes field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecordResponse) GetHashesOk() (*map[string]string, bool) { + if o == nil || IsNil(o.Hashes) { + return nil, false + } + return o.Hashes, true +} + +// HasHashes returns a boolean if a field has been set. +func (o *InternalRecordResponse) HasHashes() bool { + if o != nil && !IsNil(o.Hashes) { + return true + } + + return false +} + +// SetHashes gets a reference to the given map[string]string and assigns it to the Hashes field. +func (o *InternalRecordResponse) SetHashes(v map[string]string) { + o.Hashes = &v +} + +// GetSize returns the Size field value if set, zero value otherwise. 
+func (o *InternalRecordResponse) GetSize() int64 { + if o == nil || IsNil(o.Size) { + var ret int64 + return ret + } + return *o.Size +} + +// GetSizeOk returns a tuple with the Size field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecordResponse) GetSizeOk() (*int64, bool) { + if o == nil || IsNil(o.Size) { + return nil, false + } + return o.Size, true +} + +// HasSize returns a boolean if a field has been set. +func (o *InternalRecordResponse) HasSize() bool { + if o != nil && !IsNil(o.Size) { + return true + } + + return false +} + +// SetSize gets a reference to the given int64 and assigns it to the Size field. +func (o *InternalRecordResponse) SetSize(v int64) { + o.Size = &v +} + +// GetUrls returns the Urls field value if set, zero value otherwise. +func (o *InternalRecordResponse) GetUrls() []string { + if o == nil || IsNil(o.Urls) { + var ret []string + return ret + } + return o.Urls +} + +// GetUrlsOk returns a tuple with the Urls field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecordResponse) GetUrlsOk() ([]string, bool) { + if o == nil || IsNil(o.Urls) { + return nil, false + } + return o.Urls, true +} + +// HasUrls returns a boolean if a field has been set. +func (o *InternalRecordResponse) HasUrls() bool { + if o != nil && !IsNil(o.Urls) { + return true + } + + return false +} + +// SetUrls gets a reference to the given []string and assigns it to the Urls field. +func (o *InternalRecordResponse) SetUrls(v []string) { + o.Urls = v +} + +// GetAuthz returns the Authz field value if set, zero value otherwise. +func (o *InternalRecordResponse) GetAuthz() []string { + if o == nil || IsNil(o.Authz) { + var ret []string + return ret + } + return o.Authz +} + +// GetAuthzOk returns a tuple with the Authz field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *InternalRecordResponse) GetAuthzOk() ([]string, bool) { + if o == nil || IsNil(o.Authz) { + return nil, false + } + return o.Authz, true +} + +// HasAuthz returns a boolean if a field has been set. +func (o *InternalRecordResponse) HasAuthz() bool { + if o != nil && !IsNil(o.Authz) { + return true + } + + return false +} + +// SetAuthz gets a reference to the given []string and assigns it to the Authz field. +func (o *InternalRecordResponse) SetAuthz(v []string) { + o.Authz = v +} + +// GetFileName returns the FileName field value if set, zero value otherwise. +func (o *InternalRecordResponse) GetFileName() string { + if o == nil || IsNil(o.FileName) { + var ret string + return ret + } + return *o.FileName +} + +// GetFileNameOk returns a tuple with the FileName field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecordResponse) GetFileNameOk() (*string, bool) { + if o == nil || IsNil(o.FileName) { + return nil, false + } + return o.FileName, true +} + +// HasFileName returns a boolean if a field has been set. +func (o *InternalRecordResponse) HasFileName() bool { + if o != nil && !IsNil(o.FileName) { + return true + } + + return false +} + +// SetFileName gets a reference to the given string and assigns it to the FileName field. +func (o *InternalRecordResponse) SetFileName(v string) { + o.FileName = &v +} + +// GetOrganization returns the Organization field value if set, zero value otherwise. +func (o *InternalRecordResponse) GetOrganization() string { + if o == nil || IsNil(o.Organization) { + var ret string + return ret + } + return *o.Organization +} + +// GetOrganizationOk returns a tuple with the Organization field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *InternalRecordResponse) GetOrganizationOk() (*string, bool) { + if o == nil || IsNil(o.Organization) { + return nil, false + } + return o.Organization, true +} + +// HasOrganization returns a boolean if a field has been set. +func (o *InternalRecordResponse) HasOrganization() bool { + if o != nil && !IsNil(o.Organization) { + return true + } + + return false +} + +// SetOrganization gets a reference to the given string and assigns it to the Organization field. +func (o *InternalRecordResponse) SetOrganization(v string) { + o.Organization = &v +} + +// GetProject returns the Project field value if set, zero value otherwise. +func (o *InternalRecordResponse) GetProject() string { + if o == nil || IsNil(o.Project) { + var ret string + return ret + } + return *o.Project +} + +// GetProjectOk returns a tuple with the Project field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecordResponse) GetProjectOk() (*string, bool) { + if o == nil || IsNil(o.Project) { + return nil, false + } + return o.Project, true +} + +// HasProject returns a boolean if a field has been set. +func (o *InternalRecordResponse) HasProject() bool { + if o != nil && !IsNil(o.Project) { + return true + } + + return false +} + +// SetProject gets a reference to the given string and assigns it to the Project field. +func (o *InternalRecordResponse) SetProject(v string) { + o.Project = &v +} + +// GetBaseid returns the Baseid field value if set, zero value otherwise. +func (o *InternalRecordResponse) GetBaseid() string { + if o == nil || IsNil(o.Baseid) { + var ret string + return ret + } + return *o.Baseid +} + +// GetBaseidOk returns a tuple with the Baseid field value if set, nil otherwise +// and a boolean to check if the value has been set. 
+func (o *InternalRecordResponse) GetBaseidOk() (*string, bool) { + if o == nil || IsNil(o.Baseid) { + return nil, false + } + return o.Baseid, true +} + +// HasBaseid returns a boolean if a field has been set. +func (o *InternalRecordResponse) HasBaseid() bool { + if o != nil && !IsNil(o.Baseid) { + return true + } + + return false +} + +// SetBaseid gets a reference to the given string and assigns it to the Baseid field. +func (o *InternalRecordResponse) SetBaseid(v string) { + o.Baseid = &v +} + +// GetRev returns the Rev field value if set, zero value otherwise. +func (o *InternalRecordResponse) GetRev() string { + if o == nil || IsNil(o.Rev) { + var ret string + return ret + } + return *o.Rev +} + +// GetRevOk returns a tuple with the Rev field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecordResponse) GetRevOk() (*string, bool) { + if o == nil || IsNil(o.Rev) { + return nil, false + } + return o.Rev, true +} + +// HasRev returns a boolean if a field has been set. +func (o *InternalRecordResponse) HasRev() bool { + if o != nil && !IsNil(o.Rev) { + return true + } + + return false +} + +// SetRev gets a reference to the given string and assigns it to the Rev field. +func (o *InternalRecordResponse) SetRev(v string) { + o.Rev = &v +} + +// GetCreatedDate returns the CreatedDate field value if set, zero value otherwise. +func (o *InternalRecordResponse) GetCreatedDate() string { + if o == nil || IsNil(o.CreatedDate) { + var ret string + return ret + } + return *o.CreatedDate +} + +// GetCreatedDateOk returns a tuple with the CreatedDate field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecordResponse) GetCreatedDateOk() (*string, bool) { + if o == nil || IsNil(o.CreatedDate) { + return nil, false + } + return o.CreatedDate, true +} + +// HasCreatedDate returns a boolean if a field has been set. 
+func (o *InternalRecordResponse) HasCreatedDate() bool { + if o != nil && !IsNil(o.CreatedDate) { + return true + } + + return false +} + +// SetCreatedDate gets a reference to the given string and assigns it to the CreatedDate field. +func (o *InternalRecordResponse) SetCreatedDate(v string) { + o.CreatedDate = &v +} + +// GetUpdatedDate returns the UpdatedDate field value if set, zero value otherwise. +func (o *InternalRecordResponse) GetUpdatedDate() string { + if o == nil || IsNil(o.UpdatedDate) { + var ret string + return ret + } + return *o.UpdatedDate +} + +// GetUpdatedDateOk returns a tuple with the UpdatedDate field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecordResponse) GetUpdatedDateOk() (*string, bool) { + if o == nil || IsNil(o.UpdatedDate) { + return nil, false + } + return o.UpdatedDate, true +} + +// HasUpdatedDate returns a boolean if a field has been set. +func (o *InternalRecordResponse) HasUpdatedDate() bool { + if o != nil && !IsNil(o.UpdatedDate) { + return true + } + + return false +} + +// SetUpdatedDate gets a reference to the given string and assigns it to the UpdatedDate field. +func (o *InternalRecordResponse) SetUpdatedDate(v string) { + o.UpdatedDate = &v +} + +// GetUploader returns the Uploader field value if set, zero value otherwise. +func (o *InternalRecordResponse) GetUploader() string { + if o == nil || IsNil(o.Uploader) { + var ret string + return ret + } + return *o.Uploader +} + +// GetUploaderOk returns a tuple with the Uploader field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalRecordResponse) GetUploaderOk() (*string, bool) { + if o == nil || IsNil(o.Uploader) { + return nil, false + } + return o.Uploader, true +} + +// HasUploader returns a boolean if a field has been set. 
+func (o *InternalRecordResponse) HasUploader() bool { + if o != nil && !IsNil(o.Uploader) { + return true + } + + return false +} + +// SetUploader gets a reference to the given string and assigns it to the Uploader field. +func (o *InternalRecordResponse) SetUploader(v string) { + o.Uploader = &v +} + +func (o InternalRecordResponse) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o InternalRecordResponse) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Did) { + toSerialize["did"] = o.Did + } + if !IsNil(o.Hashes) { + toSerialize["hashes"] = o.Hashes + } + if !IsNil(o.Size) { + toSerialize["size"] = o.Size + } + if !IsNil(o.Urls) { + toSerialize["urls"] = o.Urls + } + if !IsNil(o.Authz) { + toSerialize["authz"] = o.Authz + } + if !IsNil(o.FileName) { + toSerialize["file_name"] = o.FileName + } + if !IsNil(o.Organization) { + toSerialize["organization"] = o.Organization + } + if !IsNil(o.Project) { + toSerialize["project"] = o.Project + } + if !IsNil(o.Baseid) { + toSerialize["baseid"] = o.Baseid + } + if !IsNil(o.Rev) { + toSerialize["rev"] = o.Rev + } + if !IsNil(o.CreatedDate) { + toSerialize["created_date"] = o.CreatedDate + } + if !IsNil(o.UpdatedDate) { + toSerialize["updated_date"] = o.UpdatedDate + } + if !IsNil(o.Uploader) { + toSerialize["uploader"] = o.Uploader + } + return toSerialize, nil +} + +type NullableInternalRecordResponse struct { + value *InternalRecordResponse + isSet bool +} + +func (v NullableInternalRecordResponse) Get() *InternalRecordResponse { + return v.value +} + +func (v *NullableInternalRecordResponse) Set(val *InternalRecordResponse) { + v.value = val + v.isSet = true +} + +func (v NullableInternalRecordResponse) IsSet() bool { + return v.isSet +} + +func (v *NullableInternalRecordResponse) Unset() { + v.value = nil + v.isSet = false +} + +func 
NewNullableInternalRecordResponse(val *InternalRecordResponse) *NullableInternalRecordResponse { + return &NullableInternalRecordResponse{value: val, isSet: true} +} + +func (v NullableInternalRecordResponse) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInternalRecordResponse) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_internal_signed_url.go b/apigen/internalapi/model_internal_signed_url.go new file mode 100644 index 0000000..74cd66f --- /dev/null +++ b/apigen/internalapi/model_internal_signed_url.go @@ -0,0 +1,126 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" +) + +// checks if the InternalSignedURL type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &InternalSignedURL{} + +// InternalSignedURL struct for InternalSignedURL +type InternalSignedURL struct { + Url *string `json:"url,omitempty"` +} + +// NewInternalSignedURL instantiates a new InternalSignedURL object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewInternalSignedURL() *InternalSignedURL { + this := InternalSignedURL{} + return &this +} + +// NewInternalSignedURLWithDefaults instantiates a new InternalSignedURL object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewInternalSignedURLWithDefaults() *InternalSignedURL { + this := InternalSignedURL{} + return 
&this +} + +// GetUrl returns the Url field value if set, zero value otherwise. +func (o *InternalSignedURL) GetUrl() string { + if o == nil || IsNil(o.Url) { + var ret string + return ret + } + return *o.Url +} + +// GetUrlOk returns a tuple with the Url field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalSignedURL) GetUrlOk() (*string, bool) { + if o == nil || IsNil(o.Url) { + return nil, false + } + return o.Url, true +} + +// HasUrl returns a boolean if a field has been set. +func (o *InternalSignedURL) HasUrl() bool { + if o != nil && !IsNil(o.Url) { + return true + } + + return false +} + +// SetUrl gets a reference to the given string and assigns it to the Url field. +func (o *InternalSignedURL) SetUrl(v string) { + o.Url = &v +} + +func (o InternalSignedURL) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o InternalSignedURL) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Url) { + toSerialize["url"] = o.Url + } + return toSerialize, nil +} + +type NullableInternalSignedURL struct { + value *InternalSignedURL + isSet bool +} + +func (v NullableInternalSignedURL) Get() *InternalSignedURL { + return v.value +} + +func (v *NullableInternalSignedURL) Set(val *InternalSignedURL) { + v.value = val + v.isSet = true +} + +func (v NullableInternalSignedURL) IsSet() bool { + return v.isSet +} + +func (v *NullableInternalSignedURL) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInternalSignedURL(val *InternalSignedURL) *NullableInternalSignedURL { + return &NullableInternalSignedURL{value: val, isSet: true} +} + +func (v NullableInternalSignedURL) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInternalSignedURL) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} 
+ + diff --git a/apigen/internalapi/model_internal_upload_blank_request.go b/apigen/internalapi/model_internal_upload_blank_request.go new file mode 100644 index 0000000..7de0aa0 --- /dev/null +++ b/apigen/internalapi/model_internal_upload_blank_request.go @@ -0,0 +1,162 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. + +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" +) + +// checks if the InternalUploadBlankRequest type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &InternalUploadBlankRequest{} + +// InternalUploadBlankRequest struct for InternalUploadBlankRequest +type InternalUploadBlankRequest struct { + Guid *string `json:"guid,omitempty"` + Authz []string `json:"authz,omitempty"` +} + +// NewInternalUploadBlankRequest instantiates a new InternalUploadBlankRequest object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewInternalUploadBlankRequest() *InternalUploadBlankRequest { + this := InternalUploadBlankRequest{} + return &this +} + +// NewInternalUploadBlankRequestWithDefaults instantiates a new InternalUploadBlankRequest object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewInternalUploadBlankRequestWithDefaults() *InternalUploadBlankRequest { + this := InternalUploadBlankRequest{} + return &this +} + +// GetGuid returns the Guid field value if set, zero value otherwise. 
+func (o *InternalUploadBlankRequest) GetGuid() string { + if o == nil || IsNil(o.Guid) { + var ret string + return ret + } + return *o.Guid +} + +// GetGuidOk returns a tuple with the Guid field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalUploadBlankRequest) GetGuidOk() (*string, bool) { + if o == nil || IsNil(o.Guid) { + return nil, false + } + return o.Guid, true +} + +// HasGuid returns a boolean if a field has been set. +func (o *InternalUploadBlankRequest) HasGuid() bool { + if o != nil && !IsNil(o.Guid) { + return true + } + + return false +} + +// SetGuid gets a reference to the given string and assigns it to the Guid field. +func (o *InternalUploadBlankRequest) SetGuid(v string) { + o.Guid = &v +} + +// GetAuthz returns the Authz field value if set, zero value otherwise. +func (o *InternalUploadBlankRequest) GetAuthz() []string { + if o == nil || IsNil(o.Authz) { + var ret []string + return ret + } + return o.Authz +} + +// GetAuthzOk returns a tuple with the Authz field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalUploadBlankRequest) GetAuthzOk() ([]string, bool) { + if o == nil || IsNil(o.Authz) { + return nil, false + } + return o.Authz, true +} + +// HasAuthz returns a boolean if a field has been set. +func (o *InternalUploadBlankRequest) HasAuthz() bool { + if o != nil && !IsNil(o.Authz) { + return true + } + + return false +} + +// SetAuthz gets a reference to the given []string and assigns it to the Authz field. 
+func (o *InternalUploadBlankRequest) SetAuthz(v []string) { + o.Authz = v +} + +func (o InternalUploadBlankRequest) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o InternalUploadBlankRequest) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Guid) { + toSerialize["guid"] = o.Guid + } + if !IsNil(o.Authz) { + toSerialize["authz"] = o.Authz + } + return toSerialize, nil +} + +type NullableInternalUploadBlankRequest struct { + value *InternalUploadBlankRequest + isSet bool +} + +func (v NullableInternalUploadBlankRequest) Get() *InternalUploadBlankRequest { + return v.value +} + +func (v *NullableInternalUploadBlankRequest) Set(val *InternalUploadBlankRequest) { + v.value = val + v.isSet = true +} + +func (v NullableInternalUploadBlankRequest) IsSet() bool { + return v.isSet +} + +func (v *NullableInternalUploadBlankRequest) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInternalUploadBlankRequest(val *InternalUploadBlankRequest) *NullableInternalUploadBlankRequest { + return &NullableInternalUploadBlankRequest{value: val, isSet: true} +} + +func (v NullableInternalUploadBlankRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInternalUploadBlankRequest) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_internal_upload_blank_response.go b/apigen/internalapi/model_internal_upload_blank_response.go new file mode 100644 index 0000000..fe414fd --- /dev/null +++ b/apigen/internalapi/model_internal_upload_blank_response.go @@ -0,0 +1,162 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. 
+ +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" +) + +// checks if the InternalUploadBlankResponse type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &InternalUploadBlankResponse{} + +// InternalUploadBlankResponse struct for InternalUploadBlankResponse +type InternalUploadBlankResponse struct { + Guid *string `json:"guid,omitempty"` + Url *string `json:"url,omitempty"` +} + +// NewInternalUploadBlankResponse instantiates a new InternalUploadBlankResponse object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewInternalUploadBlankResponse() *InternalUploadBlankResponse { + this := InternalUploadBlankResponse{} + return &this +} + +// NewInternalUploadBlankResponseWithDefaults instantiates a new InternalUploadBlankResponse object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewInternalUploadBlankResponseWithDefaults() *InternalUploadBlankResponse { + this := InternalUploadBlankResponse{} + return &this +} + +// GetGuid returns the Guid field value if set, zero value otherwise. +func (o *InternalUploadBlankResponse) GetGuid() string { + if o == nil || IsNil(o.Guid) { + var ret string + return ret + } + return *o.Guid +} + +// GetGuidOk returns a tuple with the Guid field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalUploadBlankResponse) GetGuidOk() (*string, bool) { + if o == nil || IsNil(o.Guid) { + return nil, false + } + return o.Guid, true +} + +// HasGuid returns a boolean if a field has been set. 
+func (o *InternalUploadBlankResponse) HasGuid() bool { + if o != nil && !IsNil(o.Guid) { + return true + } + + return false +} + +// SetGuid gets a reference to the given string and assigns it to the Guid field. +func (o *InternalUploadBlankResponse) SetGuid(v string) { + o.Guid = &v +} + +// GetUrl returns the Url field value if set, zero value otherwise. +func (o *InternalUploadBlankResponse) GetUrl() string { + if o == nil || IsNil(o.Url) { + var ret string + return ret + } + return *o.Url +} + +// GetUrlOk returns a tuple with the Url field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *InternalUploadBlankResponse) GetUrlOk() (*string, bool) { + if o == nil || IsNil(o.Url) { + return nil, false + } + return o.Url, true +} + +// HasUrl returns a boolean if a field has been set. +func (o *InternalUploadBlankResponse) HasUrl() bool { + if o != nil && !IsNil(o.Url) { + return true + } + + return false +} + +// SetUrl gets a reference to the given string and assigns it to the Url field. 
+func (o *InternalUploadBlankResponse) SetUrl(v string) { + o.Url = &v +} + +func (o InternalUploadBlankResponse) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o InternalUploadBlankResponse) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Guid) { + toSerialize["guid"] = o.Guid + } + if !IsNil(o.Url) { + toSerialize["url"] = o.Url + } + return toSerialize, nil +} + +type NullableInternalUploadBlankResponse struct { + value *InternalUploadBlankResponse + isSet bool +} + +func (v NullableInternalUploadBlankResponse) Get() *InternalUploadBlankResponse { + return v.value +} + +func (v *NullableInternalUploadBlankResponse) Set(val *InternalUploadBlankResponse) { + v.value = val + v.isSet = true +} + +func (v NullableInternalUploadBlankResponse) IsSet() bool { + return v.isSet +} + +func (v *NullableInternalUploadBlankResponse) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInternalUploadBlankResponse(val *InternalUploadBlankResponse) *NullableInternalUploadBlankResponse { + return &NullableInternalUploadBlankResponse{value: val, isSet: true} +} + +func (v NullableInternalUploadBlankResponse) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInternalUploadBlankResponse) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/model_list_records_response.go b/apigen/internalapi/model_list_records_response.go new file mode 100644 index 0000000..21f51ce --- /dev/null +++ b/apigen/internalapi/model_list_records_response.go @@ -0,0 +1,126 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. 
+ +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "encoding/json" +) + +// checks if the ListRecordsResponse type satisfies the MappedNullable interface at compile time +var _ MappedNullable = &ListRecordsResponse{} + +// ListRecordsResponse struct for ListRecordsResponse +type ListRecordsResponse struct { + Records []InternalRecordResponse `json:"records,omitempty"` +} + +// NewListRecordsResponse instantiates a new ListRecordsResponse object +// This constructor will assign default values to properties that have it defined, +// and makes sure properties required by API are set, but the set of arguments +// will change when the set of required properties is changed +func NewListRecordsResponse() *ListRecordsResponse { + this := ListRecordsResponse{} + return &this +} + +// NewListRecordsResponseWithDefaults instantiates a new ListRecordsResponse object +// This constructor will only assign default values to properties that have it defined, +// but it doesn't guarantee that properties required by API are set +func NewListRecordsResponseWithDefaults() *ListRecordsResponse { + this := ListRecordsResponse{} + return &this +} + +// GetRecords returns the Records field value if set, zero value otherwise. +func (o *ListRecordsResponse) GetRecords() []InternalRecordResponse { + if o == nil || IsNil(o.Records) { + var ret []InternalRecordResponse + return ret + } + return o.Records +} + +// GetRecordsOk returns a tuple with the Records field value if set, nil otherwise +// and a boolean to check if the value has been set. +func (o *ListRecordsResponse) GetRecordsOk() ([]InternalRecordResponse, bool) { + if o == nil || IsNil(o.Records) { + return nil, false + } + return o.Records, true +} + +// HasRecords returns a boolean if a field has been set. 
+func (o *ListRecordsResponse) HasRecords() bool { + if o != nil && !IsNil(o.Records) { + return true + } + + return false +} + +// SetRecords gets a reference to the given []InternalRecordResponse and assigns it to the Records field. +func (o *ListRecordsResponse) SetRecords(v []InternalRecordResponse) { + o.Records = v +} + +func (o ListRecordsResponse) MarshalJSON() ([]byte, error) { + toSerialize,err := o.ToMap() + if err != nil { + return []byte{}, err + } + return json.Marshal(toSerialize) +} + +func (o ListRecordsResponse) ToMap() (map[string]interface{}, error) { + toSerialize := map[string]interface{}{} + if !IsNil(o.Records) { + toSerialize["records"] = o.Records + } + return toSerialize, nil +} + +type NullableListRecordsResponse struct { + value *ListRecordsResponse + isSet bool +} + +func (v NullableListRecordsResponse) Get() *ListRecordsResponse { + return v.value +} + +func (v *NullableListRecordsResponse) Set(val *ListRecordsResponse) { + v.value = val + v.isSet = true +} + +func (v NullableListRecordsResponse) IsSet() bool { + return v.isSet +} + +func (v *NullableListRecordsResponse) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableListRecordsResponse(val *ListRecordsResponse) *NullableListRecordsResponse { + return &NullableListRecordsResponse{value: val, isSet: true} +} + +func (v NullableListRecordsResponse) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableListRecordsResponse) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + + diff --git a/apigen/internalapi/utils.go b/apigen/internalapi/utils.go new file mode 100644 index 0000000..d0fb557 --- /dev/null +++ b/apigen/internalapi/utils.go @@ -0,0 +1,361 @@ +/* +Internal Compatibility API (DRS Server) + +Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. 
+ +API version: 1.0.0 +*/ + +// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. + +package internalapi + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "time" +) + +// PtrBool is a helper routine that returns a pointer to given boolean value. +func PtrBool(v bool) *bool { return &v } + +// PtrInt is a helper routine that returns a pointer to given integer value. +func PtrInt(v int) *int { return &v } + +// PtrInt32 is a helper routine that returns a pointer to given integer value. +func PtrInt32(v int32) *int32 { return &v } + +// PtrInt64 is a helper routine that returns a pointer to given integer value. +func PtrInt64(v int64) *int64 { return &v } + +// PtrFloat32 is a helper routine that returns a pointer to given float value. +func PtrFloat32(v float32) *float32 { return &v } + +// PtrFloat64 is a helper routine that returns a pointer to given float value. +func PtrFloat64(v float64) *float64 { return &v } + +// PtrString is a helper routine that returns a pointer to given string value. +func PtrString(v string) *string { return &v } + +// PtrTime is helper routine that returns a pointer to given Time value. 
+func PtrTime(v time.Time) *time.Time { return &v } + +type NullableBool struct { + value *bool + isSet bool +} + +func (v NullableBool) Get() *bool { + return v.value +} + +func (v *NullableBool) Set(val *bool) { + v.value = val + v.isSet = true +} + +func (v NullableBool) IsSet() bool { + return v.isSet +} + +func (v *NullableBool) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableBool(val *bool) *NullableBool { + return &NullableBool{value: val, isSet: true} +} + +func (v NullableBool) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableBool) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableInt struct { + value *int + isSet bool +} + +func (v NullableInt) Get() *int { + return v.value +} + +func (v *NullableInt) Set(val *int) { + v.value = val + v.isSet = true +} + +func (v NullableInt) IsSet() bool { + return v.isSet +} + +func (v *NullableInt) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt(val *int) *NullableInt { + return &NullableInt{value: val, isSet: true} +} + +func (v NullableInt) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableInt32 struct { + value *int32 + isSet bool +} + +func (v NullableInt32) Get() *int32 { + return v.value +} + +func (v *NullableInt32) Set(val *int32) { + v.value = val + v.isSet = true +} + +func (v NullableInt32) IsSet() bool { + return v.isSet +} + +func (v *NullableInt32) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt32(val *int32) *NullableInt32 { + return &NullableInt32{value: val, isSet: true} +} + +func (v NullableInt32) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt32) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type 
NullableInt64 struct { + value *int64 + isSet bool +} + +func (v NullableInt64) Get() *int64 { + return v.value +} + +func (v *NullableInt64) Set(val *int64) { + v.value = val + v.isSet = true +} + +func (v NullableInt64) IsSet() bool { + return v.isSet +} + +func (v *NullableInt64) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableInt64(val *int64) *NullableInt64 { + return &NullableInt64{value: val, isSet: true} +} + +func (v NullableInt64) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableInt64) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableFloat32 struct { + value *float32 + isSet bool +} + +func (v NullableFloat32) Get() *float32 { + return v.value +} + +func (v *NullableFloat32) Set(val *float32) { + v.value = val + v.isSet = true +} + +func (v NullableFloat32) IsSet() bool { + return v.isSet +} + +func (v *NullableFloat32) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableFloat32(val *float32) *NullableFloat32 { + return &NullableFloat32{value: val, isSet: true} +} + +func (v NullableFloat32) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableFloat32) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableFloat64 struct { + value *float64 + isSet bool +} + +func (v NullableFloat64) Get() *float64 { + return v.value +} + +func (v *NullableFloat64) Set(val *float64) { + v.value = val + v.isSet = true +} + +func (v NullableFloat64) IsSet() bool { + return v.isSet +} + +func (v *NullableFloat64) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableFloat64(val *float64) *NullableFloat64 { + return &NullableFloat64{value: val, isSet: true} +} + +func (v NullableFloat64) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableFloat64) UnmarshalJSON(src []byte) error { + v.isSet = true + return 
json.Unmarshal(src, &v.value) +} + +type NullableString struct { + value *string + isSet bool +} + +func (v NullableString) Get() *string { + return v.value +} + +func (v *NullableString) Set(val *string) { + v.value = val + v.isSet = true +} + +func (v NullableString) IsSet() bool { + return v.isSet +} + +func (v *NullableString) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableString(val *string) *NullableString { + return &NullableString{value: val, isSet: true} +} + +func (v NullableString) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableString) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +type NullableTime struct { + value *time.Time + isSet bool +} + +func (v NullableTime) Get() *time.Time { + return v.value +} + +func (v *NullableTime) Set(val *time.Time) { + v.value = val + v.isSet = true +} + +func (v NullableTime) IsSet() bool { + return v.isSet +} + +func (v *NullableTime) Unset() { + v.value = nil + v.isSet = false +} + +func NewNullableTime(val *time.Time) *NullableTime { + return &NullableTime{value: val, isSet: true} +} + +func (v NullableTime) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *NullableTime) UnmarshalJSON(src []byte) error { + v.isSet = true + return json.Unmarshal(src, &v.value) +} + +// IsNil checks if an input is nil +func IsNil(i interface{}) bool { + if i == nil { + return true + } + switch reflect.TypeOf(i).Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: + return reflect.ValueOf(i).IsNil() + case reflect.Array: + return reflect.ValueOf(i).IsZero() + } + return false +} + +type MappedNullable interface { + ToMap() (map[string]interface{}, error) +} + +// A wrapper for strict JSON decoding +func newStrictDecoder(data []byte) *json.Decoder { + dec := json.NewDecoder(bytes.NewBuffer(data)) + dec.DisallowUnknownFields() + 
return dec +} + +// Prevent trying to import "fmt" +func reportError(format string, a ...interface{}) error { + return fmt.Errorf(format, a...) +} \ No newline at end of file diff --git a/apigen/specs/drs-extensions-overlay.yaml b/apigen/specs/drs-extensions-overlay.yaml new file mode 100644 index 0000000..d6426a2 --- /dev/null +++ b/apigen/specs/drs-extensions-overlay.yaml @@ -0,0 +1,25 @@ +components: + schemas: + AccessMethod: + properties: + access_url: + type: object + required: [url] + properties: + url: { type: string, description: "A fully resolvable URL that can be used to fetch the actual object bytes." } + headers: { type: array, items: { type: string }, description: "GA4GH-compatible list of HTTP headers." } + authorizations: + type: object + properties: + drs_object_id: { type: string } + supported_types: { type: array, items: { type: string } } + passport_auth_issuers: { type: array, items: { type: string } } + bearer_auth_issuers: { type: array, items: { type: string } } + UploadMethod: + properties: + access_url: + type: object + required: [url] + properties: + url: { type: string, description: "Inlined Upload URL context." } + headers: { type: array, items: { type: string }, description: "Inlined Upload Headers." 
} From 1a13e24403b5564dacb3b0c799f6316ebb228cf8 Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Fri, 27 Mar 2026 08:47:48 -0700 Subject: [PATCH 06/13] add ci --- .github/workflows/ci.yaml | 40 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 .github/workflows/ci.yaml diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 0000000..9892247 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,40 @@ +name: Data Client CI + +on: + pull_request: + push: + branches: + - development + - main + - master + workflow_dispatch: + +concurrency: + group: "${{ github.workflow }}-${{ github.ref }}" + cancel-in-progress: true + +jobs: + lint-and-test: + name: Lint and Unit Tests + runs-on: ubuntu-latest + + steps: + - name: Check out code + uses: actions/checkout@v4 + with: + submodules: recursive + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + cache: true + + - name: Download dependencies + run: GOTOOLCHAIN=auto go mod download + + - name: Run go vet + run: GOTOOLCHAIN=auto go vet $(GOTOOLCHAIN=auto go list ./... | grep -v '^github.com/calypr/data-client/tests$') + + - name: Run unit tests + run: GOTOOLCHAIN=auto go test -v $(GOTOOLCHAIN=auto go list ./... 
| grep -v '^github.com/calypr/data-client/tests$') From 715f029d1bbe8e09c1ce863810c44fa31838a0c4 Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Fri, 27 Mar 2026 08:52:22 -0700 Subject: [PATCH 07/13] fix tests --- .github/workflows/ci.yaml | 8 ++------ mocks/mock_drs_client.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 9892247..7d3ce32 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -3,10 +3,6 @@ name: Data Client CI on: pull_request: push: - branches: - - development - - main - - master workflow_dispatch: concurrency: @@ -34,7 +30,7 @@ jobs: run: GOTOOLCHAIN=auto go mod download - name: Run go vet - run: GOTOOLCHAIN=auto go vet $(GOTOOLCHAIN=auto go list ./... | grep -v '^github.com/calypr/data-client/tests$') + run: GOTOOLCHAIN=auto go vet ./... - name: Run unit tests - run: GOTOOLCHAIN=auto go test -v $(GOTOOLCHAIN=auto go list ./... | grep -v '^github.com/calypr/data-client/tests$') + run: GOTOOLCHAIN=auto go test -v ./... 
diff --git a/mocks/mock_drs_client.go b/mocks/mock_drs_client.go index 6e02ffc..e58e5bd 100644 --- a/mocks/mock_drs_client.go +++ b/mocks/mock_drs_client.go @@ -5,11 +5,14 @@ package mocks import ( context "context" + io "io" http "net/http" reflect "reflect" + common "github.com/calypr/data-client/common" drs "github.com/calypr/data-client/drs" hash "github.com/calypr/data-client/hash" + logs "github.com/calypr/data-client/logs" request "github.com/calypr/data-client/request" gomock "go.uber.org/mock/gomock" ) @@ -213,3 +216,28 @@ func (m *MockDrsClient) GetProjectSample(ctx context.Context, projectId string, func (m *MockDrsClient) DeleteRecordsByProject(ctx context.Context, projectId string) error { return nil } func (m *MockDrsClient) BuildDrsObj(fileName string, checksum string, size int64, drsId string) (*drs.DRSObject, error) { return nil, nil } func (m *MockDrsClient) DownloadFile(ctx context.Context, oid, destPath string) error { return nil } +func (m *MockDrsClient) Name() string { return "mock-drs-client" } +func (m *MockDrsClient) Logger() *logs.Gen3Logger { return nil } +func (m *MockDrsClient) ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { + return "", nil +} +func (m *MockDrsClient) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { + return nil, nil +} +func (m *MockDrsClient) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { + return "", nil +} +func (m *MockDrsClient) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { + return nil, nil +} +func (m *MockDrsClient) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { + return "", nil +} +func (m *MockDrsClient) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts 
[]common.MultipartUploadPart, bucket string) error { + return nil +} +func (m *MockDrsClient) Upload(ctx context.Context, url string, body io.Reader, size int64) error { return nil } +func (m *MockDrsClient) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { + return "", nil +} +func (m *MockDrsClient) DeleteFile(ctx context.Context, guid string) (string, error) { return "", nil } From dfecdcd8ecd2ea4df6cc970a8d2bc9bba2f494b3 Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Tue, 31 Mar 2026 12:31:30 -0700 Subject: [PATCH 08/13] add client support for bulk url signing --- common/types.go | 22 ++++++- transfer/interface.go | 1 + transfer/service.go | 5 ++ transfer/signer/gen3/signer.go | 93 +++++++++++++++++++++++++++ transfer/signer/gen3/signer_test.go | 78 +++++++++++++++++++++++ transfer/signer/local/signer.go | 95 ++++++++++++++++++++++++++++ transfer/signer/local/signer_test.go | 79 +++++++++++++++++++++++ upload/multipart_test.go | 3 + 8 files changed, 373 insertions(+), 3 deletions(-) create mode 100644 transfer/signer/gen3/signer_test.go create mode 100644 transfer/signer/local/signer_test.go diff --git a/common/types.go b/common/types.go index 54b67d3..c52a744 100644 --- a/common/types.go +++ b/common/types.go @@ -73,7 +73,7 @@ type ManifestObject struct { Title string `json:"title"` Size int64 `json:"size"` } - + // ShepherdInitRequestObject represents the payload sent to Shepherd type ShepherdInitRequestObject struct { Filename string `json:"file_name"` @@ -81,13 +81,29 @@ type ShepherdInitRequestObject struct { Aliases []string `json:"aliases"` Metadata map[string]any `json:"metadata"` } - + type ShepherdAuthz struct { Version string `json:"version"` ResourcePaths []string `json:"resource_paths"` } - + type PresignedURLResponse struct { GUID string `json:"guid"` URL string `json:"upload_url"` } + +type UploadURLResolveRequest struct { + GUID string + Filename string + Metadata FileMetadata + Bucket string +} + 
+type UploadURLResolveResponse struct { + GUID string + Filename string + Bucket string + URL string + Status int + Error string +} diff --git a/transfer/interface.go b/transfer/interface.go index 3df9d8b..1a4540e 100644 --- a/transfer/interface.go +++ b/transfer/interface.go @@ -26,6 +26,7 @@ type Downloader interface { type Uploader interface { Service ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) + ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error diff --git a/transfer/service.go b/transfer/service.go index edb8d3a..d061942 100644 --- a/transfer/service.go +++ b/transfer/service.go @@ -15,6 +15,7 @@ type Signer interface { Name() string ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) + ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error @@ -54,6 +55,10 @@ func (c *client) ResolveUploadURL(ctx context.Context, guid string, filename str 
return c.signer.ResolveUploadURL(ctx, guid, filename, metadata, bucket) } +func (c *client) ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) { + return c.signer.ResolveUploadURLs(ctx, requests) +} + func (c *client) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { return c.signer.InitMultipartUpload(ctx, guid, filename, bucket) } diff --git a/transfer/signer/gen3/signer.go b/transfer/signer/gen3/signer.go index 859aabf..b2c45bf 100644 --- a/transfer/signer/gen3/signer.go +++ b/transfer/signer/gen3/signer.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "io" "net/http" "strings" @@ -106,6 +107,98 @@ func (g *Signer) ResolveUploadURL(ctx context.Context, guid string, filename str return res.URL, nil } +func (g *Signer) ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) { + if len(requests) == 0 { + return []common.UploadURLResolveResponse{}, nil + } + + type bulkUploadRequest struct { + Requests []struct { + FileID string `json:"file_id"` + Bucket string `json:"bucket,omitempty"` + FileName string `json:"file_name,omitempty"` + } `json:"requests"` + } + type bulkUploadResponse struct { + Results []struct { + FileID string `json:"file_id"` + Bucket string `json:"bucket,omitempty"` + FileName string `json:"file_name,omitempty"` + URL string `json:"url,omitempty"` + Status int `json:"status"` + Error string `json:"error,omitempty"` + } `json:"results"` + } + + payload := bulkUploadRequest{ + Requests: make([]struct { + FileID string `json:"file_id"` + Bucket string `json:"bucket,omitempty"` + FileName string `json:"file_name,omitempty"` + }, 0, len(requests)), + } + for _, req := range requests { + fileID := strings.TrimSpace(req.GUID) + if fileID == "" { + fileID = strings.TrimSpace(req.Filename) + } + payload.Requests = 
append(payload.Requests, struct { + FileID string `json:"file_id"` + Bucket string `json:"bucket,omitempty"` + FileName string `json:"file_name,omitempty"` + }{ + FileID: fileID, + Bucket: req.Bucket, + FileName: req.Filename, + }) + } + + endpoint := strings.TrimRight(strings.TrimSpace(g.cred.APIEndpoint), "/") + "/data/upload/bulk" + rb := g.req.New(http.MethodPost, endpoint) + if _, err := rb.WithJSONBody(payload); err != nil { + return nil, err + } + resp, err := g.req.Do(ctx, rb) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("bulk upload URL request failed with status %d: %s", resp.StatusCode, strings.TrimSpace(string(body))) + } + + var out bulkUploadResponse + if err := json.NewDecoder(resp.Body).Decode(&out); err != nil { + return nil, err + } + + results := make([]common.UploadURLResolveResponse, len(requests)) + for i := range requests { + results[i] = common.UploadURLResolveResponse{ + GUID: requests[i].GUID, + Filename: requests[i].Filename, + Bucket: requests[i].Bucket, + Status: http.StatusBadGateway, + Error: "missing result for request", + } + } + for i := range out.Results { + if i >= len(results) { + break + } + r := out.Results[i] + results[i].URL = r.URL + results[i].Status = r.Status + results[i].Error = r.Error + if results[i].Status == 0 { + results[i].Status = http.StatusOK + } + } + return results, nil +} + func (g *Signer) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { res, err := g.fence.InitMultipartUpload(ctx, filename, bucket, guid) if err != nil { diff --git a/transfer/signer/gen3/signer_test.go b/transfer/signer/gen3/signer_test.go new file mode 100644 index 0000000..1949f2b --- /dev/null +++ b/transfer/signer/gen3/signer_test.go @@ -0,0 +1,78 @@ +package gen3 + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" 
+ + "github.com/calypr/data-client/common" + "github.com/calypr/data-client/conf" + "github.com/calypr/data-client/request" +) + +type testRequestClient struct { + client *http.Client +} + +func (t *testRequestClient) New(method, url string) *request.RequestBuilder { + return &request.RequestBuilder{Method: method, Url: url, Headers: map[string]string{}} +} + +func (t *testRequestClient) Do(ctx context.Context, rb *request.RequestBuilder) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, rb.Method, rb.Url, rb.Body) + if err != nil { + return nil, err + } + for k, v := range rb.Headers { + req.Header.Set(k, v) + } + return t.client.Do(req) +} + +func TestResolveUploadURLsUsesSingleBulkRequest(t *testing.T) { + var calls int + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost || r.URL.Path != "/data/upload/bulk" { + http.NotFound(w, r) + return + } + calls++ + var req map[string]any + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + t.Fatalf("decode request body: %v", err) + } + _ = json.NewEncoder(w).Encode(map[string]any{ + "results": []map[string]any{ + {"file_id": "did-1", "file_name": "one.bin", "url": "https://signed/one", "status": 200}, + {"file_id": "did-2", "file_name": "two.bin", "url": "https://signed/two", "status": 200}, + }, + }) + })) + defer srv.Close() + + signer := New( + &testRequestClient{client: srv.Client()}, + &conf.Credential{APIEndpoint: srv.URL}, + nil, + nil, + ) + + out, err := signer.ResolveUploadURLs(context.Background(), []common.UploadURLResolveRequest{ + {GUID: "did-1", Filename: "one.bin", Bucket: "b1"}, + {GUID: "did-2", Filename: "two.bin", Bucket: "b1"}, + }) + if err != nil { + t.Fatalf("ResolveUploadURLs error: %v", err) + } + if calls != 1 { + t.Fatalf("expected exactly one bulk call, got %d", calls) + } + if len(out) != 2 { + t.Fatalf("expected 2 responses, got %d", len(out)) + } + if out[0].URL == "" || out[1].URL == 
"" { + t.Fatalf("expected signed URLs in both results, got %+v", out) + } +} diff --git a/transfer/signer/local/signer.go b/transfer/signer/local/signer.go index 822b38a..1452e6e 100644 --- a/transfer/signer/local/signer.go +++ b/transfer/signer/local/signer.go @@ -20,6 +20,29 @@ type Signer struct { client drs.Client } +type bulkUploadRequest struct { + Requests []bulkUploadItem `json:"requests"` +} + +type bulkUploadItem struct { + FileID string `json:"file_id"` + Bucket string `json:"bucket,omitempty"` + FileName string `json:"file_name,omitempty"` +} + +type bulkUploadResponse struct { + Results []bulkUploadResult `json:"results"` +} + +type bulkUploadResult struct { + FileID string `json:"file_id"` + Bucket string `json:"bucket,omitempty"` + FileName string `json:"file_name,omitempty"` + URL string `json:"url,omitempty"` + Status int `json:"status"` + Error string `json:"error,omitempty"` +} + func New(baseURL string, req request.RequestInterface, dc drs.Client) *Signer { return &Signer{ baseURL: baseURL, @@ -107,6 +130,78 @@ func (d *Signer) ResolveUploadURL(ctx context.Context, guid string, filename str return res.URL, nil } +func (d *Signer) ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) { + if len(requests) == 0 { + return []common.UploadURLResolveResponse{}, nil + } + + u, err := d.buildURL("data/upload/bulk") + if err != nil { + return nil, err + } + + payload := bulkUploadRequest{Requests: make([]bulkUploadItem, 0, len(requests))} + for _, req := range requests { + fileID := strings.TrimSpace(req.GUID) + if fileID == "" { + fileID = strings.TrimSpace(req.Filename) + } + payload.Requests = append(payload.Requests, bulkUploadItem{ + FileID: fileID, + Bucket: req.Bucket, + FileName: req.Filename, + }) + } + + var out bulkUploadResponse + if err := d.doJSONRequest(ctx, http.MethodPost, u, payload, &out); err != nil { + return nil, err + } + + results := 
make([]common.UploadURLResolveResponse, len(requests)) + if len(out.Results) == len(requests) { + for i := range requests { + r := out.Results[i] + results[i] = common.UploadURLResolveResponse{ + GUID: requests[i].GUID, + Filename: requests[i].Filename, + Bucket: requests[i].Bucket, + URL: r.URL, + Status: r.Status, + Error: r.Error, + } + if results[i].Status == 0 { + results[i].Status = http.StatusOK + } + } + return results, nil + } + + // If response count mismatches, align by request order and mark unresolved entries. + for i := range requests { + results[i] = common.UploadURLResolveResponse{ + GUID: requests[i].GUID, + Filename: requests[i].Filename, + Bucket: requests[i].Bucket, + Status: http.StatusBadGateway, + Error: "missing result for request", + } + } + for i := range out.Results { + if i >= len(results) { + break + } + r := out.Results[i] + results[i].URL = r.URL + results[i].Status = r.Status + results[i].Error = r.Error + if results[i].Status == 0 { + results[i].Status = http.StatusOK + } + } + return results, nil +} + func (d *Signer) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { u, err := d.buildURL("data/multipart/init") if err != nil { diff --git a/transfer/signer/local/signer_test.go b/transfer/signer/local/signer_test.go new file mode 100644 index 0000000..1fc9190 --- /dev/null +++ b/transfer/signer/local/signer_test.go @@ -0,0 +1,79 @@ +package local + +import ( + "context" + "encoding/json" + "io" + "log/slog" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/calypr/data-client/common" + "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/request" +) + +type testRequestClient struct { + client *http.Client +} + +func (t *testRequestClient) New(method, url string) *request.RequestBuilder { + return &request.RequestBuilder{Method: method, Url: url, Headers: map[string]string{}} +} + +func (t *testRequestClient) Do(ctx 
context.Context, rb *request.RequestBuilder) (*http.Response, error) { + req, err := http.NewRequestWithContext(ctx, rb.Method, rb.Url, rb.Body) + if err != nil { + return nil, err + } + for k, v := range rb.Headers { + req.Header.Set(k, v) + } + return t.client.Do(req) +} + +func TestResolveUploadURLsBatch(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost || r.URL.Path != "/data/upload/bulk" { + http.NotFound(w, r) + return + } + var req map[string]any + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + t.Fatalf("decode request: %v", err) + } + results := map[string]any{ + "results": []map[string]any{ + {"file_id": "did-1", "file_name": "one.bin", "url": "https://signed/one", "status": 200}, + {"file_id": "did-2", "file_name": "two.bin", "status": 400, "error": "bucket credential not found"}, + }, + } + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(results) + })) + defer srv.Close() + + logger := slog.New(slog.NewTextHandler(io.Discard, nil)) + req := &testRequestClient{client: srv.Client()} + dc := drs.NewLocalDrsClient(req, srv.URL, logger) + signer := New(srv.URL, req, dc) + + out, err := signer.ResolveUploadURLs(context.Background(), []common.UploadURLResolveRequest{ + {GUID: "did-1", Filename: "one.bin", Bucket: "b1"}, + {GUID: "did-2", Filename: "two.bin", Bucket: "b1"}, + }) + if err != nil { + t.Fatalf("ResolveUploadURLs returned error: %v", err) + } + if len(out) != 2 { + t.Fatalf("expected 2 results, got %d", len(out)) + } + if out[0].Status != http.StatusOK || out[0].URL == "" { + t.Fatalf("expected first result success, got %+v", out[0]) + } + if out[1].Status != http.StatusBadRequest || !strings.Contains(out[1].Error, "bucket credential not found") { + t.Fatalf("expected second result error, got %+v", out[1]) + } +} diff --git a/upload/multipart_test.go b/upload/multipart_test.go index 0e321a5..810b402 100644 --- 
a/upload/multipart_test.go +++ b/upload/multipart_test.go @@ -41,6 +41,9 @@ func (f *fakeGen3Upload) New(method, url string) *request.RequestBuilder { func (f *fakeGen3Upload) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { return "", fmt.Errorf("not implemented") } +func (f *fakeGen3Upload) ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) { + return nil, fmt.Errorf("not implemented") +} func (f *fakeGen3Upload) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { resp, err := f.Do(ctx, &request.RequestBuilder{Url: common.FenceDataMultipartInitEndpoint}) From 91b0b95801ebcf830bb85ab186531743b044ab41 Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Thu, 2 Apr 2026 14:50:29 -0700 Subject: [PATCH 09/13] remove apigen from data-client --- .gitignore | 6 +- Makefile | 79 +- apigen/api/internal.openapi.yaml | 621 ---- apigen/api/openapi.yaml | 2733 ----------------- apigen/drs/model_access_method.go | 380 --- apigen/drs/model_access_method_access_url.go | 197 -- .../drs/model_access_method_authorizations.go | 235 -- .../drs/model_access_method_update_request.go | 197 -- apigen/drs/model_access_url.go | 197 -- apigen/drs/model_authorizations.go | 238 -- ...model_bulk_access_method_update_request.go | 197 -- ...ess_method_update_request_updates_inner.go | 189 -- apigen/drs/model_bulk_access_url.go | 269 -- apigen/drs/model_bulk_delete_request.go | 238 -- apigen/drs/model_bulk_object_access_id.go | 163 - ..._access_id_bulk_object_access_ids_inner.go | 165 - .../drs/model_bulk_object_id_no_passport.go | 128 - ...bulk_update_access_methods_200_response.go | 160 - apigen/drs/model_checksum.go | 189 -- apigen/drs/model_contents_object.go | 271 -- apigen/drs/model_delete_request.go | 169 - apigen/drs/model_drs_object.go | 573 ---- 
apigen/drs/model_drs_object_candidate.go | 448 --- apigen/drs/model_drs_service.go | 224 -- apigen/drs/model_drs_service_drs.go | 825 ----- apigen/drs/model_drs_service_type.go | 159 - apigen/drs/model_error.go | 165 - .../model_get_bulk_access_url_200_response.go | 200 -- .../model_get_bulk_objects_200_response.go | 200 -- apigen/drs/model_get_bulk_objects_request.go | 197 -- .../model_get_service_info_200_response.go | 562 ---- .../model_options_bulk_object_200_response.go | 200 -- apigen/drs/model_post_access_url_request.go | 128 - apigen/drs/model_post_object_request.go | 165 - .../model_register_objects_201_response.go | 160 - apigen/drs/model_register_objects_request.go | 197 -- apigen/drs/model_service.go | 497 --- apigen/drs/model_service_organization.go | 189 -- apigen/drs/model_service_type.go | 218 -- apigen/drs/model_summary.go | 202 -- apigen/drs/model_unresolved_inner.go | 163 - apigen/drs/model_upload_method.go | 262 -- apigen/drs/model_upload_method_access_url.go | 197 -- apigen/drs/model_upload_request.go | 197 -- apigen/drs/model_upload_request_object.go | 321 -- apigen/drs/model_upload_response.go | 160 - apigen/drs/model_upload_response_object.go | 358 --- apigen/drs/utils.go | 362 --- .../internalapi/model_bulk_create_request.go | 158 - .../model_bulk_documents_request.go | 171 -- .../model_bulk_documents_request_one_of.go | 162 - .../internalapi/model_bulk_hashes_request.go | 158 - .../model_bulk_sha256_validity_request.go | 162 - .../model_delete_by_query_response.go | 126 - ...del_internal_multipart_complete_request.go | 250 -- .../model_internal_multipart_init_request.go | 198 -- .../model_internal_multipart_init_response.go | 162 - .../model_internal_multipart_part.go | 186 -- ...model_internal_multipart_upload_request.go | 250 -- ...odel_internal_multipart_upload_response.go | 126 - apigen/internalapi/model_internal_record.go | 379 --- .../model_internal_record_response.go | 559 ---- .../internalapi/model_internal_signed_url.go | 126 - 
.../model_internal_upload_blank_request.go | 162 - .../model_internal_upload_blank_response.go | 162 - .../model_list_records_response.go | 126 - apigen/internalapi/utils.go | 361 --- apigen/specs/drs-extensions-overlay.yaml | 25 - download/file_info.go | 4 +- download/transfer_test.go | 4 +- drs/access_method_conversions.go | 53 + drs/client.go | 507 +-- drs/convert.go | 107 - drs/drs.go | 35 +- drs/internal_types.go | 107 - drs/object_builder.go | 6 +- drs/object_builder_test.go | 4 +- drs/resolve.go | 6 +- drs/syfon_adapter.go | 61 + drs/types.go | 42 +- drs/upsert.go | 2 +- go.mod | 20 +- go.sum | 22 +- hash/hash.go | 12 +- localclient/client.go | 2 +- mocks/mock_drs_client.go | 45 +- tests/download-multiple_test.go | 4 +- transfer/signer/gen3/signer.go | 85 +- transfer/signer/local/signer.go | 237 +- transfer/signer/local/signer_test.go | 2 +- upload/multipart.go | 20 +- upload/orchestrator.go | 13 +- upload/upload.go | 20 +- 93 files changed, 414 insertions(+), 19565 deletions(-) delete mode 100644 apigen/api/internal.openapi.yaml delete mode 100644 apigen/api/openapi.yaml delete mode 100644 apigen/drs/model_access_method.go delete mode 100644 apigen/drs/model_access_method_access_url.go delete mode 100644 apigen/drs/model_access_method_authorizations.go delete mode 100644 apigen/drs/model_access_method_update_request.go delete mode 100644 apigen/drs/model_access_url.go delete mode 100644 apigen/drs/model_authorizations.go delete mode 100644 apigen/drs/model_bulk_access_method_update_request.go delete mode 100644 apigen/drs/model_bulk_access_method_update_request_updates_inner.go delete mode 100644 apigen/drs/model_bulk_access_url.go delete mode 100644 apigen/drs/model_bulk_delete_request.go delete mode 100644 apigen/drs/model_bulk_object_access_id.go delete mode 100644 apigen/drs/model_bulk_object_access_id_bulk_object_access_ids_inner.go delete mode 100644 apigen/drs/model_bulk_object_id_no_passport.go delete mode 100644 
apigen/drs/model_bulk_update_access_methods_200_response.go delete mode 100644 apigen/drs/model_checksum.go delete mode 100644 apigen/drs/model_contents_object.go delete mode 100644 apigen/drs/model_delete_request.go delete mode 100644 apigen/drs/model_drs_object.go delete mode 100644 apigen/drs/model_drs_object_candidate.go delete mode 100644 apigen/drs/model_drs_service.go delete mode 100644 apigen/drs/model_drs_service_drs.go delete mode 100644 apigen/drs/model_drs_service_type.go delete mode 100644 apigen/drs/model_error.go delete mode 100644 apigen/drs/model_get_bulk_access_url_200_response.go delete mode 100644 apigen/drs/model_get_bulk_objects_200_response.go delete mode 100644 apigen/drs/model_get_bulk_objects_request.go delete mode 100644 apigen/drs/model_get_service_info_200_response.go delete mode 100644 apigen/drs/model_options_bulk_object_200_response.go delete mode 100644 apigen/drs/model_post_access_url_request.go delete mode 100644 apigen/drs/model_post_object_request.go delete mode 100644 apigen/drs/model_register_objects_201_response.go delete mode 100644 apigen/drs/model_register_objects_request.go delete mode 100644 apigen/drs/model_service.go delete mode 100644 apigen/drs/model_service_organization.go delete mode 100644 apigen/drs/model_service_type.go delete mode 100644 apigen/drs/model_summary.go delete mode 100644 apigen/drs/model_unresolved_inner.go delete mode 100644 apigen/drs/model_upload_method.go delete mode 100644 apigen/drs/model_upload_method_access_url.go delete mode 100644 apigen/drs/model_upload_request.go delete mode 100644 apigen/drs/model_upload_request_object.go delete mode 100644 apigen/drs/model_upload_response.go delete mode 100644 apigen/drs/model_upload_response_object.go delete mode 100644 apigen/drs/utils.go delete mode 100644 apigen/internalapi/model_bulk_create_request.go delete mode 100644 apigen/internalapi/model_bulk_documents_request.go delete mode 100644 apigen/internalapi/model_bulk_documents_request_one_of.go 
delete mode 100644 apigen/internalapi/model_bulk_hashes_request.go delete mode 100644 apigen/internalapi/model_bulk_sha256_validity_request.go delete mode 100644 apigen/internalapi/model_delete_by_query_response.go delete mode 100644 apigen/internalapi/model_internal_multipart_complete_request.go delete mode 100644 apigen/internalapi/model_internal_multipart_init_request.go delete mode 100644 apigen/internalapi/model_internal_multipart_init_response.go delete mode 100644 apigen/internalapi/model_internal_multipart_part.go delete mode 100644 apigen/internalapi/model_internal_multipart_upload_request.go delete mode 100644 apigen/internalapi/model_internal_multipart_upload_response.go delete mode 100644 apigen/internalapi/model_internal_record.go delete mode 100644 apigen/internalapi/model_internal_record_response.go delete mode 100644 apigen/internalapi/model_internal_signed_url.go delete mode 100644 apigen/internalapi/model_internal_upload_blank_request.go delete mode 100644 apigen/internalapi/model_internal_upload_blank_response.go delete mode 100644 apigen/internalapi/model_list_records_response.go delete mode 100644 apigen/internalapi/utils.go delete mode 100644 apigen/specs/drs-extensions-overlay.yaml create mode 100644 drs/access_method_conversions.go delete mode 100644 drs/convert.go delete mode 100644 drs/internal_types.go create mode 100644 drs/syfon_adapter.go diff --git a/.gitignore b/.gitignore index 6aa2b55..453ca8f 100644 --- a/.gitignore +++ b/.gitignore @@ -28,4 +28,8 @@ # Build artifacts /build/ /bin/ -checksums.txt \ No newline at end of file +checksums.txt +# Local caches and binaries +/.gocache/ +/.tmp/ +/data-client diff --git a/Makefile b/Makefile index d1246a2..ebf8087 100644 --- a/Makefile +++ b/Makefile @@ -14,15 +14,8 @@ BIN_DIR := ./bin COVERAGE_THRESHOLD := 30 PACKAGE_COVERAGE_THRESHOLD := 20 -# OpenAPI Generation Variables -OPENAPI ?= ga4gh/data-repository-service-schemas/openapi/data_repository_service.openapi.yaml -OAG_IMAGE ?= 
openapitools/openapi-generator-cli:latest -REDOCLY_IMAGE ?= redocly/cli:latest -YQ_IMAGE ?= mikefarah/yq:latest -GEN_OUT ?= .tmp/apigen.gen -INTERNAL_OPENAPI ?= apigen/api/internal.openapi.yaml -INTERNAL_GEN_OUT ?= .tmp/apigen-internal.gen -SCHEMAS_SUBMODULE ?= ga4gh/data-repository-service-schemas +# OpenAPI generation now lives in syfon. +SYFON_DIR ?= ../syfon # --- Targets --- @@ -68,70 +61,22 @@ generate: ## gen: Generates Go models from OpenAPI specs gen: @set -euo pipefail; \ - mkdir -p .tmp; \ - spec="$(OPENAPI)"; \ - if [[ ! -f "$$spec" ]]; then \ - echo "ERROR: OpenAPI spec '$$spec' not found. Run: make init-schemas"; \ + if [[ ! -d "$(SYFON_DIR)" ]]; then \ + echo "ERROR: syfon repo not found at $(SYFON_DIR)"; \ exit 1; \ fi; \ - if ! command -v docker >/dev/null 2>&1; then \ - echo "ERROR: docker is required for 'make gen'."; \ - exit 1; \ - fi; \ - echo "Bundling canonical OpenAPI spec with Redocly..."; \ - docker run --rm \ - --user "$$(id -u):$$(id -g)" \ - -v "$(PWD):/local" \ - $(REDOCLY_IMAGE) bundle /local/$$spec --output /local/.tmp/drs.base.yaml --ext yaml; \ - echo "Merging internal Extensions with yq..."; \ - docker run --rm \ - --user "$$(id -u):$$(id -g)" \ - -v "$(PWD):/local" \ - $(YQ_IMAGE) eval-all 'select(fileIndex == 0) * select(fileIndex == 1)' /local/.tmp/drs.base.yaml /local/apigen/specs/drs-extensions-overlay.yaml > apigen/api/openapi.yaml; \ - rm -rf "$(GEN_OUT)"; \ - docker run --rm --pull=missing \ - --user "$$(id -u):$$(id -g)" \ - -v "$(PWD):/local" \ - $(OAG_IMAGE) generate \ - -g go \ - --skip-validate-spec \ - --git-repo-id data-client \ - --git-user-id calypr \ - -i /local/apigen/api/openapi.yaml \ - -o /local/$(GEN_OUT) \ - --global-property models,modelDocs=false,modelTests=false,supportingFiles=utils.go \ - --additional-properties packageName=drs,enumClassPrefix=true; \ - mkdir -p apigen/api apigen; \ - rm -rf apigen/drs; \ - mkdir -p apigen/drs; \ - find "$(GEN_OUT)" -maxdepth 1 -type f -name '*.go' -exec mv {} 
apigen/drs/ \; ; \ - echo "Generated DRS client models into ./apigen/drs"; \ - if [[ -f "$(INTERNAL_OPENAPI)" ]]; then $(MAKE) gen-internal; fi + echo "--> OpenAPI generation is centralized in syfon"; \ + $(MAKE) -C "$(SYFON_DIR)" gen .PHONY: gen-internal gen-internal: @set -euo pipefail; \ - rm -rf "$(INTERNAL_GEN_OUT)"; \ - docker run --rm --pull=missing \ - --user "$$(id -u):$$(id -g)" \ - -v "$(PWD):/local" \ - $(OAG_IMAGE) generate \ - -g go \ - --skip-validate-spec \ - --git-repo-id data-client \ - --git-user-id calypr \ - -i /local/apigen/api/internal.openapi.yaml \ - -o /local/$(INTERNAL_GEN_OUT) \ - --global-property models,modelDocs=false,modelTests=false,supportingFiles=utils.go \ - --additional-properties packageName=internalapi,enumClassPrefix=true; \ - rm -rf apigen/internalapi; \ - mkdir -p apigen/internalapi; \ - find "$(INTERNAL_GEN_OUT)" -maxdepth 1 -type f -name '*.go' -exec mv {} apigen/internalapi/ \; ; \ - echo "Generated Internal models into ./apigen/internalapi" - -.PHONY: init-schemas -init-schemas: - @git submodule update --init --recursive --depth 1 "$(SCHEMAS_SUBMODULE)" + if [[ ! -d "$(SYFON_DIR)" ]]; then \ + echo "ERROR: syfon repo not found at $(SYFON_DIR)"; \ + exit 1; \ + fi; \ + echo "--> Internal model generation is centralized in syfon"; \ + $(MAKE) -C "$(SYFON_DIR)" gen-internal ## tidy: Cleans up module dependencies and formats go files tidy: diff --git a/apigen/api/internal.openapi.yaml b/apigen/api/internal.openapi.yaml deleted file mode 100644 index 27c1c9d..0000000 --- a/apigen/api/internal.openapi.yaml +++ /dev/null @@ -1,621 +0,0 @@ -openapi: 3.0.3 -info: - title: Internal Compatibility API (DRS Server) - version: 1.0.0 - description: | - Consolidated internal API delta for drs-server. - This spec captures non-GA4GH DRS internal/compatibility routes. 
-servers: - - url: / -tags: - - name: internal-index - - name: internal-data -paths: - /index: - get: - tags: - - internal-index - operationId: internalList - parameters: - - in: query - name: hash - schema: - type: string - - in: query - name: authz - schema: - type: string - - in: query - name: organization - schema: - type: string - - in: query - name: program - schema: - type: string - - in: query - name: project - schema: - type: string - responses: - '200': - description: List response - content: - application/json: - schema: - $ref: '#/components/schemas/ListRecordsResponse' - '400': - description: Validation error - '401': - description: Authentication required - '403': - description: Forbidden - '500': - description: Internal server error - '501': - description: Listing mode not implemented without query params - post: - tags: - - internal-index - operationId: internalCreate - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/InternalRecord' - responses: - '201': - description: Created - content: - application/json: - schema: - $ref: '#/components/schemas/InternalRecordResponse' - '400': - description: Validation error - '401': - description: Authentication required - '403': - description: Forbidden - '500': - description: Internal server error - delete: - tags: - - internal-index - operationId: internalDeleteByQuery - parameters: - - in: query - name: authz - schema: - type: string - - in: query - name: organization - schema: - type: string - - in: query - name: program - schema: - type: string - - in: query - name: project - schema: - type: string - responses: - '200': - description: Deleted count - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteByQueryResponse' - '400': - description: Validation error - '401': - description: Authentication required - '403': - description: Forbidden - '500': - description: Internal server error - /index/{id}: - get: - tags: - - internal-index 
- operationId: internalGet - parameters: - - in: path - name: id - required: true - schema: - type: string - responses: - '200': - description: Record - content: - application/json: - schema: - $ref: '#/components/schemas/InternalRecordResponse' - '404': - description: Not found - '500': - description: Internal server error - put: - tags: - - internal-index - operationId: internalUpdate - parameters: - - in: path - name: id - required: true - schema: - type: string - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/InternalRecord' - responses: - '200': - description: Updated - content: - application/json: - schema: - $ref: '#/components/schemas/InternalRecordResponse' - '400': - description: Validation error - '404': - description: Not found - '500': - description: Internal server error - delete: - tags: - - internal-index - operationId: internalDelete - parameters: - - in: path - name: id - required: true - schema: - type: string - responses: - '200': - description: Deleted - '404': - description: Not found - '500': - description: Internal server error - /index/bulk: - post: - tags: - - internal-index - operationId: internalBulkCreate - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/BulkCreateRequest' - responses: - '201': - description: Created records - content: - application/json: - schema: - $ref: '#/components/schemas/ListRecordsResponse' - '400': - description: Validation error - '401': - description: Authentication required - '403': - description: Forbidden - '500': - description: Internal server error - /index/bulk/hashes: - post: - tags: - - internal-index - operationId: internalBulkHashes - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/BulkHashesRequest' - responses: - '200': - description: Matched records - content: - application/json: - schema: - $ref: '#/components/schemas/ListRecordsResponse' 
- '400': - description: Validation error - '500': - description: Internal server error - /index/bulk/sha256/validity: - post: - tags: - - internal-index - operationId: internalBulkSHA256Validity - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/BulkSHA256ValidityRequest' - responses: - '200': - description: sha256 validity map - content: - application/json: - schema: - type: object - additionalProperties: - type: boolean - '400': - description: Validation error - '500': - description: Internal server error - /index/bulk/documents: - post: - tags: - - internal-index - operationId: internalBulkDocuments - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/BulkDocumentsRequest' - responses: - '200': - description: Records - content: - application/json: - schema: - type: array - items: - $ref: '#/components/schemas/InternalRecordResponse' - '400': - description: Validation error - '500': - description: Internal server error - /data/download/{file_id}: - get: - tags: - - internal-data - operationId: internalDownloadData - parameters: - - in: path - name: file_id - required: true - schema: - type: string - responses: - '200': - description: Signed download URL - content: - application/json: - schema: - $ref: '#/components/schemas/InternalSignedURL' - '401': - description: Authentication required - '403': - description: Forbidden - '404': - description: Not found - '500': - description: Internal server error - /data/upload: - post: - tags: - - internal-data - operationId: internalUploadBlank - requestBody: - required: false - content: - application/json: - schema: - $ref: '#/components/schemas/InternalUploadBlankRequest' - responses: - '200': - description: Upload URL issued - content: - application/json: - schema: - $ref: '#/components/schemas/InternalUploadBlankResponse' - '400': - description: Validation error - '401': - description: Authentication required - '403': - 
description: Forbidden - '500': - description: Internal server error - /data/upload/{file_id}: - get: - tags: - - internal-data - operationId: internalUploadURL - parameters: - - in: path - name: file_id - required: true - schema: - type: string - responses: - '200': - description: Signed upload URL - content: - application/json: - schema: - $ref: '#/components/schemas/InternalSignedURL' - '400': - description: Validation error - '401': - description: Authentication required - '403': - description: Forbidden - '500': - description: Internal server error - /data/multipart/init: - post: - tags: - - internal-data - operationId: internalMultipartInit - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/InternalMultipartInitRequest' - responses: - '200': - description: Multipart upload initialized - content: - application/json: - schema: - $ref: '#/components/schemas/InternalMultipartInitResponse' - '400': - description: Validation error - '401': - description: Authentication required - '403': - description: Forbidden - '500': - description: Internal server error - /data/multipart/upload: - post: - tags: - - internal-data - operationId: internalMultipartUpload - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/InternalMultipartUploadRequest' - responses: - '200': - description: Part presigned URL - content: - application/json: - schema: - $ref: '#/components/schemas/InternalMultipartUploadResponse' - '400': - description: Validation error - '401': - description: Authentication required - '403': - description: Forbidden - '500': - description: Internal server error - /data/multipart/complete: - post: - tags: - - internal-data - operationId: internalMultipartComplete - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/InternalMultipartCompleteRequest' - responses: - '200': - description: Upload completed - '400': - 
description: Validation error - '401': - description: Authentication required - '403': - description: Forbidden - '500': - description: Internal server error -components: - schemas: - HashInfo: - type: object - additionalProperties: - type: string - description: Hash map, e.g. {"sha256":"..."} - InternalRecord: - type: object - properties: - did: - type: string - hashes: - $ref: '#/components/schemas/HashInfo' - size: - type: integer - format: int64 - urls: - type: array - items: - type: string - authz: - type: array - items: - type: string - file_name: - type: string - organization: - type: string - project: - type: string - InternalRecordResponse: - allOf: - - $ref: '#/components/schemas/InternalRecord' - - type: object - properties: - baseid: - type: string - rev: - type: string - created_date: - type: string - updated_date: - type: string - uploader: - type: string - ListRecordsResponse: - type: object - properties: - records: - type: array - items: - $ref: '#/components/schemas/InternalRecordResponse' - BulkCreateRequest: - type: object - required: - - records - properties: - records: - type: array - items: - $ref: '#/components/schemas/InternalRecord' - BulkHashesRequest: - type: object - required: - - hashes - properties: - hashes: - type: array - items: - type: string - BulkSHA256ValidityRequest: - type: object - properties: - sha256: - type: array - items: - type: string - hashes: - type: array - items: - type: string - BulkDocumentsRequest: - oneOf: - - type: array - items: - type: string - - type: object - properties: - ids: - type: array - items: - type: string - dids: - type: array - items: - type: string - DeleteByQueryResponse: - type: object - properties: - deleted: - type: integer - InternalSignedURL: - type: object - properties: - url: - type: string - InternalUploadBlankRequest: - type: object - properties: - guid: - type: string - authz: - type: array - items: - type: string - InternalUploadBlankResponse: - type: object - properties: - guid: - 
type: string - url: - type: string - InternalMultipartInitRequest: - type: object - properties: - guid: - type: string - file_name: - type: string - bucket: - type: string - InternalMultipartInitResponse: - type: object - properties: - guid: - type: string - uploadId: - type: string - InternalMultipartUploadRequest: - type: object - required: - - key - - uploadId - - partNumber - properties: - key: - type: string - bucket: - type: string - uploadId: - type: string - partNumber: - type: integer - format: int32 - InternalMultipartUploadResponse: - type: object - properties: - presigned_url: - type: string - InternalMultipartPart: - type: object - required: - - PartNumber - - ETag - properties: - PartNumber: - type: integer - format: int32 - ETag: - type: string - InternalMultipartCompleteRequest: - type: object - required: - - key - - uploadId - - parts - properties: - key: - type: string - bucket: - type: string - uploadId: - type: string - parts: - type: array - items: - $ref: '#/components/schemas/InternalMultipartPart' diff --git a/apigen/api/openapi.yaml b/apigen/api/openapi.yaml deleted file mode 100644 index 3cb3496..0000000 --- a/apigen/api/openapi.yaml +++ /dev/null @@ -1,2733 +0,0 @@ -openapi: 3.0.3 -info: - title: Data Repository Service - version: 1.5.0 - x-logo: - url: https://www.ga4gh.org/wp-content/themes/ga4gh/dist/assets/svg/logos/logo-full-color.svg - termsOfService: https://www.ga4gh.org/terms-and-conditions/ - contact: - name: GA4GH Cloud Work Stream - email: ga4gh-cloud@ga4gh.org - license: - name: Apache 2.0 - url: https://raw.githubusercontent.com/ga4gh/data-repository-service-schemas/master/LICENSE -servers: - - url: https://{serverURL}/ga4gh/drs/v1 - variables: - serverURL: - default: drs.example.org - description: | - DRS server endpoints MUST be prefixed by the '/ga4gh/drs/v1' endpoint path -security: - - {} - - BasicAuth: [] - - BearerAuth: [] -tags: - - name: Introduction - description: | - The Data Repository Service (DRS) API provides 
a generic interface to data repositories so data consumers, including workflow systems, can access data objects in a single, standard way regardless of where they are stored and how they are managed. The primary functionality of DRS is to map a logical ID to a means for physically retrieving the data represented by the ID. The sections below describe the characteristics of those IDs, the types of data supported, how they can be pointed to using URIs, and how clients can use these URIs to ultimately make successful DRS API requests. This document also describes the DRS API in detail and provides information on the specific endpoints, request formats, and responses. This specification is intended for developers of DRS-compatible services and of clients that will call these DRS services. - - The key words MUST, MUST NOT, REQUIRED, SHALL, SHALL NOT, SHOULD, SHOULD NOT, RECOMMENDED, MAY, and OPTIONAL in this document are to be interpreted as described in [RFC 2119](https://datatracker.ietf.org/doc/html/rfc2119). - - name: DRS API Principles - description: | - ## DRS IDs - - Each implementation of DRS can choose its own id scheme, as long as it follows these guidelines: - - * DRS IDs are strings made up of uppercase and lowercase letters, decimal digits, hyphen, period, underscore and tilde [A-Za-z0-9.-_~]. See [RFC 3986 § 2.3](https://datatracker.ietf.org/doc/html/rfc3986#section-2.3). - * DRS IDs can contain other characters, but they MUST be encoded into valid DRS IDs whenever they are used in API calls. This is because non-encoded IDs may interfere with the interpretation of the objects/{id}/access endpoint. To overcome this limitation use percent-encoding of the ID, see [RFC 3986 § 2.4](https://datatracker.ietf.org/doc/html/rfc3986#section-2.4) - * One DRS ID MUST always return the same object data (or, in the case of a collection, the same set of objects). This constraint aids with reproducibility. 
- * DRS implementations MAY have more than one ID that maps to the same object. - * DRS version 1.x does NOT support semantics around multiple versions of an object. (For example, there’s no notion of “get latest version” or “list all versions”.) Individual implementations MAY choose an ID scheme that includes version hints. - - ## DRS URIs - - For convenience, including when passing content references to a [WES server](https://github.com/ga4gh/workflow-execution-service-schemas), we define a [URI scheme](https://en.wikipedia.org/wiki/Uniform_Resource_Identifier#Generic_syntax) for DRS-accessible content. This section documents the syntax of DRS URIs, and the rules clients follow for translating a DRS URI into a URL that they use for making the DRS API calls described in this spec. - - There are two styles of DRS URIs, Hostname-based and Compact Identifier-based, both using the `drs://` URI scheme. DRS servers may choose either style when exposing references to their content;. DRS clients MUST support resolving both styles. - - Tip: - > See [Appendix: Background Notes on DRS URIs](#tag/Background-Notes-on-DRS-URIs) for more information on our design motivations for DRS URIs. - - ### Hostname-based DRS URIs - - Hostname-based DRS URIs are simpler than compact identifier-based URIs. They contain the DRS server name and the DRS ID only and can be converted directly into a fetchable URL based on a simple rule. They take the form: - - ``` - drs:/// - ``` - - DRS URIs of this form mean *\"you can fetch the content with DRS id \ from the DRS server at \\"*. - For example, here are the client resolution steps if the URI is: - - ``` - drs://drs.example.org/314159 - ``` - - 1. The client parses the string to extract the hostname of “drs.example.org” and the id of “314159”. - 2. 
The client makes a GET request to the DRS server, using the standard DRS URL syntax: - - ``` - GET https://drs.example.org/ga4gh/drs/v1/objects/314159 - ``` - - The protocol is always https and the port is always the standard 443 SSL port. It is invalid to include a different port in a DRS hostname-based URI. - - Tip: - > See the [Appendix: Hostname-Based URIs](#tag/Hostname-Based-URIs) for information on how hostname-based DRS URI resolution to URLs is likely to change in the future, when the DRS v2 major release happens. - - ### Compact Identifier-based DRS URIs - - Compact Identifier-based DRS URIs use resolver registry services (specifically, [identifiers.org](https://identifiers.org/) and [n2t.net (Name-To-Thing)](https://n2t.net/)) to provide a layer of indirection between the DRS URI and the DRS server name — the actual DNS name of the DRS server is not present in the URI. This approach is based on the Joint Declaration of Data Citation Principles as detailed by [Wimalaratne et al (2018)](https://www.nature.com/articles/sdata201829). - - For more information, see the document [More Background on Compact Identifiers](./more-background-on-compact-identifiers.html). - - Compact Identifiers take the form: - - ``` - drs://[provider_code/]namespace:accession - ``` - - Together, provider code and the namespace are referred to as the `prefix`. The provider code is optional and is used by identifiers.org/n2t.net for compact identifier resolver mirrors. Both the `provider_code` and `namespace` disallow spaces or punctuation, only lowercase alphanumerical characters, underscores and dots are allowed (e.g. [A-Za-z0-9._]). - - Tip: - > See the [Appendix: Compact Identifier-Based URIs](#tag/Compact-Identifier-Based-URIs) for more background on Compact Identifiers and resolver registry services like identifiers.org/n2t.net (aka meta-resolvers), how to register prefixes, possible caching strategies, and security considerations. 
- - #### For DRS Servers - - If your DRS implementation will issue DRS URIs based *on your own* compact identifiers, you MUST first register a new prefix with identifiers.org (which is automatically mirrored to n2t.net). You will also need to include a provider resolver resource in this registration which links the prefix to your DRS server, so that DRS clients can get sufficient information to make a successful DRS GET request. For clarity, we recommend you choose a namespace beginning with `drs`. - - #### For DRS Clients - - A DRS client parses the DRS URI compact identifier components to extract the prefix and the accession, and then uses meta-resolver APIs to locate the actual DRS server. For example, here are the client resolution steps if the URI is: - - ``` - drs://drs.42:314159 - ``` - - 1. The client parses the string to extract the prefix of `drs.42` and the accession of `314159`, using the first occurrence of a colon (":") character after the initial `drs://` as a delimiter. (The colon character is not allowed in a Hostname-based DRS URI, making it easy to tell them apart.) - - 2. The client makes API calls to a meta-resolver to look up the URL pattern for the namespace. (See [Calling Meta-Resolver APIs for Compact Identifier-Based DRS URIs](#section/Calling-Meta-Resolver-APIs-for-Compact-Identifier-Based-DRS-URIs) for details.) The URL pattern is a string containing a `{$id}` parameter, such as: - - ``` - https://drs.myexample.org/ga4gh/drs/v1/objects/{$id} - ``` - - 3. The client generates a DRS URL from the URL template by replacing {$id} with the accession it extracted in step 1. It then makes a GET request to the DRS server: - - ``` - GET https://drs.myexample.org/ga4gh/drs/v1/objects/314159 - ``` - - 4. The client follows any HTTP redirects returned in step 3, in case the resolver goes through an extra layer of redirection. - - For performance reasons, DRS clients SHOULD cache the URL pattern returned in step 2, with a suggested 24 hour cache life. 
- - ### Choosing a URI Style - - DRS servers can choose to issue either hostname-based or compact identifier-based DRS URIs, and can be confident that compliant DRS clients will support both. DRS clients must be able to accommodate both URI types. Tradeoffs that DRS server builders, and third parties who need to cite DRS objects in datasets, workflows or elsewhere, may want to consider include: - - *Table 1: Choosing a URI Style* - - | | Hostname-based | Compact Identifier-based | - |-------------------|----------------|--------------------------| - | URI Durability | URIs are valid for as long as the server operator maintains ownership of the published DNS address. (They can of course point that address at different physical serving infrastructure as often as they would like.) | URIs are valid for as long as the server operator maintains ownership of the published compact identifier resolver namespace. (They also depend on the meta-resolvers like identifiers.org/n2t.net remaining operational, which is intended to be essentially forever.) | - | Client Efficiency | URIs require minimal client logic, and no network requests, to resolve. | URIs require small client logic, and 1-2 cacheable network requests, to resolve. | - | Security | Servers have full control over their own security practices. | Server operators, in addition to maintaining their own security practices, should confirm they are comfortable with the resolver registry security practices, including protection against denial of service and namespace-hijacking attacks. (See the [Appendix: Compact Identifier-Based URIs](#tag/Compact-Identifier-Based-URIs) for more information on resolver registry security.) | - - ## DRS Datatypes - DRS's job is data access, period. Therefore, the DRS API supports a simple flat content model -- every `DrsObject`, like a file, represents a single opaque blob of bytes. DRS has no understanding of the meaning of objects and only provides simple domain-agnostic metadata. 
Understanding the semantics of specific object types is the responsibility of the applications that use DRS to fetch those objects (e.g. samtools for BAM files, DICOM viewers for DICOM objects). - - ### Atomic Objects - DRS can be used to access individual objects of all kinds, simple or complex, large or small, stored in type-specific formats (e.g. BAM files, VCF files, CSV files). At the API level these are all the same; at the application level, DRS clients and servers are expected to agree on object semantics using non-DRS mechanisms, including but not limited to the GA4GH Data Connect API. - - ### Compound Objects - DRS can also be used to access compound objects, consisting of two or more atomic objects related to each other in a well-specified way. See the [Appendix: Compound Objects](#tag/Working-With-Compound-Objects) for suggested best practices for working with compound objects. - - ### [DEPRECATED] Bundles - Previous versions of the DRS API spec included support for a *bundle* content type, which was a folder-like collection of other DRS objects (either blobs or bundles), represented by a `DrsObject` with a `contents` array. As of v1.3, bundles have been deprecated in favor of the best practices documented in the [Appendix: Compound Objects](#tag/Working-With-Compound-Objects). A future version of the API spec may remove bundle support entirely and/or replace bundles with a scalable approach based on the needs of our driver projects. - - ## Read-only - - DRS v1 is a read-only API. We expect that each implementation will define its own mechanisms and interfaces (graphical and/or programmatic) for adding and updating data. - - ## Standards - - The DRS API specification is written in OpenAPI and embodies a RESTful service philosophy. It uses JSON in requests and responses and standard HTTPS on port 443 for information transport. 
Optionally, it - supports authentication and authorization using the [GA4GH Passport](https://github.com/ga4gh-duri/ga4gh-duri.github.io/tree/master/researcher_ids) standard. - - name: Authorization & Authentication - description: "## Making DRS Requests\n\nThe DRS implementation is responsible for defining and enforcing an authorization policy that determines which users are allowed to make which requests. GA4GH recommends that DRS implementations use an OAuth 2.0 [bearer token](https://oauth.net/2/bearer-tokens/) or a [GA4GH Passport](https://github.com/ga4gh-duri/ga4gh-duri.github.io/tree/master/researcher_ids), although they can choose other mechanisms if appropriate.\n\n## Fetching DRS Objects\n\nThe DRS API allows implementers to support a variety of different content access policies, depending on what `AccessMethod` records they return. Implementers have a choice to make the\nGET /objects/{object_id} and GET /objects/{object_id}/access/{access_id} calls open or requiring a Basic, Bearer, or Passport token (Passport requiring a POST). The following describes the\nvarious access approaches following a successful GET/POST /objects/{object_id} request in order to them obtain access to the bytes for a given object ID/access ID:\n\n* public content:\n * server provides an `access_url` with a `url` and no `headers`\n * caller fetches the object bytes without providing any auth info\n* private content that requires the caller to have out-of-band auth knowledge (e.g. service account credentials):\n * server provides an `access_url` with a `url` and no `headers`\n * caller fetches the object bytes, passing the auth info they obtained out-of-band\n* private content that requires the caller to pass an Authorization token:\n * server provides an `access_url` with a `url` and `headers`\n * caller fetches the object bytes, passing auth info via the specified header(s)\n* private content that uses an expensive-to-generate auth mechanism (e.g. 
a signed URL):\n * server provides an `access_id`\n * caller passes the `access_id` to the `/access` endpoint\n * server provides an `access_url` with the generated mechanism (e.g. a signed URL in the `url` field)\n * caller fetches the object bytes from the `url` (passing auth info from the specified headers, if any)\n\nIn the approaches above [GA4GH Passports](https://github.com/ga4gh-duri/ga4gh-duri.github.io/tree/master/researcher_ids) are not mentioned and that is on purpose. A DRS server may return a Bearer token or other platform-specific token in a header in response to a valid Bearer token or GA4GH Passport (Option 3 above). But it is not the responsibility of a DRS server to return a Passport, that is the responsibility of a Passport Broker and outside the scope of DRS.\n\nDRS implementers should ensure their solutions restrict access to targets as much as possible, detect attempts to exploit through log monitoring, and they are prepared to take action if an exploit in their DRS implementation is detected.\n\n## Authentication\n\n### Discovery\n\nThe APIs to fetch [DrsObjects](#tag/DrsObjectModel) and [AccessURLs](#tag/AccessURLModel) may require authorization. The authorization mode may vary between DRS objects hosted by a service. The authorization mode may vary between the APIs to fetch a [DrsObject](#tag/DrsObjectModel) and an associated [AccessURL](#tag/AccessURLModel). Implementers should indicate how to authenticate to fetch a [DrsObject](#tag/DrsObjectModel) by implementing the [OptionsOjbect](#operation/OptionsObject) API. Implementers should indicate how to authenticate to fetch an [AccessURL](#tag/AccessURLModel) within a [DrsObject](#tag/DrsObjectModel). \n\n### Modes\n\n#### BasicAuth\n\nA valid authorization token must be passed in the 'Authorization' header, e.g. 
\"Basic ${token_string}\"\n\n| Security Scheme Type | HTTP |\n|----------------------|------|\n| **HTTP Authorization Scheme** | basic |\n\n#### BearerAuth\n\nA valid authorization token must be passed in the 'Authorization' header, e.g. \"Bearer ${token_string}\"\n\n| Security Scheme Type | HTTP |\n|----------------------|------|\n| **HTTP Authorization Scheme** | bearer |\n\n#### PassportAuth\n\nA valid authorization [GA4GH Passport](https://github.com/ga4gh-duri/ga4gh-duri.github.io/tree/master/researcher_ids) token must be passed in the body of a POST request\n\n| Security Scheme Type | HTTP |\n|----------------------|------|\n| **HTTP POST** | tokens[] |\n" - - name: Objects - - name: Upload Request - - name: Access Method Updates - description: "# Access Method Updates\n\n> **Optional Functionality**: Access method updates are optional extensions to the DRS API. Not all DRS servers are required to implement this functionality. Clients should check `/service-info` for `accessMethodUpdateSupported` before attempting to use these endpoints.\n\nAccess method update endpoints allows authorized clients to modify how existing DRS objects can be accessed without changing the core object metadata (size, checksums, name). 
This is useful for storage migrations, adding mirrors, or updating URLs.\n\nThese endpoints will overwrite existing access methods for an object, if clients want to add access methods in addition to existing ones for objects they should first retrieve the current access methods and include them in the update request along with the new methods.\n\n## Use Cases\n\n- **Storage Migration**: Move data between storage providers while keeping same DRS object\n- **Mirror Addition**: Add CDN or regional access points for better performance \n- **URL Refresh**: Update changed domain names\n- **Access Optimization**: Add or remove access methods based on performance or cost\n\n## Design Principles\n\n- **Optional**: Access method update support is completely optional\n- **Immutable Core**: Only access methods can be updated - size, checksums, name remain unchanged\n- **Atomic Bulk Operations**: All updates succeed or all fail (transactional)\n- **Optional Validation**: Servers MAY validate new access methods point to same data\n- **Flexible Authentication**: Supports GA4GH Passports, Bearer tokens, API keys\n\n## Service Discovery\n\nCheck `/service-info` for access method update capabilities:\n\n```json\n{\n \"drs\": {\n \"accessMethodUpdateSupported\": true,\n \"maxBulkAccessMethodUpdateLength\": 100,\n \"validateAccessMethodUpdates\": false\n }\n}\n```\n\n- **`accessMethodUpdateSupported`**: Whether server supports access method updates\n- **`maxBulkAccessMethodUpdateLength`**: Maximum objects per bulk update request\n- **`validateAccessMethodUpdates`**: Whether server validates new access methods\n\n## Single Object Update\n\nUpdate access methods for a single DRS object:\n\n```bash\ncurl -X POST \"https://drs.example.org/objects/obj_123/access-methods\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"access_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\n \"url\": \"https://new-cdn.example.org/data/file.bam\"\n }\n },\n {\n \"type\": \"s3\",\n 
\"access_id\": \"s3,\n \"access_url\": {\n \"url\": \"s3://new-bucket/migrated/file.bam\"\n }\n }\n ]\n }'\n```\n\n## Bulk Object Update\n\nUpdate access methods for multiple objects atomically:\n\n```bash\ncurl -X POST \"https://drs.example.org/objects/access-methods\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"updates\": [\n {\n \"object_id\": \"obj_123\",\n \"access_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\"url\": \"https://new-location.com/file1.bam\"}\n }\n ]\n },\n {\n \"object_id\": \"obj_456\", \n \"access_methods\": [\n {\n \"type\": \"s3\",\n \"access_url\": {\"url\": \"s3://new-bucket/file2.vcf\"}\n }\n ]\n }\n ]\n }'\n```\n\n## Authentication\n\n**GA4GH Passports** (in request body):\n```json\n{\n \"access_methods\": [...],\n \"passports\": [\"eyJhbGci...\"]\n}\n```\n\n**Bearer Tokens** (in headers):\n```bash\ncurl -H \"Authorization: Bearer token\" -d '{\"access_methods\": [...]}' ...\n```\n\n## Validation\n\nServers MAY validate that new access methods point to the same data by checkingm file availability, checksums or file content. 
Validation behavior is advertised in `validateAccessMethodUpdates` service-info field.\n\n\n## Error Responses\n\n- **400**: Invalid access methods or validation failure\n- **401**: Authentication required\n- **403**: Insufficient permissions for object(s)\n- **404**: Object not found or access method updates not supported\n- **413**: Bulk request exceeds `maxBulkAccessMethodUpdateLength` limit\n\n## Examples\n\n**Storage Migration:**\n```bash\n# Check server capabilities\ncurl \"https://drs.example.org/service-info\"\n\n# Update single object after migration\ncurl -X POST \"https://drs.example.org/objects/obj_123/access-methods\" \\\n -d '{\"access_methods\": [{\"type\": \"s3\", \"access_url\": {\"url\": \"s3://new-bucket/file.bam\"}}]}'\n```\n\n**Add CDN Mirror:**\n```bash\n# Add additional access method without removing existing ones\ncurl -X POST \"https://drs.example.org/objects/obj_456/access-methods\" \\\n -d '{\n \"access_methods\": [\n {\"type\": \"https\", \"access_url\": {\"url\": \"https://origin.example.org/file.vcf\"}},\n {\"type\": \"https\", \"access_url\": {\"url\": \"https://cdn.example.org/file.vcf\"}}\n ]\n }'\n```\n\n**Bulk Migration:**\n```bash\n# Migrate multiple objects atomically\ncurl -X POST \"https://drs.example.org/objects/access-methods\" \\\n -d '{\n \"updates\": [\n {\"object_id\": \"obj_1\", \"access_methods\": [...]},\n {\"object_id\": \"obj_2\", \"access_methods\": [...]}\n ]\n }'\n```\n\n## Best Practices\n\n**Clients**: Check service-info first, handle atomic transaction failures, respect bulk limits, verify permissions\n\n**Servers**: Advertise capabilities clearly, implement atomic transactions for bulk operations, validate permissions, consider optional validation for data integrity\n\n## Backward Compatibility\n\nAccess method update functionality is designed to be backward compatible:\n\n- **No Impact on Existing Endpoints**: All existing DRS endpoints remain unchanged\n- **Optional Implementation**: Servers can ignore this 
functionality entirely \n- **Graceful Degradation**: Clients receive 404 responses when not supported\n- **Safe Defaults**: New service-info fields have safe default values" - - name: Service Info - - name: AccessMethodModel - x-displayName: AccessMethod - description: | - - - name: AccessURLModel - x-displayName: AccessURL - description: | - - - name: ChecksumModel - x-displayName: Checksum - description: | - - - name: ContentsObjectModel - x-displayName: ContentsObject - description: | - - - name: DrsObjectModel - x-displayName: DrsObject - description: | - - - name: DrsObjectCandidateModel - x-displayName: DrsObjectCandidate - description: | - - - name: ErrorModel - x-displayName: Error - description: | - - - name: UploadRequestModel - x-displayName: UploadRequest - description: | - - - name: UploadResponseModel - x-displayName: UploadResponse - description: | - - - name: UploadRequestObjectModel - x-displayName: UploadRequestObject - description: | - - - name: UploadResponseObjectModel - x-displayName: UploadResponseObject - description: | - - - name: UploadMethodModel - x-displayName: UploadMethod - description: | - - - name: DeleteRequestModel - x-displayName: DeleteRequest - description: | - - - name: BulkDeleteRequestModel - x-displayName: BulkDeleteRequest - description: | - - - name: DeleteResultModel - x-displayName: DeleteResult - description: | - - - name: BulkDeleteResponseModel - x-displayName: BulkDeleteResponse - description: | - - - name: Motivation - description: | - - - - - -
- Data sharing requires portable data, consistent with the FAIR data principles (findable, accessible, interoperable, reusable). Today’s researchers and clinicians are surrounded by potentially useful data, but often need bespoke tools and processes to work with each dataset. Today’s data publishers don’t have a reliable way to make their data useful to all (and only) the people they choose. And today’s data controllers are tasked with implementing standard controls of non-standard mechanisms for data access. - - - - Figure 1: there’s an ocean of data, with many different tools to drink from it, but no guarantee that any tool will work with any subset of the data - -
- - - - - - -
- We need a standard way for data producers to make their data available to data consumers, that supports the control needs of the former and the access needs of the latter. And we need it to be interoperable, so anyone who builds access tools and systems can be confident they’ll work with all the data out there, and anyone who publishes data can be confident it will work with all the tools out there. - - - - Figure 2: by defining a standard Data Repository API, and adapting tools to use it, every data publisher can now make their data useful to every data consumer - -
- - - - - - -
- We envision a world where: -
    -
  • - there are many many data consumers, working in research and in care, who can use the tools of their choice to access any and all data that they have permission to see -
  • -
  • - there are many data access tools and platforms, supporting discovery, visualization, analysis, and collaboration -
  • -
  • - there are many data repositories, each with their own policies and characteristics, which can be accessed by a variety of tools -
  • -
  • - there are many data publishing tools and platforms, supporting a variety of data lifecycles and formats -
  • -
  • - there are many many data producers, generating data of all types, who can use the tools of their choice to make their data as widely available as is appropriate -
  • -
-
- - - Figure 3: a standard Data Repository API enables an ecosystem of data producers and consumers - -
- - This spec defines a standard **Data Repository Service (DRS) API** (“the yellow box”), to enable that ecosystem of data producers and consumers. Our goal is that the only thing data consumers need to know about a data repo is *\"here’s the DRS endpoint to access it\"*, and the only thing data publishers need to know to tap into the world of consumption tools is *\"here’s how to tell it where my DRS endpoint lives\"*. - - ## Federation - - The world’s biomedical data is controlled by groups with very different policies and restrictions on where their data lives and how it can be accessed. A primary purpose of DRS is to support unified access to disparate and distributed data. (As opposed to the alternative centralized model of "let’s just bring all the data into one single data repository”, which would be technically easier but is no more realistic than “let’s just bring all the websites into one single web host”.) - - In a DRS-enabled world, tool builders don’t have to worry about where the data their tools operate on lives — they can count on DRS to give them access. And tool users only need to know which DRS server is managing the data they need, and whether they have permission to access it; they don’t have to worry about how to physically get access to, or (worse) make a copy of the data. For example, if I have appropriate permissions, I can run a pooled analysis where I run a single tool across data managed by different DRS servers, potentially in different locations. - - name: Working With Compound Objects - description: | - ## Compound Objects - - The DRS API supports access to data objects, with each `DrsObject` representing a single opaque blob of bytes. Much content (e.g. VCF files) is well represented as a single atomic `DrsObject`. Some content, however (e.g. DICOM images) is best represented as a compound object consisting of a structured collection of atomic `DrsObject`s. 
In both cases, DRS isn't aware of the semantics of the objects it serves -- understanding those semantics is the responsibility of the applications that call DRS. - - Common examples of compound objects in biomedicine include: - * BAM+BAI genomic reads, with a small index (the BAI object) to large data (the BAM object), each object using a well-defined file format. - * DICOM images, with a contents object pointing to one or more raw image objects, each containing pixels from different aspects of a single logical biomedical image (e.g. different z-coordinates) - * studies, with a single table of contents listing multiple objects of various types that were generated together and are meant to be processed together - - ## Best Practice: Manifests - - As with atomic objects, DRS applications and servers are expected to agree on the semantics of compound objects using non-DRS mechanisms. The recommended best practice for representing a particular compound object type is: - 1. Define a manifest file syntax, which contains the DRS IDs of the constituent atomic objects, plus type-specific information about the relationship between those constituents. - * Manifest file syntax isn't prescribed by the spec, but we expect they will often be JSON files. - * For example, for a BAM+BAI pair the manifest file could contain two key-value pairs mapping the type of each constituent file to its DRS ID. - 3. Make manifest objects and their constituent objects available using standard DRS mechanisms -- each object is referenced via its own DRS ID, just like any other atomic object. - * For example, for a BAM+BAI pair, there would be three DRS IDs -- one for the manifest, one for the BAM, and one for the BAI. - 5. Document the expected client logic for processing compound objects of interest. 
This logic typically consists of using standard DRS mechanisms to fetch the manifest, parsing its syntax, extracting the DRS IDs of constituent objects, and using standard DRS mechanisms to fetch the constituents as needed. - * In some cases the application will always want to fetch all of the constituents; in other cases it may want to initially fetch a subset, and only fetch the others on demand. For example, a DICOM image viewer may only want to fetch the layers that are being rendered. - - name: Background Notes on DRS URIs - description: | - ## Design Motivation - - DRS URIs are aligned with the [FAIR data principles](https://www.nature.com/articles/sdata201618) and the [Joint Declaration of Data Citation Principles](https://www.nature.com/articles/sdata20182) — both hostname-based and compact identifier-based URIs provide globally unique, machine-resolvable, persistent identifiers for data. - - * We require all URIs to begin with `drs://` as a signal to humans and systems consuming these URIs that the response they will ultimately receive, after transforming the URI to a fetchable URL, will be a DRS JSON packet. This signal differentiates DRS URIs from the wide variety of other entities (HTML documents, PDFs, ontology notes, etc.) that can be represented by compact identifiers. - * We support hostname-based URIs because of their simplicity and efficiency for server and client implementers. - * We support compact identifier-based URIs, and the meta-resolver services of identifiers.org and n2t.net (Name-to-Thing), because of the wide adoption of compact identifiers in the research community, as detailed by [Wimalaratne et al (2018)](https://www.nature.com/articles/sdata201829) in "Uniform resolution of compact identifiers for biomedical data." 
- - name: Compact Identifier-Based URIs - description: | - **Note: Identifiers.org/n2t.net API Changes** - - The examples below show the current API interactions with [n2t.net](https://n2t.net/e/compact_ids.html) and [identifiers.org](https://docs.identifiers.org/) which may change over time. Please refer to the documentation from each site for the most up-to-date information. We will make best efforts to keep the DRS specification current but DRS clients MUST maintain their ability to use either the identifiers.org or n2t.net APIs to resolve compact identifier-based DRS URIs. - - ## Registering a DRS Server on a Meta-Resolver - - See the documentation on the [n2t.net](https://n2t.net/e/compact_ids.html) and [identifiers.org](https://docs.identifiers.org/) meta-resolvers for adding your own compact identifier type and registering your DRS server as a resolver. You can register new prefixes (or mirrors by adding resource provider codes) for free using a simple online form. For more information see [More Background on Compact Identifiers](./more-background-on-compact-identifiers.html). - - ## Calling Meta-Resolver APIs for Compact Identifier-Based DRS URIs - - Clients resolving Compact Identifier-based URIs need to convert a prefix (e.g. “drs.42”) into a URL pattern. They can do so by calling either the identifiers.org or the n2t.net API, since the two meta-resolvers keep their mapping databases in sync. - - ### Calling the identifiers.org API as a Client - - It takes two API calls to get the URL pattern. - - 1. The client makes a GET request to identifiers.org to find information about the prefix: - - ``` - GET https://registry.api.identifiers.org/restApi/namespaces/search/findByPrefix?prefix=drs.42 - ``` - - This request returns a JSON structure including various URLs containing an embedded namespace id, such as: - - ``` - "namespace" : { - "href":"https://registry.api.identifiers.org/restApi/namespaces/1234" - } - ``` - - 2. 
The client extracts the namespace id (in this example 1234), and uses it to make a second GET request to identifiers.org to find information about the namespace: - - ``` - GET https://registry.api.identifiers.org/restApi/resources/search/findAllByNamespaceId?id=1234 - ``` - - This request returns a JSON structure including an urlPattern field, whose value is a URL pattern containing a ${id} parameter, such as: - - ``` - "urlPattern" : "https://drs.myexample.org/ga4gh/drs/v1/objects/{$id}" - ``` - - ### Calling the n2t.net API as a Client - - It takes one API call to get the URL pattern. - - The client makes a GET request to n2t.net to find information about the namespace. (Note the trailing colon.) - - ``` - GET https://n2t.net/drs.42: - ``` - - This request returns a text structure including a redirect field, whose value is a URL pattern containing an `$id` parameter, such as: - - ``` - redirect: https://drs.myexample.org/ga4gh/drs/v1/objects/$id - ``` - - ## Caching with Compact Identifiers - - Identifiers.org/n2t.net compact identifier resolver records do not change frequently. This reality is useful for caching resolver records and their URL patterns for performance reasons. Builders of systems that use compact identifier-based DRS URIs should cache prefix resolver records from identifiers.org/n2t.net and occasionally refresh the records (such as every 24 hours). This approach will reduce the burden on these community services since we anticipate many DRS URIs will be regularly resolved in workflow systems. Alternatively, system builders may decide to directly mirror the registries themselves, instructions are provided on the identifiers.org/n2t.net websites. - - ## Security with Compact Identifiers - - As mentioned earlier, identifiers.org/n2t.net performs some basic verification of new prefixes and provider code mirror registrations on their sites. 
However, builders of systems that consume and resolve DRS URIs may have certain security compliance requirements and regulations that prohibit relying on an external site for resolving compact identifiers. In this case, systems under these security and compliance constraints may wish to whitelist certain compact identifier resolvers and/or vet records from identifiers.org/n2t.net before enabling in their systems. - - ## Accession Encoding to Valid DRS IDs - - The compact identifier format used by identifiers.org/n2t.net does not percent-encode reserved URI characters but, instead, relies on the first ":" character to separate prefix from accession. Since these accessions can contain any characters, and characters like "/" will interfere with DRS API calls, you *must* percent encode the accessions extracted from DRS compact identifier-based URIs when using as DRS IDs in subsequent DRS GET requests. An easy way for a DRS client to handle this is to get the initial DRS object JSON response from whatever redirects the compact identifier resolves to, then look for the `self_uri` in the JSON, which will give you the correctly percent-encoded DRS ID for subsequent DRS API calls such as the `access` method. - - ## Additional Examples - - For additional examples, see the document [More Background on Compact Identifiers](./more-background-on-compact-identifiers.html). - - name: Hostname-Based URIs - description: | - ## Encoding DRS IDs - - In hostname-based DRS URIs, the ID is always percent-encoded to ensure special characters do not interfere with subsequent DRS endpoint calls. As such, ":" is not allowed in the URI and is a convenient way of differentiating from a compact identifier-based DRS URI. Also, if a given DRS service implementation uses compact identifier accessions as their DRS IDs, they must be percent encoded before using them as DRS IDs in hostname-based DRS URIs and subsequent GET requests to a DRS service endpoint. 
- - name: GA4GH Service Registry - description: | - The [GA4GH Service Registry API specification](https://github.com/ga4gh-discovery/ga4gh-service-registry) allows information about GA4GH-compliant web services, including DRS services, to be aggregated into registries and made available via a standard API. The following considerations should be followed when registering DRS services within a service registry. - - * The DRS service attributes returned by `/service-info` (i.e. `id`, `name`, `description`, etc.) should have the same values as the registry entry for that service. - * The value of the `type` object's `artifact` property should be `drs` (i.e. the same as it appears in `service-info`) - * Each entry in a Service Registry must have a `url`, indicating the base URL to the web service. For DRS services, the registered `url` must include everything up to - the standardized `/ga4gh/drs/v1` path. Clients should be able to assume that: - + Adding `/ga4gh/drs/v1/objects/{object_id}` to the registered `url` will hit the `DrsObject` endpoint - + Adding `/ga4gh/drs/v1/service-info` to the registered `url` will hit the Service Info endpoint - - Example listing of a DRS API registration from a service registry's `/services` endpoint: - - ``` - [ - { - "id": "com.example.drs", - "name": "Example DRS API", - "type": { - "group": "org.ga4gh", - "artifact": "drs", - "version": "1.5.0" - }, - "description": "The Data Repository Service (DRS) API ...", - "organization": { - "id": "com.example", - "name": "Example Company" - }, - "contactUrl": "mailto:support@example.com", - "documentationUrl": "https://docs.example.com/docs/drs", - "createdAt": "2021-08-09T00:00:00Z", - "updatedAt": "2021-08-09T12:30:00Z", - "environment": "production", - "version": "1.13.4", - "url": "https://drs-service.example.com" - } - ] - ``` - - name: Upload Requests and Object Registration - description: "# Upload Requests and Object Registration\n\n> **Optional Functionality**: Upload and object 
registration are optional DRS extensions. Clients should check `/service-info` for `uploadRequestSupported` and `objectRegistrationSupported` before attempting to use these endpoints.\n\nThe DRS upload and object registration endpoints allows clients to negotiate with servers on mutually convenient storage backends and then register uploads as DRS objects through a three-phase workflow:\n\n1. **Request Upload URLs**: POST `/upload-request` with file metadata to receive upload methods and credentials\n2. **Upload Files**: Use returned URLs and credentials to upload files to storage using existing upload mechanisms. DRS is not involved in this step at all, DRS simply enables clients and servers to agree on a mutually convenient storage service.\n3. **Register Objects**: POST `/objects/register` to register \"candidate\" DRS objects with the server\n\nThis approach separates storage service and credential negotiation from file transfer and object registration, supporting a vendor-neutral means of sharing data in a DRS network.\n\nThe `/objects/register` endpoint can be used independently to register existing data without using the `/upload-request` endpoint, and servers can choose to only support object registration and not file uploads by setting the `uploadRequestSupported` and `objectRegistrationSupported` flags appropriately in `/service-info`.\n\nUpload requests and object registration endpoints only support bulk requests to simplify implementation and reflect real-world usage patterns. Bioinformatics workflows often involve uploading multiple related files together (e.g., BAM and VCF files with their indices, or analysis result sets), making bulk operations a natural fit. Single files/objects are handled as lists with one element. Implementations of the `/objects/register` endpoint SHOULD implement transaction semantics so that either all of the objects are successfully registered or none of them are, and clients should be robust to this behaviour. 
Transaction semantics for the `/upload-request` are encouraged but not required due to the variety and complexity of data transfer technologies.\n\nThe `/upload-request` endpoint does not require any state to be maintained on the DRS server (intermediate DRS object IDs etc.); it is simply a means for a server to provide details of where a client can upload data, and it should ensure that it trusts the client before providing such details. This means that if uploads fail and there is no later call to `/objects/register` there is no DRS state to manage, simplifying server implementation.\n\nServers SHOULD ensure that any data from unsuccessful uploads (e.g. incomplete multi-part uploads) are cleaned up, for example by using lifecycle configuration in the backend storage. There is _no_ means of requiring that a client ultimately registers a DRS object pointing at data uploaded, and so servers should consider implementing some form of storage \"garbage\" collection (or simply set a short lifecycle policy on the upload location and move uploaded data that is later registered as DRS objects to other locations, updating the `access_method`s accordingly). Servers should also implement some means of constraining upload size (quotas etc.) to protect against accidental or malicious unconstrained uploads.\n\nThe `/upload-request` endpoint can return one or more `upload_method`s of different types for each requested file, and backend specific details such as bucket names, object keys and credentials are supplied in a generic `upload_details` field. A straightforward implementation might return a single time-limited pre-signed POST URL as the `post_url` for an `upload_method` of type `https` which incorporates authentication into the URL, but because DRS is often used for large files such as BAMs and CRAMs we also want to support more sophisticated upload approaches implemented by storage backends such as multi-part uploads, automatic retries etc. 
The `upload_details` field can also be used to include bucket names, keys and temporary credentials that can be used in native clients and SDKs. This offers a natural way to adapt this protocol to new storage technologies. Refer to the examples below for some suggested implementations.\n\n## Service Discovery\n\nCheck `/service-info` for upload capabilities:\n\n```json\n{\n \"drs\": {\n \"uploadRequestSupported\": true,\n \"objectRegistrationSupported\": true,\n \"supportedUploadMethods\": [\"s3\", \"https\", \"gs\"],\n \"maxUploadSize\": 5368709120,\n \"maxUploadRequestLength\": 50,\n \"maxRegisterRequestLength\": 50,\n \"validateUploadChecksums\": true,\n \"validateUploadFileSizes\": false,\n \"relatedFileStorageSupported\": true\n }\n}\n```\n\nUpload related fields:\n\n- `uploadRequestSupported`: Upload request operations available via `/upload-request`\n- `objectRegistrationSupported`: Object registration operations available via `/objects/register`\n- `supportedUploadMethods`: Available storage backends \n- `maxUploadSize`: File size limit (bytes)\n- `maxUploadRequestLength`: Files per request limit for upload requests\n- `maxRegisterRequestLength`: Candidate objects per request limit for registration\n- `validateUploadChecksums`/`validateUploadFileSizes`: Server validation behavior\n- `relatedFileStorageSupported`: Files from same upload request stored under common prefixes\n\n## Upload Methods\n\nUpon receipt of a request for an upload method for a specific file, the server will respond with one or more `upload_method` with associated `type` and corresponding `upload_details` with upload locations, temporary credentials etc. 
These details are specific to backend implementations.\n\nExample storage backends:\n\n- **https**: Presigned POST URLs for HTTP uploads\n- **s3**: Direct S3 upload with temporary AWS credentials\n- **gs**: Google Cloud Storage with OAuth2 tokens\n- **ftp/sftp**: Traditional file transfer protocols using negotiated credentials\n\nServers may return a subset of advertised methods based on file characteristics, for example they may choose to store large objects such as WGS BAM files in different backends to small csv files.\n\n## Related File Storage (Optional)\n\nServers MAY support storing files from the same upload request under common prefixes, enabling bioinformatics workflows that expect co-located files:\n\n- **CRAM + CRAI**: Alignment files with index files\n- **VCF + TBI**: Variant files with tabix indexes \n- **FASTQ.ora + ORADATA.tar.gz**: Compressed files with associated reference data\n\nCheck `relatedFileStorageSupported` in service-info or examine upload URLs for common prefixes.\n\n## Object Registration\n\nAfter upload, clients can register files in bulk as DRS objects using POST `/objects/register`. Registration is all-or-nothing. If any candidate object fails to be registered in the server, the entire request fails and no objects are registered.\n\n**Candidate DRS object requirements**:\n\n- Complete metadata (name, size, checksums, MIME type)\n- Access methods pointing to file locations \n- Valid authorization (if required)\n- Do not include server-generated fields (id, self_uri, timestamps)\n\nUpon receipt of candidate objects for registration the server will create unique object IDs and return complete DRS objects. Note that the server is not obliged to retain the client-supplied `access_method`s and is free to move data to different locations/backends once the object is registered. 
This means that a server can choose to receive uploads in a dedicated \"dropzone\", with hard quotas and additional security, and then move them to more permanent storage once the DRS object is registered. Clients SHOULD NOT cache the response from `/objects/register` as the `access_method`s might change after registration.\n\nThe `/objects/register` endpoint can also be used independently to register existing data that is already stored in accessible locations, without using the `/upload-request` workflow. This is useful for registering pre-existing datasets or files uploaded through other means. Servers may choose only to support registration and not uploads, and should advertise this in `/service-info`\n\n## Authentication & Validation\n\n**Authentication**: Supports GA4GH Passports, Basic auth, and Bearer tokens.\n\n**Checksums**: Required for all files (SHA-256, MD5, or IANA-registered algorithms). Servers MAY validate checksums and file sizes as advertised in service-info flags.\n\n## Error Handling\n\n**Client Errors (4xx)**:\n\n- Invalid metadata (400)\n- Missing auth (401)\n- Insufficient permissions (403)\n\n**Server Errors (5xx)**:\n\n- Storage unavailable (500)\n- Capacity limits (503)\n\n## Best Practices\n\n**Clients**: Check service-info first, calculate checksums, be robust to failed object registration\n**Servers**: Use short-lived tightly scoped credentials, support multiple upload methods, implement rate limiting, ensure unique storage backend names to avoid inadvertent overwrites (e.g. using UUIDs), ensure that quotas are enforced and incomplete or unregistered uploads are deleted\n**Security**: Time-limited credentials, single-use URLs, logging for audit\n\n## Security Considerations\n\n**Credential Scoping**: Implementers SHOULD scope upload credentials to the minimum necessary permissions and duration. Credentials should:\n\n- Allow write access only to the specific upload URL/path provided\n- Have the shortest practical expiration time (e.g. 
15 minutes to 1 hour)\n- Be restricted to the specific file size and content type when possible\n- Not grant broader storage access beyond the intended upload location\n\nThis principle of least privilege reduces security exposure if credentials are compromised or misused.\n\n## Example Workflows\n\n### Simple HTTPS Upload\n\nUpload Request:\n\n```http\nPOST /upload-request\nContent-Type: application/json\n\n{\n \"requests\": [\n {\n \"name\": \"variants.vcf\",\n \"size\": 52428800,\n \"mime_type\": \"text/plain\",\n \"checksums\": [\n {\n \"checksum\": \"5d41402abc4b2a76b9719d911017c592\",\n \"type\": \"md5\"\n }\n ]\n }\n ]\n}\n```\n\nResponse:\n\n```json\n{\n \"responses\": [\n {\n \"name\": \"variants.vcf\",\n \"size\": 52428800,\n \"mime_type\": \"text/plain\",\n \"checksums\": [\n {\n \"checksum\": \"5d41402abc4b2a76b9719d911017c592\",\n \"type\": \"md5\"\n }\n ],\n \"upload_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\n \"url\": \"https://uploads.example.org/variants.vcf\"\n },\n \"upload_details\": {\n \"post_url\": \"https://uploads.example.org/presigned-upload?signature=FAKE_SIG\"\n }\n }\n ]\n }\n ]\n}\n```\n\nUpload via HTTPS:\n\n```bash\n# Simple PUT upload to presigned URL\ncurl -X PUT \"https://uploads.example.org/presigned-upload?signature=FAKE_SIG\" \\\n --data-binary @variants.vcf\n```\n\nRegister DRS Object:\n\n```http\nPOST /objects/register\nContent-Type: application/json\n\n{\n \"candidates\": [\n {\n \"name\": \"variants.vcf\",\n \"size\": 52428800,\n \"mime_type\": \"text/plain\",\n \"checksums\": [\n {\n \"checksum\": \"5d41402abc4b2a76b9719d911017c592\",\n \"type\": \"md5\"\n }\n ],\n \"access_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\n \"url\": \"https://uploads.example.org/variants.vcf\"\n }\n }\n ],\n \"description\": \"Variant calls in VCF format\"\n }\n ]\n}\n```\n\nResponse:\n\n```json\n{\n \"objects\": [\n {\n \"id\": \"drs_obj_f6e5d4c3b2a1\",\n \"self_uri\": 
\"drs://drs.example.org/drs_obj_f6e5d4c3b2a1\",\n \"name\": \"variants.vcf\",\n \"size\": 52428800,\n \"mime_type\": \"text/plain\",\n \"created_time\": \"2024-01-15T10:45:00Z\",\n \"updated_time\": \"2024-01-15T10:45:00Z\",\n \"version\": \"1.0\",\n \"checksums\": [\n {\n \"checksum\": \"5d41402abc4b2a76b9719d911017c592\",\n \"type\": \"md5\"\n }\n ],\n \"access_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\n \"url\": \"https://uploads.example.org/variants.vcf\"\n }\n }\n ],\n \"description\": \"Variant calls in VCF format\"\n }\n ]\n}\n```\n\n### S3 Bulk Upload (BAM + Index)\n\nRequest Upload Methods for Related Files\n\n```http\nPOST /upload-request\nContent-Type: application/json\n\n{\n \"requests\": [\n {\n \"name\": \"sample.bam\",\n \"size\": 1073741824,\n \"mime_type\": \"application/octet-stream\",\n \"checksums\": [\n {\n \"checksum\": \"d41d8cd98f00b204e9800998ecf8427e\",\n \"type\": \"md5\"\n }\n ]\n },\n {\n \"name\": \"sample.bam.bai\",\n \"size\": 2097152,\n \"mime_type\": \"application/octet-stream\",\n \"checksums\": [\n {\n \"checksum\": \"098f6bcd4621d373cade4e832627b4f6\",\n \"type\": \"md5\"\n }\n ]\n }\n ]\n}\n```\n\nResponse:\n\n```json\n{\n \"responses\": [\n {\n \"name\": \"sample.bam\",\n \"size\": 1073741824,\n \"mime_type\": \"application/octet-stream\",\n \"checksums\": [\n {\n \"checksum\": \"d41d8cd98f00b204e9800998ecf8427e\",\n \"type\": \"md5\"\n }\n ],\n \"upload_methods\": [\n {\n \"type\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://genomics-uploads/x7k9m/sample.bam\"\n },\n \"upload_details\": {\n \"bucket\": \"genomics-uploads\",\n \"key\": \"x7k9m/sample.bam\",\n \"access_key_id\": \"FAKE_ACCESS_KEY_123\",\n \"secret_access_key\": \"FAKE_SECRET_KEY_456\",\n \"session_token\": \"FAKE_SESSION_TOKEN_789\",\n \"expires_at\": \"2024-01-15T12:00:00Z\"\n }\n }\n ]\n },\n {\n \"name\": \"sample.bam.bai\",\n \"size\": 2097152,\n \"mime_type\": \"application/octet-stream\",\n \"checksums\": [\n {\n \"checksum\": 
\"098f6bcd4621d373cade4e832627b4f6\",\n \"type\": \"md5\"\n }\n ],\n \"upload_methods\": [\n {\n \"type\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://genomics-uploads/x7k9m/sample.bam.bai\"\n },\n \"upload_details\": {\n \"bucket\": \"genomics-uploads\",\n \"key\": \"x7k9m/sample.bam.bai\",\n \"access_key_id\": \"FAKE_ACCESS_KEY_123\",\n \"secret_access_key\": \"FAKE_SECRET_KEY_456\",\n \"session_token\": \"FAKE_SESSION_TOKEN_789\",\n \"expires_at\": \"2024-01-15T12:00:00Z\"\n }\n }\n ]\n }\n ]\n}\n```\n\nUpload Both Files to S3:\n\n```bash\n# Upload BAM and index files using the supplied credentials (note common prefix)\naws s3 cp sample.bam s3://genomics-uploads/x7k9m/sample.bam\naws s3 cp sample.bam.bai s3://genomics-uploads/x7k9m/sample.bam.bai\n```\n\nRegister Both DRS Objects:\n\n```http\nPOST /objects/register\nContent-Type: application/json\n\n{\n \"candidates\": [\n {\n \"name\": \"sample.bam\",\n \"size\": 1073741824,\n \"mime_type\": \"application/octet-stream\",\n \"checksums\": [\n {\n \"checksum\": \"d41d8cd98f00b204e9800998ecf8427e\",\n \"type\": \"md5\"\n }\n ],\n \"access_methods\": [\n {\n \"type\": \"s3\",\n \"access_id\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://genomics-uploads/x7k9m/sample.bam\"\n }\n }\n ],\n \"description\": \"BAM alignment file\"\n },\n {\n \"name\": \"sample.bam.bai\",\n \"size\": 2097152,\n \"mime_type\": \"application/octet-stream\",\n \"checksums\": [\n {\n \"checksum\": \"098f6bcd4621d373cade4e832627b4f6\",\n \"type\": \"md5\"\n }\n ],\n \"access_methods\": [\n {\n \"type\": \"s3\",\n \"access_id\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://genomics-uploads/x7k9m/sample.bam.bai\"\n }\n }\n ],\n \"description\": \"BAM index file\"\n }\n ]\n}\n```\n\nResponse:\n\n```json\n{\n \"objects\": [\n {\n \"id\": \"drs_obj_a1b2c3d4e5f6\",\n \"self_uri\": \"drs://drs.example.org/drs_obj_a1b2c3d4e5f6\",\n \"name\": \"sample.bam\",\n \"size\": 1073741824,\n \"mime_type\": \"application/octet-stream\",\n \"created_time\": 
\"2024-01-15T10:30:00Z\",\n \"updated_time\": \"2024-01-15T10:30:00Z\",\n \"version\": \"1.0\",\n \"checksums\": [\n {\n \"checksum\": \"d41d8cd98f00b204e9800998ecf8427e\",\n \"type\": \"md5\"\n }\n ],\n \"access_methods\": [\n {\n \"type\": \"s3\",\n \"access_id\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://genomics-uploads/x7k9m/sample.bam\"\n }\n }\n ],\n \"description\": \"BAM alignment file\"\n },\n {\n \"id\": \"drs_obj_b2c3d4e5f6a1\",\n \"self_uri\": \"drs://drs.example.org/drs_obj_b2c3d4e5f6a1\",\n \"name\": \"sample.bam.bai\",\n \"size\": 2097152,\n \"mime_type\": \"application/octet-stream\",\n \"created_time\": \"2024-01-15T10:30:00Z\",\n \"updated_time\": \"2024-01-15T10:30:00Z\",\n \"version\": \"1.0\",\n \"checksums\": [\n {\n \"checksum\": \"098f6bcd4621d373cade4e832627b4f6\",\n \"type\": \"md5\"\n }\n ],\n \"access_methods\": [\n {\n \"type\": \"s3\",\n \"access_id\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://genomics-uploads/x7k9m/sample.bam.bai\"\n }\n }\n ],\n \"description\": \"BAM index file\"\n }\n ]\n}\n```\n" - - name: Object Deletion - description: "# Object Deletion\n\n> **Optional Functionality**: Delete support is an **optional** extension to the DRS API. Not all DRS servers are required to implement delete functionality. Clients should check for the availability of delete endpoints before attempting to use them.\n\nDRS delete functionality allows suitably authenticated clients to request that DRS objects are removed from the server and, optionally, to request that the server attempt to delete the underlying data.\n\nServers should ensure that they trust clients from whom they receive delete requests, and may choose to implement \"soft\" deletes to minimise the risk of accidental or malicious requests. The DRS specification does not currently provide explicit support for soft deletes. 
Because delete support is optional, servers operating in untrusted environments may choose not to support delete operations at all.\n\nIn combination with the `/objects/register` endpoint, metadata only delete requests offer a means for clients to update DRS metadata without affecting the underlying data, and without introducing additional update operations which would complicate server implementation.\n\nClients can express a preference that the underlying data referred to by the deleted DRS object(s) is deleted with the `delete_storage_data` parameter. Servers are free to interpret this as they choose, and can advertise whether they support it at all with the `deleteStorageDataSupported` flag. Servers that choose to attempt to honour the request need not perform this operation synchronously and may, for example, register the file for later deletion. Implementations may also choose to ensure that no other DRS object registered in the server refers to the underlying data before deleting. Servers may not have the necessary permissions to delete the data from the backend even if they would like to do so, or may encounter errors when they attempt deletion. In the case that a DRS object refers to data stored in multiple backends (e.g. 
has multiple `access_method`s) the server may attempt to delete the data from all or only some of the backends.\n\nFor these reasons clients MUST NOT depend on the server deleting the underlying storage data even if the server advertises that `deleteStorageDataSupported` and the client sets the `delete_storage_data` flag.\n\nIn situations where the DRS server controls the storage backend, DRS delete support offers a convenient vendor-neutral way for clients to update and delete DRS objects and corresponding data.\n\nFor bulk deletes using the `/objects/delete` endpoint the server SHOULD implement transaction semantics: if any object fails validation or deletion, the entire request should fail and no objects are deleted and no attempt is made to delete from underlying storage for any object.\n\n## Design principles\n\n- **Optional**: Delete support is completely optional\n- **Safety**: Preserves underlying data in storage unless explicitly requested\n- **Backward compatible**: No impact on existing DRS functionality\n- **Flexible authentication**: Supports GA4GH Passports, Bearer tokens, API keys\n- **Use POST rather than DELETE**: GA4GH Passports require request bodies, which DELETE methods don't reliably support across all HTTP infrastructure. 
POST ensures broad compatibility.\n\n## Service Discovery\n\nCheck `/service-info` for delete capabilities:\n\n```json\n{\n \"drs\": {\n \"uploadRequestSupported\": true,\n \"objectRegistrationSupported\": true,\n \"supportedUploadMethods\": [\"s3\", \"https\"],\n \"relatedFileStorageSupported\": true,\n \"deleteSupported\": true,\n \"maxBulkDeleteLength\": 100,\n \"deleteStorageDataSupported\": true\n }\n}\n```\n\n- **`deleteSupported`**: Whether server supports deletion\n- **`maxBulkDeleteLength`**: Maximum objects per bulk delete request \n- **`deleteStorageDataSupported`**: Whether server can attempt to delete underlying storage files\n\n### Single Object Delete: `POST /objects/{object_id}/delete`\n\n```bash\ncurl -X POST \"https://drs.example.org/objects/drs_object_123456/delete\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\"passports\": [\"...\"], \"delete_storage_data\": false}'\n# Response: 204 No Content (indicates metadata deletion success only)\n```\n\n**Note**: HTTP responses indicate metadata deletion status only. Storage deletion (`delete_storage_data: true`) is a best effort attempt with no guarantee of success.\n\n### Bulk Object Delete: `POST /objects/delete`\n\n```bash\ncurl -X POST \"https://drs.example.org/objects/delete\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"bulk_object_ids\": [\"obj_1\", \"obj_2\", \"obj_3\"],\n \"passports\": [\"...\"],\n \"delete_storage_data\": false\n }'\n# Response: 204 No Content (all metadata deleted) or 4xx error (no objects deleted)\n```\n\n## Authentication\n\n**GA4GH Passports** (in request body):\n\n```json\n{\"passports\": [\"eyJhbGci...\"], \"delete_storage_data\": false}\n```\n\n**Bearer Tokens** (in headers):\n\n```bash\ncurl -H \"Authorization: Bearer token\" -d '{\"delete_storage_data\": false}' ...\n```\n\n## Underlying Storage Data\n\n**Important**: Storage data deletion is never guaranteed. 
Even when `delete_storage_data: true` is requested and the server supports it, the actual deletion may fail due to permissions, network issues, or storage service errors. Clients should not depend on storage deletion success.\n\nClients can request that the server attempts to delete the underlying data referred to by the DRS object using the `delete_storage_data` parameter.\n\n**`delete_storage_data: false`** (default): Removes DRS object metadata only, preserves underlying storage files\n\n**`delete_storage_data: true`**: Removes metadata AND requests server attempt to delete underlying storage files (requires `deleteStorageDataSupported: true`, **success not guaranteed**)\n\n## Update Pattern\n\nRather than introducing additional operations and endpoints for updating DRS objects, servers can allow clients to use the metadata-only deletion and object registration endpoints to create a new DRS object with updated metadata while leaving the underlying data in place.\n\n**Metadata update steps:**\n\n1. Delete metadata only: `POST /objects/{id}/delete` with `delete_storage_data: false`\n2.
Re-register object: `POST /objects/register` with updated metadata\n\n```bash\n# Delete metadata (preserves storage)\ncurl -X POST \".../objects/obj_123/delete\" -d '{\"delete_storage_data\": false}'\n# Re-register with updates\ncurl -X POST \".../objects/register\" -d '{\"candidates\": [{\"name\": \"updated.txt\", ...}]}'\n```\n\n## Error Responses\n\n- **400**: Unsupported storage deletion or invalid request parameters\n- **403**: Insufficient permissions for any object in the request\n- **404**: Any object not found or delete endpoints not supported by server\n- **413**: Bulk request exceeds `maxBulkDeleteLength` limit\n\n## Examples\n\n**Metadata Update:**\n\n```bash\ncurl \".../service-info\" # Check capabilities\ncurl -X POST \".../objects/obj_123/delete\" -d '{\"delete_storage_data\": false}'\ncurl -X POST \".../objects/register\" -d '{\"candidates\": [{\"name\": \"updated.vcf\", ...}]}'\n```\n\n**Complete Removal:**\n\n```bash\ncurl -X POST \".../objects/obj_456/delete\" -H \"Authorization: Bearer token\" \\\n -d '{\"delete_storage_data\": true}'\n```\n\n**Bulk Delete (Atomic):**\n\n```bash\ncurl -X POST \".../objects/delete\" -d '{\n \"bulk_object_ids\": [\"obj_1\", \"obj_2\"],\n \"passports\": [\"...\"],\n \"delete_storage_data\": false\n}'\n# All objects deleted or none deleted (transactional)\n```\n\n## Best Practices\n\n**Clients:** Check service-info, default to safe deletion, handle transactional failures, respect limits, confirm destructive operations, do not rely on underlying storage deletion\n\n**Servers:** Advertise capabilities, validate permissions, implement atomic transactions, implement limits, use versioning to avoid inadvertent deletion.\n\n## Security Considerations\n\n- **Authentication**: Validate GA4GH Passports and Bearer tokens\n- **HTTPS Required**: Protect credentials in transit\n- **Rate Limiting**: Prevent abuse of delete endpoints\n- **Input Validation**: Sanitize all request parameters\n\n## Backward Compatibility\n\nDelete 
functionality is designed to be backward compatible:\n\n- **No Impact on Existing Endpoints**: All existing DRS endpoints remain unchanged\n- **Optional Implementation**: Servers can ignore delete functionality entirely\n- **Graceful Degradation**: Clients receive 404 responses when delete is not supported\n- **Safe Defaults**: New fields in service-info have safe default values, and requests default to leaving underlying data in place.\n" - - name: Access Method Update - description: "# Access Method Updates\n\n> **Optional Functionality**: Access method updates are optional extensions to the DRS API. Not all DRS servers are required to implement this functionality. Clients should check `/service-info` for `accessMethodUpdateSupported` before attempting to use these endpoints.\n\nAccess method update endpoints allows authorized clients to modify how existing DRS objects can be accessed without changing the core object metadata (size, checksums, name). This is useful for storage migrations, adding mirrors, or updating URLs.\n\nThese endpoints will overwrite existing access methods for an object, if clients want to add access methods in addition to existing ones for objects they should first retrieve the current access methods and include them in the update request along with the new methods.\n\n## Use Cases\n\n- **Storage Migration**: Move data between storage providers while keeping same DRS object\n- **Mirror Addition**: Add CDN or regional access points for better performance \n- **URL Refresh**: Update changed domain names\n- **Access Optimization**: Add or remove access methods based on performance or cost\n\n## Design Principles\n\n- **Optional**: Access method update support is completely optional\n- **Immutable Core**: Only access methods can be updated - size, checksums, name remain unchanged\n- **Atomic Bulk Operations**: All updates succeed or all fail (transactional)\n- **Optional Validation**: Servers MAY validate new access methods point to same 
data\n- **Flexible Authentication**: Supports GA4GH Passports, Bearer tokens, API keys\n\n## Service Discovery\n\nCheck `/service-info` for access method update capabilities:\n\n```json\n{\n \"drs\": {\n \"accessMethodUpdateSupported\": true,\n \"maxBulkAccessMethodUpdateLength\": 100,\n \"validateAccessMethodUpdates\": false\n }\n}\n```\n\n- **`accessMethodUpdateSupported`**: Whether server supports access method updates\n- **`maxBulkAccessMethodUpdateLength`**: Maximum objects per bulk update request\n- **`validateAccessMethodUpdates`**: Whether server validates new access methods\n\n## Single Object Update\n\nUpdate access methods for a single DRS object:\n\n```bash\ncurl -X POST \"https://drs.example.org/objects/obj_123/access-methods\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"access_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\n \"url\": \"https://new-cdn.example.org/data/file.bam\"\n }\n },\n {\n \"type\": \"s3\",\n \"access_id\": \"s3\",\n \"access_url\": {\n \"url\": \"s3://new-bucket/migrated/file.bam\"\n }\n }\n ]\n }'\n```\n\n## Bulk Object Update\n\nUpdate access methods for multiple objects atomically:\n\n```bash\ncurl -X POST \"https://drs.example.org/objects/access-methods\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"updates\": [\n {\n \"object_id\": \"obj_123\",\n \"access_methods\": [\n {\n \"type\": \"https\",\n \"access_url\": {\"url\": \"https://new-location.com/file1.bam\"}\n }\n ]\n },\n {\n \"object_id\": \"obj_456\", \n \"access_methods\": [\n {\n \"type\": \"s3\",\n \"access_url\": {\"url\": \"s3://new-bucket/file2.vcf\"}\n }\n ]\n }\n ]\n }'\n```\n\n## Authentication\n\n**GA4GH Passports** (in request body):\n```json\n{\n \"access_methods\": [...],\n \"passports\": [\"eyJhbGci...\"]\n}\n```\n\n**Bearer Tokens** (in headers):\n```bash\ncurl -H \"Authorization: Bearer token\" -d '{\"access_methods\": [...]}' ...\n```\n\n## Validation\n\nServers MAY validate that new access methods point to the
same data by checking file availability, checksums or file content. Validation behavior is advertised in `validateAccessMethodUpdates` service-info field.\n\n\n## Error Responses\n\n- **400**: Invalid access methods or validation failure\n- **401**: Authentication required\n- **403**: Insufficient permissions for object(s)\n- **404**: Object not found or access method updates not supported\n- **413**: Bulk request exceeds `maxBulkAccessMethodUpdateLength` limit\n\n## Examples\n\n**Storage Migration:**\n```bash\n# Check server capabilities\ncurl \"https://drs.example.org/service-info\"\n\n# Update single object after migration\ncurl -X POST \"https://drs.example.org/objects/obj_123/access-methods\" \\\n -d '{\"access_methods\": [{\"type\": \"s3\", \"access_url\": {\"url\": \"s3://new-bucket/file.bam\"}}]}'\n```\n\n**Add CDN Mirror:**\n```bash\n# Add additional access method without removing existing ones\ncurl -X POST \"https://drs.example.org/objects/obj_456/access-methods\" \\\n -d '{\n \"access_methods\": [\n {\"type\": \"https\", \"access_url\": {\"url\": \"https://origin.example.org/file.vcf\"}},\n {\"type\": \"https\", \"access_url\": {\"url\": \"https://cdn.example.org/file.vcf\"}}\n ]\n }'\n```\n\n**Bulk Migration:**\n```bash\n# Migrate multiple objects atomically\ncurl -X POST \"https://drs.example.org/objects/access-methods\" \\\n -d '{\n \"updates\": [\n {\"object_id\": \"obj_1\", \"access_methods\": [...]},\n {\"object_id\": \"obj_2\", \"access_methods\": [...]}\n ]\n }'\n```\n\n## Best Practices\n\n**Clients**: Check service-info first, handle atomic transaction failures, respect bulk limits, verify permissions\n\n**Servers**: Advertise capabilities clearly, implement atomic transactions for bulk operations, validate permissions, consider optional validation for data integrity\n\n## Backward Compatibility\n\nAccess method update functionality is designed to be backward compatible:\n\n- **No Impact on Existing Endpoints**: All existing DRS endpoints
remain unchanged\n- **Optional Implementation**: Servers can ignore this functionality entirely \n- **Graceful Degradation**: Clients receive 404 responses when not supported\n- **Safe Defaults**: New service-info fields have safe default values" -paths: - /service-info: - get: - summary: Retrieve information about this service - description: "Returns information about the DRS service along with stats pertaining to total object count and cumulative size in bytes.\nAlso indicates whether the server supports optional upload and delete operations and which methods are available.\n\nExtends the\n[v1.0.0 GA4GH Service Info specification](https://github.com/ga4gh-discovery/ga4gh-service-info)\nas the standardized format for GA4GH web services to self-describe.\n\nAccording to the \n[service-info type registry](https://github.com/ga4gh/TASC/blob/master/service-info/ga4gh-service-info.json)\nmaintained by the [Technical Alignment Sub Committee (TASC)](https://github.com/ga4gh/TASC),\na DRS service MUST have:\n * a `type.group` value of `org.ga4gh`\n * a `type.artifact` value of `drs`\n\n**Example 1: Server with upload and delete capabilities**\n```\n{\n \"id\": \"com.example.drs\",\n \"description\": \"Serves data according to DRS specification\",\n ...\n \"type\": {\n \"group\": \"org.ga4gh\",\n \"artifact\": \"drs\",\n \"version\": \"1.5\"\n }\n ...\n \"drs\":{\n \"maxBulkRequestLength\": 200,\n \"objectCount\": 774560,\n \"totalObjectSize\": 4018437188907752,\n \"uploadRequestSupported\": true,\n \"objectRegistrationSupported\": true,\n \"supportedUploadMethods\": [\"s3\", \"https\", \"gs\"],\n \"maxUploadSize\": 5368709120,\n \"maxUploadRequestLength\": 50,\n \"validateUploadChecksums\": true,\n \"validateUploadFileSizes\": false,\n \"relatedFileStorageSupported\": true,\n \"deleteSupported\": true,\n \"maxBulkDeleteLength\": 100,\n \"deleteStorageDataSupported\": true\n }\n}\n```\n\n**Example 2: Read-only server (no upload or delete)**\n```\n{\n \"id\":
\"com.example.readonly-drs\",\n \"description\": \"Read-only DRS service\",\n ...\n \"type\": {\n \"group\": \"org.ga4gh\",\n \"artifact\": \"drs\",\n \"version\": \"1.5\"\n }\n ...\n \"drs\":{\n \"maxBulkRequestLength\": 500,\n \"objectCount\": 1250000,\n \"totalObjectSize\": 8500000000000000\n }\n}\n```\n\n**Example 3: Server with metadata-only delete capability**\n```\n{\n \"id\": \"com.example.metadata-drs\",\n \"description\": \"DRS service with metadata-only delete\",\n ...\n \"type\": {\n \"group\": \"org.ga4gh\",\n \"artifact\": \"drs\",\n \"version\": \"1.5\"\n }\n ...\n \"drs\":{\n \"maxBulkRequestLength\": 200,\n \"objectCount\": 500000,\n \"totalObjectSize\": 2500000000000000,\n \"deleteSupported\": true,\n \"maxBulkDeleteLength\": 50,\n \"deleteStorageDataSupported\": false\n }\n}\n```\n\nSee the [Service Registry Appendix](#tag/GA4GH-Service-Registry) for more information on how to register a DRS service with a service registry." - operationId: GetServiceInfo - responses: - '200': - $ref: '#/components/responses/200ServiceInfo' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Service Info - /objects/{object_id}: - options: - summary: Get Authorization info about a DrsObject. - security: - - {} - description: Returns a list of `Authorizations` that can be used to determine how to authorize requests to `GetObject` or `PostObject`. - operationId: OptionsObject - parameters: - - $ref: '#/components/parameters/ObjectId' - responses: - '200': - $ref: '#/components/responses/200OkAuthorizations' - '204': - $ref: '#/components/responses/AuthorizationsNotSupported' - '400': - $ref: '#/components/responses/400BadRequest' - '404': - $ref: '#/components/responses/404NotFoundDrsObject' - '405': - $ref: '#/components/responses/AuthorizationsNotSupported' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Objects - x-swagger-router-controller: ga4gh.drs.server - get: - summary: Get info about a DrsObject. 
- description: Returns object metadata, and a list of access methods that can be used to fetch object bytes. - operationId: GetObject - parameters: - - $ref: '#/components/parameters/ObjectId' - - $ref: '#/components/parameters/Expand' - responses: - '200': - $ref: '#/components/responses/200OkDrsObject' - '202': - $ref: '#/components/responses/202Accepted' - '400': - $ref: '#/components/responses/400BadRequest' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403Forbidden' - '404': - $ref: '#/components/responses/404NotFoundDrsObject' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Objects - x-swagger-router-controller: ga4gh.drs.server - post: - summary: Get info about a DrsObject through POST'ing a Passport. - description: |- - Returns object metadata and a list of access methods that can be used to fetch object bytes. Method is a POST to accommodate a JWT GA4GH Passport sent in the request body in order to authorize access. - **Note**: To upload new files and register them as DRS objects, use the `/upload-request` endpoint to obtain upload methods and temporary credentials, then use POST `/objects/register` endpoint to register multiple objects at once. Note that upload functionality is optional and not all DRS servers implement the upload endpoints. 
- operationId: PostObject - security: - - PassportAuth: [] - responses: - '200': - $ref: '#/components/responses/200OkDrsObject' - '202': - $ref: '#/components/responses/202Accepted' - '400': - $ref: '#/components/responses/400BadRequest' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403Forbidden' - '404': - $ref: '#/components/responses/404NotFoundAccess' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Objects - x-swagger-router-controller: ga4gh.drs.server - parameters: - - $ref: '#/components/parameters/ObjectId' - requestBody: - $ref: '#/components/requestBodies/PostObjectBody' - /objects/{object_id}/delete: - post: - summary: Delete a DRS object (optional endpoint) - description: |- - **Optional Endpoint**: This endpoint is not required for DRS server implementations. Not all DRS servers support delete functionality. - Deletes a DRS object by ID. This operation removes the DRS object metadata and optionally attempts to delete the underlying storage data based on the delete_storage_data parameter and server capabilities. - By default, only DRS object metadata is deleted while preserving underlying storage data. To attempt storage data deletion, clients must explicitly set delete_storage_data to true and the server must support storage data deletion (advertised via `deleteStorageDataSupported` in service-info). Servers will make a best effort attempt to delete storage data, but success is not guaranteed. - This endpoint uses POST method to accommodate GA4GH Passport authentication in the request body, ensuring compatibility across all HTTP clients and proxies. 
- **Important**: HTTP responses (204 No Content) indicate metadata deletion success only, not storage deletion success (which are not guaranteed to complete synchronously if they occur at all) - operationId: DeleteObject - security: - - {} - - BasicAuth: [] - - BearerAuth: [] - - PassportAuth: [] - parameters: - - $ref: '#/components/parameters/ObjectId' - requestBody: - $ref: '#/components/requestBodies/DeleteBody' - responses: - '204': - $ref: '#/components/responses/204DeleteSuccess' - '400': - $ref: '#/components/responses/400BadRequestDelete' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403ForbiddenDelete' - '404': - $ref: '#/components/responses/404NotFoundDelete' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Objects - x-swagger-router-controller: ga4gh.drs.server - x-codegen-request-body-name: body - examples: - successful_metadata_delete: - summary: Successful metadata-only deletion - description: Complete example of successfully deleting DRS object metadata while preserving storage data - value: - request: - method: POST - url: /objects/drs_object_123456/delete - headers: - Content-Type: application/json - body: - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - delete_storage_data: false - response: - status: 204 - headers: - Content-Length: '0' - successful_full_delete: - summary: Successful full deletion - description: Complete example of successfully deleting both metadata and storage data - value: - request: - method: POST - url: /objects/drs_object_123456/delete - headers: - Content-Type: application/json - body: - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - delete_storage_data: true - response: - status: 204 - headers: - Content-Length: '0' - object_not_found: - summary: Object not found error - 
description: Complete example when trying to delete a non-existent object - value: - request: - method: POST - url: /objects/nonexistent_object/delete - headers: - Content-Type: application/json - body: - delete_storage_data: false - response: - status: 404 - headers: - Content-Type: application/json - body: - msg: DRS object nonexistent_object does not exist - status_code: 404 - insufficient_permissions: - summary: Insufficient permissions error - description: Complete example when client lacks delete permissions - value: - request: - method: POST - url: /objects/drs_object_123456/delete - headers: - Content-Type: application/json - body: - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpbnZhbGlkX3Bhc3Nwb3J0IjoidHJ1ZSJ9.invalid_signature - delete_storage_data: false - response: - status: 403 - headers: - Content-Type: application/json - body: - msg: Client lacks delete permission for object drs_object_123456 - status_code: 403 - /objects/delete: - post: - summary: Delete multiple DRS objects - description: |- - **Optional Endpoint**: This endpoint is not required for DRS server implementations. Not all DRS servers support delete functionality. - Delete multiple DRS objects in a single atomic transaction. If ANY object fails to be deleted, the ENTIRE request fails and NO objects are deleted. This ensures data consistency and prevents partial deletion scenarios. - **RECOMMENDED - Transactional Behavior**: Deletion operations SHOULD be atomic transactions. If ANY object fails validation or deletion, the ENTIRE request SHOULD fail and NO objects SHOULD be deleted. Servers SHOULD implement this as an all-or-nothing operation to ensure data consistency, but MAY implement partial deletion with appropriate error reporting if transactional behavior is not feasible. - **Authentication**: GA4GH Passports can be provided in the request body for authorization. 
- **Storage Data Deletion**: The `delete_storage_data` parameter controls whether the server will attempt to delete underlying storage files along with DRS metadata. This defaults to false for safety. Servers will make a best effort attempt to delete storage data, but success is not guaranteed. - **Server Responsibilities**: - SHOULD treat deletion as an atomic transaction (all succeed or all fail) - SHOULD validate ALL object IDs exist and are accessible before deleting ANY - SHOULD roll back any partial changes if any object fails deletion - SHOULD return 400 if any object ID is invalid or inaccessible when using transactional behavior - **Client Responsibilities**: - Provide valid object IDs for all objects to be deleted - Handle potential failure of entire batch if any single object cannot be deleted - Check service-info for `maxBulkDeleteLength` limits before making requests - operationId: bulkDeleteObjects - tags: - - Objects - requestBody: - $ref: '#/components/requestBodies/BulkDeleteBody' - responses: - '204': - $ref: '#/components/responses/204DeleteSuccess' - '400': - $ref: '#/components/responses/400BadRequestDelete' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403ForbiddenDelete' - '404': - $ref: '#/components/responses/404NotFoundDelete' - '413': - $ref: '#/components/responses/413RequestTooLarge' - '500': - $ref: '#/components/responses/500InternalServerError' - security: - - {} - - BasicAuth: [] - - BearerAuth: [] - - PassportAuth: [] - x-codegen-request-body-name: body - examples: - successful_bulk_delete: - summary: Successful bulk deletion - description: Complete example of successfully deleting multiple objects - value: - request: - method: POST - url: /objects/delete - headers: - Content-Type: application/json - body: - bulk_object_ids: - - drs_object_123456 - - drs_object_789012 - - drs_object_345678 - passports: - - 
eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - delete_storage_data: false - response: - status: 204 - headers: - Content-Length: '0' - failed_bulk_delete: - summary: Failed bulk deletion (transactional) - description: Complete example of bulk deletion failing due to one invalid object ID - no objects are deleted - value: - request: - method: POST - url: /objects/delete - headers: - Content-Type: application/json - body: - bulk_object_ids: - - drs_object_123456 - - nonexistent_object - - drs_object_345678 - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - delete_storage_data: false - response: - status: 404 - headers: - Content-Type: application/json - body: - msg: Object 'nonexistent_object' not found. No objects were deleted due to transactional behavior. - status_code: 404 - bulk_limit_exceeded: - summary: Bulk limit exceeded error - description: Complete example when bulk request exceeds server limits - value: - request: - method: POST - url: /objects/delete - headers: - Content-Type: application/json - body: - bulk_object_ids: - - obj1 - - obj2 - - obj3 - - '...150 objects total' - delete_storage_data: false - response: - status: 413 - headers: - Content-Type: application/json - body: - msg: Bulk delete request contains 150 objects but server maximum is 100. Check maxBulkDeleteLength in service-info. 
- status_code: 413 - unsupported_storage_deletion: - summary: Unsupported storage deletion error - description: Complete example when client requests storage deletion but server doesn't support it - value: - request: - method: POST - url: /objects/delete - headers: - Content-Type: application/json - body: - bulk_object_ids: - - drs_object_123456 - - drs_object_789012 - delete_storage_data: true - response: - status: 400 - headers: - Content-Type: application/json - body: - msg: Server does not support storage data deletion. Set delete_storage_data to false or omit the parameter. - status_code: 400 - /objects: - options: - summary: Get Authorization info about multiple DrsObjects. - security: - - {} - description: Returns a structure that contains for each DrsObjects a list of `Authorizations` that can be used to determine how to authorize requests to `GetObject` or `PostObject` (or bulk equivalents). - operationId: OptionsBulkObject - responses: - '200': - $ref: '#/components/responses/200OkBulkAuthorizations' - '204': - $ref: '#/components/responses/AuthorizationsNotSupported' - '400': - $ref: '#/components/responses/400BadRequest' - '404': - $ref: '#/components/responses/404NotFoundDrsObject' - '405': - $ref: '#/components/responses/AuthorizationsNotSupported' - '413': - $ref: '#/components/responses/413RequestTooLarge' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Objects - x-swagger-router-controller: ga4gh.drs.server - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/BulkObjectIdNoPassport' - post: - summary: Get info about multiple DrsObjects with an optional Passport(s). - description: |- - Returns an array of object metadata and access methods for the specified object IDs. - The request is limited to use passports (one or more) or a single bearer token, so make sure your bulk request is for objects that all use the same passports/token. 
- **Note**: To register new DRS objects, use the dedicated `/objects/register` endpoint. - operationId: GetBulkObjects - security: - - PassportAuth: [] - parameters: - - $ref: '#/components/parameters/Expand' - responses: - '200': - $ref: '#/components/responses/200OkDrsObjects' - '202': - $ref: '#/components/responses/202Accepted' - '400': - $ref: '#/components/responses/400BadRequest' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403Forbidden' - '404': - $ref: '#/components/responses/404NotFoundDrsObject' - '413': - $ref: '#/components/responses/413RequestTooLarge' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Objects - x-swagger-router-controller: ga4gh.drs.server - requestBody: - $ref: '#/components/requestBodies/BulkObjectBody' - /objects/register: - post: - summary: Register DRS objects - description: "**Optional Endpoint**: This endpoint is not required for DRS server implementations. Not all DRS servers support object registration. \nRegisters one or more \"candidate\" DRS objects with the server. If it accepts the request, the server will create unique object IDs for each registered object and return them in fully-formed DRS objects in response.\nThis endpoint can be used after uploading files using methods negotiated with the `/upload-request` endpoint to register the uploaded files as DRS objects, or to register existing data. The request body should contain candidate DRS objects with all required metadata including access methods that correspond to the upload methods used during file upload.\n**RECOMMENDED - Transactional Behavior**: Registration operations SHOULD be atomic transactions. If ANY candidate object fails validation or registration, the ENTIRE request SHOULD fail and NO objects SHOULD be registered. 
Servers SHOULD implement this as an all-or-nothing operation to ensure data consistency, but MAY implement partial registration with appropriate error reporting if transactional behavior is not feasible.\n**Authentication**: GA4GH Passports can be provided in the request body for authorization. Bearer tokens can be supplied in headers.\n**Server Responsibilities**: - SHOULD treat registration as an atomic transaction (all succeed or all fail) - SHOULD validate ALL candidate objects before registering ANY - Create unique object IDs for each registered object - Add timestamps (created_time, updated_time) - SHOULD roll back any partial changes if any candidate fails validation\n**Client Responsibilities**: - Provide required DRS object metadata for all candidates - Include access methods corresponding to uploaded file locations - Ensure checksums match uploaded file content - Handle potential failure of entire batch if any single object is invalid" - operationId: RegisterObjects - security: - - {} - - BasicAuth: [] - - BearerAuth: [] - - PassportAuth: [] - requestBody: - $ref: '#/components/requestBodies/RegisterObjectsBody' - responses: - '201': - $ref: '#/components/responses/201ObjectsCreated' - '400': - $ref: '#/components/responses/400BadRequest' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403Forbidden' - '413': - $ref: '#/components/responses/413RequestTooLarge' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Objects - x-swagger-router-controller: ga4gh.drs.server - x-codegen-request-body-name: body - /objects/{object_id}/access/{access_id}: - get: - summary: Get a URL for fetching bytes - description: |- - Returns a URL that can be used to fetch the bytes of a `DrsObject`. - This method only needs to be called when using an `AccessMethod` that contains an `access_id` (e.g., for servers that use signed URLs for fetching object bytes). 
- operationId: GetAccessURL - responses: - '200': - $ref: '#/components/responses/200OkAccess' - '202': - $ref: '#/components/responses/202Accepted' - '400': - $ref: '#/components/responses/400BadRequest' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403Forbidden' - '404': - $ref: '#/components/responses/404NotFoundAccess' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Objects - x-swagger-router-controller: ga4gh.drs.server - parameters: - - $ref: '#/components/parameters/ObjectId' - - $ref: '#/components/parameters/AccessId' - post: - summary: Get a URL for fetching bytes through POST'ing a Passport - description: |- - Returns a URL that can be used to fetch the bytes of a `DrsObject`. - This method only needs to be called when using an `AccessMethod` that contains an `access_id` (e.g., for servers that use signed URLs for fetching object bytes). - Method is a POST to accommodate a JWT GA4GH Passport sent in the formData in order to authorize access. - operationId: PostAccessURL - security: - - PassportAuth: [] - responses: - '200': - $ref: '#/components/responses/200OkAccess' - '202': - $ref: '#/components/responses/202Accepted' - '400': - $ref: '#/components/responses/400BadRequest' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403Forbidden' - '404': - $ref: '#/components/responses/404NotFoundAccess' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Objects - x-swagger-router-controller: ga4gh.drs.server - parameters: - - $ref: '#/components/parameters/ObjectId' - - $ref: '#/components/parameters/AccessId' - requestBody: - $ref: '#/components/requestBodies/Passports' - /objects/access: - post: - summary: Get URLs for fetching bytes from multiple objects with an optional Passport(s). - description: |- - Returns an array of URL objects that can be used to fetch the bytes of multiple `DrsObject`s. 
- This method only needs to be called when using an `AccessMethod` that contains an `access_id` (e.g., for servers that use signed URLs for fetching object bytes). - Currently this is limited to use passports (one or more) or a single bearer token, so make sure your bulk request is for objects that all use the same passports/token. - operationId: GetBulkAccessURL - security: - - PassportAuth: [] - responses: - '200': - $ref: '#/components/responses/200OkAccesses' - '202': - $ref: '#/components/responses/202Accepted' - '400': - $ref: '#/components/responses/400BadRequest' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403Forbidden' - '404': - $ref: '#/components/responses/404NotFoundAccess' - '413': - $ref: '#/components/responses/413RequestTooLarge' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Objects - x-swagger-router-controller: ga4gh.drs.server - requestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/BulkObjectAccessId' - /objects/{object_id}/access-methods: - post: - summary: Update access methods for a DRS object - description: |- - **Optional Endpoint**: Not all DRS servers support access method updates. - Update the access methods for an existing DRS object. Only access methods are modified - core object metadata (size, checksums, name) remains unchanged. Servers MAY validate that new access methods point to the same data. - Note that existing access methods are overwritten, if clients want to add additional access methods they should first retrieve the current methods and include them along with the new methods in this request. - **Authentication**: GA4GH Passports can be provided in the request body. 
- operationId: updateObjectAccessMethods - parameters: - - name: object_id - in: path - required: true - schema: - type: string - description: DRS object identifier - requestBody: - $ref: '#/components/requestBodies/AccessMethodUpdateBody' - responses: - '200': - $ref: '#/components/responses/200AccessMethodUpdate' - '400': - $ref: '#/components/responses/400BadRequest' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403Forbidden' - '404': - $ref: '#/components/responses/404NotFoundDrsObject' - '500': - $ref: '#/components/responses/500InternalServerError' - security: - - {} - - BasicAuth: [] - - BearerAuth: [] - - PassportAuth: [] - tags: - - Objects - /objects/checksum/{checksum}: - get: - summary: Get DRS objects that are a match for the checksum. - description: |- - Returns an array of `DRSObjects` that match a given checksum. - The checksum type is not provided, the checksum check is done against all checksum types. - operationId: GetObjectsByChecksum - security: - - PassportAuth: [] - parameters: - - $ref: '#/components/parameters/Checksum' - responses: - '200': - $ref: '#/components/responses/200OkDrsObjects' - '202': - $ref: '#/components/responses/202Accepted' - '400': - $ref: '#/components/responses/400BadRequest' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403Forbidden' - '404': - $ref: '#/components/responses/404NotFoundDrsObject' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Objects - x-swagger-router-controller: ga4gh.drs.server - /objects/access-methods: - post: - summary: Bulk update access methods for multiple DRS objects - description: |- - **Optional Endpoint**: Not all DRS servers support access method updates. - Update access methods for multiple DRS objects in a single atomic transaction. If ANY object fails to update, the ENTIRE request fails and NO objects are updated. 
Only access methods are modified - core object metadata remains unchanged. - Note that existing access methods are overwritten, if clients want to add additional access methods they should first retrieve the current methods and include them along with the new methods in this request. - **Authentication**: GA4GH Passports can be provided in the request body. - operationId: bulkUpdateAccessMethods - requestBody: - $ref: '#/components/requestBodies/BulkAccessMethodUpdateBody' - responses: - '200': - $ref: '#/components/responses/200BulkAccessMethodUpdate' - '400': - $ref: '#/components/responses/400BadRequest' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403Forbidden' - '404': - $ref: '#/components/responses/404NotFoundDrsObject' - '413': - $ref: '#/components/responses/413RequestTooLarge' - '500': - $ref: '#/components/responses/500InternalServerError' - security: - - {} - - BasicAuth: [] - - BearerAuth: [] - - PassportAuth: [] - tags: - - Objects - /upload-request: - post: - summary: Request upload methods for files - description: "**Optional Endpoint**: This endpoint is not required for DRS server implementations. Not all DRS servers support upload functionality. \nRequest upload method details and temporary credentials for uploading one or more files to an underlying storage service. This endpoint allows clients to obtain the necessary information to upload files before they are registered as DRS objects.\n**Discovery**: Before using this endpoint, clients should check the `/service-info` endpoint to determine if upload operations are supported. Look for `drs.uploadRequestSupported: true` and `drs.supportedUploadMethods` to understand which upload methods are available. Also check `drs.maxUploadSize` and `drs.maxUploadRequestLength` for server limits.\n**Usage Flow:**\n1. 
**Discovery**: Client checks `/service-info` endpoint to confirm upload support (`drs.uploadRequestSupported: true`) and available methods (`drs.supportedUploadMethods`)\n2. Client sends an upload request with file metadata (name, size, checksums, MIME type)\n3. Server responds with available upload methods (S3, HTTPS, Google Cloud Storage, etc.) and temporary credentials\n4. Client selects one or more upload methods from the response and uses the corresponding credentials to upload the file to the storage service\n5. Once uploaded, the client registers the files as DRS objects including access methods that correspond to the upload methods used with a POST request to `/objects/register`, the server will return fully formed DRS objects with server minted unique IDs.\n6. The registered DRS object becomes accessible through standard DRS API endpoints\n\n**Authentication:**\nThe endpoint supports multiple authentication methods including GA4GH Passport tokens sent in the request body. Passport tokens enable fine-grained authorization based on data access policies.\n**Upload Methods**: Response may include multiple options (s3, https, gs, ftp/sftp) for flexibility. Note that servers may return a subset of their advertised `supportedUploadMethods` based on file-specific factors such as file type, size, or server policies.\n**File Integrity**: All requests must include at least one checksum per file (SHA-256, MD5, or other IANA-registered algorithms).\n**Server Validation**: Servers MAY validate checksums/sizes but are not required to. Check service-info for validation behavior. Servers do not validate MIME types against actual file content - clients are responsible for providing accurate MIME type information." 
- operationId: PostUploadRequest - security: - - {} - - BasicAuth: [] - - BearerAuth: [] - - PassportAuth: [] - requestBody: - $ref: '#/components/requestBodies/UploadRequestBody' - responses: - '200': - $ref: '#/components/responses/200UploadRequest' - '400': - $ref: '#/components/responses/400BadRequest' - '401': - $ref: '#/components/responses/401Unauthorized' - '403': - $ref: '#/components/responses/403Forbidden' - '500': - $ref: '#/components/responses/500InternalServerError' - tags: - - Upload Request -components: - securitySchemes: - BasicAuth: - type: http - scheme: basic - description: | - A valid authorization token must be passed in the 'Authorization' header, - e.g. "Basic ${token_string}" - BearerAuth: - type: http - scheme: bearer - description: A valid authorization token must be passed in the 'Authorization' header, e.g. "Bearer ${token_string}" - PassportAuth: - type: http - scheme: bearer - x-in: body - bearerFormat: JWT - description: A valid GA4GH Passport must be passed in the body of an HTTP POST request as a tokens[] array. - schemas: - ServiceType: - description: Type of a GA4GH service - type: object - required: - - group - - artifact - - version - properties: - group: - type: string - description: Namespace in reverse domain name format. Use `org.ga4gh` for implementations compliant with official GA4GH specifications. For services with custom APIs not standardized by GA4GH, or implementations diverging from official GA4GH specifications, use a different namespace (e.g. your organization's reverse domain name). - example: org.ga4gh - artifact: - type: string - description: Name of the API or GA4GH specification implemented. Official GA4GH types should be assigned as part of standards approval process. Custom artifacts are supported. - example: beacon - version: - type: string - description: Version of the API or specification. GA4GH specifications use semantic versioning. 
- example: 1.0.0 - Service: - description: GA4GH service - type: object - required: - - id - - name - - type - - organization - - version - properties: - id: - type: string - description: Unique ID of this service. Reverse domain name notation is recommended, though not required. The identifier should attempt to be globally unique so it can be used in downstream aggregator services e.g. Service Registry. - example: org.ga4gh.myservice - name: - type: string - description: Name of this service. Should be human readable. - example: My project - type: - $ref: '#/components/schemas/ServiceType' - description: - type: string - description: Description of the service. Should be human readable and provide information about the service. - example: This service provides... - organization: - type: object - description: Organization providing the service - required: - - name - - url - properties: - name: - type: string - description: Name of the organization responsible for the service - example: My organization - url: - type: string - format: uri - description: URL of the website of the organization (RFC 3986 format) - example: https://example.com - contactUrl: - type: string - format: uri - description: URL of the contact for the provider of this service, e.g. a link to a contact form (RFC 3986 format), or an email (RFC 2368 format). - example: mailto:support@example.com - documentationUrl: - type: string - format: uri - description: URL of the documentation of this service (RFC 3986 format). This should help someone learn how to use your service, including any specifics required to access data, e.g. authentication. 
- example: https://docs.myservice.example.com - createdAt: - type: string - format: date-time - description: Timestamp describing when the service was first deployed and available (RFC 3339 format) - example: '2019-06-04T12:58:19Z' - updatedAt: - type: string - format: date-time - description: Timestamp describing when the service was last updated (RFC 3339 format) - example: '2019-06-04T12:58:19Z' - environment: - type: string - description: Environment the service is running in. Use this to distinguish between production, development and testing/staging deployments. Suggested values are prod, test, dev, staging. However this is advised and not enforced. - example: test - version: - type: string - description: Version of the service being described. Semantic versioning is recommended, but other identifiers, such as dates or commit hashes, are also allowed. The version should be changed whenever the service is updated. - example: 1.0.0 - DrsService: - type: object - required: - - type - - maxBulkRequestLength - properties: - maxBulkRequestLength: - type: integer - description: DEPRECATED - In 2.0 this will move to under the drs section of service info and not at the root level. The max length the bulk request endpoints can handle (>= 1) before generating a 413 error e.g. how long can the arrays bulk_object_ids and bulk_object_access_ids be for this server. - type: - type: object - required: - - artifact - properties: - artifact: - type: string - enum: - - drs - example: drs - drs: - type: object - required: - - maxBulkRequestLength - properties: - maxBulkRequestLength: - type: integer - description: The max length the bulk request endpoints can handle (>= 1) before generating a 413 error e.g. how long can the arrays bulk_object_ids and bulk_object_access_ids be for this server. - objectCount: - type: integer - description: The total number of objects in this DRS service. 
- totalObjectSize: - type: integer - description: The total size of all objects in this DRS service in bytes. As a general best practice, file bytes are counted for each unique file and not cloud mirrors or other redundant copies. - uploadRequestSupported: - type: boolean - description: Indicates whether this DRS server supports upload request operations via the `/upload-request` endpoint. If true, clients can request upload methods and credentials for uploading files. If false or missing, the server does not support upload request coordination. - default: false - objectRegistrationSupported: - type: boolean - description: Indicates whether this DRS server supports object registration operations via the `/objects/register` endpoint. If true, clients can register uploaded files or existing data as DRS objects. If false or missing, the server does not support object registration. - default: false - supportedUploadMethods: - type: array - items: - type: string - enum: - - s3 - - gs - - https - - ftp - - sftp - description: |- - List of upload methods supported by this DRS server. Only present when uploadRequestSupported is true. Clients can use this information to determine which upload methods are available before making upload requests. - - **s3**: Direct S3 upload with temporary AWS credentials - **gs**: Google Cloud Storage upload with access tokens - **https**: Presigned POST URL for HTTP uploads - **ftp**: File Transfer Protocol uploads - **sftp**: Secure File Transfer Protocol uploads - **gsiftp**: GridFTP secure file transfer - **globus**: Globus transfer service for high-performance data movement - maxUploadSize: - type: integer - format: int64 - description: Maximum file size in bytes that can be uploaded via the upload endpoints. Only present when uploadRequestSupported is true. If not specified, there is no explicit size limit. - maxUploadRequestLength: - type: integer - description: Maximum number of files that can be included in a single upload request. 
Only present when uploadRequestSupported is true. If not specified, defaults to the same value as maxBulkRequestLength. - maxRegisterRequestLength: - type: integer - description: Maximum number of candidate objects that can be included in a single registration request. Only present when objectRegistrationSupported is true. If not specified, defaults to the same value as maxBulkRequestLength. - validateUploadChecksums: - type: boolean - description: Indicates whether this DRS server validates uploaded file checksums against the provided metadata. If true, the server will verify that uploaded files match their declared checksums and may reject uploads with mismatches. If false or missing, the server does not perform checksum validation and relies on client-provided metadata. Only present when uploadRequestSupported or objectRegistrationSupported is true. - default: false - validateUploadFileSizes: - type: boolean - description: Indicates whether this DRS server validates uploaded file sizes against the provided metadata. If true, the server will verify that uploaded files match their declared sizes and may reject uploads with mismatches. If false or missing, the server does not perform file size validation and relies on client-provided metadata. Only present when uploadRequestSupported or objectRegistrationSupported is true. - default: false - relatedFileStorageSupported: - type: boolean - description: Indicates whether this DRS server supports storing files from the same upload request under a common prefix or folder structure. If true, the server will organize related files together in storage, enabling bioinformatics workflows that expect co-located files (e.g., CRAM + CRAI, VCF + TBI). If false or missing, the server may distribute files across different storage locations or prefixes. Only present when uploadRequestSupported is true. This feature is particularly valuable for genomics tools like samtools that expect index files to be co-located with data files. 
- default: false - deleteSupported: - type: boolean - description: Indicates whether this DRS server supports delete operations via the delete endpoints. If true, clients can delete DRS objects using POST requests to `/objects/{object_id}/delete` and `/objects/delete`. If false or missing, the server does not support delete operations and will return 404 for delete endpoint requests. Like upload functionality, delete support is entirely optional and servers remain DRS compliant without it. - default: false - maxBulkDeleteLength: - type: integer - description: Maximum number of objects that can be deleted in a single bulk delete request via `/objects/delete`. Only present when deleteSupported is true. If not specified when delete is supported, defaults to the same value as maxBulkRequestLength. Servers may enforce lower limits for delete operations compared to other bulk operations for safety reasons. - deleteStorageDataSupported: - type: boolean - description: 'Indicates whether this DRS server supports attempting to delete underlying storage data when clients request it. If true, the server will attempt to delete both metadata and storage files when `delete_storage_data: true` is specified in delete requests. If false or missing, the server only supports metadata deletion regardless of client request, preserving underlying storage data. Only present when deleteSupported is true. This is a capability flag indicating what the server can attempt, not a default behavior setting. Note: Storage deletion attempts may fail due to permissions, network issues, or storage service errors.' - default: false - accessMethodUpdateSupported: - type: boolean - description: Indicates whether this DRS server supports updating access methods for existing objects. If true, clients can update access methods using `/objects/{object_id}/access-methods` and `/objects/access-methods` endpoints. If false or missing, the server does not support access method updates. 
- default: false - maxBulkAccessMethodUpdateLength: - type: integer - description: Maximum number of objects that can be updated in a single bulk access method update request. Only present when accessMethodUpdateSupported is true. If not specified, defaults to maxBulkRequestLength. - validateAccessMethodUpdates: - type: boolean - description: Indicates whether this DRS server validates new access methods by verifying they point to the same data. If true, the server will attempt to verify checksums/content before updating access methods. If false or missing, the server trusts client-provided access methods without validation. Only present when accessMethodUpdateSupported is true. - default: false - Error: - type: object - description: An object that can optionally include information about the error. - properties: - msg: - type: string - description: A detailed error message. - status_code: - type: integer - description: The integer representing the HTTP status code (e.g. 200, 404). - Checksum: - type: object - required: - - checksum - - type - properties: - checksum: - type: string - description: The hex-string encoded checksum for the data - type: - type: string - description: |- - The digest method used to create the checksum. - The value (e.g. `sha-256`) SHOULD be listed as `Hash Name String` in the https://www.iana.org/assignments/named-information/named-information.xhtml#hash-alg[IANA Named Information Hash Algorithm Registry]. Other values MAY be used, as long as implementors are aware of the issues discussed in https://tools.ietf.org/html/rfc6920#section-9.4[RFC6920]. - GA4GH may provide more explicit guidance for use of non-IANA-registered algorithms in the future. Until then, if implementers do choose such an algorithm (e.g. because it's implemented by their storage provider), they SHOULD use an existing standard `type` value such as `md5`, `etag`, `crc32c`, `trunc512`, or `sha1`. 
- example: sha-256 - AccessURL: - type: object - required: - - url - properties: - url: - type: string - description: A fully resolvable URL that can be used to fetch the actual object bytes. - headers: - type: array - items: - type: string - description: An optional list of headers to include in the HTTP request to `url`. These headers can be used to provide auth tokens required to fetch the object bytes. - example: 'Authorization: Basic Z2E0Z2g6ZHJz' - Authorizations: - type: object - properties: - drs_object_id: - type: string - supported_types: - type: array - items: - type: string - enum: - - None - - BasicAuth - - BearerAuth - - PassportAuth - description: An Optional list of support authorization types. More than one can be supported and tried in sequence. Defaults to `None` if empty or missing. - passport_auth_issuers: - type: array - items: - type: string - description: If authorizations contain `PassportAuth` this is a required list of visa issuers (as found in a visa's `iss` claim) that may authorize access to this object. The caller must only provide passports that contain visas from this list. It is strongly recommended that the caller validate that it is appropriate to send the requested passport/visa to the DRS server to mitigate attacks by malicious DRS servers requesting credentials they should not have. - bearer_auth_issuers: - type: array - items: - type: string - description: If authorizations contain `BearerAuth` this is an optional list of issuers that may authorize access to this object. The caller must provide a token from one of these issuers. If this is empty or missing it assumed the caller knows which token to send via other means. It is strongly recommended that the caller validate that it is appropriate to send the requested token to the DRS server to mitigate attacks by malicious DRS servers requesting credentials they should not have. 
- AccessMethod: - type: object - required: - - type - properties: - type: - type: string - enum: - - s3 - - gs - - ftp - - gsiftp - - globus - - htsget - - https - - file - description: Type of the access method. - access_url: - allOf: - - $ref: '#/components/schemas/AccessURL' - - description: An `AccessURL` that can be used to fetch the actual object bytes. Note that at least one of `access_url` and `access_id` must be provided. - type: object - required: [url] - properties: - url: - type: string - description: "A fully resolvable URL that can be used to fetch the actual object bytes." - headers: - type: array - items: - type: string - description: "GA4GH-compatible list of HTTP headers." - access_id: - type: string - description: An arbitrary string to be passed to the `/access` method to get an `AccessURL`. This string must be unique within the scope of a single object. Note that at least one of `access_url` and `access_id` must be provided. - cloud: - type: string - description: Name of the cloud service provider that the object belongs to. If the cloud service is Amazon Web Services, Google Cloud Platform or Azure the values should be `aws`, `gcp`, or `azure` respectively. - example: aws, gcp, or azure - region: - type: string - description: Name of the region in the cloud service provider that the object belongs to. - example: us-east-1 - available: - type: boolean - description: Availability of file in the cloud. This label defines if this file is immediately accessible via DRS. Any delay or requirement of a thawing mechanism if the file is in offline/archival storage is classified as false, meaning it is unavailable. - example: true - authorizations: - allOf: - - $ref: '#/components/schemas/Authorizations' - - description: When `access_id` is provided, `authorizations` provides information about how to authorize the `/access` method. 
- type: object - properties: - drs_object_id: - type: string - supported_types: - type: array - items: - type: string - passport_auth_issuers: - type: array - items: - type: string - bearer_auth_issuers: - type: array - items: - type: string - ContentsObject: - type: object - required: - - name - properties: - name: - type: string - description: A name declared by the bundle author that must be used when materialising this object, overriding any name directly associated with the object itself. The name must be unique within the containing bundle. This string is made up of uppercase and lowercase letters, decimal digits, hyphen, period, and underscore [A-Za-z0-9.-_]. See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282[portable filenames]. - id: - type: string - description: A DRS identifier of a `DrsObject` (either a single blob or a nested bundle). If this ContentsObject is an object within a nested bundle, then the id is optional. Otherwise, the id is required. - drs_uri: - type: array - description: A list of full DRS identifier URI paths that may be used to obtain the object. These URIs may be external to this DRS instance. - example: drs://drs.example.org/314159 - items: - type: string - contents: - type: array - description: If this ContentsObject describes a nested bundle and the caller specified "?expand=true" on the request, then this contents array must be present and describe the objects within the nested bundle. - items: - $ref: '#/components/schemas/ContentsObject' - DrsObject: - type: object - required: - - id - - self_uri - - size - - created_time - - checksums - properties: - id: - type: string - description: An identifier unique to this `DrsObject` - name: - type: string - description: |- - A string that can be used to name a `DrsObject`. - This string is made up of uppercase and lowercase letters, decimal digits, hyphen, period, and underscore [A-Za-z0-9.-_]. 
See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282[portable filenames]. - self_uri: - type: string - description: |- - A drs:// hostname-based URI, as defined in the DRS documentation, that tells clients how to access this object. - The intent of this field is to make DRS objects self-contained, and therefore easier for clients to store and pass around. For example, if you arrive at this DRS JSON by resolving a compact identifier-based DRS URI, the `self_uri` presents you with a hostname and properly encoded DRS ID for use in subsequent `access` endpoint calls. - example: drs://drs.example.org/314159 - size: - type: integer - format: int64 - description: |- - For blobs, the blob size in bytes. - For bundles, the cumulative size, in bytes, of items in the `contents` field. - created_time: - type: string - format: date-time - description: |- - Timestamp of content creation in RFC3339. - (This is the creation time of the underlying content, not of the JSON object.) - updated_time: - type: string - format: date-time - description: Timestamp of content update in RFC3339, identical to `created_time` in systems that do not support updates. (This is the update time of the underlying content, not of the JSON object.) - version: - type: string - description: |- - A string representing a version. - (Some systems may use checksum, a RFC3339 timestamp, or an incrementing version number.) - mime_type: - type: string - description: A string providing the mime-type of the `DrsObject`. - example: application/json - checksums: - type: array - minItems: 1 - items: - $ref: '#/components/schemas/Checksum' - description: |- - The checksum of the `DrsObject`. At least one checksum must be provided. - For blobs, the checksum is computed over the bytes in the blob. - For bundles, the checksum is computed over a sorted concatenation of the checksums of its top-level contained objects (not recursive, names not included). 
The list of checksums is sorted alphabetically (hex-code) before concatenation and a further checksum is performed on the concatenated checksum value. - For example, if a bundle contains blobs with the following checksums: - md5(blob1) = 72794b6d - md5(blob2) = 5e089d29 - Then the checksum of the bundle is: - md5( concat( sort( md5(blob1), md5(blob2) ) ) ) - = md5( concat( sort( 72794b6d, 5e089d29 ) ) ) - = md5( concat( 5e089d29, 72794b6d ) ) - = md5( 5e089d2972794b6d ) - = f7a29a04 - access_methods: - type: array - minItems: 1 - items: - $ref: '#/components/schemas/AccessMethod' - description: |- - The list of access methods that can be used to fetch the `DrsObject`. - Required for single blobs; optional for bundles. - contents: - type: array - description: |- - If not set, this `DrsObject` is a single blob. - If set, this `DrsObject` is a bundle containing the listed `ContentsObject` s (some of which may be further nested). - items: - $ref: '#/components/schemas/ContentsObject' - description: - type: string - description: A human readable description of the `DrsObject`. - aliases: - type: array - items: - type: string - description: A list of strings that can be used to find other metadata about this `DrsObject` from external metadata sources. These aliases can be used to represent secondary accession numbers or external GUIDs. - DeleteRequest: - type: object - description: Request body for single object delete operations - properties: - passports: - type: array - items: - type: string - example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - description: the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. - delete_storage_data: - type: boolean - default: false - description: If true, delete both DRS object metadata and underlying storage data (follows server's deleteStorageDataSupported capability). 
If false (default), only delete DRS object metadata while preserving underlying storage data. Clients must explicitly set this to true to enable storage data deletion, ensuring intentional choice for this potentially destructive operation. - BulkDeleteRequest: - type: object - description: Request body for bulk delete operations - required: - - bulk_object_ids - properties: - bulk_object_ids: - type: array - items: - type: string - description: Array of DRS object IDs to delete - example: - - drs_object_123456 - - drs_object_789012 - - drs_object_345678 - passports: - type: array - items: - type: string - example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - description: the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. - delete_storage_data: - type: boolean - default: false - description: If true, delete both DRS object metadata and underlying storage data (follows server's deleteStorageDataSupported capability). If false (default), only delete DRS object metadata while preserving underlying storage data. Clients must explicitly set this to true to enable storage data deletion, ensuring intentional choice for this potentially destructive operation. - summary: - type: object - description: A summary of what was resolved. - properties: - requested: - type: integer - description: Number of items requested. - resolved: - type: integer - description: Number of objects resolved. - unresolved: - type: integer - description: Number of objects not resolved. - unresolved: - type: array - description: Error codes for each unresolved DRS object. 
- items: - type: object - properties: - error_code: - type: integer - object_ids: - type: array - items: - type: string - BulkObjectIdNoPassport: - type: object - description: The object that contains the DRS object IDs array - properties: - bulk_object_ids: - type: array - items: - type: string - description: An array of ObjectIDs. - DrsObjectCandidate: - type: object - required: - - size - - checksums - properties: - name: - type: string - description: |- - A string that can be used to name a `DrsObject`. - This string is made up of uppercase and lowercase letters, decimal digits, hyphen, period, and underscore [A-Za-z0-9.-_]. See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282[portable filenames]. - size: - type: integer - format: int64 - description: |- - For blobs, the blob size in bytes. - For bundles, the cumulative size, in bytes, of items in the `contents` field. - version: - type: string - description: |- - A string representing a version. - (Some systems may use checksum, a RFC3339 timestamp, or an incrementing version number.) - mime_type: - type: string - description: A string providing the mime-type of the `DrsObject`. - example: application/json - checksums: - type: array - minItems: 1 - items: - $ref: '#/components/schemas/Checksum' - description: |- - The checksum of the `DrsObject`. At least one checksum must be provided. - For blobs, the checksum is computed over the bytes in the blob. - For bundles, the checksum is computed over a sorted concatenation of the checksums of its top-level contained objects (not recursive, names not included). The list of checksums is sorted alphabetically (hex-code) before concatenation and a further checksum is performed on the concatenated checksum value. 
- For example, if a bundle contains blobs with the following checksums: - md5(blob1) = 72794b6d - md5(blob2) = 5e089d29 - Then the checksum of the bundle is: - md5( concat( sort( md5(blob1), md5(blob2) ) ) ) - = md5( concat( sort( 72794b6d, 5e089d29 ) ) ) - = md5( concat( 5e089d29, 72794b6d ) ) - = md5( 5e089d2972794b6d ) - = f7a29a04 - access_methods: - type: array - minItems: 1 - items: - $ref: '#/components/schemas/AccessMethod' - description: |- - The list of access methods that can be used to fetch the `DrsObject`. - Required for single blobs; optional for bundles. - contents: - type: array - description: |- - If not set, this `DrsObject` is a single blob. - If set, this `DrsObject` is a bundle containing the listed `ContentsObject` s (some of which may be further nested). - items: - $ref: '#/components/schemas/ContentsObject' - description: - type: string - description: A human readable description of the `DrsObject`. - aliases: - type: array - items: - type: string - description: A list of strings that can be used to find other metadata about this `DrsObject` from external metadata sources. These aliases can be used to represent secondary accession numbers or external GUIDs. - BulkObjectAccessId: - type: object - description: The object that contains object_id/access_id tuples - properties: - passports: - type: array - items: - type: string - bulk_object_access_ids: - type: array - items: - type: object - properties: - bulk_object_id: - type: string - description: DRS object ID - bulk_access_ids: - type: array - description: DRS object access ID - items: - type: string - BulkAccessURL: - type: object - required: - - url - properties: - drs_object_id: - type: string - drs_access_id: - type: string - url: - type: string - description: A fully resolvable URL that can be used to fetch the actual object bytes. - headers: - type: array - items: - type: string - description: An optional list of headers to include in the HTTP request to `url`. 
These headers can be used to provide auth tokens required to fetch the object bytes. - example: 'Authorization: Basic Z2E0Z2g6ZHJz' - AccessMethodUpdateRequest: - type: object - required: - - access_methods - properties: - access_methods: - type: array - items: - $ref: '#/components/schemas/AccessMethod' - minItems: 1 - description: New access methods for the DRS object - passports: - type: array - items: - type: string - description: Optional GA4GH Passport JWTs for authorization - BulkAccessMethodUpdateRequest: - type: object - required: - - updates - properties: - updates: - type: array - items: - type: object - required: - - object_id - - access_methods - properties: - object_id: - type: string - description: DRS object ID to update - access_methods: - type: array - items: - $ref: '#/components/schemas/AccessMethod' - minItems: 1 - description: New access methods for this object - minItems: 1 - description: Array of access method updates to perform - passports: - type: array - items: - type: string - description: Optional GA4GH Passport JWTs for authorization - UploadRequestObject: - type: object - required: - - name - - size - - mime_type - - checksums - properties: - name: - type: string - description: The name of the file to upload - size: - type: integer - format: int64 - description: Size of the file in bytes - mime_type: - type: string - description: MIME type of the file - checksums: - type: array - items: - $ref: '#/components/schemas/Checksum' - minItems: 1 - description: Array of checksums for file integrity verification - description: - type: string - description: Optional description of the file - aliases: - type: array - items: - type: string - description: Optional array of alternative names for the file - UploadRequest: - type: object - required: - - requests - properties: - requests: - type: array - items: - $ref: '#/components/schemas/UploadRequestObject' - minItems: 1 - description: Array of upload requests for files - passports: - type: array 
- items: - type: string - description: Optional array of GA4GH Passport JWTs for authorization - UploadMethod: - type: object - required: - - type - - access_url - properties: - type: - type: string - enum: - - s3 - - gs - - https - - ftp - - sftp - - gsiftp - - globus - description: |- - Type of upload method. Implementations MAY support any subset of these types. - The 'https' type can be used to return a presigned POST URL and is expected to be the most common implementation for typical file uploads. This method provides a simple HTTP POST interface that works with standard web clients. - The 's3' type is primarily intended to support uploads of large files that want to take advantage of multipart uploads and automatic retries implemented in AWS libraries. This method provides direct access to S3-specific upload capabilities. - Other common implementations include 'gs' for Google Cloud Storage and 'sftp' for secure FTP uploads. - access_url: - allOf: - - $ref: '#/components/schemas/AccessURL' - - description: An `AccessURL` that specifies where the file will be accessible after upload. This URL will be used as the access_url in the eventual DRS object, ensuring consistency between upload and retrieval operations. - type: object - required: [url] - properties: - url: - type: string - description: "Inlined Upload URL context." - headers: - type: array - items: - type: string - description: "Inlined Upload Headers." - region: - type: string - description: Cloud region for the upload location. Optional for non-cloud storage types. - example: us-east-1 - upload_details: - type: object - additionalProperties: true - description: A dictionary of upload-specific configuration details that vary by upload method type. The contents and structure depend on the specific upload method being used. 
- UploadResponseObject: - type: object - required: - - name - - size - - mime_type - - checksums - properties: - name: - type: string - description: The name of the file - size: - type: integer - format: int64 - description: Size of the file in bytes - mime_type: - type: string - description: MIME type of the file - checksums: - type: array - items: - $ref: '#/components/schemas/Checksum' - minItems: 1 - description: Array of checksums for file integrity verification - description: - type: string - description: Optional description of the file - aliases: - type: array - items: - type: string - description: Optional array of alternative names - upload_methods: - type: array - items: - $ref: '#/components/schemas/UploadMethod' - description: Available methods for uploading this file - UploadResponse: - type: object - required: - - responses - properties: - responses: - type: array - items: - $ref: '#/components/schemas/UploadResponseObject' - description: List of upload responses for the requested files - responses: - 200ServiceInfo: - description: Retrieve info about the DRS service - content: - application/json: - schema: - allOf: - - $ref: '#/components/schemas/Service' - - $ref: '#/components/schemas/DrsService' - 500InternalServerError: - description: An unexpected error occurred. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - 200OkDrsObject: - description: The `DrsObject` was found successfully - content: - application/json: - schema: - $ref: '#/components/schemas/DrsObject' - 202Accepted: - description: | - The operation is delayed and will continue asynchronously. The client should retry this same request after the delay specified by Retry-After header. - headers: - Retry-After: - description: | - Delay in seconds. The client should retry this same request after waiting for this duration. To simplify client response processing, this must be an integral relative time in seconds. 
This value SHOULD represent the minimum duration the client should wait before attempting the operation again with a reasonable expectation of success. When it is not feasible for the server to determine the actual expected delay, the server may return a brief, fixed value instead. - schema: - type: integer - format: int64 - 400BadRequest: - description: The request is malformed. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - 401Unauthorized: - description: The request is unauthorized. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - 403Forbidden: - description: The requester is not authorized to perform this action. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - 404NotFoundDrsObject: - description: The requested `DrsObject` wasn't found. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - 404NotFoundAccess: - description: The requested `AccessURL` wasn't found. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - 200OkAuthorizations: - description: '`Authorizations` were found successfully' - content: - application/json: - schema: - $ref: '#/components/schemas/Authorizations' - AuthorizationsNotSupported: - description: '`Authorizations` are not supported for this object. Default to `None`.' - 204DeleteSuccess: - description: All DRS objects were successfully deleted. For bulk operations, this indicates that the entire atomic transaction completed successfully - all requested objects have been deleted. Storage data deletion (if requested) was attempted but success is not guaranteed. - 400BadRequestDelete: - description: 'The delete request is malformed or contains unsupported parameters (e.g., delete_storage_data: true when server doesn''t support storage data deletion).' 
- content: - application/json: - schema: - $ref: '#/components/schemas/Error' - examples: - unsupported_storage_deletion: - summary: Storage data deletion not supported - description: Client requested storage data deletion but server doesn't support it - value: - msg: Server does not support storage data deletion. Set delete_storage_data to false or omit the parameter. - status_code: 400 - invalid_request_format: - summary: Malformed request body - description: Request body contains invalid JSON or missing required fields - value: - msg: 'Invalid request body: bulk_object_ids is required for bulk delete operations' - status_code: 400 - empty_object_list: - summary: Empty object ID list - description: Bulk delete request with empty object ID array - value: - msg: bulk_object_ids cannot be empty - status_code: 400 - 403ForbiddenDelete: - description: The client is not authorized to delete the requested DRS object. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - examples: - insufficient_permissions: - summary: Insufficient delete permissions - description: Client lacks permission to delete the specified object - value: - msg: Client lacks delete permission for object drs_object_123456 - status_code: 403 - invalid_passport: - summary: Invalid GA4GH Passport - description: Provided GA4GH Passport is invalid or expired - value: - msg: Invalid or expired GA4GH Passport provided - status_code: 403 - missing_visa: - summary: Missing required visa - description: GA4GH Passport lacks required visa for delete operation - value: - msg: GA4GH Passport does not contain required visa for delete operation on this object - status_code: 403 - 404NotFoundDelete: - description: The requested DRS object for deletion wasn't found, or delete endpoints are not supported by this server. 
- content: - application/json: - schema: - $ref: '#/components/schemas/Error' - examples: - object_not_found: - summary: DRS object not found - description: The specified DRS object does not exist - value: - msg: DRS object drs_object_123456 does not exist - status_code: 404 - delete_not_supported: - summary: Delete operations not supported - description: This server does not support delete operations - value: - msg: Delete operations are not supported by this server - status_code: 404 - endpoint_not_found: - summary: Delete endpoint not available - description: Delete endpoints are not implemented on this server - value: - msg: The requested endpoint /objects/delete is not available on this server - status_code: 404 - 413RequestTooLarge: - description: The bulk request is too large. - content: - application/json: - schema: - $ref: '#/components/schemas/Error' - examples: - bulk_limit_exceeded: - summary: Bulk delete limit exceeded - description: Request contains more objects than server's maximum bulk delete limit - value: - msg: Bulk delete request contains 150 objects but server maximum is 100. Check maxBulkDeleteLength in service-info. 
- status_code: 413 - request_size_too_large: - summary: Request payload too large - description: The overall request payload exceeds server limits - value: - msg: Request payload size exceeds server limit of 1MB - status_code: 413 - 200OkDrsObjects: - description: The `DrsObjects` were found successfully - content: - application/json: - schema: - type: object - properties: - summary: - $ref: '#/components/schemas/summary' - unresolved_drs_objects: - $ref: '#/components/schemas/unresolved' - resolved_drs_object: - type: array - items: - $ref: '#/components/schemas/DrsObject' - 200OkBulkAuthorizations: - description: '`Authorizations` were found successfully' - content: - application/json: - schema: - type: object - properties: - summary: - $ref: '#/components/schemas/summary' - unresolved_drs_objects: - $ref: '#/components/schemas/unresolved' - resolved_drs_object: - type: array - items: - $ref: '#/components/schemas/Authorizations' - 201ObjectsCreated: - description: DRS objects were successfully registered as an atomic transaction. Returns the complete DRS objects with server-minted IDs and timestamps. All candidate objects were validated and registered together - if any had failed, none would have been registered. 
- content: - application/json: - schema: - type: object - required: - - objects - properties: - objects: - type: array - items: - $ref: '#/components/schemas/DrsObject' - description: Array of registered DRS objects in the same order as the candidates in the request - examples: - single_object_created: - summary: Single object registered - description: Response after registering one DRS object - value: - objects: - - id: drs_obj_a1b2c3d4e5f6 - self_uri: drs://drs.example.org/drs_obj_a1b2c3d4e5f6 - name: sample_data.vcf - size: 1048576 - mime_type: text/plain - created_time: '2024-01-15T10:30:00Z' - updated_time: '2024-01-15T10:30:00Z' - version: '1.0' - checksums: - - checksum: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - type: sha-256 - access_methods: - - type: s3 - access_url: - url: s3://my-bucket/uploads/sample_data.vcf - description: Variant call format file for sample analysis - multiple_objects_created: - summary: Multiple objects registered - description: Response after registering multiple DRS objects - value: - objects: - - id: drs_obj_a1b2c3d4e5f6 - self_uri: drs://drs.example.org/drs_obj_a1b2c3d4e5f6 - name: genome_assembly.fasta - size: 3221225472 - mime_type: text/plain - created_time: '2024-01-15T09:00:00Z' - updated_time: '2024-01-15T09:00:00Z' - version: '1.0' - checksums: - - checksum: a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3 - type: sha-256 - access_methods: - - type: s3 - access_url: - url: s3://genomics-bucket/assemblies/hg38.fasta - description: Human genome reference assembly - - id: drs_obj_f6e5d4c3b2a1 - self_uri: drs://drs.example.org/drs_obj_f6e5d4c3b2a1 - name: annotations.gff3 - size: 524288000 - mime_type: text/plain - created_time: '2024-01-15T09:15:00Z' - updated_time: '2024-01-15T09:15:00Z' - version: '1.0' - checksums: - - checksum: b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9 - type: sha-256 - access_methods: - - type: https - access_url: - url: 
https://data.example.org/files/annotations.gff3 - description: Gene annotations in GFF3 format - 200OkAccess: - description: The `AccessURL` was found successfully - content: - application/json: - schema: - $ref: '#/components/schemas/AccessURL' - 200OkAccesses: - description: The `AccessURL` was found successfully - content: - application/json: - schema: - type: object - properties: - summary: - $ref: '#/components/schemas/summary' - unresolved_drs_objects: - $ref: '#/components/schemas/unresolved' - resolved_drs_object_access_urls: - type: array - items: - $ref: '#/components/schemas/BulkAccessURL' - 200AccessMethodUpdate: - description: Access methods successfully updated. Returns the updated DRS object with new access methods and updated timestamp. - content: - application/json: - schema: - $ref: '#/components/schemas/DrsObject' - 200BulkAccessMethodUpdate: - description: Access methods successfully updated for all objects. Returns updated DRS objects with new access methods and updated timestamps. - content: - application/json: - schema: - type: object - required: - - objects - properties: - objects: - type: array - items: - $ref: '#/components/schemas/DrsObject' - description: Array of updated DRS objects - 200UploadRequest: - description: Upload request processed successfully. Returns upload methods and temporary credentials for the requested files. 
- content: - application/json: - schema: - $ref: '#/components/schemas/UploadResponse' - examples: - s3_upload: - summary: S3 upload method response - description: Response with S3 upload method and temporary credentials - value: - responses: - - name: sample_data.vcf - size: 1048576 - mime_type: text/plain - checksums: - - checksum: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - type: sha-256 - description: Variant call format file for sample analysis - aliases: - - sample_001_variants - - vcf_batch_2024 - upload_methods: - - type: s3 - access_url: - url: https://my-bucket.s3.amazonaws.com/uploads/drs_object_123456 - region: us-east-1 - upload_details: - bucket: my-bucket - key: uploads/drs_object_123456 - access_key_id: AKIAIOSFODNN7EXAMPLE - secret_access_key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY - session_token: AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE - expires_at: '2024-01-01T12:00:00Z' - https_upload: - summary: HTTPS upload method response - description: Response with HTTPS presigned POST URL for direct upload - value: - responses: - - name: genome_assembly.fasta - size: 3221225472 - mime_type: text/plain - checksums: - - checksum: a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3 - type: sha-256 - - checksum: 098f6bcd4621d373cade4e832627b4f6 - type: md5 - description: Human genome reference assembly - aliases: - - hg38_reference - upload_methods: - - type: https - access_url: - url: https://upload.example.org/v1/files/drs_object_789012 - upload_details: - post_url: https://upload.example.org/v1/files/drs_object_789012?signature=abc123 - multiple_methods: - summary: Multiple upload methods response - description: Response offering multiple upload method options for flexibility - value: - responses: - - name: 
annotations.gff3 - size: 524288000 - mime_type: text/plain - checksums: - - checksum: b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9 - type: sha-256 - description: Gene annotations in GFF3 format - upload_methods: - - type: s3 - access_url: - url: https://genomics-bucket.s3.us-west-2.amazonaws.com/uploads/drs_object_345678 - region: us-west-2 - upload_details: - bucket: genomics-bucket - key: uploads/drs_object_345678 - access_key_id: AKIAI44QH8DHBEXAMPLE - secret_access_key: je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY - session_token: temporary_session_token_here - expires_at: '2024-01-01T12:00:00Z' - - type: https - access_url: - url: https://upload-api.example.org/files/drs_object_345678 - upload_details: - post_url: https://upload-api.example.org/files/drs_object_345678?token=upload_token_12345 - - type: gs - access_url: - url: https://storage.googleapis.com/genomics-uploads/drs_object_345678 - region: us-central1 - upload_details: - bucket: genomics-uploads - key: drs_object_345678 - access_token: ya29.AHES6ZRVmB7fkLtd1XTmq6mo0S1wqZZi3-Lh_s-6Uw7p8vtgSwg - expires_at: '2024-01-01T12:00:00Z' - parameters: - ObjectId: - in: path - name: object_id - required: true - description: '`DrsObject` identifier' - schema: - type: string - Expand: - in: query - name: expand - schema: - type: boolean - example: false - description: |- - If false and the object_id refers to a bundle, then the ContentsObject array contains only those objects directly contained in the bundle. That is, if the bundle contains other bundles, those other bundles are not recursively included in the result. - If true and the object_id refers to a bundle, then the entire set of objects in the bundle is expanded. That is, if the bundle contains other bundles, then those other bundles are recursively expanded and included in the result. Recursion continues through the entire sub-tree of the bundle. - If the object_id refers to a blob, then the query parameter is ignored. 
- AccessId: - in: path - name: access_id - required: true - description: An `access_id` from the `access_methods` list of a `DrsObject` - schema: - type: string - Checksum: - in: path - name: checksum - required: true - description: A `checksum` value from the `checksums` list of a `DrsObject` - schema: - type: string - requestBodies: - PostObjectBody: - required: true - content: - application/json: - schema: - type: object - properties: - expand: - type: boolean - example: false - description: |- - If false and the object_id refers to a bundle, then the ContentsObject array contains only those objects directly contained in the bundle. That is, if the bundle contains other bundles, those other bundles are not recursively included in the result. - If true and the object_id refers to a bundle, then the entire set of objects in the bundle is expanded. That is, if the bundle contains other bundles, then those other bundles are recursively expanded and included in the result. Recursion continues through the entire sub-tree of the bundle. - If the object_id refers to a blob, then the query parameter is ignored. - passports: - type: array - items: - type: string - example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - description: the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. 
- examples: - retrieve_with_auth: - summary: Retrieve object with authentication - description: Request object metadata with passport authentication - value: - expand: false - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - retrieve_expanded_bundle: - summary: Retrieve expanded bundle with authentication - description: Request expanded bundle contents with passport authentication - value: - expand: true - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - - eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.additional_passport_signature - DeleteBody: - required: false - content: - application/json: - schema: - $ref: '#/components/schemas/DeleteRequest' - examples: - metadata_only_delete: - summary: Delete metadata only (default) - description: Delete DRS object metadata while preserving underlying storage data. This is the default and safest option. 
- value: - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - delete_storage_data: false - full_delete: - summary: Delete metadata and storage data - description: Delete both DRS object metadata and underlying storage data (requires server support via deleteStorageDataSupported) - value: - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - delete_storage_data: true - no_auth_delete: - summary: Delete without authentication - description: Delete operation without GA4GH Passport authentication (for public objects or when using Bearer token in headers) - value: - delete_storage_data: false - minimal_request: - summary: Minimal delete request - description: Simplest delete request with no authentication and default behavior (metadata only) - value: {} - multiple_passports: - summary: Multiple GA4GH Passports - description: Delete request with multiple GA4GH Passports for complex authorization scenarios - value: - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - - eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.AbCdEfGhIjKlMnOpQrStUvWxYz - delete_storage_data: false - update_workflow: - summary: Safe update workflow - description: Delete metadata only to enable safe update pattern (delete metadata, then re-register with new metadata) - value: - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - delete_storage_data: false - BulkDeleteBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/BulkDeleteRequest' - examples: - bulk_metadata_delete: - summary: Bulk delete metadata only - description: Delete multiple DRS objects metadata while preserving underlying storage data (default and safest 
option) - value: - bulk_object_ids: - - drs_object_123456 - - drs_object_789012 - - drs_object_345678 - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - delete_storage_data: false - bulk_full_delete: - summary: Bulk delete metadata and storage data - description: Delete both metadata and storage data for multiple objects (requires server support via deleteStorageDataSupported) - value: - bulk_object_ids: - - drs_object_123456 - - drs_object_789012 - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - delete_storage_data: true - bulk_no_auth_delete: - summary: Bulk delete without authentication - description: Bulk delete operation without GA4GH Passport authentication (for public objects or when using Bearer token in headers) - value: - bulk_object_ids: - - drs_object_123456 - - drs_object_789012 - delete_storage_data: false - large_bulk_delete: - summary: Large bulk delete operation - description: Delete many objects in a single request (check maxBulkDeleteLength in service-info for limits) - value: - bulk_object_ids: - - drs_object_001 - - drs_object_002 - - drs_object_003 - - drs_object_004 - - drs_object_005 - - drs_object_006 - - drs_object_007 - - drs_object_008 - - drs_object_009 - - drs_object_010 - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - delete_storage_data: false - mixed_object_types: - summary: Mixed object types deletion - description: Delete objects with different ID formats and types in a single request - value: - bulk_object_ids: - - drs://example.org/123456 - - local_object_789 - - uuid:550e8400-e29b-41d4-a716-446655440000 - - compact:prefix:identifier - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - delete_storage_data: 
false - minimal_bulk_request: - summary: Minimal bulk delete request - description: Simplest bulk delete request with required fields only - value: - bulk_object_ids: - - drs_object_123456 - - drs_object_789012 - BulkObjectBody: - required: true - content: - application/json: - schema: - type: object - required: - - bulk_object_ids - properties: - passports: - type: array - items: - type: string - example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - description: the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. - bulk_object_ids: - type: array - items: - type: string - minItems: 1 - description: An array of ObjectIDs to retrieve metadata for - examples: - bulk_retrieve: - summary: Bulk retrieve objects - description: Retrieve metadata for multiple existing DRS objects using their IDs - value: - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - bulk_object_ids: - - drs_object_123456 - - drs_object_789012 - - drs_object_345678 - bulk_retrieve_no_auth: - summary: Bulk retrieve without authentication - description: Retrieve metadata for public DRS objects - value: - bulk_object_ids: - - drs_object_public_123 - - drs_object_public_456 - RegisterObjectsBody: - description: Request body for registering DRS objects after upload - required: true - content: - application/json: - schema: - type: object - required: - - candidates - properties: - candidates: - type: array - items: - $ref: '#/components/schemas/DrsObjectCandidate' - minItems: 1 - description: Array of DRS object candidates to register (server will mint IDs and timestamps) - passports: - type: array - items: - type: string - description: Optional array of GA4GH Passport JWTs for authorization - examples: - single_object_registration: - summary: Register a single object - description: 
Register one DRS object after upload - value: - candidates: - - name: sample_data.vcf - size: 1048576 - mime_type: text/plain - checksums: - - checksum: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - type: sha-256 - description: Variant call format file for sample analysis - access_methods: - - type: s3 - access_url: - url: s3://my-bucket/uploads/sample_data.vcf - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9... - bulk_object_registration: - summary: Register multiple objects - description: Register multiple DRS objects in a single request - value: - candidates: - - name: genome_assembly.fasta - size: 3221225472 - mime_type: text/plain - checksums: - - checksum: a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3 - type: sha-256 - description: Human genome reference assembly - access_methods: - - type: s3 - access_url: - url: s3://genomics-bucket/assemblies/hg38.fasta - - name: annotations.gff3 - size: 524288000 - mime_type: text/plain - checksums: - - checksum: b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9 - type: sha-256 - description: Gene annotations in GFF3 format - access_methods: - - type: https - access_url: - url: https://data.example.org/files/annotations.gff3 - Passports: - required: true - content: - application/json: - schema: - type: object - properties: - passports: - type: array - items: - type: string - example: eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - description: the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. 
- AccessMethodUpdateBody: - description: Request body for updating access methods of a DRS object - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/AccessMethodUpdateRequest' - BulkAccessMethodUpdateBody: - description: Request body for bulk updating access methods of multiple DRS objects - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/BulkAccessMethodUpdateRequest' - UploadRequestBody: - required: true - content: - application/json: - schema: - $ref: '#/components/schemas/UploadRequest' - examples: - single_file: - summary: Single file upload request - description: Request upload methods for a single file - value: - requests: - - name: sample_data.vcf - size: 1048576 - mime_type: text/plain - checksums: - - checksum: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - type: sha-256 - description: Variant call format file for sample analysis - aliases: - - sample_001_variants - - vcf_batch_2024 - passports: - - eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJnYTRnaF9wYXNzcG9ydF92MSI6W119.JJ5rN0ktP0qwyZmIPpxmF_p7JsxAZH6L6brUxtad3CM - multiple_files: - summary: Multiple files upload request - description: Request upload methods for multiple files with different types - value: - requests: - - name: genome_assembly.fasta - size: 3221225472 - mime_type: text/plain - checksums: - - checksum: a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3 - type: sha-256 - - checksum: 098f6bcd4621d373cade4e832627b4f6 - type: md5 - description: Human genome reference assembly - aliases: - - hg38_reference - - name: annotations.gff3 - size: 524288000 - mime_type: text/plain - checksums: - - checksum: b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9 - type: sha-256 - description: Gene annotations in GFF3 format - - name: metadata.json - size: 2048 - mime_type: application/json - checksums: - - checksum: c89e4c5c7f2c8c8e8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c8c - type: 
sha-256 - description: Sample metadata and experimental conditions - no_passports: - summary: Upload request without authentication - description: Request for public upload endpoints that don't require authentication - value: - requests: - - name: public_dataset.csv - size: 10240 - mime_type: text/csv - checksums: - - checksum: d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35 - type: sha-256 - description: Public research dataset -x-tagGroups: - - name: Overview - tags: - - Introduction - - DRS API Principles - - Authorization & Authentication - - name: Operations - tags: - - Objects - - Upload Request - - Service Info - - name: Models - tags: - - AccessMethodModel - - AccessURLModel - - ChecksumModel - - ContentsObjectModel - - DrsObjectModel - - DrsObjectCandidateModel - - ErrorModel - - UploadRequestModel - - UploadResponseModel - - UploadRequestObjectModel - - UploadResponseObjectModel - - UploadMethodModel - - DeleteRequestModel - - BulkDeleteRequestModel - - DeleteResultModel - - BulkDeleteResponseModel - - name: Appendices - tags: - - Motivation - - Working With Compound Objects - - Background Notes on DRS URIs - - Compact Identifier-Based URIs - - Hostname-Based URIs - - GA4GH Service Registry - - Upload Requests and Object Registration - - Object Deletion - - Access Method Update diff --git a/apigen/drs/model_access_method.go b/apigen/drs/model_access_method.go deleted file mode 100644 index 3667762..0000000 --- a/apigen/drs/model_access_method.go +++ /dev/null @@ -1,380 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the AccessMethod type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &AccessMethod{} - -// AccessMethod struct for AccessMethod -type AccessMethod struct { - // Type of the access method. - Type string `json:"type"` - AccessUrl *AccessMethodAccessUrl `json:"access_url,omitempty"` - // An arbitrary string to be passed to the `/access` method to get an `AccessURL`. This string must be unique within the scope of a single object. Note that at least one of `access_url` and `access_id` must be provided. - AccessId *string `json:"access_id,omitempty"` - // Name of the cloud service provider that the object belongs to. If the cloud service is Amazon Web Services, Google Cloud Platform or Azure the values should be `aws`, `gcp`, or `azure` respectively. - Cloud *string `json:"cloud,omitempty"` - // Name of the region in the cloud service provider that the object belongs to. - Region *string `json:"region,omitempty"` - // Availablity of file in the cloud. This label defines if this file is immediately accessible via DRS. Any delay or requirement of thawing mechanism if the file is in offline/archival storage is classified as false, meaning it is unavailable. 
- Available *bool `json:"available,omitempty"` - Authorizations *AccessMethodAuthorizations `json:"authorizations,omitempty"` -} - -type _AccessMethod AccessMethod - -// NewAccessMethod instantiates a new AccessMethod object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewAccessMethod(type_ string) *AccessMethod { - this := AccessMethod{} - this.Type = type_ - return &this -} - -// NewAccessMethodWithDefaults instantiates a new AccessMethod object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewAccessMethodWithDefaults() *AccessMethod { - this := AccessMethod{} - return &this -} - -// GetType returns the Type field value -func (o *AccessMethod) GetType() string { - if o == nil { - var ret string - return ret - } - - return o.Type -} - -// GetTypeOk returns a tuple with the Type field value -// and a boolean to check if the value has been set. -func (o *AccessMethod) GetTypeOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Type, true -} - -// SetType sets field value -func (o *AccessMethod) SetType(v string) { - o.Type = v -} - -// GetAccessUrl returns the AccessUrl field value if set, zero value otherwise. -func (o *AccessMethod) GetAccessUrl() AccessMethodAccessUrl { - if o == nil || IsNil(o.AccessUrl) { - var ret AccessMethodAccessUrl - return ret - } - return *o.AccessUrl -} - -// GetAccessUrlOk returns a tuple with the AccessUrl field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *AccessMethod) GetAccessUrlOk() (*AccessMethodAccessUrl, bool) { - if o == nil || IsNil(o.AccessUrl) { - return nil, false - } - return o.AccessUrl, true -} - -// HasAccessUrl returns a boolean if a field has been set. -func (o *AccessMethod) HasAccessUrl() bool { - if o != nil && !IsNil(o.AccessUrl) { - return true - } - - return false -} - -// SetAccessUrl gets a reference to the given AccessMethodAccessUrl and assigns it to the AccessUrl field. -func (o *AccessMethod) SetAccessUrl(v AccessMethodAccessUrl) { - o.AccessUrl = &v -} - -// GetAccessId returns the AccessId field value if set, zero value otherwise. -func (o *AccessMethod) GetAccessId() string { - if o == nil || IsNil(o.AccessId) { - var ret string - return ret - } - return *o.AccessId -} - -// GetAccessIdOk returns a tuple with the AccessId field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *AccessMethod) GetAccessIdOk() (*string, bool) { - if o == nil || IsNil(o.AccessId) { - return nil, false - } - return o.AccessId, true -} - -// HasAccessId returns a boolean if a field has been set. -func (o *AccessMethod) HasAccessId() bool { - if o != nil && !IsNil(o.AccessId) { - return true - } - - return false -} - -// SetAccessId gets a reference to the given string and assigns it to the AccessId field. -func (o *AccessMethod) SetAccessId(v string) { - o.AccessId = &v -} - -// GetCloud returns the Cloud field value if set, zero value otherwise. -func (o *AccessMethod) GetCloud() string { - if o == nil || IsNil(o.Cloud) { - var ret string - return ret - } - return *o.Cloud -} - -// GetCloudOk returns a tuple with the Cloud field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *AccessMethod) GetCloudOk() (*string, bool) { - if o == nil || IsNil(o.Cloud) { - return nil, false - } - return o.Cloud, true -} - -// HasCloud returns a boolean if a field has been set. 
-func (o *AccessMethod) HasCloud() bool { - if o != nil && !IsNil(o.Cloud) { - return true - } - - return false -} - -// SetCloud gets a reference to the given string and assigns it to the Cloud field. -func (o *AccessMethod) SetCloud(v string) { - o.Cloud = &v -} - -// GetRegion returns the Region field value if set, zero value otherwise. -func (o *AccessMethod) GetRegion() string { - if o == nil || IsNil(o.Region) { - var ret string - return ret - } - return *o.Region -} - -// GetRegionOk returns a tuple with the Region field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *AccessMethod) GetRegionOk() (*string, bool) { - if o == nil || IsNil(o.Region) { - return nil, false - } - return o.Region, true -} - -// HasRegion returns a boolean if a field has been set. -func (o *AccessMethod) HasRegion() bool { - if o != nil && !IsNil(o.Region) { - return true - } - - return false -} - -// SetRegion gets a reference to the given string and assigns it to the Region field. -func (o *AccessMethod) SetRegion(v string) { - o.Region = &v -} - -// GetAvailable returns the Available field value if set, zero value otherwise. -func (o *AccessMethod) GetAvailable() bool { - if o == nil || IsNil(o.Available) { - var ret bool - return ret - } - return *o.Available -} - -// GetAvailableOk returns a tuple with the Available field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *AccessMethod) GetAvailableOk() (*bool, bool) { - if o == nil || IsNil(o.Available) { - return nil, false - } - return o.Available, true -} - -// HasAvailable returns a boolean if a field has been set. -func (o *AccessMethod) HasAvailable() bool { - if o != nil && !IsNil(o.Available) { - return true - } - - return false -} - -// SetAvailable gets a reference to the given bool and assigns it to the Available field. 
-func (o *AccessMethod) SetAvailable(v bool) { - o.Available = &v -} - -// GetAuthorizations returns the Authorizations field value if set, zero value otherwise. -func (o *AccessMethod) GetAuthorizations() AccessMethodAuthorizations { - if o == nil || IsNil(o.Authorizations) { - var ret AccessMethodAuthorizations - return ret - } - return *o.Authorizations -} - -// GetAuthorizationsOk returns a tuple with the Authorizations field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *AccessMethod) GetAuthorizationsOk() (*AccessMethodAuthorizations, bool) { - if o == nil || IsNil(o.Authorizations) { - return nil, false - } - return o.Authorizations, true -} - -// HasAuthorizations returns a boolean if a field has been set. -func (o *AccessMethod) HasAuthorizations() bool { - if o != nil && !IsNil(o.Authorizations) { - return true - } - - return false -} - -// SetAuthorizations gets a reference to the given AccessMethodAuthorizations and assigns it to the Authorizations field. 
-func (o *AccessMethod) SetAuthorizations(v AccessMethodAuthorizations) { - o.Authorizations = &v -} - -func (o AccessMethod) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o AccessMethod) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["type"] = o.Type - if !IsNil(o.AccessUrl) { - toSerialize["access_url"] = o.AccessUrl - } - if !IsNil(o.AccessId) { - toSerialize["access_id"] = o.AccessId - } - if !IsNil(o.Cloud) { - toSerialize["cloud"] = o.Cloud - } - if !IsNil(o.Region) { - toSerialize["region"] = o.Region - } - if !IsNil(o.Available) { - toSerialize["available"] = o.Available - } - if !IsNil(o.Authorizations) { - toSerialize["authorizations"] = o.Authorizations - } - return toSerialize, nil -} - -func (o *AccessMethod) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "type", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varAccessMethod := _AccessMethod{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varAccessMethod) - - if err != nil { - return err - } - - *o = AccessMethod(varAccessMethod) - - return err -} - -type NullableAccessMethod struct { - value *AccessMethod - isSet bool -} - -func (v NullableAccessMethod) Get() *AccessMethod { - return v.value -} - -func (v *NullableAccessMethod) Set(val *AccessMethod) { - v.value = val - v.isSet = true -} - -func (v NullableAccessMethod) IsSet() bool { - return v.isSet -} - -func (v *NullableAccessMethod) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableAccessMethod(val *AccessMethod) *NullableAccessMethod { - return &NullableAccessMethod{value: val, isSet: true} -} - -func (v NullableAccessMethod) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableAccessMethod) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_access_method_access_url.go b/apigen/drs/model_access_method_access_url.go deleted file mode 100644 index c19a8a9..0000000 --- a/apigen/drs/model_access_method_access_url.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the AccessMethodAccessUrl type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &AccessMethodAccessUrl{} - -// AccessMethodAccessUrl An `AccessURL` that can be used to fetch the actual object bytes. Note that at least one of `access_url` and `access_id` must be provided. -type AccessMethodAccessUrl struct { - // A fully resolvable URL that can be used to fetch the actual object bytes. - Url string `json:"url"` - // GA4GH-compatible list of HTTP headers. - Headers []string `json:"headers,omitempty"` -} - -type _AccessMethodAccessUrl AccessMethodAccessUrl - -// NewAccessMethodAccessUrl instantiates a new AccessMethodAccessUrl object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewAccessMethodAccessUrl(url string) *AccessMethodAccessUrl { - this := AccessMethodAccessUrl{} - this.Url = url - return &this -} - -// NewAccessMethodAccessUrlWithDefaults instantiates a new AccessMethodAccessUrl object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewAccessMethodAccessUrlWithDefaults() *AccessMethodAccessUrl { - this := AccessMethodAccessUrl{} - return &this -} - -// GetUrl returns the Url field value -func (o *AccessMethodAccessUrl) GetUrl() string { - if o == nil { - var ret string - return ret - } - - return o.Url -} - -// GetUrlOk returns a tuple with the Url field value -// and a boolean to check if the value has been set. 
-func (o *AccessMethodAccessUrl) GetUrlOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Url, true -} - -// SetUrl sets field value -func (o *AccessMethodAccessUrl) SetUrl(v string) { - o.Url = v -} - -// GetHeaders returns the Headers field value if set, zero value otherwise. -func (o *AccessMethodAccessUrl) GetHeaders() []string { - if o == nil || IsNil(o.Headers) { - var ret []string - return ret - } - return o.Headers -} - -// GetHeadersOk returns a tuple with the Headers field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *AccessMethodAccessUrl) GetHeadersOk() ([]string, bool) { - if o == nil || IsNil(o.Headers) { - return nil, false - } - return o.Headers, true -} - -// HasHeaders returns a boolean if a field has been set. -func (o *AccessMethodAccessUrl) HasHeaders() bool { - if o != nil && !IsNil(o.Headers) { - return true - } - - return false -} - -// SetHeaders gets a reference to the given []string and assigns it to the Headers field. -func (o *AccessMethodAccessUrl) SetHeaders(v []string) { - o.Headers = v -} - -func (o AccessMethodAccessUrl) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o AccessMethodAccessUrl) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["url"] = o.Url - if !IsNil(o.Headers) { - toSerialize["headers"] = o.Headers - } - return toSerialize, nil -} - -func (o *AccessMethodAccessUrl) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "url", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varAccessMethodAccessUrl := _AccessMethodAccessUrl{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varAccessMethodAccessUrl) - - if err != nil { - return err - } - - *o = AccessMethodAccessUrl(varAccessMethodAccessUrl) - - return err -} - -type NullableAccessMethodAccessUrl struct { - value *AccessMethodAccessUrl - isSet bool -} - -func (v NullableAccessMethodAccessUrl) Get() *AccessMethodAccessUrl { - return v.value -} - -func (v *NullableAccessMethodAccessUrl) Set(val *AccessMethodAccessUrl) { - v.value = val - v.isSet = true -} - -func (v NullableAccessMethodAccessUrl) IsSet() bool { - return v.isSet -} - -func (v *NullableAccessMethodAccessUrl) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableAccessMethodAccessUrl(val *AccessMethodAccessUrl) *NullableAccessMethodAccessUrl { - return &NullableAccessMethodAccessUrl{value: val, isSet: true} -} - -func (v NullableAccessMethodAccessUrl) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableAccessMethodAccessUrl) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_access_method_authorizations.go b/apigen/drs/model_access_method_authorizations.go deleted file mode 100644 index a5bc108..0000000 --- a/apigen/drs/model_access_method_authorizations.go +++ /dev/null @@ -1,235 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 
-Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" -) - -// checks if the AccessMethodAuthorizations type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &AccessMethodAuthorizations{} - -// AccessMethodAuthorizations When `access_id` is provided, `authorizations` provides information about how to authorize the `/access` method. -type AccessMethodAuthorizations struct { - DrsObjectId *string `json:"drs_object_id,omitempty"` - SupportedTypes []string `json:"supported_types,omitempty"` - PassportAuthIssuers []string `json:"passport_auth_issuers,omitempty"` - BearerAuthIssuers []string `json:"bearer_auth_issuers,omitempty"` -} - -// NewAccessMethodAuthorizations instantiates a new AccessMethodAuthorizations object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewAccessMethodAuthorizations() *AccessMethodAuthorizations { - this := AccessMethodAuthorizations{} - return &this -} - -// NewAccessMethodAuthorizationsWithDefaults instantiates a new AccessMethodAuthorizations object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewAccessMethodAuthorizationsWithDefaults() *AccessMethodAuthorizations { - this := AccessMethodAuthorizations{} - return &this -} - -// GetDrsObjectId returns the DrsObjectId field value if set, zero value otherwise. 
-func (o *AccessMethodAuthorizations) GetDrsObjectId() string { - if o == nil || IsNil(o.DrsObjectId) { - var ret string - return ret - } - return *o.DrsObjectId -} - -// GetDrsObjectIdOk returns a tuple with the DrsObjectId field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *AccessMethodAuthorizations) GetDrsObjectIdOk() (*string, bool) { - if o == nil || IsNil(o.DrsObjectId) { - return nil, false - } - return o.DrsObjectId, true -} - -// HasDrsObjectId returns a boolean if a field has been set. -func (o *AccessMethodAuthorizations) HasDrsObjectId() bool { - if o != nil && !IsNil(o.DrsObjectId) { - return true - } - - return false -} - -// SetDrsObjectId gets a reference to the given string and assigns it to the DrsObjectId field. -func (o *AccessMethodAuthorizations) SetDrsObjectId(v string) { - o.DrsObjectId = &v -} - -// GetSupportedTypes returns the SupportedTypes field value if set, zero value otherwise. -func (o *AccessMethodAuthorizations) GetSupportedTypes() []string { - if o == nil || IsNil(o.SupportedTypes) { - var ret []string - return ret - } - return o.SupportedTypes -} - -// GetSupportedTypesOk returns a tuple with the SupportedTypes field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *AccessMethodAuthorizations) GetSupportedTypesOk() ([]string, bool) { - if o == nil || IsNil(o.SupportedTypes) { - return nil, false - } - return o.SupportedTypes, true -} - -// HasSupportedTypes returns a boolean if a field has been set. -func (o *AccessMethodAuthorizations) HasSupportedTypes() bool { - if o != nil && !IsNil(o.SupportedTypes) { - return true - } - - return false -} - -// SetSupportedTypes gets a reference to the given []string and assigns it to the SupportedTypes field. 
-func (o *AccessMethodAuthorizations) SetSupportedTypes(v []string) { - o.SupportedTypes = v -} - -// GetPassportAuthIssuers returns the PassportAuthIssuers field value if set, zero value otherwise. -func (o *AccessMethodAuthorizations) GetPassportAuthIssuers() []string { - if o == nil || IsNil(o.PassportAuthIssuers) { - var ret []string - return ret - } - return o.PassportAuthIssuers -} - -// GetPassportAuthIssuersOk returns a tuple with the PassportAuthIssuers field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *AccessMethodAuthorizations) GetPassportAuthIssuersOk() ([]string, bool) { - if o == nil || IsNil(o.PassportAuthIssuers) { - return nil, false - } - return o.PassportAuthIssuers, true -} - -// HasPassportAuthIssuers returns a boolean if a field has been set. -func (o *AccessMethodAuthorizations) HasPassportAuthIssuers() bool { - if o != nil && !IsNil(o.PassportAuthIssuers) { - return true - } - - return false -} - -// SetPassportAuthIssuers gets a reference to the given []string and assigns it to the PassportAuthIssuers field. -func (o *AccessMethodAuthorizations) SetPassportAuthIssuers(v []string) { - o.PassportAuthIssuers = v -} - -// GetBearerAuthIssuers returns the BearerAuthIssuers field value if set, zero value otherwise. -func (o *AccessMethodAuthorizations) GetBearerAuthIssuers() []string { - if o == nil || IsNil(o.BearerAuthIssuers) { - var ret []string - return ret - } - return o.BearerAuthIssuers -} - -// GetBearerAuthIssuersOk returns a tuple with the BearerAuthIssuers field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *AccessMethodAuthorizations) GetBearerAuthIssuersOk() ([]string, bool) { - if o == nil || IsNil(o.BearerAuthIssuers) { - return nil, false - } - return o.BearerAuthIssuers, true -} - -// HasBearerAuthIssuers returns a boolean if a field has been set. 
-func (o *AccessMethodAuthorizations) HasBearerAuthIssuers() bool { - if o != nil && !IsNil(o.BearerAuthIssuers) { - return true - } - - return false -} - -// SetBearerAuthIssuers gets a reference to the given []string and assigns it to the BearerAuthIssuers field. -func (o *AccessMethodAuthorizations) SetBearerAuthIssuers(v []string) { - o.BearerAuthIssuers = v -} - -func (o AccessMethodAuthorizations) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o AccessMethodAuthorizations) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.DrsObjectId) { - toSerialize["drs_object_id"] = o.DrsObjectId - } - if !IsNil(o.SupportedTypes) { - toSerialize["supported_types"] = o.SupportedTypes - } - if !IsNil(o.PassportAuthIssuers) { - toSerialize["passport_auth_issuers"] = o.PassportAuthIssuers - } - if !IsNil(o.BearerAuthIssuers) { - toSerialize["bearer_auth_issuers"] = o.BearerAuthIssuers - } - return toSerialize, nil -} - -type NullableAccessMethodAuthorizations struct { - value *AccessMethodAuthorizations - isSet bool -} - -func (v NullableAccessMethodAuthorizations) Get() *AccessMethodAuthorizations { - return v.value -} - -func (v *NullableAccessMethodAuthorizations) Set(val *AccessMethodAuthorizations) { - v.value = val - v.isSet = true -} - -func (v NullableAccessMethodAuthorizations) IsSet() bool { - return v.isSet -} - -func (v *NullableAccessMethodAuthorizations) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableAccessMethodAuthorizations(val *AccessMethodAuthorizations) *NullableAccessMethodAuthorizations { - return &NullableAccessMethodAuthorizations{value: val, isSet: true} -} - -func (v NullableAccessMethodAuthorizations) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableAccessMethodAuthorizations) UnmarshalJSON(src []byte) error { - v.isSet = true - return 
json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_access_method_update_request.go b/apigen/drs/model_access_method_update_request.go deleted file mode 100644 index 09244c3..0000000 --- a/apigen/drs/model_access_method_update_request.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the AccessMethodUpdateRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &AccessMethodUpdateRequest{} - -// AccessMethodUpdateRequest struct for AccessMethodUpdateRequest -type AccessMethodUpdateRequest struct { - // New access methods for the DRS object - AccessMethods []AccessMethod `json:"access_methods"` - // Optional GA4GH Passport JWTs for authorization - Passports []string `json:"passports,omitempty"` -} - -type _AccessMethodUpdateRequest AccessMethodUpdateRequest - -// NewAccessMethodUpdateRequest instantiates a new AccessMethodUpdateRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewAccessMethodUpdateRequest(accessMethods []AccessMethod) *AccessMethodUpdateRequest { - this := AccessMethodUpdateRequest{} - this.AccessMethods = accessMethods - return &this -} - -// NewAccessMethodUpdateRequestWithDefaults instantiates a new AccessMethodUpdateRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewAccessMethodUpdateRequestWithDefaults() 
*AccessMethodUpdateRequest { - this := AccessMethodUpdateRequest{} - return &this -} - -// GetAccessMethods returns the AccessMethods field value -func (o *AccessMethodUpdateRequest) GetAccessMethods() []AccessMethod { - if o == nil { - var ret []AccessMethod - return ret - } - - return o.AccessMethods -} - -// GetAccessMethodsOk returns a tuple with the AccessMethods field value -// and a boolean to check if the value has been set. -func (o *AccessMethodUpdateRequest) GetAccessMethodsOk() ([]AccessMethod, bool) { - if o == nil { - return nil, false - } - return o.AccessMethods, true -} - -// SetAccessMethods sets field value -func (o *AccessMethodUpdateRequest) SetAccessMethods(v []AccessMethod) { - o.AccessMethods = v -} - -// GetPassports returns the Passports field value if set, zero value otherwise. -func (o *AccessMethodUpdateRequest) GetPassports() []string { - if o == nil || IsNil(o.Passports) { - var ret []string - return ret - } - return o.Passports -} - -// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *AccessMethodUpdateRequest) GetPassportsOk() ([]string, bool) { - if o == nil || IsNil(o.Passports) { - return nil, false - } - return o.Passports, true -} - -// HasPassports returns a boolean if a field has been set. -func (o *AccessMethodUpdateRequest) HasPassports() bool { - if o != nil && !IsNil(o.Passports) { - return true - } - - return false -} - -// SetPassports gets a reference to the given []string and assigns it to the Passports field. 
-func (o *AccessMethodUpdateRequest) SetPassports(v []string) { - o.Passports = v -} - -func (o AccessMethodUpdateRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o AccessMethodUpdateRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["access_methods"] = o.AccessMethods - if !IsNil(o.Passports) { - toSerialize["passports"] = o.Passports - } - return toSerialize, nil -} - -func (o *AccessMethodUpdateRequest) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. - requiredProperties := []string{ - "access_methods", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varAccessMethodUpdateRequest := _AccessMethodUpdateRequest{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varAccessMethodUpdateRequest) - - if err != nil { - return err - } - - *o = AccessMethodUpdateRequest(varAccessMethodUpdateRequest) - - return err -} - -type NullableAccessMethodUpdateRequest struct { - value *AccessMethodUpdateRequest - isSet bool -} - -func (v NullableAccessMethodUpdateRequest) Get() *AccessMethodUpdateRequest { - return v.value -} - -func (v *NullableAccessMethodUpdateRequest) Set(val *AccessMethodUpdateRequest) { - v.value = val - v.isSet = true -} - -func (v NullableAccessMethodUpdateRequest) IsSet() bool { - return v.isSet -} - -func (v 
*NullableAccessMethodUpdateRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableAccessMethodUpdateRequest(val *AccessMethodUpdateRequest) *NullableAccessMethodUpdateRequest { - return &NullableAccessMethodUpdateRequest{value: val, isSet: true} -} - -func (v NullableAccessMethodUpdateRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableAccessMethodUpdateRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_access_url.go b/apigen/drs/model_access_url.go deleted file mode 100644 index ac64dad..0000000 --- a/apigen/drs/model_access_url.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the AccessURL type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &AccessURL{} - -// AccessURL struct for AccessURL -type AccessURL struct { - // A fully resolvable URL that can be used to fetch the actual object bytes. - Url string `json:"url"` - // An optional list of headers to include in the HTTP request to `url`. These headers can be used to provide auth tokens required to fetch the object bytes. 
- Headers []string `json:"headers,omitempty"` -} - -type _AccessURL AccessURL - -// NewAccessURL instantiates a new AccessURL object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewAccessURL(url string) *AccessURL { - this := AccessURL{} - this.Url = url - return &this -} - -// NewAccessURLWithDefaults instantiates a new AccessURL object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewAccessURLWithDefaults() *AccessURL { - this := AccessURL{} - return &this -} - -// GetUrl returns the Url field value -func (o *AccessURL) GetUrl() string { - if o == nil { - var ret string - return ret - } - - return o.Url -} - -// GetUrlOk returns a tuple with the Url field value -// and a boolean to check if the value has been set. -func (o *AccessURL) GetUrlOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Url, true -} - -// SetUrl sets field value -func (o *AccessURL) SetUrl(v string) { - o.Url = v -} - -// GetHeaders returns the Headers field value if set, zero value otherwise. -func (o *AccessURL) GetHeaders() []string { - if o == nil || IsNil(o.Headers) { - var ret []string - return ret - } - return o.Headers -} - -// GetHeadersOk returns a tuple with the Headers field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *AccessURL) GetHeadersOk() ([]string, bool) { - if o == nil || IsNil(o.Headers) { - return nil, false - } - return o.Headers, true -} - -// HasHeaders returns a boolean if a field has been set. 
-func (o *AccessURL) HasHeaders() bool { - if o != nil && !IsNil(o.Headers) { - return true - } - - return false -} - -// SetHeaders gets a reference to the given []string and assigns it to the Headers field. -func (o *AccessURL) SetHeaders(v []string) { - o.Headers = v -} - -func (o AccessURL) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o AccessURL) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["url"] = o.Url - if !IsNil(o.Headers) { - toSerialize["headers"] = o.Headers - } - return toSerialize, nil -} - -func (o *AccessURL) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. - requiredProperties := []string{ - "url", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varAccessURL := _AccessURL{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varAccessURL) - - if err != nil { - return err - } - - *o = AccessURL(varAccessURL) - - return err -} - -type NullableAccessURL struct { - value *AccessURL - isSet bool -} - -func (v NullableAccessURL) Get() *AccessURL { - return v.value -} - -func (v *NullableAccessURL) Set(val *AccessURL) { - v.value = val - v.isSet = true -} - -func (v NullableAccessURL) IsSet() bool { - return v.isSet -} - -func (v *NullableAccessURL) Unset() { - v.value = nil - v.isSet = false -} - -func 
NewNullableAccessURL(val *AccessURL) *NullableAccessURL { - return &NullableAccessURL{value: val, isSet: true} -} - -func (v NullableAccessURL) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableAccessURL) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_authorizations.go b/apigen/drs/model_authorizations.go deleted file mode 100644 index d761106..0000000 --- a/apigen/drs/model_authorizations.go +++ /dev/null @@ -1,238 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" -) - -// checks if the Authorizations type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &Authorizations{} - -// Authorizations struct for Authorizations -type Authorizations struct { - DrsObjectId *string `json:"drs_object_id,omitempty"` - // An Optional list of support authorization types. More than one can be supported and tried in sequence. Defaults to `None` if empty or missing. - SupportedTypes []string `json:"supported_types,omitempty"` - // If authorizations contain `PassportAuth` this is a required list of visa issuers (as found in a visa's `iss` claim) that may authorize access to this object. The caller must only provide passports that contain visas from this list. It is strongly recommended that the caller validate that it is appropriate to send the requested passport/visa to the DRS server to mitigate attacks by malicious DRS servers requesting credentials they should not have. 
- PassportAuthIssuers []string `json:"passport_auth_issuers,omitempty"` - // If authorizations contain `BearerAuth` this is an optional list of issuers that may authorize access to this object. The caller must provide a token from one of these issuers. If this is empty or missing it assumed the caller knows which token to send via other means. It is strongly recommended that the caller validate that it is appropriate to send the requested token to the DRS server to mitigate attacks by malicious DRS servers requesting credentials they should not have. - BearerAuthIssuers []string `json:"bearer_auth_issuers,omitempty"` -} - -// NewAuthorizations instantiates a new Authorizations object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewAuthorizations() *Authorizations { - this := Authorizations{} - return &this -} - -// NewAuthorizationsWithDefaults instantiates a new Authorizations object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewAuthorizationsWithDefaults() *Authorizations { - this := Authorizations{} - return &this -} - -// GetDrsObjectId returns the DrsObjectId field value if set, zero value otherwise. -func (o *Authorizations) GetDrsObjectId() string { - if o == nil || IsNil(o.DrsObjectId) { - var ret string - return ret - } - return *o.DrsObjectId -} - -// GetDrsObjectIdOk returns a tuple with the DrsObjectId field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Authorizations) GetDrsObjectIdOk() (*string, bool) { - if o == nil || IsNil(o.DrsObjectId) { - return nil, false - } - return o.DrsObjectId, true -} - -// HasDrsObjectId returns a boolean if a field has been set. 
-func (o *Authorizations) HasDrsObjectId() bool { - if o != nil && !IsNil(o.DrsObjectId) { - return true - } - - return false -} - -// SetDrsObjectId gets a reference to the given string and assigns it to the DrsObjectId field. -func (o *Authorizations) SetDrsObjectId(v string) { - o.DrsObjectId = &v -} - -// GetSupportedTypes returns the SupportedTypes field value if set, zero value otherwise. -func (o *Authorizations) GetSupportedTypes() []string { - if o == nil || IsNil(o.SupportedTypes) { - var ret []string - return ret - } - return o.SupportedTypes -} - -// GetSupportedTypesOk returns a tuple with the SupportedTypes field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Authorizations) GetSupportedTypesOk() ([]string, bool) { - if o == nil || IsNil(o.SupportedTypes) { - return nil, false - } - return o.SupportedTypes, true -} - -// HasSupportedTypes returns a boolean if a field has been set. -func (o *Authorizations) HasSupportedTypes() bool { - if o != nil && !IsNil(o.SupportedTypes) { - return true - } - - return false -} - -// SetSupportedTypes gets a reference to the given []string and assigns it to the SupportedTypes field. -func (o *Authorizations) SetSupportedTypes(v []string) { - o.SupportedTypes = v -} - -// GetPassportAuthIssuers returns the PassportAuthIssuers field value if set, zero value otherwise. -func (o *Authorizations) GetPassportAuthIssuers() []string { - if o == nil || IsNil(o.PassportAuthIssuers) { - var ret []string - return ret - } - return o.PassportAuthIssuers -} - -// GetPassportAuthIssuersOk returns a tuple with the PassportAuthIssuers field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Authorizations) GetPassportAuthIssuersOk() ([]string, bool) { - if o == nil || IsNil(o.PassportAuthIssuers) { - return nil, false - } - return o.PassportAuthIssuers, true -} - -// HasPassportAuthIssuers returns a boolean if a field has been set. 
-func (o *Authorizations) HasPassportAuthIssuers() bool { - if o != nil && !IsNil(o.PassportAuthIssuers) { - return true - } - - return false -} - -// SetPassportAuthIssuers gets a reference to the given []string and assigns it to the PassportAuthIssuers field. -func (o *Authorizations) SetPassportAuthIssuers(v []string) { - o.PassportAuthIssuers = v -} - -// GetBearerAuthIssuers returns the BearerAuthIssuers field value if set, zero value otherwise. -func (o *Authorizations) GetBearerAuthIssuers() []string { - if o == nil || IsNil(o.BearerAuthIssuers) { - var ret []string - return ret - } - return o.BearerAuthIssuers -} - -// GetBearerAuthIssuersOk returns a tuple with the BearerAuthIssuers field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Authorizations) GetBearerAuthIssuersOk() ([]string, bool) { - if o == nil || IsNil(o.BearerAuthIssuers) { - return nil, false - } - return o.BearerAuthIssuers, true -} - -// HasBearerAuthIssuers returns a boolean if a field has been set. -func (o *Authorizations) HasBearerAuthIssuers() bool { - if o != nil && !IsNil(o.BearerAuthIssuers) { - return true - } - - return false -} - -// SetBearerAuthIssuers gets a reference to the given []string and assigns it to the BearerAuthIssuers field. 
-func (o *Authorizations) SetBearerAuthIssuers(v []string) { - o.BearerAuthIssuers = v -} - -func (o Authorizations) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o Authorizations) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.DrsObjectId) { - toSerialize["drs_object_id"] = o.DrsObjectId - } - if !IsNil(o.SupportedTypes) { - toSerialize["supported_types"] = o.SupportedTypes - } - if !IsNil(o.PassportAuthIssuers) { - toSerialize["passport_auth_issuers"] = o.PassportAuthIssuers - } - if !IsNil(o.BearerAuthIssuers) { - toSerialize["bearer_auth_issuers"] = o.BearerAuthIssuers - } - return toSerialize, nil -} - -type NullableAuthorizations struct { - value *Authorizations - isSet bool -} - -func (v NullableAuthorizations) Get() *Authorizations { - return v.value -} - -func (v *NullableAuthorizations) Set(val *Authorizations) { - v.value = val - v.isSet = true -} - -func (v NullableAuthorizations) IsSet() bool { - return v.isSet -} - -func (v *NullableAuthorizations) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableAuthorizations(val *Authorizations) *NullableAuthorizations { - return &NullableAuthorizations{value: val, isSet: true} -} - -func (v NullableAuthorizations) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableAuthorizations) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_bulk_access_method_update_request.go b/apigen/drs/model_bulk_access_method_update_request.go deleted file mode 100644 index 972e2ea..0000000 --- a/apigen/drs/model_bulk_access_method_update_request.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: 
ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the BulkAccessMethodUpdateRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BulkAccessMethodUpdateRequest{} - -// BulkAccessMethodUpdateRequest struct for BulkAccessMethodUpdateRequest -type BulkAccessMethodUpdateRequest struct { - // Array of access method updates to perform - Updates []BulkAccessMethodUpdateRequestUpdatesInner `json:"updates"` - // Optional GA4GH Passport JWTs for authorization - Passports []string `json:"passports,omitempty"` -} - -type _BulkAccessMethodUpdateRequest BulkAccessMethodUpdateRequest - -// NewBulkAccessMethodUpdateRequest instantiates a new BulkAccessMethodUpdateRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBulkAccessMethodUpdateRequest(updates []BulkAccessMethodUpdateRequestUpdatesInner) *BulkAccessMethodUpdateRequest { - this := BulkAccessMethodUpdateRequest{} - this.Updates = updates - return &this -} - -// NewBulkAccessMethodUpdateRequestWithDefaults instantiates a new BulkAccessMethodUpdateRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewBulkAccessMethodUpdateRequestWithDefaults() *BulkAccessMethodUpdateRequest { - this := BulkAccessMethodUpdateRequest{} - return &this -} - -// GetUpdates returns the Updates field value -func (o *BulkAccessMethodUpdateRequest) GetUpdates() []BulkAccessMethodUpdateRequestUpdatesInner { - if o == nil { - var ret []BulkAccessMethodUpdateRequestUpdatesInner - return ret - } - - return o.Updates -} - -// GetUpdatesOk returns a tuple 
with the Updates field value -// and a boolean to check if the value has been set. -func (o *BulkAccessMethodUpdateRequest) GetUpdatesOk() ([]BulkAccessMethodUpdateRequestUpdatesInner, bool) { - if o == nil { - return nil, false - } - return o.Updates, true -} - -// SetUpdates sets field value -func (o *BulkAccessMethodUpdateRequest) SetUpdates(v []BulkAccessMethodUpdateRequestUpdatesInner) { - o.Updates = v -} - -// GetPassports returns the Passports field value if set, zero value otherwise. -func (o *BulkAccessMethodUpdateRequest) GetPassports() []string { - if o == nil || IsNil(o.Passports) { - var ret []string - return ret - } - return o.Passports -} - -// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkAccessMethodUpdateRequest) GetPassportsOk() ([]string, bool) { - if o == nil || IsNil(o.Passports) { - return nil, false - } - return o.Passports, true -} - -// HasPassports returns a boolean if a field has been set. -func (o *BulkAccessMethodUpdateRequest) HasPassports() bool { - if o != nil && !IsNil(o.Passports) { - return true - } - - return false -} - -// SetPassports gets a reference to the given []string and assigns it to the Passports field. 
-func (o *BulkAccessMethodUpdateRequest) SetPassports(v []string) { - o.Passports = v -} - -func (o BulkAccessMethodUpdateRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BulkAccessMethodUpdateRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["updates"] = o.Updates - if !IsNil(o.Passports) { - toSerialize["passports"] = o.Passports - } - return toSerialize, nil -} - -func (o *BulkAccessMethodUpdateRequest) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. - requiredProperties := []string{ - "updates", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varBulkAccessMethodUpdateRequest := _BulkAccessMethodUpdateRequest{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varBulkAccessMethodUpdateRequest) - - if err != nil { - return err - } - - *o = BulkAccessMethodUpdateRequest(varBulkAccessMethodUpdateRequest) - - return err -} - -type NullableBulkAccessMethodUpdateRequest struct { - value *BulkAccessMethodUpdateRequest - isSet bool -} - -func (v NullableBulkAccessMethodUpdateRequest) Get() *BulkAccessMethodUpdateRequest { - return v.value -} - -func (v *NullableBulkAccessMethodUpdateRequest) Set(val *BulkAccessMethodUpdateRequest) { - v.value = val - v.isSet = true -} - -func (v NullableBulkAccessMethodUpdateRequest) 
IsSet() bool { - return v.isSet -} - -func (v *NullableBulkAccessMethodUpdateRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBulkAccessMethodUpdateRequest(val *BulkAccessMethodUpdateRequest) *NullableBulkAccessMethodUpdateRequest { - return &NullableBulkAccessMethodUpdateRequest{value: val, isSet: true} -} - -func (v NullableBulkAccessMethodUpdateRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkAccessMethodUpdateRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_bulk_access_method_update_request_updates_inner.go b/apigen/drs/model_bulk_access_method_update_request_updates_inner.go deleted file mode 100644 index 1232659..0000000 --- a/apigen/drs/model_bulk_access_method_update_request_updates_inner.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the BulkAccessMethodUpdateRequestUpdatesInner type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BulkAccessMethodUpdateRequestUpdatesInner{} - -// BulkAccessMethodUpdateRequestUpdatesInner struct for BulkAccessMethodUpdateRequestUpdatesInner -type BulkAccessMethodUpdateRequestUpdatesInner struct { - // DRS object ID to update - ObjectId string `json:"object_id"` - // New access methods for this object - AccessMethods []AccessMethod `json:"access_methods"` -} - -type _BulkAccessMethodUpdateRequestUpdatesInner BulkAccessMethodUpdateRequestUpdatesInner - -// NewBulkAccessMethodUpdateRequestUpdatesInner instantiates a new BulkAccessMethodUpdateRequestUpdatesInner object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBulkAccessMethodUpdateRequestUpdatesInner(objectId string, accessMethods []AccessMethod) *BulkAccessMethodUpdateRequestUpdatesInner { - this := BulkAccessMethodUpdateRequestUpdatesInner{} - this.ObjectId = objectId - this.AccessMethods = accessMethods - return &this -} - -// NewBulkAccessMethodUpdateRequestUpdatesInnerWithDefaults instantiates a new BulkAccessMethodUpdateRequestUpdatesInner object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewBulkAccessMethodUpdateRequestUpdatesInnerWithDefaults() *BulkAccessMethodUpdateRequestUpdatesInner { - this := BulkAccessMethodUpdateRequestUpdatesInner{} - return &this -} - -// GetObjectId returns the ObjectId field value -func (o *BulkAccessMethodUpdateRequestUpdatesInner) GetObjectId() string { - if o == nil { - var ret string - return ret - } - - return o.ObjectId -} - -// GetObjectIdOk returns a 
tuple with the ObjectId field value -// and a boolean to check if the value has been set. -func (o *BulkAccessMethodUpdateRequestUpdatesInner) GetObjectIdOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.ObjectId, true -} - -// SetObjectId sets field value -func (o *BulkAccessMethodUpdateRequestUpdatesInner) SetObjectId(v string) { - o.ObjectId = v -} - -// GetAccessMethods returns the AccessMethods field value -func (o *BulkAccessMethodUpdateRequestUpdatesInner) GetAccessMethods() []AccessMethod { - if o == nil { - var ret []AccessMethod - return ret - } - - return o.AccessMethods -} - -// GetAccessMethodsOk returns a tuple with the AccessMethods field value -// and a boolean to check if the value has been set. -func (o *BulkAccessMethodUpdateRequestUpdatesInner) GetAccessMethodsOk() ([]AccessMethod, bool) { - if o == nil { - return nil, false - } - return o.AccessMethods, true -} - -// SetAccessMethods sets field value -func (o *BulkAccessMethodUpdateRequestUpdatesInner) SetAccessMethods(v []AccessMethod) { - o.AccessMethods = v -} - -func (o BulkAccessMethodUpdateRequestUpdatesInner) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BulkAccessMethodUpdateRequestUpdatesInner) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["object_id"] = o.ObjectId - toSerialize["access_methods"] = o.AccessMethods - return toSerialize, nil -} - -func (o *BulkAccessMethodUpdateRequestUpdatesInner) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "object_id", - "access_methods", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varBulkAccessMethodUpdateRequestUpdatesInner := _BulkAccessMethodUpdateRequestUpdatesInner{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varBulkAccessMethodUpdateRequestUpdatesInner) - - if err != nil { - return err - } - - *o = BulkAccessMethodUpdateRequestUpdatesInner(varBulkAccessMethodUpdateRequestUpdatesInner) - - return err -} - -type NullableBulkAccessMethodUpdateRequestUpdatesInner struct { - value *BulkAccessMethodUpdateRequestUpdatesInner - isSet bool -} - -func (v NullableBulkAccessMethodUpdateRequestUpdatesInner) Get() *BulkAccessMethodUpdateRequestUpdatesInner { - return v.value -} - -func (v *NullableBulkAccessMethodUpdateRequestUpdatesInner) Set(val *BulkAccessMethodUpdateRequestUpdatesInner) { - v.value = val - v.isSet = true -} - -func (v NullableBulkAccessMethodUpdateRequestUpdatesInner) IsSet() bool { - return v.isSet -} - -func (v *NullableBulkAccessMethodUpdateRequestUpdatesInner) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBulkAccessMethodUpdateRequestUpdatesInner(val *BulkAccessMethodUpdateRequestUpdatesInner) *NullableBulkAccessMethodUpdateRequestUpdatesInner { - return &NullableBulkAccessMethodUpdateRequestUpdatesInner{value: val, isSet: true} -} - -func (v NullableBulkAccessMethodUpdateRequestUpdatesInner) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkAccessMethodUpdateRequestUpdatesInner) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git 
a/apigen/drs/model_bulk_access_url.go b/apigen/drs/model_bulk_access_url.go deleted file mode 100644 index 17e6e39..0000000 --- a/apigen/drs/model_bulk_access_url.go +++ /dev/null @@ -1,269 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the BulkAccessURL type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BulkAccessURL{} - -// BulkAccessURL struct for BulkAccessURL -type BulkAccessURL struct { - DrsObjectId *string `json:"drs_object_id,omitempty"` - DrsAccessId *string `json:"drs_access_id,omitempty"` - // A fully resolvable URL that can be used to fetch the actual object bytes. - Url string `json:"url"` - // An optional list of headers to include in the HTTP request to `url`. These headers can be used to provide auth tokens required to fetch the object bytes. 
- Headers []string `json:"headers,omitempty"` -} - -type _BulkAccessURL BulkAccessURL - -// NewBulkAccessURL instantiates a new BulkAccessURL object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBulkAccessURL(url string) *BulkAccessURL { - this := BulkAccessURL{} - this.Url = url - return &this -} - -// NewBulkAccessURLWithDefaults instantiates a new BulkAccessURL object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewBulkAccessURLWithDefaults() *BulkAccessURL { - this := BulkAccessURL{} - return &this -} - -// GetDrsObjectId returns the DrsObjectId field value if set, zero value otherwise. -func (o *BulkAccessURL) GetDrsObjectId() string { - if o == nil || IsNil(o.DrsObjectId) { - var ret string - return ret - } - return *o.DrsObjectId -} - -// GetDrsObjectIdOk returns a tuple with the DrsObjectId field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkAccessURL) GetDrsObjectIdOk() (*string, bool) { - if o == nil || IsNil(o.DrsObjectId) { - return nil, false - } - return o.DrsObjectId, true -} - -// HasDrsObjectId returns a boolean if a field has been set. -func (o *BulkAccessURL) HasDrsObjectId() bool { - if o != nil && !IsNil(o.DrsObjectId) { - return true - } - - return false -} - -// SetDrsObjectId gets a reference to the given string and assigns it to the DrsObjectId field. -func (o *BulkAccessURL) SetDrsObjectId(v string) { - o.DrsObjectId = &v -} - -// GetDrsAccessId returns the DrsAccessId field value if set, zero value otherwise. 
-func (o *BulkAccessURL) GetDrsAccessId() string { - if o == nil || IsNil(o.DrsAccessId) { - var ret string - return ret - } - return *o.DrsAccessId -} - -// GetDrsAccessIdOk returns a tuple with the DrsAccessId field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkAccessURL) GetDrsAccessIdOk() (*string, bool) { - if o == nil || IsNil(o.DrsAccessId) { - return nil, false - } - return o.DrsAccessId, true -} - -// HasDrsAccessId returns a boolean if a field has been set. -func (o *BulkAccessURL) HasDrsAccessId() bool { - if o != nil && !IsNil(o.DrsAccessId) { - return true - } - - return false -} - -// SetDrsAccessId gets a reference to the given string and assigns it to the DrsAccessId field. -func (o *BulkAccessURL) SetDrsAccessId(v string) { - o.DrsAccessId = &v -} - -// GetUrl returns the Url field value -func (o *BulkAccessURL) GetUrl() string { - if o == nil { - var ret string - return ret - } - - return o.Url -} - -// GetUrlOk returns a tuple with the Url field value -// and a boolean to check if the value has been set. -func (o *BulkAccessURL) GetUrlOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Url, true -} - -// SetUrl sets field value -func (o *BulkAccessURL) SetUrl(v string) { - o.Url = v -} - -// GetHeaders returns the Headers field value if set, zero value otherwise. -func (o *BulkAccessURL) GetHeaders() []string { - if o == nil || IsNil(o.Headers) { - var ret []string - return ret - } - return o.Headers -} - -// GetHeadersOk returns a tuple with the Headers field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkAccessURL) GetHeadersOk() ([]string, bool) { - if o == nil || IsNil(o.Headers) { - return nil, false - } - return o.Headers, true -} - -// HasHeaders returns a boolean if a field has been set. 
-func (o *BulkAccessURL) HasHeaders() bool { - if o != nil && !IsNil(o.Headers) { - return true - } - - return false -} - -// SetHeaders gets a reference to the given []string and assigns it to the Headers field. -func (o *BulkAccessURL) SetHeaders(v []string) { - o.Headers = v -} - -func (o BulkAccessURL) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BulkAccessURL) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.DrsObjectId) { - toSerialize["drs_object_id"] = o.DrsObjectId - } - if !IsNil(o.DrsAccessId) { - toSerialize["drs_access_id"] = o.DrsAccessId - } - toSerialize["url"] = o.Url - if !IsNil(o.Headers) { - toSerialize["headers"] = o.Headers - } - return toSerialize, nil -} - -func (o *BulkAccessURL) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "url", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varBulkAccessURL := _BulkAccessURL{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varBulkAccessURL) - - if err != nil { - return err - } - - *o = BulkAccessURL(varBulkAccessURL) - - return err -} - -type NullableBulkAccessURL struct { - value *BulkAccessURL - isSet bool -} - -func (v NullableBulkAccessURL) Get() *BulkAccessURL { - return v.value -} - -func (v *NullableBulkAccessURL) Set(val *BulkAccessURL) { - v.value = val - v.isSet = true -} - -func (v NullableBulkAccessURL) IsSet() bool { - return v.isSet -} - -func (v *NullableBulkAccessURL) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBulkAccessURL(val *BulkAccessURL) *NullableBulkAccessURL { - return &NullableBulkAccessURL{value: val, isSet: true} -} - -func (v NullableBulkAccessURL) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkAccessURL) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_bulk_delete_request.go b/apigen/drs/model_bulk_delete_request.go deleted file mode 100644 index 7937f5e..0000000 --- a/apigen/drs/model_bulk_delete_request.go +++ /dev/null @@ -1,238 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the BulkDeleteRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BulkDeleteRequest{} - -// BulkDeleteRequest Request body for bulk delete operations -type BulkDeleteRequest struct { - // Array of DRS object IDs to delete - BulkObjectIds []string `json:"bulk_object_ids"` - // the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. - Passports []string `json:"passports,omitempty"` - // If true, delete both DRS object metadata and underlying storage data (follows server's deleteStorageDataSupported capability). If false (default), only delete DRS object metadata while preserving underlying storage data. Clients must explicitly set this to true to enable storage data deletion, ensuring intentional choice for this potentially destructive operation. - DeleteStorageData *bool `json:"delete_storage_data,omitempty"` -} - -type _BulkDeleteRequest BulkDeleteRequest - -// NewBulkDeleteRequest instantiates a new BulkDeleteRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBulkDeleteRequest(bulkObjectIds []string) *BulkDeleteRequest { - this := BulkDeleteRequest{} - this.BulkObjectIds = bulkObjectIds - var deleteStorageData bool = false - this.DeleteStorageData = &deleteStorageData - return &this -} - -// NewBulkDeleteRequestWithDefaults instantiates a new BulkDeleteRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewBulkDeleteRequestWithDefaults() *BulkDeleteRequest { - this := BulkDeleteRequest{} - var deleteStorageData bool = false - this.DeleteStorageData = 
&deleteStorageData - return &this -} - -// GetBulkObjectIds returns the BulkObjectIds field value -func (o *BulkDeleteRequest) GetBulkObjectIds() []string { - if o == nil { - var ret []string - return ret - } - - return o.BulkObjectIds -} - -// GetBulkObjectIdsOk returns a tuple with the BulkObjectIds field value -// and a boolean to check if the value has been set. -func (o *BulkDeleteRequest) GetBulkObjectIdsOk() ([]string, bool) { - if o == nil { - return nil, false - } - return o.BulkObjectIds, true -} - -// SetBulkObjectIds sets field value -func (o *BulkDeleteRequest) SetBulkObjectIds(v []string) { - o.BulkObjectIds = v -} - -// GetPassports returns the Passports field value if set, zero value otherwise. -func (o *BulkDeleteRequest) GetPassports() []string { - if o == nil || IsNil(o.Passports) { - var ret []string - return ret - } - return o.Passports -} - -// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkDeleteRequest) GetPassportsOk() ([]string, bool) { - if o == nil || IsNil(o.Passports) { - return nil, false - } - return o.Passports, true -} - -// HasPassports returns a boolean if a field has been set. -func (o *BulkDeleteRequest) HasPassports() bool { - if o != nil && !IsNil(o.Passports) { - return true - } - - return false -} - -// SetPassports gets a reference to the given []string and assigns it to the Passports field. -func (o *BulkDeleteRequest) SetPassports(v []string) { - o.Passports = v -} - -// GetDeleteStorageData returns the DeleteStorageData field value if set, zero value otherwise. -func (o *BulkDeleteRequest) GetDeleteStorageData() bool { - if o == nil || IsNil(o.DeleteStorageData) { - var ret bool - return ret - } - return *o.DeleteStorageData -} - -// GetDeleteStorageDataOk returns a tuple with the DeleteStorageData field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *BulkDeleteRequest) GetDeleteStorageDataOk() (*bool, bool) { - if o == nil || IsNil(o.DeleteStorageData) { - return nil, false - } - return o.DeleteStorageData, true -} - -// HasDeleteStorageData returns a boolean if a field has been set. -func (o *BulkDeleteRequest) HasDeleteStorageData() bool { - if o != nil && !IsNil(o.DeleteStorageData) { - return true - } - - return false -} - -// SetDeleteStorageData gets a reference to the given bool and assigns it to the DeleteStorageData field. -func (o *BulkDeleteRequest) SetDeleteStorageData(v bool) { - o.DeleteStorageData = &v -} - -func (o BulkDeleteRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BulkDeleteRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["bulk_object_ids"] = o.BulkObjectIds - if !IsNil(o.Passports) { - toSerialize["passports"] = o.Passports - } - if !IsNil(o.DeleteStorageData) { - toSerialize["delete_storage_data"] = o.DeleteStorageData - } - return toSerialize, nil -} - -func (o *BulkDeleteRequest) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "bulk_object_ids", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varBulkDeleteRequest := _BulkDeleteRequest{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varBulkDeleteRequest) - - if err != nil { - return err - } - - *o = BulkDeleteRequest(varBulkDeleteRequest) - - return err -} - -type NullableBulkDeleteRequest struct { - value *BulkDeleteRequest - isSet bool -} - -func (v NullableBulkDeleteRequest) Get() *BulkDeleteRequest { - return v.value -} - -func (v *NullableBulkDeleteRequest) Set(val *BulkDeleteRequest) { - v.value = val - v.isSet = true -} - -func (v NullableBulkDeleteRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableBulkDeleteRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBulkDeleteRequest(val *BulkDeleteRequest) *NullableBulkDeleteRequest { - return &NullableBulkDeleteRequest{value: val, isSet: true} -} - -func (v NullableBulkDeleteRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkDeleteRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_bulk_object_access_id.go b/apigen/drs/model_bulk_object_access_id.go deleted file mode 100644 index aa0408f..0000000 --- a/apigen/drs/model_bulk_object_access_id.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator 
(https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" -) - -// checks if the BulkObjectAccessId type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BulkObjectAccessId{} - -// BulkObjectAccessId The object that contains object_id/access_id tuples -type BulkObjectAccessId struct { - Passports []string `json:"passports,omitempty"` - BulkObjectAccessIds []BulkObjectAccessIdBulkObjectAccessIdsInner `json:"bulk_object_access_ids,omitempty"` -} - -// NewBulkObjectAccessId instantiates a new BulkObjectAccessId object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBulkObjectAccessId() *BulkObjectAccessId { - this := BulkObjectAccessId{} - return &this -} - -// NewBulkObjectAccessIdWithDefaults instantiates a new BulkObjectAccessId object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewBulkObjectAccessIdWithDefaults() *BulkObjectAccessId { - this := BulkObjectAccessId{} - return &this -} - -// GetPassports returns the Passports field value if set, zero value otherwise. -func (o *BulkObjectAccessId) GetPassports() []string { - if o == nil || IsNil(o.Passports) { - var ret []string - return ret - } - return o.Passports -} - -// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkObjectAccessId) GetPassportsOk() ([]string, bool) { - if o == nil || IsNil(o.Passports) { - return nil, false - } - return o.Passports, true -} - -// HasPassports returns a boolean if a field has been set. 
-func (o *BulkObjectAccessId) HasPassports() bool { - if o != nil && !IsNil(o.Passports) { - return true - } - - return false -} - -// SetPassports gets a reference to the given []string and assigns it to the Passports field. -func (o *BulkObjectAccessId) SetPassports(v []string) { - o.Passports = v -} - -// GetBulkObjectAccessIds returns the BulkObjectAccessIds field value if set, zero value otherwise. -func (o *BulkObjectAccessId) GetBulkObjectAccessIds() []BulkObjectAccessIdBulkObjectAccessIdsInner { - if o == nil || IsNil(o.BulkObjectAccessIds) { - var ret []BulkObjectAccessIdBulkObjectAccessIdsInner - return ret - } - return o.BulkObjectAccessIds -} - -// GetBulkObjectAccessIdsOk returns a tuple with the BulkObjectAccessIds field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkObjectAccessId) GetBulkObjectAccessIdsOk() ([]BulkObjectAccessIdBulkObjectAccessIdsInner, bool) { - if o == nil || IsNil(o.BulkObjectAccessIds) { - return nil, false - } - return o.BulkObjectAccessIds, true -} - -// HasBulkObjectAccessIds returns a boolean if a field has been set. -func (o *BulkObjectAccessId) HasBulkObjectAccessIds() bool { - if o != nil && !IsNil(o.BulkObjectAccessIds) { - return true - } - - return false -} - -// SetBulkObjectAccessIds gets a reference to the given []BulkObjectAccessIdBulkObjectAccessIdsInner and assigns it to the BulkObjectAccessIds field. 
-func (o *BulkObjectAccessId) SetBulkObjectAccessIds(v []BulkObjectAccessIdBulkObjectAccessIdsInner) { - o.BulkObjectAccessIds = v -} - -func (o BulkObjectAccessId) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BulkObjectAccessId) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Passports) { - toSerialize["passports"] = o.Passports - } - if !IsNil(o.BulkObjectAccessIds) { - toSerialize["bulk_object_access_ids"] = o.BulkObjectAccessIds - } - return toSerialize, nil -} - -type NullableBulkObjectAccessId struct { - value *BulkObjectAccessId - isSet bool -} - -func (v NullableBulkObjectAccessId) Get() *BulkObjectAccessId { - return v.value -} - -func (v *NullableBulkObjectAccessId) Set(val *BulkObjectAccessId) { - v.value = val - v.isSet = true -} - -func (v NullableBulkObjectAccessId) IsSet() bool { - return v.isSet -} - -func (v *NullableBulkObjectAccessId) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBulkObjectAccessId(val *BulkObjectAccessId) *NullableBulkObjectAccessId { - return &NullableBulkObjectAccessId{value: val, isSet: true} -} - -func (v NullableBulkObjectAccessId) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkObjectAccessId) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_bulk_object_access_id_bulk_object_access_ids_inner.go b/apigen/drs/model_bulk_object_access_id_bulk_object_access_ids_inner.go deleted file mode 100644 index 32fe2b0..0000000 --- a/apigen/drs/model_bulk_object_access_id_bulk_object_access_ids_inner.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code 
generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" -) - -// checks if the BulkObjectAccessIdBulkObjectAccessIdsInner type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BulkObjectAccessIdBulkObjectAccessIdsInner{} - -// BulkObjectAccessIdBulkObjectAccessIdsInner struct for BulkObjectAccessIdBulkObjectAccessIdsInner -type BulkObjectAccessIdBulkObjectAccessIdsInner struct { - // DRS object ID - BulkObjectId *string `json:"bulk_object_id,omitempty"` - // DRS object access ID - BulkAccessIds []string `json:"bulk_access_ids,omitempty"` -} - -// NewBulkObjectAccessIdBulkObjectAccessIdsInner instantiates a new BulkObjectAccessIdBulkObjectAccessIdsInner object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBulkObjectAccessIdBulkObjectAccessIdsInner() *BulkObjectAccessIdBulkObjectAccessIdsInner { - this := BulkObjectAccessIdBulkObjectAccessIdsInner{} - return &this -} - -// NewBulkObjectAccessIdBulkObjectAccessIdsInnerWithDefaults instantiates a new BulkObjectAccessIdBulkObjectAccessIdsInner object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewBulkObjectAccessIdBulkObjectAccessIdsInnerWithDefaults() *BulkObjectAccessIdBulkObjectAccessIdsInner { - this := BulkObjectAccessIdBulkObjectAccessIdsInner{} - return &this -} - -// GetBulkObjectId returns the BulkObjectId field value if set, zero value otherwise. 
-func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) GetBulkObjectId() string { - if o == nil || IsNil(o.BulkObjectId) { - var ret string - return ret - } - return *o.BulkObjectId -} - -// GetBulkObjectIdOk returns a tuple with the BulkObjectId field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) GetBulkObjectIdOk() (*string, bool) { - if o == nil || IsNil(o.BulkObjectId) { - return nil, false - } - return o.BulkObjectId, true -} - -// HasBulkObjectId returns a boolean if a field has been set. -func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) HasBulkObjectId() bool { - if o != nil && !IsNil(o.BulkObjectId) { - return true - } - - return false -} - -// SetBulkObjectId gets a reference to the given string and assigns it to the BulkObjectId field. -func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) SetBulkObjectId(v string) { - o.BulkObjectId = &v -} - -// GetBulkAccessIds returns the BulkAccessIds field value if set, zero value otherwise. -func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) GetBulkAccessIds() []string { - if o == nil || IsNil(o.BulkAccessIds) { - var ret []string - return ret - } - return o.BulkAccessIds -} - -// GetBulkAccessIdsOk returns a tuple with the BulkAccessIds field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) GetBulkAccessIdsOk() ([]string, bool) { - if o == nil || IsNil(o.BulkAccessIds) { - return nil, false - } - return o.BulkAccessIds, true -} - -// HasBulkAccessIds returns a boolean if a field has been set. -func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) HasBulkAccessIds() bool { - if o != nil && !IsNil(o.BulkAccessIds) { - return true - } - - return false -} - -// SetBulkAccessIds gets a reference to the given []string and assigns it to the BulkAccessIds field. 
-func (o *BulkObjectAccessIdBulkObjectAccessIdsInner) SetBulkAccessIds(v []string) { - o.BulkAccessIds = v -} - -func (o BulkObjectAccessIdBulkObjectAccessIdsInner) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BulkObjectAccessIdBulkObjectAccessIdsInner) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.BulkObjectId) { - toSerialize["bulk_object_id"] = o.BulkObjectId - } - if !IsNil(o.BulkAccessIds) { - toSerialize["bulk_access_ids"] = o.BulkAccessIds - } - return toSerialize, nil -} - -type NullableBulkObjectAccessIdBulkObjectAccessIdsInner struct { - value *BulkObjectAccessIdBulkObjectAccessIdsInner - isSet bool -} - -func (v NullableBulkObjectAccessIdBulkObjectAccessIdsInner) Get() *BulkObjectAccessIdBulkObjectAccessIdsInner { - return v.value -} - -func (v *NullableBulkObjectAccessIdBulkObjectAccessIdsInner) Set(val *BulkObjectAccessIdBulkObjectAccessIdsInner) { - v.value = val - v.isSet = true -} - -func (v NullableBulkObjectAccessIdBulkObjectAccessIdsInner) IsSet() bool { - return v.isSet -} - -func (v *NullableBulkObjectAccessIdBulkObjectAccessIdsInner) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBulkObjectAccessIdBulkObjectAccessIdsInner(val *BulkObjectAccessIdBulkObjectAccessIdsInner) *NullableBulkObjectAccessIdBulkObjectAccessIdsInner { - return &NullableBulkObjectAccessIdBulkObjectAccessIdsInner{value: val, isSet: true} -} - -func (v NullableBulkObjectAccessIdBulkObjectAccessIdsInner) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkObjectAccessIdBulkObjectAccessIdsInner) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_bulk_object_id_no_passport.go b/apigen/drs/model_bulk_object_id_no_passport.go deleted file mode 100644 index f050e6c..0000000 --- 
a/apigen/drs/model_bulk_object_id_no_passport.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" -) - -// checks if the BulkObjectIdNoPassport type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BulkObjectIdNoPassport{} - -// BulkObjectIdNoPassport The object that contains the DRS object IDs array -type BulkObjectIdNoPassport struct { - // An array of ObjectIDs. - BulkObjectIds []string `json:"bulk_object_ids,omitempty"` -} - -// NewBulkObjectIdNoPassport instantiates a new BulkObjectIdNoPassport object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBulkObjectIdNoPassport() *BulkObjectIdNoPassport { - this := BulkObjectIdNoPassport{} - return &this -} - -// NewBulkObjectIdNoPassportWithDefaults instantiates a new BulkObjectIdNoPassport object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewBulkObjectIdNoPassportWithDefaults() *BulkObjectIdNoPassport { - this := BulkObjectIdNoPassport{} - return &this -} - -// GetBulkObjectIds returns the BulkObjectIds field value if set, zero value otherwise. -func (o *BulkObjectIdNoPassport) GetBulkObjectIds() []string { - if o == nil || IsNil(o.BulkObjectIds) { - var ret []string - return ret - } - return o.BulkObjectIds -} - -// GetBulkObjectIdsOk returns a tuple with the BulkObjectIds field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *BulkObjectIdNoPassport) GetBulkObjectIdsOk() ([]string, bool) { - if o == nil || IsNil(o.BulkObjectIds) { - return nil, false - } - return o.BulkObjectIds, true -} - -// HasBulkObjectIds returns a boolean if a field has been set. -func (o *BulkObjectIdNoPassport) HasBulkObjectIds() bool { - if o != nil && !IsNil(o.BulkObjectIds) { - return true - } - - return false -} - -// SetBulkObjectIds gets a reference to the given []string and assigns it to the BulkObjectIds field. -func (o *BulkObjectIdNoPassport) SetBulkObjectIds(v []string) { - o.BulkObjectIds = v -} - -func (o BulkObjectIdNoPassport) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BulkObjectIdNoPassport) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.BulkObjectIds) { - toSerialize["bulk_object_ids"] = o.BulkObjectIds - } - return toSerialize, nil -} - -type NullableBulkObjectIdNoPassport struct { - value *BulkObjectIdNoPassport - isSet bool -} - -func (v NullableBulkObjectIdNoPassport) Get() *BulkObjectIdNoPassport { - return v.value -} - -func (v *NullableBulkObjectIdNoPassport) Set(val *BulkObjectIdNoPassport) { - v.value = val - v.isSet = true -} - -func (v NullableBulkObjectIdNoPassport) IsSet() bool { - return v.isSet -} - -func (v *NullableBulkObjectIdNoPassport) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBulkObjectIdNoPassport(val *BulkObjectIdNoPassport) *NullableBulkObjectIdNoPassport { - return &NullableBulkObjectIdNoPassport{value: val, isSet: true} -} - -func (v NullableBulkObjectIdNoPassport) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkObjectIdNoPassport) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_bulk_update_access_methods_200_response.go 
b/apigen/drs/model_bulk_update_access_methods_200_response.go deleted file mode 100644 index 1075bc6..0000000 --- a/apigen/drs/model_bulk_update_access_methods_200_response.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the BulkUpdateAccessMethods200Response type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BulkUpdateAccessMethods200Response{} - -// BulkUpdateAccessMethods200Response struct for BulkUpdateAccessMethods200Response -type BulkUpdateAccessMethods200Response struct { - // Array of updated DRS objects - Objects []DrsObject `json:"objects"` -} - -type _BulkUpdateAccessMethods200Response BulkUpdateAccessMethods200Response - -// NewBulkUpdateAccessMethods200Response instantiates a new BulkUpdateAccessMethods200Response object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBulkUpdateAccessMethods200Response(objects []DrsObject) *BulkUpdateAccessMethods200Response { - this := BulkUpdateAccessMethods200Response{} - this.Objects = objects - return &this -} - -// NewBulkUpdateAccessMethods200ResponseWithDefaults instantiates a new BulkUpdateAccessMethods200Response object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewBulkUpdateAccessMethods200ResponseWithDefaults() *BulkUpdateAccessMethods200Response { - this := BulkUpdateAccessMethods200Response{} - return &this -} - -// 
GetObjects returns the Objects field value -func (o *BulkUpdateAccessMethods200Response) GetObjects() []DrsObject { - if o == nil { - var ret []DrsObject - return ret - } - - return o.Objects -} - -// GetObjectsOk returns a tuple with the Objects field value -// and a boolean to check if the value has been set. -func (o *BulkUpdateAccessMethods200Response) GetObjectsOk() ([]DrsObject, bool) { - if o == nil { - return nil, false - } - return o.Objects, true -} - -// SetObjects sets field value -func (o *BulkUpdateAccessMethods200Response) SetObjects(v []DrsObject) { - o.Objects = v -} - -func (o BulkUpdateAccessMethods200Response) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BulkUpdateAccessMethods200Response) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["objects"] = o.Objects - return toSerialize, nil -} - -func (o *BulkUpdateAccessMethods200Response) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "objects", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varBulkUpdateAccessMethods200Response := _BulkUpdateAccessMethods200Response{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varBulkUpdateAccessMethods200Response) - - if err != nil { - return err - } - - *o = BulkUpdateAccessMethods200Response(varBulkUpdateAccessMethods200Response) - - return err -} - -type NullableBulkUpdateAccessMethods200Response struct { - value *BulkUpdateAccessMethods200Response - isSet bool -} - -func (v NullableBulkUpdateAccessMethods200Response) Get() *BulkUpdateAccessMethods200Response { - return v.value -} - -func (v *NullableBulkUpdateAccessMethods200Response) Set(val *BulkUpdateAccessMethods200Response) { - v.value = val - v.isSet = true -} - -func (v NullableBulkUpdateAccessMethods200Response) IsSet() bool { - return v.isSet -} - -func (v *NullableBulkUpdateAccessMethods200Response) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBulkUpdateAccessMethods200Response(val *BulkUpdateAccessMethods200Response) *NullableBulkUpdateAccessMethods200Response { - return &NullableBulkUpdateAccessMethods200Response{value: val, isSet: true} -} - -func (v NullableBulkUpdateAccessMethods200Response) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkUpdateAccessMethods200Response) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_checksum.go b/apigen/drs/model_checksum.go deleted file mode 100644 index 629d14b..0000000 --- a/apigen/drs/model_checksum.go +++ 
/dev/null @@ -1,189 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the Checksum type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &Checksum{} - -// Checksum struct for Checksum -type Checksum struct { - // The hex-string encoded checksum for the data - Checksum string `json:"checksum"` - // The digest method used to create the checksum. The value (e.g. `sha-256`) SHOULD be listed as `Hash Name String` in the https://www.iana.org/assignments/named-information/named-information.xhtml#hash-alg[IANA Named Information Hash Algorithm Registry]. Other values MAY be used, as long as implementors are aware of the issues discussed in https://tools.ietf.org/html/rfc6920#section-9.4[RFC6920]. GA4GH may provide more explicit guidance for use of non-IANA-registered algorithms in the future. Until then, if implementers do choose such an algorithm (e.g. because it's implemented by their storage provider), they SHOULD use an existing standard `type` value such as `md5`, `etag`, `crc32c`, `trunc512`, or `sha1`. 
- Type string `json:"type"` -} - -type _Checksum Checksum - -// NewChecksum instantiates a new Checksum object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewChecksum(checksum string, type_ string) *Checksum { - this := Checksum{} - this.Checksum = checksum - this.Type = type_ - return &this -} - -// NewChecksumWithDefaults instantiates a new Checksum object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewChecksumWithDefaults() *Checksum { - this := Checksum{} - return &this -} - -// GetChecksum returns the Checksum field value -func (o *Checksum) GetChecksum() string { - if o == nil { - var ret string - return ret - } - - return o.Checksum -} - -// GetChecksumOk returns a tuple with the Checksum field value -// and a boolean to check if the value has been set. -func (o *Checksum) GetChecksumOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Checksum, true -} - -// SetChecksum sets field value -func (o *Checksum) SetChecksum(v string) { - o.Checksum = v -} - -// GetType returns the Type field value -func (o *Checksum) GetType() string { - if o == nil { - var ret string - return ret - } - - return o.Type -} - -// GetTypeOk returns a tuple with the Type field value -// and a boolean to check if the value has been set. 
-func (o *Checksum) GetTypeOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Type, true -} - -// SetType sets field value -func (o *Checksum) SetType(v string) { - o.Type = v -} - -func (o Checksum) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o Checksum) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["checksum"] = o.Checksum - toSerialize["type"] = o.Type - return toSerialize, nil -} - -func (o *Checksum) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. - requiredProperties := []string{ - "checksum", - "type", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varChecksum := _Checksum{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varChecksum) - - if err != nil { - return err - } - - *o = Checksum(varChecksum) - - return err -} - -type NullableChecksum struct { - value *Checksum - isSet bool -} - -func (v NullableChecksum) Get() *Checksum { - return v.value -} - -func (v *NullableChecksum) Set(val *Checksum) { - v.value = val - v.isSet = true -} - -func (v NullableChecksum) IsSet() bool { - return v.isSet -} - -func (v *NullableChecksum) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableChecksum(val *Checksum) *NullableChecksum { - return &NullableChecksum{value: val, isSet: true} -} - 
-func (v NullableChecksum) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableChecksum) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_contents_object.go b/apigen/drs/model_contents_object.go deleted file mode 100644 index 8059a1e..0000000 --- a/apigen/drs/model_contents_object.go +++ /dev/null @@ -1,271 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the ContentsObject type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &ContentsObject{} - -// ContentsObject struct for ContentsObject -type ContentsObject struct { - // A name declared by the bundle author that must be used when materialising this object, overriding any name directly associated with the object itself. The name must be unique within the containing bundle. This string is made up of uppercase and lowercase letters, decimal digits, hyphen, period, and underscore [A-Za-z0-9.-_]. See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282[portable filenames]. - Name string `json:"name"` - // A DRS identifier of a `DrsObject` (either a single blob or a nested bundle). If this ContentsObject is an object within a nested bundle, then the id is optional. Otherwise, the id is required. - Id *string `json:"id,omitempty"` - // A list of full DRS identifier URI paths that may be used to obtain the object. These URIs may be external to this DRS instance. 
- DrsUri []string `json:"drs_uri,omitempty"` - // If this ContentsObject describes a nested bundle and the caller specified \"?expand=true\" on the request, then this contents array must be present and describe the objects within the nested bundle. - Contents []ContentsObject `json:"contents,omitempty"` -} - -type _ContentsObject ContentsObject - -// NewContentsObject instantiates a new ContentsObject object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewContentsObject(name string) *ContentsObject { - this := ContentsObject{} - this.Name = name - return &this -} - -// NewContentsObjectWithDefaults instantiates a new ContentsObject object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewContentsObjectWithDefaults() *ContentsObject { - this := ContentsObject{} - return &this -} - -// GetName returns the Name field value -func (o *ContentsObject) GetName() string { - if o == nil { - var ret string - return ret - } - - return o.Name -} - -// GetNameOk returns a tuple with the Name field value -// and a boolean to check if the value has been set. -func (o *ContentsObject) GetNameOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Name, true -} - -// SetName sets field value -func (o *ContentsObject) SetName(v string) { - o.Name = v -} - -// GetId returns the Id field value if set, zero value otherwise. -func (o *ContentsObject) GetId() string { - if o == nil || IsNil(o.Id) { - var ret string - return ret - } - return *o.Id -} - -// GetIdOk returns a tuple with the Id field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *ContentsObject) GetIdOk() (*string, bool) { - if o == nil || IsNil(o.Id) { - return nil, false - } - return o.Id, true -} - -// HasId returns a boolean if a field has been set. -func (o *ContentsObject) HasId() bool { - if o != nil && !IsNil(o.Id) { - return true - } - - return false -} - -// SetId gets a reference to the given string and assigns it to the Id field. -func (o *ContentsObject) SetId(v string) { - o.Id = &v -} - -// GetDrsUri returns the DrsUri field value if set, zero value otherwise. -func (o *ContentsObject) GetDrsUri() []string { - if o == nil || IsNil(o.DrsUri) { - var ret []string - return ret - } - return o.DrsUri -} - -// GetDrsUriOk returns a tuple with the DrsUri field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *ContentsObject) GetDrsUriOk() ([]string, bool) { - if o == nil || IsNil(o.DrsUri) { - return nil, false - } - return o.DrsUri, true -} - -// HasDrsUri returns a boolean if a field has been set. -func (o *ContentsObject) HasDrsUri() bool { - if o != nil && !IsNil(o.DrsUri) { - return true - } - - return false -} - -// SetDrsUri gets a reference to the given []string and assigns it to the DrsUri field. -func (o *ContentsObject) SetDrsUri(v []string) { - o.DrsUri = v -} - -// GetContents returns the Contents field value if set, zero value otherwise. -func (o *ContentsObject) GetContents() []ContentsObject { - if o == nil || IsNil(o.Contents) { - var ret []ContentsObject - return ret - } - return o.Contents -} - -// GetContentsOk returns a tuple with the Contents field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *ContentsObject) GetContentsOk() ([]ContentsObject, bool) { - if o == nil || IsNil(o.Contents) { - return nil, false - } - return o.Contents, true -} - -// HasContents returns a boolean if a field has been set. 
-func (o *ContentsObject) HasContents() bool { - if o != nil && !IsNil(o.Contents) { - return true - } - - return false -} - -// SetContents gets a reference to the given []ContentsObject and assigns it to the Contents field. -func (o *ContentsObject) SetContents(v []ContentsObject) { - o.Contents = v -} - -func (o ContentsObject) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o ContentsObject) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["name"] = o.Name - if !IsNil(o.Id) { - toSerialize["id"] = o.Id - } - if !IsNil(o.DrsUri) { - toSerialize["drs_uri"] = o.DrsUri - } - if !IsNil(o.Contents) { - toSerialize["contents"] = o.Contents - } - return toSerialize, nil -} - -func (o *ContentsObject) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "name", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varContentsObject := _ContentsObject{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varContentsObject) - - if err != nil { - return err - } - - *o = ContentsObject(varContentsObject) - - return err -} - -type NullableContentsObject struct { - value *ContentsObject - isSet bool -} - -func (v NullableContentsObject) Get() *ContentsObject { - return v.value -} - -func (v *NullableContentsObject) Set(val *ContentsObject) { - v.value = val - v.isSet = true -} - -func (v NullableContentsObject) IsSet() bool { - return v.isSet -} - -func (v *NullableContentsObject) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableContentsObject(val *ContentsObject) *NullableContentsObject { - return &NullableContentsObject{value: val, isSet: true} -} - -func (v NullableContentsObject) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableContentsObject) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_delete_request.go b/apigen/drs/model_delete_request.go deleted file mode 100644 index 42cd290..0000000 --- a/apigen/drs/model_delete_request.go +++ /dev/null @@ -1,169 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" -) - -// checks if the DeleteRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &DeleteRequest{} - -// DeleteRequest Request body for single object delete operations -type DeleteRequest struct { - // the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. - Passports []string `json:"passports,omitempty"` - // If true, delete both DRS object metadata and underlying storage data (follows server's deleteStorageDataSupported capability). If false (default), only delete DRS object metadata while preserving underlying storage data. Clients must explicitly set this to true to enable storage data deletion, ensuring intentional choice for this potentially destructive operation. - DeleteStorageData *bool `json:"delete_storage_data,omitempty"` -} - -// NewDeleteRequest instantiates a new DeleteRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewDeleteRequest() *DeleteRequest { - this := DeleteRequest{} - var deleteStorageData bool = false - this.DeleteStorageData = &deleteStorageData - return &this -} - -// NewDeleteRequestWithDefaults instantiates a new DeleteRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewDeleteRequestWithDefaults() *DeleteRequest { - this := DeleteRequest{} - var deleteStorageData bool = false - this.DeleteStorageData = &deleteStorageData - return &this -} - -// GetPassports returns the Passports field value if set, zero value otherwise. 
-func (o *DeleteRequest) GetPassports() []string { - if o == nil || IsNil(o.Passports) { - var ret []string - return ret - } - return o.Passports -} - -// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DeleteRequest) GetPassportsOk() ([]string, bool) { - if o == nil || IsNil(o.Passports) { - return nil, false - } - return o.Passports, true -} - -// HasPassports returns a boolean if a field has been set. -func (o *DeleteRequest) HasPassports() bool { - if o != nil && !IsNil(o.Passports) { - return true - } - - return false -} - -// SetPassports gets a reference to the given []string and assigns it to the Passports field. -func (o *DeleteRequest) SetPassports(v []string) { - o.Passports = v -} - -// GetDeleteStorageData returns the DeleteStorageData field value if set, zero value otherwise. -func (o *DeleteRequest) GetDeleteStorageData() bool { - if o == nil || IsNil(o.DeleteStorageData) { - var ret bool - return ret - } - return *o.DeleteStorageData -} - -// GetDeleteStorageDataOk returns a tuple with the DeleteStorageData field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DeleteRequest) GetDeleteStorageDataOk() (*bool, bool) { - if o == nil || IsNil(o.DeleteStorageData) { - return nil, false - } - return o.DeleteStorageData, true -} - -// HasDeleteStorageData returns a boolean if a field has been set. -func (o *DeleteRequest) HasDeleteStorageData() bool { - if o != nil && !IsNil(o.DeleteStorageData) { - return true - } - - return false -} - -// SetDeleteStorageData gets a reference to the given bool and assigns it to the DeleteStorageData field. 
-func (o *DeleteRequest) SetDeleteStorageData(v bool) { - o.DeleteStorageData = &v -} - -func (o DeleteRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o DeleteRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Passports) { - toSerialize["passports"] = o.Passports - } - if !IsNil(o.DeleteStorageData) { - toSerialize["delete_storage_data"] = o.DeleteStorageData - } - return toSerialize, nil -} - -type NullableDeleteRequest struct { - value *DeleteRequest - isSet bool -} - -func (v NullableDeleteRequest) Get() *DeleteRequest { - return v.value -} - -func (v *NullableDeleteRequest) Set(val *DeleteRequest) { - v.value = val - v.isSet = true -} - -func (v NullableDeleteRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableDeleteRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableDeleteRequest(val *DeleteRequest) *NullableDeleteRequest { - return &NullableDeleteRequest{value: val, isSet: true} -} - -func (v NullableDeleteRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableDeleteRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_drs_object.go b/apigen/drs/model_drs_object.go deleted file mode 100644 index 14a06f4..0000000 --- a/apigen/drs/model_drs_object.go +++ /dev/null @@ -1,573 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" - "time" - "bytes" - "fmt" -) - -// checks if the DrsObject type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &DrsObject{} - -// DrsObject struct for DrsObject -type DrsObject struct { - // An identifier unique to this `DrsObject` - Id string `json:"id"` - // A string that can be used to name a `DrsObject`. This string is made up of uppercase and lowercase letters, decimal digits, hyphen, period, and underscore [A-Za-z0-9.-_]. See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282[portable filenames]. - Name *string `json:"name,omitempty"` - // A drs:// hostname-based URI, as defined in the DRS documentation, that tells clients how to access this object. The intent of this field is to make DRS objects self-contained, and therefore easier for clients to store and pass around. For example, if you arrive at this DRS JSON by resolving a compact identifier-based DRS URI, the `self_uri` presents you with a hostname and properly encoded DRS ID for use in subsequent `access` endpoint calls. - SelfUri string `json:"self_uri"` - // For blobs, the blob size in bytes. For bundles, the cumulative size, in bytes, of items in the `contents` field. - Size int64 `json:"size"` - // Timestamp of content creation in RFC3339. (This is the creation time of the underlying content, not of the JSON object.) - CreatedTime time.Time `json:"created_time"` - // Timestamp of content update in RFC3339, identical to `created_time` in systems that do not support updates. (This is the update time of the underlying content, not of the JSON object.) - UpdatedTime *time.Time `json:"updated_time,omitempty"` - // A string representing a version. (Some systems may use checksum, a RFC3339 timestamp, or an incrementing version number.) - Version *string `json:"version,omitempty"` - // A string providing the mime-type of the `DrsObject`. 
- MimeType *string `json:"mime_type,omitempty"` - // The checksum of the `DrsObject`. At least one checksum must be provided. For blobs, the checksum is computed over the bytes in the blob. For bundles, the checksum is computed over a sorted concatenation of the checksums of its top-level contained objects (not recursive, names not included). The list of checksums is sorted alphabetically (hex-code) before concatenation and a further checksum is performed on the concatenated checksum value. For example, if a bundle contains blobs with the following checksums: md5(blob1) = 72794b6d md5(blob2) = 5e089d29 Then the checksum of the bundle is: md5( concat( sort( md5(blob1), md5(blob2) ) ) ) = md5( concat( sort( 72794b6d, 5e089d29 ) ) ) = md5( concat( 5e089d29, 72794b6d ) ) = md5( 5e089d2972794b6d ) = f7a29a04 - Checksums []Checksum `json:"checksums"` - // The list of access methods that can be used to fetch the `DrsObject`. Required for single blobs; optional for bundles. - AccessMethods []AccessMethod `json:"access_methods,omitempty"` - // If not set, this `DrsObject` is a single blob. If set, this `DrsObject` is a bundle containing the listed `ContentsObject` s (some of which may be further nested). - Contents []ContentsObject `json:"contents,omitempty"` - // A human readable description of the `DrsObject`. - Description *string `json:"description,omitempty"` - // A list of strings that can be used to find other metadata about this `DrsObject` from external metadata sources. These aliases can be used to represent secondary accession numbers or external GUIDs. 
- Aliases []string `json:"aliases,omitempty"` -} - -type _DrsObject DrsObject - -// NewDrsObject instantiates a new DrsObject object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewDrsObject(id string, selfUri string, size int64, createdTime time.Time, checksums []Checksum) *DrsObject { - this := DrsObject{} - this.Id = id - this.SelfUri = selfUri - this.Size = size - this.CreatedTime = createdTime - this.Checksums = checksums - return &this -} - -// NewDrsObjectWithDefaults instantiates a new DrsObject object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewDrsObjectWithDefaults() *DrsObject { - this := DrsObject{} - return &this -} - -// GetId returns the Id field value -func (o *DrsObject) GetId() string { - if o == nil { - var ret string - return ret - } - - return o.Id -} - -// GetIdOk returns a tuple with the Id field value -// and a boolean to check if the value has been set. -func (o *DrsObject) GetIdOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Id, true -} - -// SetId sets field value -func (o *DrsObject) SetId(v string) { - o.Id = v -} - -// GetName returns the Name field value if set, zero value otherwise. -func (o *DrsObject) GetName() string { - if o == nil || IsNil(o.Name) { - var ret string - return ret - } - return *o.Name -} - -// GetNameOk returns a tuple with the Name field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsObject) GetNameOk() (*string, bool) { - if o == nil || IsNil(o.Name) { - return nil, false - } - return o.Name, true -} - -// HasName returns a boolean if a field has been set. 
-func (o *DrsObject) HasName() bool { - if o != nil && !IsNil(o.Name) { - return true - } - - return false -} - -// SetName gets a reference to the given string and assigns it to the Name field. -func (o *DrsObject) SetName(v string) { - o.Name = &v -} - -// GetSelfUri returns the SelfUri field value -func (o *DrsObject) GetSelfUri() string { - if o == nil { - var ret string - return ret - } - - return o.SelfUri -} - -// GetSelfUriOk returns a tuple with the SelfUri field value -// and a boolean to check if the value has been set. -func (o *DrsObject) GetSelfUriOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.SelfUri, true -} - -// SetSelfUri sets field value -func (o *DrsObject) SetSelfUri(v string) { - o.SelfUri = v -} - -// GetSize returns the Size field value -func (o *DrsObject) GetSize() int64 { - if o == nil { - var ret int64 - return ret - } - - return o.Size -} - -// GetSizeOk returns a tuple with the Size field value -// and a boolean to check if the value has been set. -func (o *DrsObject) GetSizeOk() (*int64, bool) { - if o == nil { - return nil, false - } - return &o.Size, true -} - -// SetSize sets field value -func (o *DrsObject) SetSize(v int64) { - o.Size = v -} - -// GetCreatedTime returns the CreatedTime field value -func (o *DrsObject) GetCreatedTime() time.Time { - if o == nil { - var ret time.Time - return ret - } - - return o.CreatedTime -} - -// GetCreatedTimeOk returns a tuple with the CreatedTime field value -// and a boolean to check if the value has been set. -func (o *DrsObject) GetCreatedTimeOk() (*time.Time, bool) { - if o == nil { - return nil, false - } - return &o.CreatedTime, true -} - -// SetCreatedTime sets field value -func (o *DrsObject) SetCreatedTime(v time.Time) { - o.CreatedTime = v -} - -// GetUpdatedTime returns the UpdatedTime field value if set, zero value otherwise. 
-func (o *DrsObject) GetUpdatedTime() time.Time { - if o == nil || IsNil(o.UpdatedTime) { - var ret time.Time - return ret - } - return *o.UpdatedTime -} - -// GetUpdatedTimeOk returns a tuple with the UpdatedTime field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsObject) GetUpdatedTimeOk() (*time.Time, bool) { - if o == nil || IsNil(o.UpdatedTime) { - return nil, false - } - return o.UpdatedTime, true -} - -// HasUpdatedTime returns a boolean if a field has been set. -func (o *DrsObject) HasUpdatedTime() bool { - if o != nil && !IsNil(o.UpdatedTime) { - return true - } - - return false -} - -// SetUpdatedTime gets a reference to the given time.Time and assigns it to the UpdatedTime field. -func (o *DrsObject) SetUpdatedTime(v time.Time) { - o.UpdatedTime = &v -} - -// GetVersion returns the Version field value if set, zero value otherwise. -func (o *DrsObject) GetVersion() string { - if o == nil || IsNil(o.Version) { - var ret string - return ret - } - return *o.Version -} - -// GetVersionOk returns a tuple with the Version field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsObject) GetVersionOk() (*string, bool) { - if o == nil || IsNil(o.Version) { - return nil, false - } - return o.Version, true -} - -// HasVersion returns a boolean if a field has been set. -func (o *DrsObject) HasVersion() bool { - if o != nil && !IsNil(o.Version) { - return true - } - - return false -} - -// SetVersion gets a reference to the given string and assigns it to the Version field. -func (o *DrsObject) SetVersion(v string) { - o.Version = &v -} - -// GetMimeType returns the MimeType field value if set, zero value otherwise. 
-func (o *DrsObject) GetMimeType() string { - if o == nil || IsNil(o.MimeType) { - var ret string - return ret - } - return *o.MimeType -} - -// GetMimeTypeOk returns a tuple with the MimeType field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsObject) GetMimeTypeOk() (*string, bool) { - if o == nil || IsNil(o.MimeType) { - return nil, false - } - return o.MimeType, true -} - -// HasMimeType returns a boolean if a field has been set. -func (o *DrsObject) HasMimeType() bool { - if o != nil && !IsNil(o.MimeType) { - return true - } - - return false -} - -// SetMimeType gets a reference to the given string and assigns it to the MimeType field. -func (o *DrsObject) SetMimeType(v string) { - o.MimeType = &v -} - -// GetChecksums returns the Checksums field value -func (o *DrsObject) GetChecksums() []Checksum { - if o == nil { - var ret []Checksum - return ret - } - - return o.Checksums -} - -// GetChecksumsOk returns a tuple with the Checksums field value -// and a boolean to check if the value has been set. -func (o *DrsObject) GetChecksumsOk() ([]Checksum, bool) { - if o == nil { - return nil, false - } - return o.Checksums, true -} - -// SetChecksums sets field value -func (o *DrsObject) SetChecksums(v []Checksum) { - o.Checksums = v -} - -// GetAccessMethods returns the AccessMethods field value if set, zero value otherwise. -func (o *DrsObject) GetAccessMethods() []AccessMethod { - if o == nil || IsNil(o.AccessMethods) { - var ret []AccessMethod - return ret - } - return o.AccessMethods -} - -// GetAccessMethodsOk returns a tuple with the AccessMethods field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsObject) GetAccessMethodsOk() ([]AccessMethod, bool) { - if o == nil || IsNil(o.AccessMethods) { - return nil, false - } - return o.AccessMethods, true -} - -// HasAccessMethods returns a boolean if a field has been set. 
-func (o *DrsObject) HasAccessMethods() bool { - if o != nil && !IsNil(o.AccessMethods) { - return true - } - - return false -} - -// SetAccessMethods gets a reference to the given []AccessMethod and assigns it to the AccessMethods field. -func (o *DrsObject) SetAccessMethods(v []AccessMethod) { - o.AccessMethods = v -} - -// GetContents returns the Contents field value if set, zero value otherwise. -func (o *DrsObject) GetContents() []ContentsObject { - if o == nil || IsNil(o.Contents) { - var ret []ContentsObject - return ret - } - return o.Contents -} - -// GetContentsOk returns a tuple with the Contents field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsObject) GetContentsOk() ([]ContentsObject, bool) { - if o == nil || IsNil(o.Contents) { - return nil, false - } - return o.Contents, true -} - -// HasContents returns a boolean if a field has been set. -func (o *DrsObject) HasContents() bool { - if o != nil && !IsNil(o.Contents) { - return true - } - - return false -} - -// SetContents gets a reference to the given []ContentsObject and assigns it to the Contents field. -func (o *DrsObject) SetContents(v []ContentsObject) { - o.Contents = v -} - -// GetDescription returns the Description field value if set, zero value otherwise. -func (o *DrsObject) GetDescription() string { - if o == nil || IsNil(o.Description) { - var ret string - return ret - } - return *o.Description -} - -// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsObject) GetDescriptionOk() (*string, bool) { - if o == nil || IsNil(o.Description) { - return nil, false - } - return o.Description, true -} - -// HasDescription returns a boolean if a field has been set. 
-func (o *DrsObject) HasDescription() bool { - if o != nil && !IsNil(o.Description) { - return true - } - - return false -} - -// SetDescription gets a reference to the given string and assigns it to the Description field. -func (o *DrsObject) SetDescription(v string) { - o.Description = &v -} - -// GetAliases returns the Aliases field value if set, zero value otherwise. -func (o *DrsObject) GetAliases() []string { - if o == nil || IsNil(o.Aliases) { - var ret []string - return ret - } - return o.Aliases -} - -// GetAliasesOk returns a tuple with the Aliases field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsObject) GetAliasesOk() ([]string, bool) { - if o == nil || IsNil(o.Aliases) { - return nil, false - } - return o.Aliases, true -} - -// HasAliases returns a boolean if a field has been set. -func (o *DrsObject) HasAliases() bool { - if o != nil && !IsNil(o.Aliases) { - return true - } - - return false -} - -// SetAliases gets a reference to the given []string and assigns it to the Aliases field. 
-func (o *DrsObject) SetAliases(v []string) { - o.Aliases = v -} - -func (o DrsObject) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o DrsObject) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["id"] = o.Id - if !IsNil(o.Name) { - toSerialize["name"] = o.Name - } - toSerialize["self_uri"] = o.SelfUri - toSerialize["size"] = o.Size - toSerialize["created_time"] = o.CreatedTime - if !IsNil(o.UpdatedTime) { - toSerialize["updated_time"] = o.UpdatedTime - } - if !IsNil(o.Version) { - toSerialize["version"] = o.Version - } - if !IsNil(o.MimeType) { - toSerialize["mime_type"] = o.MimeType - } - toSerialize["checksums"] = o.Checksums - if !IsNil(o.AccessMethods) { - toSerialize["access_methods"] = o.AccessMethods - } - if !IsNil(o.Contents) { - toSerialize["contents"] = o.Contents - } - if !IsNil(o.Description) { - toSerialize["description"] = o.Description - } - if !IsNil(o.Aliases) { - toSerialize["aliases"] = o.Aliases - } - return toSerialize, nil -} - -func (o *DrsObject) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "id", - "self_uri", - "size", - "created_time", - "checksums", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varDrsObject := _DrsObject{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varDrsObject) - - if err != nil { - return err - } - - *o = DrsObject(varDrsObject) - - return err -} - -type NullableDrsObject struct { - value *DrsObject - isSet bool -} - -func (v NullableDrsObject) Get() *DrsObject { - return v.value -} - -func (v *NullableDrsObject) Set(val *DrsObject) { - v.value = val - v.isSet = true -} - -func (v NullableDrsObject) IsSet() bool { - return v.isSet -} - -func (v *NullableDrsObject) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableDrsObject(val *DrsObject) *NullableDrsObject { - return &NullableDrsObject{value: val, isSet: true} -} - -func (v NullableDrsObject) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableDrsObject) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_drs_object_candidate.go b/apigen/drs/model_drs_object_candidate.go deleted file mode 100644 index 8bf6ee2..0000000 --- a/apigen/drs/model_drs_object_candidate.go +++ /dev/null @@ -1,448 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the DrsObjectCandidate type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &DrsObjectCandidate{} - -// DrsObjectCandidate struct for DrsObjectCandidate -type DrsObjectCandidate struct { - // A string that can be used to name a `DrsObject`. This string is made up of uppercase and lowercase letters, decimal digits, hyphen, period, and underscore [A-Za-z0-9.-_]. See http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_282[portable filenames]. - Name *string `json:"name,omitempty"` - // For blobs, the blob size in bytes. For bundles, the cumulative size, in bytes, of items in the `contents` field. - Size int64 `json:"size"` - // A string representing a version. (Some systems may use checksum, a RFC3339 timestamp, or an incrementing version number.) - Version *string `json:"version,omitempty"` - // A string providing the mime-type of the `DrsObject`. - MimeType *string `json:"mime_type,omitempty"` - // The checksum of the `DrsObject`. At least one checksum must be provided. For blobs, the checksum is computed over the bytes in the blob. For bundles, the checksum is computed over a sorted concatenation of the checksums of its top-level contained objects (not recursive, names not included). The list of checksums is sorted alphabetically (hex-code) before concatenation and a further checksum is performed on the concatenated checksum value. For example, if a bundle contains blobs with the following checksums: md5(blob1) = 72794b6d md5(blob2) = 5e089d29 Then the checksum of the bundle is: md5( concat( sort( md5(blob1), md5(blob2) ) ) ) = md5( concat( sort( 72794b6d, 5e089d29 ) ) ) = md5( concat( 5e089d29, 72794b6d ) ) = md5( 5e089d2972794b6d ) = f7a29a04 - Checksums []Checksum `json:"checksums"` - // The list of access methods that can be used to fetch the `DrsObject`. Required for single blobs; optional for bundles. 
- AccessMethods []AccessMethod `json:"access_methods,omitempty"` - // If not set, this `DrsObject` is a single blob. If set, this `DrsObject` is a bundle containing the listed `ContentsObject` s (some of which may be further nested). - Contents []ContentsObject `json:"contents,omitempty"` - // A human readable description of the `DrsObject`. - Description *string `json:"description,omitempty"` - // A list of strings that can be used to find other metadata about this `DrsObject` from external metadata sources. These aliases can be used to represent secondary accession numbers or external GUIDs. - Aliases []string `json:"aliases,omitempty"` -} - -type _DrsObjectCandidate DrsObjectCandidate - -// NewDrsObjectCandidate instantiates a new DrsObjectCandidate object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewDrsObjectCandidate(size int64, checksums []Checksum) *DrsObjectCandidate { - this := DrsObjectCandidate{} - this.Size = size - this.Checksums = checksums - return &this -} - -// NewDrsObjectCandidateWithDefaults instantiates a new DrsObjectCandidate object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewDrsObjectCandidateWithDefaults() *DrsObjectCandidate { - this := DrsObjectCandidate{} - return &this -} - -// GetName returns the Name field value if set, zero value otherwise. -func (o *DrsObjectCandidate) GetName() string { - if o == nil || IsNil(o.Name) { - var ret string - return ret - } - return *o.Name -} - -// GetNameOk returns a tuple with the Name field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *DrsObjectCandidate) GetNameOk() (*string, bool) { - if o == nil || IsNil(o.Name) { - return nil, false - } - return o.Name, true -} - -// HasName returns a boolean if a field has been set. -func (o *DrsObjectCandidate) HasName() bool { - if o != nil && !IsNil(o.Name) { - return true - } - - return false -} - -// SetName gets a reference to the given string and assigns it to the Name field. -func (o *DrsObjectCandidate) SetName(v string) { - o.Name = &v -} - -// GetSize returns the Size field value -func (o *DrsObjectCandidate) GetSize() int64 { - if o == nil { - var ret int64 - return ret - } - - return o.Size -} - -// GetSizeOk returns a tuple with the Size field value -// and a boolean to check if the value has been set. -func (o *DrsObjectCandidate) GetSizeOk() (*int64, bool) { - if o == nil { - return nil, false - } - return &o.Size, true -} - -// SetSize sets field value -func (o *DrsObjectCandidate) SetSize(v int64) { - o.Size = v -} - -// GetVersion returns the Version field value if set, zero value otherwise. -func (o *DrsObjectCandidate) GetVersion() string { - if o == nil || IsNil(o.Version) { - var ret string - return ret - } - return *o.Version -} - -// GetVersionOk returns a tuple with the Version field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsObjectCandidate) GetVersionOk() (*string, bool) { - if o == nil || IsNil(o.Version) { - return nil, false - } - return o.Version, true -} - -// HasVersion returns a boolean if a field has been set. -func (o *DrsObjectCandidate) HasVersion() bool { - if o != nil && !IsNil(o.Version) { - return true - } - - return false -} - -// SetVersion gets a reference to the given string and assigns it to the Version field. -func (o *DrsObjectCandidate) SetVersion(v string) { - o.Version = &v -} - -// GetMimeType returns the MimeType field value if set, zero value otherwise. 
-func (o *DrsObjectCandidate) GetMimeType() string { - if o == nil || IsNil(o.MimeType) { - var ret string - return ret - } - return *o.MimeType -} - -// GetMimeTypeOk returns a tuple with the MimeType field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsObjectCandidate) GetMimeTypeOk() (*string, bool) { - if o == nil || IsNil(o.MimeType) { - return nil, false - } - return o.MimeType, true -} - -// HasMimeType returns a boolean if a field has been set. -func (o *DrsObjectCandidate) HasMimeType() bool { - if o != nil && !IsNil(o.MimeType) { - return true - } - - return false -} - -// SetMimeType gets a reference to the given string and assigns it to the MimeType field. -func (o *DrsObjectCandidate) SetMimeType(v string) { - o.MimeType = &v -} - -// GetChecksums returns the Checksums field value -func (o *DrsObjectCandidate) GetChecksums() []Checksum { - if o == nil { - var ret []Checksum - return ret - } - - return o.Checksums -} - -// GetChecksumsOk returns a tuple with the Checksums field value -// and a boolean to check if the value has been set. -func (o *DrsObjectCandidate) GetChecksumsOk() ([]Checksum, bool) { - if o == nil { - return nil, false - } - return o.Checksums, true -} - -// SetChecksums sets field value -func (o *DrsObjectCandidate) SetChecksums(v []Checksum) { - o.Checksums = v -} - -// GetAccessMethods returns the AccessMethods field value if set, zero value otherwise. -func (o *DrsObjectCandidate) GetAccessMethods() []AccessMethod { - if o == nil || IsNil(o.AccessMethods) { - var ret []AccessMethod - return ret - } - return o.AccessMethods -} - -// GetAccessMethodsOk returns a tuple with the AccessMethods field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *DrsObjectCandidate) GetAccessMethodsOk() ([]AccessMethod, bool) { - if o == nil || IsNil(o.AccessMethods) { - return nil, false - } - return o.AccessMethods, true -} - -// HasAccessMethods returns a boolean if a field has been set. -func (o *DrsObjectCandidate) HasAccessMethods() bool { - if o != nil && !IsNil(o.AccessMethods) { - return true - } - - return false -} - -// SetAccessMethods gets a reference to the given []AccessMethod and assigns it to the AccessMethods field. -func (o *DrsObjectCandidate) SetAccessMethods(v []AccessMethod) { - o.AccessMethods = v -} - -// GetContents returns the Contents field value if set, zero value otherwise. -func (o *DrsObjectCandidate) GetContents() []ContentsObject { - if o == nil || IsNil(o.Contents) { - var ret []ContentsObject - return ret - } - return o.Contents -} - -// GetContentsOk returns a tuple with the Contents field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsObjectCandidate) GetContentsOk() ([]ContentsObject, bool) { - if o == nil || IsNil(o.Contents) { - return nil, false - } - return o.Contents, true -} - -// HasContents returns a boolean if a field has been set. -func (o *DrsObjectCandidate) HasContents() bool { - if o != nil && !IsNil(o.Contents) { - return true - } - - return false -} - -// SetContents gets a reference to the given []ContentsObject and assigns it to the Contents field. -func (o *DrsObjectCandidate) SetContents(v []ContentsObject) { - o.Contents = v -} - -// GetDescription returns the Description field value if set, zero value otherwise. -func (o *DrsObjectCandidate) GetDescription() string { - if o == nil || IsNil(o.Description) { - var ret string - return ret - } - return *o.Description -} - -// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *DrsObjectCandidate) GetDescriptionOk() (*string, bool) { - if o == nil || IsNil(o.Description) { - return nil, false - } - return o.Description, true -} - -// HasDescription returns a boolean if a field has been set. -func (o *DrsObjectCandidate) HasDescription() bool { - if o != nil && !IsNil(o.Description) { - return true - } - - return false -} - -// SetDescription gets a reference to the given string and assigns it to the Description field. -func (o *DrsObjectCandidate) SetDescription(v string) { - o.Description = &v -} - -// GetAliases returns the Aliases field value if set, zero value otherwise. -func (o *DrsObjectCandidate) GetAliases() []string { - if o == nil || IsNil(o.Aliases) { - var ret []string - return ret - } - return o.Aliases -} - -// GetAliasesOk returns a tuple with the Aliases field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsObjectCandidate) GetAliasesOk() ([]string, bool) { - if o == nil || IsNil(o.Aliases) { - return nil, false - } - return o.Aliases, true -} - -// HasAliases returns a boolean if a field has been set. -func (o *DrsObjectCandidate) HasAliases() bool { - if o != nil && !IsNil(o.Aliases) { - return true - } - - return false -} - -// SetAliases gets a reference to the given []string and assigns it to the Aliases field. 
-func (o *DrsObjectCandidate) SetAliases(v []string) { - o.Aliases = v -} - -func (o DrsObjectCandidate) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o DrsObjectCandidate) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Name) { - toSerialize["name"] = o.Name - } - toSerialize["size"] = o.Size - if !IsNil(o.Version) { - toSerialize["version"] = o.Version - } - if !IsNil(o.MimeType) { - toSerialize["mime_type"] = o.MimeType - } - toSerialize["checksums"] = o.Checksums - if !IsNil(o.AccessMethods) { - toSerialize["access_methods"] = o.AccessMethods - } - if !IsNil(o.Contents) { - toSerialize["contents"] = o.Contents - } - if !IsNil(o.Description) { - toSerialize["description"] = o.Description - } - if !IsNil(o.Aliases) { - toSerialize["aliases"] = o.Aliases - } - return toSerialize, nil -} - -func (o *DrsObjectCandidate) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "size", - "checksums", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varDrsObjectCandidate := _DrsObjectCandidate{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varDrsObjectCandidate) - - if err != nil { - return err - } - - *o = DrsObjectCandidate(varDrsObjectCandidate) - - return err -} - -type NullableDrsObjectCandidate struct { - value *DrsObjectCandidate - isSet bool -} - -func (v NullableDrsObjectCandidate) Get() *DrsObjectCandidate { - return v.value -} - -func (v *NullableDrsObjectCandidate) Set(val *DrsObjectCandidate) { - v.value = val - v.isSet = true -} - -func (v NullableDrsObjectCandidate) IsSet() bool { - return v.isSet -} - -func (v *NullableDrsObjectCandidate) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableDrsObjectCandidate(val *DrsObjectCandidate) *NullableDrsObjectCandidate { - return &NullableDrsObjectCandidate{value: val, isSet: true} -} - -func (v NullableDrsObjectCandidate) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableDrsObjectCandidate) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_drs_service.go b/apigen/drs/model_drs_service.go deleted file mode 100644 index f87786a..0000000 --- a/apigen/drs/model_drs_service.go +++ /dev/null @@ -1,224 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator 
(https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the DrsService type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &DrsService{} - -// DrsService struct for DrsService -type DrsService struct { - // DEPRECATED - In 2.0 this will move to under the drs section of service info and not at the root level. The max length the bulk request endpoints can handle (>= 1) before generating a 413 error e.g. how long can the arrays bulk_object_ids and bulk_object_access_ids be for this server. - MaxBulkRequestLength int32 `json:"maxBulkRequestLength"` - Type DrsServiceType `json:"type"` - Drs *DrsServiceDrs `json:"drs,omitempty"` -} - -type _DrsService DrsService - -// NewDrsService instantiates a new DrsService object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewDrsService(maxBulkRequestLength int32, type_ DrsServiceType) *DrsService { - this := DrsService{} - this.MaxBulkRequestLength = maxBulkRequestLength - this.Type = type_ - return &this -} - -// NewDrsServiceWithDefaults instantiates a new DrsService object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewDrsServiceWithDefaults() *DrsService { - this := DrsService{} - return &this -} - -// GetMaxBulkRequestLength returns the MaxBulkRequestLength field value -func (o *DrsService) GetMaxBulkRequestLength() int32 { - if o == nil { - var ret int32 - return ret - } - - return o.MaxBulkRequestLength -} - -// GetMaxBulkRequestLengthOk returns a tuple with the MaxBulkRequestLength field value -// and a boolean to check if the value has been set. 
-func (o *DrsService) GetMaxBulkRequestLengthOk() (*int32, bool) { - if o == nil { - return nil, false - } - return &o.MaxBulkRequestLength, true -} - -// SetMaxBulkRequestLength sets field value -func (o *DrsService) SetMaxBulkRequestLength(v int32) { - o.MaxBulkRequestLength = v -} - -// GetType returns the Type field value -func (o *DrsService) GetType() DrsServiceType { - if o == nil { - var ret DrsServiceType - return ret - } - - return o.Type -} - -// GetTypeOk returns a tuple with the Type field value -// and a boolean to check if the value has been set. -func (o *DrsService) GetTypeOk() (*DrsServiceType, bool) { - if o == nil { - return nil, false - } - return &o.Type, true -} - -// SetType sets field value -func (o *DrsService) SetType(v DrsServiceType) { - o.Type = v -} - -// GetDrs returns the Drs field value if set, zero value otherwise. -func (o *DrsService) GetDrs() DrsServiceDrs { - if o == nil || IsNil(o.Drs) { - var ret DrsServiceDrs - return ret - } - return *o.Drs -} - -// GetDrsOk returns a tuple with the Drs field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsService) GetDrsOk() (*DrsServiceDrs, bool) { - if o == nil || IsNil(o.Drs) { - return nil, false - } - return o.Drs, true -} - -// HasDrs returns a boolean if a field has been set. -func (o *DrsService) HasDrs() bool { - if o != nil && !IsNil(o.Drs) { - return true - } - - return false -} - -// SetDrs gets a reference to the given DrsServiceDrs and assigns it to the Drs field. 
-func (o *DrsService) SetDrs(v DrsServiceDrs) { - o.Drs = &v -} - -func (o DrsService) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o DrsService) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["maxBulkRequestLength"] = o.MaxBulkRequestLength - toSerialize["type"] = o.Type - if !IsNil(o.Drs) { - toSerialize["drs"] = o.Drs - } - return toSerialize, nil -} - -func (o *DrsService) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. - requiredProperties := []string{ - "maxBulkRequestLength", - "type", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varDrsService := _DrsService{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varDrsService) - - if err != nil { - return err - } - - *o = DrsService(varDrsService) - - return err -} - -type NullableDrsService struct { - value *DrsService - isSet bool -} - -func (v NullableDrsService) Get() *DrsService { - return v.value -} - -func (v *NullableDrsService) Set(val *DrsService) { - v.value = val - v.isSet = true -} - -func (v NullableDrsService) IsSet() bool { - return v.isSet -} - -func (v *NullableDrsService) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableDrsService(val *DrsService) *NullableDrsService { - return &NullableDrsService{value: val, isSet: true} -} - -func (v 
NullableDrsService) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableDrsService) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_drs_service_drs.go b/apigen/drs/model_drs_service_drs.go deleted file mode 100644 index fe7ba14..0000000 --- a/apigen/drs/model_drs_service_drs.go +++ /dev/null @@ -1,825 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the DrsServiceDrs type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &DrsServiceDrs{} - -// DrsServiceDrs struct for DrsServiceDrs -type DrsServiceDrs struct { - // The max length the bulk request endpoints can handle (>= 1) before generating a 413 error e.g. how long can the arrays bulk_object_ids and bulk_object_access_ids be for this server. - MaxBulkRequestLength int32 `json:"maxBulkRequestLength"` - // The total number of objects in this DRS service. - ObjectCount *int32 `json:"objectCount,omitempty"` - // The total size of all objects in this DRS service in bytes. As a general best practice, file bytes are counted for each unique file and not cloud mirrors or other redundant copies. - TotalObjectSize *int32 `json:"totalObjectSize,omitempty"` - // Indicates whether this DRS server supports upload request operations via the `/upload-request` endpoint. If true, clients can request upload methods and credentials for uploading files. If false or missing, the server does not support upload request coordination. 
- UploadRequestSupported *bool `json:"uploadRequestSupported,omitempty"` - // Indicates whether this DRS server supports object registration operations via the `/objects/register` endpoint. If true, clients can register uploaded files or existing data as DRS objects. If false or missing, the server does not support object registration. - ObjectRegistrationSupported *bool `json:"objectRegistrationSupported,omitempty"` - // List of upload methods supported by this DRS server. Only present when uploadRequestSupported is true. Clients can use this information to determine which upload methods are available before making upload requests. - **s3**: Direct S3 upload with temporary AWS credentials - **gs**: Google Cloud Storage upload with access tokens - **https**: Presigned POST URL for HTTP uploads - **ftp**: File Transfer Protocol uploads - **sftp**: Secure File Transfer Protocol uploads - **gsiftp**: GridFTP secure file transfer - **globus**: Globus transfer service for high-performance data movement - SupportedUploadMethods []string `json:"supportedUploadMethods,omitempty"` - // Maximum file size in bytes that can be uploaded via the upload endpoints. Only present when uploadRequestSupported is true. If not specified, there is no explicit size limit. - MaxUploadSize *int64 `json:"maxUploadSize,omitempty"` - // Maximum number of files that can be included in a single upload request. Only present when uploadRequestSupported is true. If not specified, defaults to the same value as maxBulkRequestLength. - MaxUploadRequestLength *int32 `json:"maxUploadRequestLength,omitempty"` - // Maximum number of candidate objects that can be included in a single registration request. Only present when objectRegistrationSupported is true. If not specified, defaults to the same value as maxBulkRequestLength. - MaxRegisterRequestLength *int32 `json:"maxRegisterRequestLength,omitempty"` - // Indicates whether this DRS server validates uploaded file checksums against the provided metadata. 
If true, the server will verify that uploaded files match their declared checksums and may reject uploads with mismatches. If false or missing, the server does not perform checksum validation and relies on client-provided metadata. Only present when uploadRequestSupported or objectRegistrationSupported is true. - ValidateUploadChecksums *bool `json:"validateUploadChecksums,omitempty"` - // Indicates whether this DRS server validates uploaded file sizes against the provided metadata. If true, the server will verify that uploaded files match their declared sizes and may reject uploads with mismatches. If false or missing, the server does not perform file size validation and relies on client-provided metadata. Only present when uploadRequestSupported or objectRegistrationSupported is true. - ValidateUploadFileSizes *bool `json:"validateUploadFileSizes,omitempty"` - // Indicates whether this DRS server supports storing files from the same upload request under a common prefix or folder structure. If true, the server will organize related files together in storage, enabling bioinformatics workflows that expect co-located files (e.g., CRAM + CRAI, VCF + TBI). If false or missing, the server may distribute files across different storage locations or prefixes. Only present when uploadRequestSupported is true. This feature is particularly valuable for genomics tools like samtools that expect index files to be co-located with data files. - RelatedFileStorageSupported *bool `json:"relatedFileStorageSupported,omitempty"` - // Indicates whether this DRS server supports delete operations via the delete endpoints. If true, clients can delete DRS objects using POST requests to `/objects/{object_id}/delete` and `/objects/delete`. If false or missing, the server does not support delete operations and will return 404 for delete endpoint requests. Like upload functionality, delete support is entirely optional and servers remain DRS compliant without it. 
- DeleteSupported *bool `json:"deleteSupported,omitempty"` - // Maximum number of objects that can be deleted in a single bulk delete request via `/objects/delete`. Only present when deleteSupported is true. If not specified when delete is supported, defaults to the same value as maxBulkRequestLength. Servers may enforce lower limits for delete operations compared to other bulk operations for safety reasons. - MaxBulkDeleteLength *int32 `json:"maxBulkDeleteLength,omitempty"` - // Indicates whether this DRS server supports attempting to delete underlying storage data when clients request it. If true, the server will attempt to delete both metadata and storage files when `delete_storage_data: true` is specified in delete requests. If false or missing, the server only supports metadata deletion regardless of client request, preserving underlying storage data. Only present when deleteSupported is true. This is a capability flag indicating what the server can attempt, not a default behavior setting. Note: Storage deletion attempts may fail due to permissions, network issues, or storage service errors. - DeleteStorageDataSupported *bool `json:"deleteStorageDataSupported,omitempty"` - // Indicates whether this DRS server supports updating access methods for existing objects. If true, clients can update access methods using `/objects/{object_id}/access-methods` and `/objects/access-methods` endpoints. If false or missing, the server does not support access method updates. - AccessMethodUpdateSupported *bool `json:"accessMethodUpdateSupported,omitempty"` - // Maximum number of objects that can be updated in a single bulk access method update request. Only present when accessMethodUpdateSupported is true. If not specified, defaults to maxBulkRequestLength. - MaxBulkAccessMethodUpdateLength *int32 `json:"maxBulkAccessMethodUpdateLength,omitempty"` - // Indicates whether this DRS server validates new access methods by verifying they point to the same data. 
If true, the server will attempt to verify checksums/content before updating access methods. If false or missing, the server trusts client-provided access methods without validation. Only present when accessMethodUpdateSupported is true. - ValidateAccessMethodUpdates *bool `json:"validateAccessMethodUpdates,omitempty"` -} - -type _DrsServiceDrs DrsServiceDrs - -// NewDrsServiceDrs instantiates a new DrsServiceDrs object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewDrsServiceDrs(maxBulkRequestLength int32) *DrsServiceDrs { - this := DrsServiceDrs{} - this.MaxBulkRequestLength = maxBulkRequestLength - var uploadRequestSupported bool = false - this.UploadRequestSupported = &uploadRequestSupported - var objectRegistrationSupported bool = false - this.ObjectRegistrationSupported = &objectRegistrationSupported - var validateUploadChecksums bool = false - this.ValidateUploadChecksums = &validateUploadChecksums - var validateUploadFileSizes bool = false - this.ValidateUploadFileSizes = &validateUploadFileSizes - var relatedFileStorageSupported bool = false - this.RelatedFileStorageSupported = &relatedFileStorageSupported - var deleteSupported bool = false - this.DeleteSupported = &deleteSupported - var deleteStorageDataSupported bool = false - this.DeleteStorageDataSupported = &deleteStorageDataSupported - var accessMethodUpdateSupported bool = false - this.AccessMethodUpdateSupported = &accessMethodUpdateSupported - var validateAccessMethodUpdates bool = false - this.ValidateAccessMethodUpdates = &validateAccessMethodUpdates - return &this -} - -// NewDrsServiceDrsWithDefaults instantiates a new DrsServiceDrs object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func 
NewDrsServiceDrsWithDefaults() *DrsServiceDrs { - this := DrsServiceDrs{} - var uploadRequestSupported bool = false - this.UploadRequestSupported = &uploadRequestSupported - var objectRegistrationSupported bool = false - this.ObjectRegistrationSupported = &objectRegistrationSupported - var validateUploadChecksums bool = false - this.ValidateUploadChecksums = &validateUploadChecksums - var validateUploadFileSizes bool = false - this.ValidateUploadFileSizes = &validateUploadFileSizes - var relatedFileStorageSupported bool = false - this.RelatedFileStorageSupported = &relatedFileStorageSupported - var deleteSupported bool = false - this.DeleteSupported = &deleteSupported - var deleteStorageDataSupported bool = false - this.DeleteStorageDataSupported = &deleteStorageDataSupported - var accessMethodUpdateSupported bool = false - this.AccessMethodUpdateSupported = &accessMethodUpdateSupported - var validateAccessMethodUpdates bool = false - this.ValidateAccessMethodUpdates = &validateAccessMethodUpdates - return &this -} - -// GetMaxBulkRequestLength returns the MaxBulkRequestLength field value -func (o *DrsServiceDrs) GetMaxBulkRequestLength() int32 { - if o == nil { - var ret int32 - return ret - } - - return o.MaxBulkRequestLength -} - -// GetMaxBulkRequestLengthOk returns a tuple with the MaxBulkRequestLength field value -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetMaxBulkRequestLengthOk() (*int32, bool) { - if o == nil { - return nil, false - } - return &o.MaxBulkRequestLength, true -} - -// SetMaxBulkRequestLength sets field value -func (o *DrsServiceDrs) SetMaxBulkRequestLength(v int32) { - o.MaxBulkRequestLength = v -} - -// GetObjectCount returns the ObjectCount field value if set, zero value otherwise. 
-func (o *DrsServiceDrs) GetObjectCount() int32 { - if o == nil || IsNil(o.ObjectCount) { - var ret int32 - return ret - } - return *o.ObjectCount -} - -// GetObjectCountOk returns a tuple with the ObjectCount field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetObjectCountOk() (*int32, bool) { - if o == nil || IsNil(o.ObjectCount) { - return nil, false - } - return o.ObjectCount, true -} - -// HasObjectCount returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasObjectCount() bool { - if o != nil && !IsNil(o.ObjectCount) { - return true - } - - return false -} - -// SetObjectCount gets a reference to the given int32 and assigns it to the ObjectCount field. -func (o *DrsServiceDrs) SetObjectCount(v int32) { - o.ObjectCount = &v -} - -// GetTotalObjectSize returns the TotalObjectSize field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetTotalObjectSize() int32 { - if o == nil || IsNil(o.TotalObjectSize) { - var ret int32 - return ret - } - return *o.TotalObjectSize -} - -// GetTotalObjectSizeOk returns a tuple with the TotalObjectSize field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetTotalObjectSizeOk() (*int32, bool) { - if o == nil || IsNil(o.TotalObjectSize) { - return nil, false - } - return o.TotalObjectSize, true -} - -// HasTotalObjectSize returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasTotalObjectSize() bool { - if o != nil && !IsNil(o.TotalObjectSize) { - return true - } - - return false -} - -// SetTotalObjectSize gets a reference to the given int32 and assigns it to the TotalObjectSize field. -func (o *DrsServiceDrs) SetTotalObjectSize(v int32) { - o.TotalObjectSize = &v -} - -// GetUploadRequestSupported returns the UploadRequestSupported field value if set, zero value otherwise. 
-func (o *DrsServiceDrs) GetUploadRequestSupported() bool { - if o == nil || IsNil(o.UploadRequestSupported) { - var ret bool - return ret - } - return *o.UploadRequestSupported -} - -// GetUploadRequestSupportedOk returns a tuple with the UploadRequestSupported field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetUploadRequestSupportedOk() (*bool, bool) { - if o == nil || IsNil(o.UploadRequestSupported) { - return nil, false - } - return o.UploadRequestSupported, true -} - -// HasUploadRequestSupported returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasUploadRequestSupported() bool { - if o != nil && !IsNil(o.UploadRequestSupported) { - return true - } - - return false -} - -// SetUploadRequestSupported gets a reference to the given bool and assigns it to the UploadRequestSupported field. -func (o *DrsServiceDrs) SetUploadRequestSupported(v bool) { - o.UploadRequestSupported = &v -} - -// GetObjectRegistrationSupported returns the ObjectRegistrationSupported field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetObjectRegistrationSupported() bool { - if o == nil || IsNil(o.ObjectRegistrationSupported) { - var ret bool - return ret - } - return *o.ObjectRegistrationSupported -} - -// GetObjectRegistrationSupportedOk returns a tuple with the ObjectRegistrationSupported field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetObjectRegistrationSupportedOk() (*bool, bool) { - if o == nil || IsNil(o.ObjectRegistrationSupported) { - return nil, false - } - return o.ObjectRegistrationSupported, true -} - -// HasObjectRegistrationSupported returns a boolean if a field has been set. 
-func (o *DrsServiceDrs) HasObjectRegistrationSupported() bool { - if o != nil && !IsNil(o.ObjectRegistrationSupported) { - return true - } - - return false -} - -// SetObjectRegistrationSupported gets a reference to the given bool and assigns it to the ObjectRegistrationSupported field. -func (o *DrsServiceDrs) SetObjectRegistrationSupported(v bool) { - o.ObjectRegistrationSupported = &v -} - -// GetSupportedUploadMethods returns the SupportedUploadMethods field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetSupportedUploadMethods() []string { - if o == nil || IsNil(o.SupportedUploadMethods) { - var ret []string - return ret - } - return o.SupportedUploadMethods -} - -// GetSupportedUploadMethodsOk returns a tuple with the SupportedUploadMethods field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetSupportedUploadMethodsOk() ([]string, bool) { - if o == nil || IsNil(o.SupportedUploadMethods) { - return nil, false - } - return o.SupportedUploadMethods, true -} - -// HasSupportedUploadMethods returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasSupportedUploadMethods() bool { - if o != nil && !IsNil(o.SupportedUploadMethods) { - return true - } - - return false -} - -// SetSupportedUploadMethods gets a reference to the given []string and assigns it to the SupportedUploadMethods field. -func (o *DrsServiceDrs) SetSupportedUploadMethods(v []string) { - o.SupportedUploadMethods = v -} - -// GetMaxUploadSize returns the MaxUploadSize field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetMaxUploadSize() int64 { - if o == nil || IsNil(o.MaxUploadSize) { - var ret int64 - return ret - } - return *o.MaxUploadSize -} - -// GetMaxUploadSizeOk returns a tuple with the MaxUploadSize field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *DrsServiceDrs) GetMaxUploadSizeOk() (*int64, bool) { - if o == nil || IsNil(o.MaxUploadSize) { - return nil, false - } - return o.MaxUploadSize, true -} - -// HasMaxUploadSize returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasMaxUploadSize() bool { - if o != nil && !IsNil(o.MaxUploadSize) { - return true - } - - return false -} - -// SetMaxUploadSize gets a reference to the given int64 and assigns it to the MaxUploadSize field. -func (o *DrsServiceDrs) SetMaxUploadSize(v int64) { - o.MaxUploadSize = &v -} - -// GetMaxUploadRequestLength returns the MaxUploadRequestLength field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetMaxUploadRequestLength() int32 { - if o == nil || IsNil(o.MaxUploadRequestLength) { - var ret int32 - return ret - } - return *o.MaxUploadRequestLength -} - -// GetMaxUploadRequestLengthOk returns a tuple with the MaxUploadRequestLength field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetMaxUploadRequestLengthOk() (*int32, bool) { - if o == nil || IsNil(o.MaxUploadRequestLength) { - return nil, false - } - return o.MaxUploadRequestLength, true -} - -// HasMaxUploadRequestLength returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasMaxUploadRequestLength() bool { - if o != nil && !IsNil(o.MaxUploadRequestLength) { - return true - } - - return false -} - -// SetMaxUploadRequestLength gets a reference to the given int32 and assigns it to the MaxUploadRequestLength field. -func (o *DrsServiceDrs) SetMaxUploadRequestLength(v int32) { - o.MaxUploadRequestLength = &v -} - -// GetMaxRegisterRequestLength returns the MaxRegisterRequestLength field value if set, zero value otherwise. 
-func (o *DrsServiceDrs) GetMaxRegisterRequestLength() int32 { - if o == nil || IsNil(o.MaxRegisterRequestLength) { - var ret int32 - return ret - } - return *o.MaxRegisterRequestLength -} - -// GetMaxRegisterRequestLengthOk returns a tuple with the MaxRegisterRequestLength field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetMaxRegisterRequestLengthOk() (*int32, bool) { - if o == nil || IsNil(o.MaxRegisterRequestLength) { - return nil, false - } - return o.MaxRegisterRequestLength, true -} - -// HasMaxRegisterRequestLength returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasMaxRegisterRequestLength() bool { - if o != nil && !IsNil(o.MaxRegisterRequestLength) { - return true - } - - return false -} - -// SetMaxRegisterRequestLength gets a reference to the given int32 and assigns it to the MaxRegisterRequestLength field. -func (o *DrsServiceDrs) SetMaxRegisterRequestLength(v int32) { - o.MaxRegisterRequestLength = &v -} - -// GetValidateUploadChecksums returns the ValidateUploadChecksums field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetValidateUploadChecksums() bool { - if o == nil || IsNil(o.ValidateUploadChecksums) { - var ret bool - return ret - } - return *o.ValidateUploadChecksums -} - -// GetValidateUploadChecksumsOk returns a tuple with the ValidateUploadChecksums field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetValidateUploadChecksumsOk() (*bool, bool) { - if o == nil || IsNil(o.ValidateUploadChecksums) { - return nil, false - } - return o.ValidateUploadChecksums, true -} - -// HasValidateUploadChecksums returns a boolean if a field has been set. 
-func (o *DrsServiceDrs) HasValidateUploadChecksums() bool { - if o != nil && !IsNil(o.ValidateUploadChecksums) { - return true - } - - return false -} - -// SetValidateUploadChecksums gets a reference to the given bool and assigns it to the ValidateUploadChecksums field. -func (o *DrsServiceDrs) SetValidateUploadChecksums(v bool) { - o.ValidateUploadChecksums = &v -} - -// GetValidateUploadFileSizes returns the ValidateUploadFileSizes field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetValidateUploadFileSizes() bool { - if o == nil || IsNil(o.ValidateUploadFileSizes) { - var ret bool - return ret - } - return *o.ValidateUploadFileSizes -} - -// GetValidateUploadFileSizesOk returns a tuple with the ValidateUploadFileSizes field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetValidateUploadFileSizesOk() (*bool, bool) { - if o == nil || IsNil(o.ValidateUploadFileSizes) { - return nil, false - } - return o.ValidateUploadFileSizes, true -} - -// HasValidateUploadFileSizes returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasValidateUploadFileSizes() bool { - if o != nil && !IsNil(o.ValidateUploadFileSizes) { - return true - } - - return false -} - -// SetValidateUploadFileSizes gets a reference to the given bool and assigns it to the ValidateUploadFileSizes field. -func (o *DrsServiceDrs) SetValidateUploadFileSizes(v bool) { - o.ValidateUploadFileSizes = &v -} - -// GetRelatedFileStorageSupported returns the RelatedFileStorageSupported field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetRelatedFileStorageSupported() bool { - if o == nil || IsNil(o.RelatedFileStorageSupported) { - var ret bool - return ret - } - return *o.RelatedFileStorageSupported -} - -// GetRelatedFileStorageSupportedOk returns a tuple with the RelatedFileStorageSupported field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *DrsServiceDrs) GetRelatedFileStorageSupportedOk() (*bool, bool) { - if o == nil || IsNil(o.RelatedFileStorageSupported) { - return nil, false - } - return o.RelatedFileStorageSupported, true -} - -// HasRelatedFileStorageSupported returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasRelatedFileStorageSupported() bool { - if o != nil && !IsNil(o.RelatedFileStorageSupported) { - return true - } - - return false -} - -// SetRelatedFileStorageSupported gets a reference to the given bool and assigns it to the RelatedFileStorageSupported field. -func (o *DrsServiceDrs) SetRelatedFileStorageSupported(v bool) { - o.RelatedFileStorageSupported = &v -} - -// GetDeleteSupported returns the DeleteSupported field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetDeleteSupported() bool { - if o == nil || IsNil(o.DeleteSupported) { - var ret bool - return ret - } - return *o.DeleteSupported -} - -// GetDeleteSupportedOk returns a tuple with the DeleteSupported field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetDeleteSupportedOk() (*bool, bool) { - if o == nil || IsNil(o.DeleteSupported) { - return nil, false - } - return o.DeleteSupported, true -} - -// HasDeleteSupported returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasDeleteSupported() bool { - if o != nil && !IsNil(o.DeleteSupported) { - return true - } - - return false -} - -// SetDeleteSupported gets a reference to the given bool and assigns it to the DeleteSupported field. -func (o *DrsServiceDrs) SetDeleteSupported(v bool) { - o.DeleteSupported = &v -} - -// GetMaxBulkDeleteLength returns the MaxBulkDeleteLength field value if set, zero value otherwise. 
-func (o *DrsServiceDrs) GetMaxBulkDeleteLength() int32 { - if o == nil || IsNil(o.MaxBulkDeleteLength) { - var ret int32 - return ret - } - return *o.MaxBulkDeleteLength -} - -// GetMaxBulkDeleteLengthOk returns a tuple with the MaxBulkDeleteLength field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetMaxBulkDeleteLengthOk() (*int32, bool) { - if o == nil || IsNil(o.MaxBulkDeleteLength) { - return nil, false - } - return o.MaxBulkDeleteLength, true -} - -// HasMaxBulkDeleteLength returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasMaxBulkDeleteLength() bool { - if o != nil && !IsNil(o.MaxBulkDeleteLength) { - return true - } - - return false -} - -// SetMaxBulkDeleteLength gets a reference to the given int32 and assigns it to the MaxBulkDeleteLength field. -func (o *DrsServiceDrs) SetMaxBulkDeleteLength(v int32) { - o.MaxBulkDeleteLength = &v -} - -// GetDeleteStorageDataSupported returns the DeleteStorageDataSupported field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetDeleteStorageDataSupported() bool { - if o == nil || IsNil(o.DeleteStorageDataSupported) { - var ret bool - return ret - } - return *o.DeleteStorageDataSupported -} - -// GetDeleteStorageDataSupportedOk returns a tuple with the DeleteStorageDataSupported field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetDeleteStorageDataSupportedOk() (*bool, bool) { - if o == nil || IsNil(o.DeleteStorageDataSupported) { - return nil, false - } - return o.DeleteStorageDataSupported, true -} - -// HasDeleteStorageDataSupported returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasDeleteStorageDataSupported() bool { - if o != nil && !IsNil(o.DeleteStorageDataSupported) { - return true - } - - return false -} - -// SetDeleteStorageDataSupported gets a reference to the given bool and assigns it to the DeleteStorageDataSupported field. 
-func (o *DrsServiceDrs) SetDeleteStorageDataSupported(v bool) { - o.DeleteStorageDataSupported = &v -} - -// GetAccessMethodUpdateSupported returns the AccessMethodUpdateSupported field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetAccessMethodUpdateSupported() bool { - if o == nil || IsNil(o.AccessMethodUpdateSupported) { - var ret bool - return ret - } - return *o.AccessMethodUpdateSupported -} - -// GetAccessMethodUpdateSupportedOk returns a tuple with the AccessMethodUpdateSupported field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetAccessMethodUpdateSupportedOk() (*bool, bool) { - if o == nil || IsNil(o.AccessMethodUpdateSupported) { - return nil, false - } - return o.AccessMethodUpdateSupported, true -} - -// HasAccessMethodUpdateSupported returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasAccessMethodUpdateSupported() bool { - if o != nil && !IsNil(o.AccessMethodUpdateSupported) { - return true - } - - return false -} - -// SetAccessMethodUpdateSupported gets a reference to the given bool and assigns it to the AccessMethodUpdateSupported field. -func (o *DrsServiceDrs) SetAccessMethodUpdateSupported(v bool) { - o.AccessMethodUpdateSupported = &v -} - -// GetMaxBulkAccessMethodUpdateLength returns the MaxBulkAccessMethodUpdateLength field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetMaxBulkAccessMethodUpdateLength() int32 { - if o == nil || IsNil(o.MaxBulkAccessMethodUpdateLength) { - var ret int32 - return ret - } - return *o.MaxBulkAccessMethodUpdateLength -} - -// GetMaxBulkAccessMethodUpdateLengthOk returns a tuple with the MaxBulkAccessMethodUpdateLength field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *DrsServiceDrs) GetMaxBulkAccessMethodUpdateLengthOk() (*int32, bool) { - if o == nil || IsNil(o.MaxBulkAccessMethodUpdateLength) { - return nil, false - } - return o.MaxBulkAccessMethodUpdateLength, true -} - -// HasMaxBulkAccessMethodUpdateLength returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasMaxBulkAccessMethodUpdateLength() bool { - if o != nil && !IsNil(o.MaxBulkAccessMethodUpdateLength) { - return true - } - - return false -} - -// SetMaxBulkAccessMethodUpdateLength gets a reference to the given int32 and assigns it to the MaxBulkAccessMethodUpdateLength field. -func (o *DrsServiceDrs) SetMaxBulkAccessMethodUpdateLength(v int32) { - o.MaxBulkAccessMethodUpdateLength = &v -} - -// GetValidateAccessMethodUpdates returns the ValidateAccessMethodUpdates field value if set, zero value otherwise. -func (o *DrsServiceDrs) GetValidateAccessMethodUpdates() bool { - if o == nil || IsNil(o.ValidateAccessMethodUpdates) { - var ret bool - return ret - } - return *o.ValidateAccessMethodUpdates -} - -// GetValidateAccessMethodUpdatesOk returns a tuple with the ValidateAccessMethodUpdates field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DrsServiceDrs) GetValidateAccessMethodUpdatesOk() (*bool, bool) { - if o == nil || IsNil(o.ValidateAccessMethodUpdates) { - return nil, false - } - return o.ValidateAccessMethodUpdates, true -} - -// HasValidateAccessMethodUpdates returns a boolean if a field has been set. -func (o *DrsServiceDrs) HasValidateAccessMethodUpdates() bool { - if o != nil && !IsNil(o.ValidateAccessMethodUpdates) { - return true - } - - return false -} - -// SetValidateAccessMethodUpdates gets a reference to the given bool and assigns it to the ValidateAccessMethodUpdates field. 
-func (o *DrsServiceDrs) SetValidateAccessMethodUpdates(v bool) { - o.ValidateAccessMethodUpdates = &v -} - -func (o DrsServiceDrs) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o DrsServiceDrs) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["maxBulkRequestLength"] = o.MaxBulkRequestLength - if !IsNil(o.ObjectCount) { - toSerialize["objectCount"] = o.ObjectCount - } - if !IsNil(o.TotalObjectSize) { - toSerialize["totalObjectSize"] = o.TotalObjectSize - } - if !IsNil(o.UploadRequestSupported) { - toSerialize["uploadRequestSupported"] = o.UploadRequestSupported - } - if !IsNil(o.ObjectRegistrationSupported) { - toSerialize["objectRegistrationSupported"] = o.ObjectRegistrationSupported - } - if !IsNil(o.SupportedUploadMethods) { - toSerialize["supportedUploadMethods"] = o.SupportedUploadMethods - } - if !IsNil(o.MaxUploadSize) { - toSerialize["maxUploadSize"] = o.MaxUploadSize - } - if !IsNil(o.MaxUploadRequestLength) { - toSerialize["maxUploadRequestLength"] = o.MaxUploadRequestLength - } - if !IsNil(o.MaxRegisterRequestLength) { - toSerialize["maxRegisterRequestLength"] = o.MaxRegisterRequestLength - } - if !IsNil(o.ValidateUploadChecksums) { - toSerialize["validateUploadChecksums"] = o.ValidateUploadChecksums - } - if !IsNil(o.ValidateUploadFileSizes) { - toSerialize["validateUploadFileSizes"] = o.ValidateUploadFileSizes - } - if !IsNil(o.RelatedFileStorageSupported) { - toSerialize["relatedFileStorageSupported"] = o.RelatedFileStorageSupported - } - if !IsNil(o.DeleteSupported) { - toSerialize["deleteSupported"] = o.DeleteSupported - } - if !IsNil(o.MaxBulkDeleteLength) { - toSerialize["maxBulkDeleteLength"] = o.MaxBulkDeleteLength - } - if !IsNil(o.DeleteStorageDataSupported) { - toSerialize["deleteStorageDataSupported"] = o.DeleteStorageDataSupported - } - if !IsNil(o.AccessMethodUpdateSupported) { - 
toSerialize["accessMethodUpdateSupported"] = o.AccessMethodUpdateSupported - } - if !IsNil(o.MaxBulkAccessMethodUpdateLength) { - toSerialize["maxBulkAccessMethodUpdateLength"] = o.MaxBulkAccessMethodUpdateLength - } - if !IsNil(o.ValidateAccessMethodUpdates) { - toSerialize["validateAccessMethodUpdates"] = o.ValidateAccessMethodUpdates - } - return toSerialize, nil -} - -func (o *DrsServiceDrs) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. - requiredProperties := []string{ - "maxBulkRequestLength", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varDrsServiceDrs := _DrsServiceDrs{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varDrsServiceDrs) - - if err != nil { - return err - } - - *o = DrsServiceDrs(varDrsServiceDrs) - - return err -} - -type NullableDrsServiceDrs struct { - value *DrsServiceDrs - isSet bool -} - -func (v NullableDrsServiceDrs) Get() *DrsServiceDrs { - return v.value -} - -func (v *NullableDrsServiceDrs) Set(val *DrsServiceDrs) { - v.value = val - v.isSet = true -} - -func (v NullableDrsServiceDrs) IsSet() bool { - return v.isSet -} - -func (v *NullableDrsServiceDrs) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableDrsServiceDrs(val *DrsServiceDrs) *NullableDrsServiceDrs { - return &NullableDrsServiceDrs{value: val, isSet: true} -} - -func (v NullableDrsServiceDrs) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v 
*NullableDrsServiceDrs) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_drs_service_type.go b/apigen/drs/model_drs_service_type.go deleted file mode 100644 index cecf0ad..0000000 --- a/apigen/drs/model_drs_service_type.go +++ /dev/null @@ -1,159 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the DrsServiceType type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &DrsServiceType{} - -// DrsServiceType struct for DrsServiceType -type DrsServiceType struct { - Artifact string `json:"artifact"` -} - -type _DrsServiceType DrsServiceType - -// NewDrsServiceType instantiates a new DrsServiceType object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewDrsServiceType(artifact string) *DrsServiceType { - this := DrsServiceType{} - this.Artifact = artifact - return &this -} - -// NewDrsServiceTypeWithDefaults instantiates a new DrsServiceType object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewDrsServiceTypeWithDefaults() *DrsServiceType { - this := DrsServiceType{} - return &this -} - -// GetArtifact returns the Artifact field value -func (o *DrsServiceType) GetArtifact() string { - if o == nil { - var ret string - return ret - } - - return o.Artifact -} - -// GetArtifactOk returns a tuple with the Artifact field value -// and a boolean to 
check if the value has been set. -func (o *DrsServiceType) GetArtifactOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Artifact, true -} - -// SetArtifact sets field value -func (o *DrsServiceType) SetArtifact(v string) { - o.Artifact = v -} - -func (o DrsServiceType) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o DrsServiceType) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["artifact"] = o.Artifact - return toSerialize, nil -} - -func (o *DrsServiceType) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. - requiredProperties := []string{ - "artifact", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varDrsServiceType := _DrsServiceType{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varDrsServiceType) - - if err != nil { - return err - } - - *o = DrsServiceType(varDrsServiceType) - - return err -} - -type NullableDrsServiceType struct { - value *DrsServiceType - isSet bool -} - -func (v NullableDrsServiceType) Get() *DrsServiceType { - return v.value -} - -func (v *NullableDrsServiceType) Set(val *DrsServiceType) { - v.value = val - v.isSet = true -} - -func (v NullableDrsServiceType) IsSet() bool { - return v.isSet -} - -func (v *NullableDrsServiceType) Unset() { - v.value = nil - v.isSet = false -} - 
-func NewNullableDrsServiceType(val *DrsServiceType) *NullableDrsServiceType { - return &NullableDrsServiceType{value: val, isSet: true} -} - -func (v NullableDrsServiceType) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableDrsServiceType) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_error.go b/apigen/drs/model_error.go deleted file mode 100644 index 5bc566e..0000000 --- a/apigen/drs/model_error.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" -) - -// checks if the Error type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &Error{} - -// Error An object that can optionally include information about the error. -type Error struct { - // A detailed error message. - Msg *string `json:"msg,omitempty"` - // The integer representing the HTTP status code (e.g. 200, 404). - StatusCode *int32 `json:"status_code,omitempty"` -} - -// NewError instantiates a new Error object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewError() *Error { - this := Error{} - return &this -} - -// NewErrorWithDefaults instantiates a new Error object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewErrorWithDefaults() *Error { - this := Error{} - return &this -} - -// GetMsg returns the Msg field value if set, zero value otherwise. 
-func (o *Error) GetMsg() string { - if o == nil || IsNil(o.Msg) { - var ret string - return ret - } - return *o.Msg -} - -// GetMsgOk returns a tuple with the Msg field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Error) GetMsgOk() (*string, bool) { - if o == nil || IsNil(o.Msg) { - return nil, false - } - return o.Msg, true -} - -// HasMsg returns a boolean if a field has been set. -func (o *Error) HasMsg() bool { - if o != nil && !IsNil(o.Msg) { - return true - } - - return false -} - -// SetMsg gets a reference to the given string and assigns it to the Msg field. -func (o *Error) SetMsg(v string) { - o.Msg = &v -} - -// GetStatusCode returns the StatusCode field value if set, zero value otherwise. -func (o *Error) GetStatusCode() int32 { - if o == nil || IsNil(o.StatusCode) { - var ret int32 - return ret - } - return *o.StatusCode -} - -// GetStatusCodeOk returns a tuple with the StatusCode field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Error) GetStatusCodeOk() (*int32, bool) { - if o == nil || IsNil(o.StatusCode) { - return nil, false - } - return o.StatusCode, true -} - -// HasStatusCode returns a boolean if a field has been set. -func (o *Error) HasStatusCode() bool { - if o != nil && !IsNil(o.StatusCode) { - return true - } - - return false -} - -// SetStatusCode gets a reference to the given int32 and assigns it to the StatusCode field. 
-func (o *Error) SetStatusCode(v int32) { - o.StatusCode = &v -} - -func (o Error) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o Error) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Msg) { - toSerialize["msg"] = o.Msg - } - if !IsNil(o.StatusCode) { - toSerialize["status_code"] = o.StatusCode - } - return toSerialize, nil -} - -type NullableError struct { - value *Error - isSet bool -} - -func (v NullableError) Get() *Error { - return v.value -} - -func (v *NullableError) Set(val *Error) { - v.value = val - v.isSet = true -} - -func (v NullableError) IsSet() bool { - return v.isSet -} - -func (v *NullableError) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableError(val *Error) *NullableError { - return &NullableError{value: val, isSet: true} -} - -func (v NullableError) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableError) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_get_bulk_access_url_200_response.go b/apigen/drs/model_get_bulk_access_url_200_response.go deleted file mode 100644 index b3db2cb..0000000 --- a/apigen/drs/model_get_bulk_access_url_200_response.go +++ /dev/null @@ -1,200 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" -) - -// checks if the GetBulkAccessURL200Response type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &GetBulkAccessURL200Response{} - -// GetBulkAccessURL200Response struct for GetBulkAccessURL200Response -type GetBulkAccessURL200Response struct { - Summary *Summary `json:"summary,omitempty"` - // Error codes for each unresolved drs objects. - UnresolvedDrsObjects []UnresolvedInner `json:"unresolved_drs_objects,omitempty"` - ResolvedDrsObjectAccessUrls []BulkAccessURL `json:"resolved_drs_object_access_urls,omitempty"` -} - -// NewGetBulkAccessURL200Response instantiates a new GetBulkAccessURL200Response object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewGetBulkAccessURL200Response() *GetBulkAccessURL200Response { - this := GetBulkAccessURL200Response{} - return &this -} - -// NewGetBulkAccessURL200ResponseWithDefaults instantiates a new GetBulkAccessURL200Response object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewGetBulkAccessURL200ResponseWithDefaults() *GetBulkAccessURL200Response { - this := GetBulkAccessURL200Response{} - return &this -} - -// GetSummary returns the Summary field value if set, zero value otherwise. -func (o *GetBulkAccessURL200Response) GetSummary() Summary { - if o == nil || IsNil(o.Summary) { - var ret Summary - return ret - } - return *o.Summary -} - -// GetSummaryOk returns a tuple with the Summary field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *GetBulkAccessURL200Response) GetSummaryOk() (*Summary, bool) { - if o == nil || IsNil(o.Summary) { - return nil, false - } - return o.Summary, true -} - -// HasSummary returns a boolean if a field has been set. -func (o *GetBulkAccessURL200Response) HasSummary() bool { - if o != nil && !IsNil(o.Summary) { - return true - } - - return false -} - -// SetSummary gets a reference to the given Summary and assigns it to the Summary field. -func (o *GetBulkAccessURL200Response) SetSummary(v Summary) { - o.Summary = &v -} - -// GetUnresolvedDrsObjects returns the UnresolvedDrsObjects field value if set, zero value otherwise. -func (o *GetBulkAccessURL200Response) GetUnresolvedDrsObjects() []UnresolvedInner { - if o == nil || IsNil(o.UnresolvedDrsObjects) { - var ret []UnresolvedInner - return ret - } - return o.UnresolvedDrsObjects -} - -// GetUnresolvedDrsObjectsOk returns a tuple with the UnresolvedDrsObjects field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *GetBulkAccessURL200Response) GetUnresolvedDrsObjectsOk() ([]UnresolvedInner, bool) { - if o == nil || IsNil(o.UnresolvedDrsObjects) { - return nil, false - } - return o.UnresolvedDrsObjects, true -} - -// HasUnresolvedDrsObjects returns a boolean if a field has been set. -func (o *GetBulkAccessURL200Response) HasUnresolvedDrsObjects() bool { - if o != nil && !IsNil(o.UnresolvedDrsObjects) { - return true - } - - return false -} - -// SetUnresolvedDrsObjects gets a reference to the given []UnresolvedInner and assigns it to the UnresolvedDrsObjects field. -func (o *GetBulkAccessURL200Response) SetUnresolvedDrsObjects(v []UnresolvedInner) { - o.UnresolvedDrsObjects = v -} - -// GetResolvedDrsObjectAccessUrls returns the ResolvedDrsObjectAccessUrls field value if set, zero value otherwise. 
-func (o *GetBulkAccessURL200Response) GetResolvedDrsObjectAccessUrls() []BulkAccessURL { - if o == nil || IsNil(o.ResolvedDrsObjectAccessUrls) { - var ret []BulkAccessURL - return ret - } - return o.ResolvedDrsObjectAccessUrls -} - -// GetResolvedDrsObjectAccessUrlsOk returns a tuple with the ResolvedDrsObjectAccessUrls field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *GetBulkAccessURL200Response) GetResolvedDrsObjectAccessUrlsOk() ([]BulkAccessURL, bool) { - if o == nil || IsNil(o.ResolvedDrsObjectAccessUrls) { - return nil, false - } - return o.ResolvedDrsObjectAccessUrls, true -} - -// HasResolvedDrsObjectAccessUrls returns a boolean if a field has been set. -func (o *GetBulkAccessURL200Response) HasResolvedDrsObjectAccessUrls() bool { - if o != nil && !IsNil(o.ResolvedDrsObjectAccessUrls) { - return true - } - - return false -} - -// SetResolvedDrsObjectAccessUrls gets a reference to the given []BulkAccessURL and assigns it to the ResolvedDrsObjectAccessUrls field. 
-func (o *GetBulkAccessURL200Response) SetResolvedDrsObjectAccessUrls(v []BulkAccessURL) { - o.ResolvedDrsObjectAccessUrls = v -} - -func (o GetBulkAccessURL200Response) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o GetBulkAccessURL200Response) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Summary) { - toSerialize["summary"] = o.Summary - } - if !IsNil(o.UnresolvedDrsObjects) { - toSerialize["unresolved_drs_objects"] = o.UnresolvedDrsObjects - } - if !IsNil(o.ResolvedDrsObjectAccessUrls) { - toSerialize["resolved_drs_object_access_urls"] = o.ResolvedDrsObjectAccessUrls - } - return toSerialize, nil -} - -type NullableGetBulkAccessURL200Response struct { - value *GetBulkAccessURL200Response - isSet bool -} - -func (v NullableGetBulkAccessURL200Response) Get() *GetBulkAccessURL200Response { - return v.value -} - -func (v *NullableGetBulkAccessURL200Response) Set(val *GetBulkAccessURL200Response) { - v.value = val - v.isSet = true -} - -func (v NullableGetBulkAccessURL200Response) IsSet() bool { - return v.isSet -} - -func (v *NullableGetBulkAccessURL200Response) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableGetBulkAccessURL200Response(val *GetBulkAccessURL200Response) *NullableGetBulkAccessURL200Response { - return &NullableGetBulkAccessURL200Response{value: val, isSet: true} -} - -func (v NullableGetBulkAccessURL200Response) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableGetBulkAccessURL200Response) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_get_bulk_objects_200_response.go b/apigen/drs/model_get_bulk_objects_200_response.go deleted file mode 100644 index d92607a..0000000 --- a/apigen/drs/model_get_bulk_objects_200_response.go +++ /dev/null @@ -1,200 +0,0 @@ -/* 
-Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" -) - -// checks if the GetBulkObjects200Response type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &GetBulkObjects200Response{} - -// GetBulkObjects200Response struct for GetBulkObjects200Response -type GetBulkObjects200Response struct { - Summary *Summary `json:"summary,omitempty"` - // Error codes for each unresolved drs objects. - UnresolvedDrsObjects []UnresolvedInner `json:"unresolved_drs_objects,omitempty"` - ResolvedDrsObject []DrsObject `json:"resolved_drs_object,omitempty"` -} - -// NewGetBulkObjects200Response instantiates a new GetBulkObjects200Response object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewGetBulkObjects200Response() *GetBulkObjects200Response { - this := GetBulkObjects200Response{} - return &this -} - -// NewGetBulkObjects200ResponseWithDefaults instantiates a new GetBulkObjects200Response object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewGetBulkObjects200ResponseWithDefaults() *GetBulkObjects200Response { - this := GetBulkObjects200Response{} - return &this -} - -// GetSummary returns the Summary field value if set, zero value otherwise. 
-func (o *GetBulkObjects200Response) GetSummary() Summary { - if o == nil || IsNil(o.Summary) { - var ret Summary - return ret - } - return *o.Summary -} - -// GetSummaryOk returns a tuple with the Summary field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *GetBulkObjects200Response) GetSummaryOk() (*Summary, bool) { - if o == nil || IsNil(o.Summary) { - return nil, false - } - return o.Summary, true -} - -// HasSummary returns a boolean if a field has been set. -func (o *GetBulkObjects200Response) HasSummary() bool { - if o != nil && !IsNil(o.Summary) { - return true - } - - return false -} - -// SetSummary gets a reference to the given Summary and assigns it to the Summary field. -func (o *GetBulkObjects200Response) SetSummary(v Summary) { - o.Summary = &v -} - -// GetUnresolvedDrsObjects returns the UnresolvedDrsObjects field value if set, zero value otherwise. -func (o *GetBulkObjects200Response) GetUnresolvedDrsObjects() []UnresolvedInner { - if o == nil || IsNil(o.UnresolvedDrsObjects) { - var ret []UnresolvedInner - return ret - } - return o.UnresolvedDrsObjects -} - -// GetUnresolvedDrsObjectsOk returns a tuple with the UnresolvedDrsObjects field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *GetBulkObjects200Response) GetUnresolvedDrsObjectsOk() ([]UnresolvedInner, bool) { - if o == nil || IsNil(o.UnresolvedDrsObjects) { - return nil, false - } - return o.UnresolvedDrsObjects, true -} - -// HasUnresolvedDrsObjects returns a boolean if a field has been set. -func (o *GetBulkObjects200Response) HasUnresolvedDrsObjects() bool { - if o != nil && !IsNil(o.UnresolvedDrsObjects) { - return true - } - - return false -} - -// SetUnresolvedDrsObjects gets a reference to the given []UnresolvedInner and assigns it to the UnresolvedDrsObjects field. 
-func (o *GetBulkObjects200Response) SetUnresolvedDrsObjects(v []UnresolvedInner) { - o.UnresolvedDrsObjects = v -} - -// GetResolvedDrsObject returns the ResolvedDrsObject field value if set, zero value otherwise. -func (o *GetBulkObjects200Response) GetResolvedDrsObject() []DrsObject { - if o == nil || IsNil(o.ResolvedDrsObject) { - var ret []DrsObject - return ret - } - return o.ResolvedDrsObject -} - -// GetResolvedDrsObjectOk returns a tuple with the ResolvedDrsObject field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *GetBulkObjects200Response) GetResolvedDrsObjectOk() ([]DrsObject, bool) { - if o == nil || IsNil(o.ResolvedDrsObject) { - return nil, false - } - return o.ResolvedDrsObject, true -} - -// HasResolvedDrsObject returns a boolean if a field has been set. -func (o *GetBulkObjects200Response) HasResolvedDrsObject() bool { - if o != nil && !IsNil(o.ResolvedDrsObject) { - return true - } - - return false -} - -// SetResolvedDrsObject gets a reference to the given []DrsObject and assigns it to the ResolvedDrsObject field. 
-func (o *GetBulkObjects200Response) SetResolvedDrsObject(v []DrsObject) { - o.ResolvedDrsObject = v -} - -func (o GetBulkObjects200Response) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o GetBulkObjects200Response) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Summary) { - toSerialize["summary"] = o.Summary - } - if !IsNil(o.UnresolvedDrsObjects) { - toSerialize["unresolved_drs_objects"] = o.UnresolvedDrsObjects - } - if !IsNil(o.ResolvedDrsObject) { - toSerialize["resolved_drs_object"] = o.ResolvedDrsObject - } - return toSerialize, nil -} - -type NullableGetBulkObjects200Response struct { - value *GetBulkObjects200Response - isSet bool -} - -func (v NullableGetBulkObjects200Response) Get() *GetBulkObjects200Response { - return v.value -} - -func (v *NullableGetBulkObjects200Response) Set(val *GetBulkObjects200Response) { - v.value = val - v.isSet = true -} - -func (v NullableGetBulkObjects200Response) IsSet() bool { - return v.isSet -} - -func (v *NullableGetBulkObjects200Response) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableGetBulkObjects200Response(val *GetBulkObjects200Response) *NullableGetBulkObjects200Response { - return &NullableGetBulkObjects200Response{value: val, isSet: true} -} - -func (v NullableGetBulkObjects200Response) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableGetBulkObjects200Response) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_get_bulk_objects_request.go b/apigen/drs/model_get_bulk_objects_request.go deleted file mode 100644 index 5656fec..0000000 --- a/apigen/drs/model_get_bulk_objects_request.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator 
https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the GetBulkObjectsRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &GetBulkObjectsRequest{} - -// GetBulkObjectsRequest struct for GetBulkObjectsRequest -type GetBulkObjectsRequest struct { - // the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. - Passports []string `json:"passports,omitempty"` - // An array of ObjectIDs to retrieve metadata for - BulkObjectIds []string `json:"bulk_object_ids"` -} - -type _GetBulkObjectsRequest GetBulkObjectsRequest - -// NewGetBulkObjectsRequest instantiates a new GetBulkObjectsRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewGetBulkObjectsRequest(bulkObjectIds []string) *GetBulkObjectsRequest { - this := GetBulkObjectsRequest{} - this.BulkObjectIds = bulkObjectIds - return &this -} - -// NewGetBulkObjectsRequestWithDefaults instantiates a new GetBulkObjectsRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewGetBulkObjectsRequestWithDefaults() *GetBulkObjectsRequest { - this := GetBulkObjectsRequest{} - return &this -} - -// GetPassports returns the Passports field value if set, zero value otherwise. 
-func (o *GetBulkObjectsRequest) GetPassports() []string { - if o == nil || IsNil(o.Passports) { - var ret []string - return ret - } - return o.Passports -} - -// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *GetBulkObjectsRequest) GetPassportsOk() ([]string, bool) { - if o == nil || IsNil(o.Passports) { - return nil, false - } - return o.Passports, true -} - -// HasPassports returns a boolean if a field has been set. -func (o *GetBulkObjectsRequest) HasPassports() bool { - if o != nil && !IsNil(o.Passports) { - return true - } - - return false -} - -// SetPassports gets a reference to the given []string and assigns it to the Passports field. -func (o *GetBulkObjectsRequest) SetPassports(v []string) { - o.Passports = v -} - -// GetBulkObjectIds returns the BulkObjectIds field value -func (o *GetBulkObjectsRequest) GetBulkObjectIds() []string { - if o == nil { - var ret []string - return ret - } - - return o.BulkObjectIds -} - -// GetBulkObjectIdsOk returns a tuple with the BulkObjectIds field value -// and a boolean to check if the value has been set. 
-func (o *GetBulkObjectsRequest) GetBulkObjectIdsOk() ([]string, bool) { - if o == nil { - return nil, false - } - return o.BulkObjectIds, true -} - -// SetBulkObjectIds sets field value -func (o *GetBulkObjectsRequest) SetBulkObjectIds(v []string) { - o.BulkObjectIds = v -} - -func (o GetBulkObjectsRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o GetBulkObjectsRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Passports) { - toSerialize["passports"] = o.Passports - } - toSerialize["bulk_object_ids"] = o.BulkObjectIds - return toSerialize, nil -} - -func (o *GetBulkObjectsRequest) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "bulk_object_ids", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varGetBulkObjectsRequest := _GetBulkObjectsRequest{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varGetBulkObjectsRequest) - - if err != nil { - return err - } - - *o = GetBulkObjectsRequest(varGetBulkObjectsRequest) - - return err -} - -type NullableGetBulkObjectsRequest struct { - value *GetBulkObjectsRequest - isSet bool -} - -func (v NullableGetBulkObjectsRequest) Get() *GetBulkObjectsRequest { - return v.value -} - -func (v *NullableGetBulkObjectsRequest) Set(val *GetBulkObjectsRequest) { - v.value = val - v.isSet = true -} - -func (v NullableGetBulkObjectsRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableGetBulkObjectsRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableGetBulkObjectsRequest(val *GetBulkObjectsRequest) *NullableGetBulkObjectsRequest { - return &NullableGetBulkObjectsRequest{value: val, isSet: true} -} - -func (v NullableGetBulkObjectsRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableGetBulkObjectsRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_get_service_info_200_response.go b/apigen/drs/model_get_service_info_200_response.go deleted file mode 100644 index 5bdc486..0000000 --- a/apigen/drs/model_get_service_info_200_response.go +++ /dev/null @@ -1,562 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API 
version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "time" - "bytes" - "fmt" -) - -// checks if the GetServiceInfo200Response type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &GetServiceInfo200Response{} - -// GetServiceInfo200Response struct for GetServiceInfo200Response -type GetServiceInfo200Response struct { - // Unique ID of this service. Reverse domain name notation is recommended, though not required. The identifier should attempt to be globally unique so it can be used in downstream aggregator services e.g. Service Registry. - Id string `json:"id"` - // Name of this service. Should be human readable. - Name string `json:"name"` - Type DrsServiceType `json:"type"` - // Description of the service. Should be human readable and provide information about the service. - Description *string `json:"description,omitempty"` - Organization ServiceOrganization `json:"organization"` - // URL of the contact for the provider of this service, e.g. a link to a contact form (RFC 3986 format), or an email (RFC 2368 format). - ContactUrl *string `json:"contactUrl,omitempty"` - // URL of the documentation of this service (RFC 3986 format). This should help someone learn how to use your service, including any specifics required to access data, e.g. authentication. - DocumentationUrl *string `json:"documentationUrl,omitempty"` - // Timestamp describing when the service was first deployed and available (RFC 3339 format) - CreatedAt *time.Time `json:"createdAt,omitempty"` - // Timestamp describing when the service was last updated (RFC 3339 format) - UpdatedAt *time.Time `json:"updatedAt,omitempty"` - // Environment the service is running in. Use this to distinguish between production, development and testing/staging deployments. Suggested values are prod, test, dev, staging. 
However this is advised and not enforced. - Environment *string `json:"environment,omitempty"` - // Version of the service being described. Semantic versioning is recommended, but other identifiers, such as dates or commit hashes, are also allowed. The version should be changed whenever the service is updated. - Version string `json:"version"` - // DEPRECATED - In 2.0 this will move to under the drs section of service info and not at the root level. The max length the bulk request endpoints can handle (>= 1) before generating a 413 error e.g. how long can the arrays bulk_object_ids and bulk_object_access_ids be for this server. - MaxBulkRequestLength int32 `json:"maxBulkRequestLength"` - Drs *DrsServiceDrs `json:"drs,omitempty"` -} - -type _GetServiceInfo200Response GetServiceInfo200Response - -// NewGetServiceInfo200Response instantiates a new GetServiceInfo200Response object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewGetServiceInfo200Response(id string, name string, type_ DrsServiceType, organization ServiceOrganization, version string, maxBulkRequestLength int32) *GetServiceInfo200Response { - this := GetServiceInfo200Response{} - this.Id = id - this.Name = name - this.Type = type_ - this.Organization = organization - this.Version = version - this.MaxBulkRequestLength = maxBulkRequestLength - return &this -} - -// NewGetServiceInfo200ResponseWithDefaults instantiates a new GetServiceInfo200Response object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewGetServiceInfo200ResponseWithDefaults() *GetServiceInfo200Response { - this := GetServiceInfo200Response{} - return &this -} - -// GetId returns the Id field value -func (o *GetServiceInfo200Response) 
GetId() string { - if o == nil { - var ret string - return ret - } - - return o.Id -} - -// GetIdOk returns a tuple with the Id field value -// and a boolean to check if the value has been set. -func (o *GetServiceInfo200Response) GetIdOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Id, true -} - -// SetId sets field value -func (o *GetServiceInfo200Response) SetId(v string) { - o.Id = v -} - -// GetName returns the Name field value -func (o *GetServiceInfo200Response) GetName() string { - if o == nil { - var ret string - return ret - } - - return o.Name -} - -// GetNameOk returns a tuple with the Name field value -// and a boolean to check if the value has been set. -func (o *GetServiceInfo200Response) GetNameOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Name, true -} - -// SetName sets field value -func (o *GetServiceInfo200Response) SetName(v string) { - o.Name = v -} - -// GetType returns the Type field value -func (o *GetServiceInfo200Response) GetType() DrsServiceType { - if o == nil { - var ret DrsServiceType - return ret - } - - return o.Type -} - -// GetTypeOk returns a tuple with the Type field value -// and a boolean to check if the value has been set. -func (o *GetServiceInfo200Response) GetTypeOk() (*DrsServiceType, bool) { - if o == nil { - return nil, false - } - return &o.Type, true -} - -// SetType sets field value -func (o *GetServiceInfo200Response) SetType(v DrsServiceType) { - o.Type = v -} - -// GetDescription returns the Description field value if set, zero value otherwise. -func (o *GetServiceInfo200Response) GetDescription() string { - if o == nil || IsNil(o.Description) { - var ret string - return ret - } - return *o.Description -} - -// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *GetServiceInfo200Response) GetDescriptionOk() (*string, bool) { - if o == nil || IsNil(o.Description) { - return nil, false - } - return o.Description, true -} - -// HasDescription returns a boolean if a field has been set. -func (o *GetServiceInfo200Response) HasDescription() bool { - if o != nil && !IsNil(o.Description) { - return true - } - - return false -} - -// SetDescription gets a reference to the given string and assigns it to the Description field. -func (o *GetServiceInfo200Response) SetDescription(v string) { - o.Description = &v -} - -// GetOrganization returns the Organization field value -func (o *GetServiceInfo200Response) GetOrganization() ServiceOrganization { - if o == nil { - var ret ServiceOrganization - return ret - } - - return o.Organization -} - -// GetOrganizationOk returns a tuple with the Organization field value -// and a boolean to check if the value has been set. -func (o *GetServiceInfo200Response) GetOrganizationOk() (*ServiceOrganization, bool) { - if o == nil { - return nil, false - } - return &o.Organization, true -} - -// SetOrganization sets field value -func (o *GetServiceInfo200Response) SetOrganization(v ServiceOrganization) { - o.Organization = v -} - -// GetContactUrl returns the ContactUrl field value if set, zero value otherwise. -func (o *GetServiceInfo200Response) GetContactUrl() string { - if o == nil || IsNil(o.ContactUrl) { - var ret string - return ret - } - return *o.ContactUrl -} - -// GetContactUrlOk returns a tuple with the ContactUrl field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *GetServiceInfo200Response) GetContactUrlOk() (*string, bool) { - if o == nil || IsNil(o.ContactUrl) { - return nil, false - } - return o.ContactUrl, true -} - -// HasContactUrl returns a boolean if a field has been set. 
-func (o *GetServiceInfo200Response) HasContactUrl() bool { - if o != nil && !IsNil(o.ContactUrl) { - return true - } - - return false -} - -// SetContactUrl gets a reference to the given string and assigns it to the ContactUrl field. -func (o *GetServiceInfo200Response) SetContactUrl(v string) { - o.ContactUrl = &v -} - -// GetDocumentationUrl returns the DocumentationUrl field value if set, zero value otherwise. -func (o *GetServiceInfo200Response) GetDocumentationUrl() string { - if o == nil || IsNil(o.DocumentationUrl) { - var ret string - return ret - } - return *o.DocumentationUrl -} - -// GetDocumentationUrlOk returns a tuple with the DocumentationUrl field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *GetServiceInfo200Response) GetDocumentationUrlOk() (*string, bool) { - if o == nil || IsNil(o.DocumentationUrl) { - return nil, false - } - return o.DocumentationUrl, true -} - -// HasDocumentationUrl returns a boolean if a field has been set. -func (o *GetServiceInfo200Response) HasDocumentationUrl() bool { - if o != nil && !IsNil(o.DocumentationUrl) { - return true - } - - return false -} - -// SetDocumentationUrl gets a reference to the given string and assigns it to the DocumentationUrl field. -func (o *GetServiceInfo200Response) SetDocumentationUrl(v string) { - o.DocumentationUrl = &v -} - -// GetCreatedAt returns the CreatedAt field value if set, zero value otherwise. -func (o *GetServiceInfo200Response) GetCreatedAt() time.Time { - if o == nil || IsNil(o.CreatedAt) { - var ret time.Time - return ret - } - return *o.CreatedAt -} - -// GetCreatedAtOk returns a tuple with the CreatedAt field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *GetServiceInfo200Response) GetCreatedAtOk() (*time.Time, bool) { - if o == nil || IsNil(o.CreatedAt) { - return nil, false - } - return o.CreatedAt, true -} - -// HasCreatedAt returns a boolean if a field has been set. 
-func (o *GetServiceInfo200Response) HasCreatedAt() bool { - if o != nil && !IsNil(o.CreatedAt) { - return true - } - - return false -} - -// SetCreatedAt gets a reference to the given time.Time and assigns it to the CreatedAt field. -func (o *GetServiceInfo200Response) SetCreatedAt(v time.Time) { - o.CreatedAt = &v -} - -// GetUpdatedAt returns the UpdatedAt field value if set, zero value otherwise. -func (o *GetServiceInfo200Response) GetUpdatedAt() time.Time { - if o == nil || IsNil(o.UpdatedAt) { - var ret time.Time - return ret - } - return *o.UpdatedAt -} - -// GetUpdatedAtOk returns a tuple with the UpdatedAt field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *GetServiceInfo200Response) GetUpdatedAtOk() (*time.Time, bool) { - if o == nil || IsNil(o.UpdatedAt) { - return nil, false - } - return o.UpdatedAt, true -} - -// HasUpdatedAt returns a boolean if a field has been set. -func (o *GetServiceInfo200Response) HasUpdatedAt() bool { - if o != nil && !IsNil(o.UpdatedAt) { - return true - } - - return false -} - -// SetUpdatedAt gets a reference to the given time.Time and assigns it to the UpdatedAt field. -func (o *GetServiceInfo200Response) SetUpdatedAt(v time.Time) { - o.UpdatedAt = &v -} - -// GetEnvironment returns the Environment field value if set, zero value otherwise. -func (o *GetServiceInfo200Response) GetEnvironment() string { - if o == nil || IsNil(o.Environment) { - var ret string - return ret - } - return *o.Environment -} - -// GetEnvironmentOk returns a tuple with the Environment field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *GetServiceInfo200Response) GetEnvironmentOk() (*string, bool) { - if o == nil || IsNil(o.Environment) { - return nil, false - } - return o.Environment, true -} - -// HasEnvironment returns a boolean if a field has been set. 
-func (o *GetServiceInfo200Response) HasEnvironment() bool { - if o != nil && !IsNil(o.Environment) { - return true - } - - return false -} - -// SetEnvironment gets a reference to the given string and assigns it to the Environment field. -func (o *GetServiceInfo200Response) SetEnvironment(v string) { - o.Environment = &v -} - -// GetVersion returns the Version field value -func (o *GetServiceInfo200Response) GetVersion() string { - if o == nil { - var ret string - return ret - } - - return o.Version -} - -// GetVersionOk returns a tuple with the Version field value -// and a boolean to check if the value has been set. -func (o *GetServiceInfo200Response) GetVersionOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Version, true -} - -// SetVersion sets field value -func (o *GetServiceInfo200Response) SetVersion(v string) { - o.Version = v -} - -// GetMaxBulkRequestLength returns the MaxBulkRequestLength field value -func (o *GetServiceInfo200Response) GetMaxBulkRequestLength() int32 { - if o == nil { - var ret int32 - return ret - } - - return o.MaxBulkRequestLength -} - -// GetMaxBulkRequestLengthOk returns a tuple with the MaxBulkRequestLength field value -// and a boolean to check if the value has been set. -func (o *GetServiceInfo200Response) GetMaxBulkRequestLengthOk() (*int32, bool) { - if o == nil { - return nil, false - } - return &o.MaxBulkRequestLength, true -} - -// SetMaxBulkRequestLength sets field value -func (o *GetServiceInfo200Response) SetMaxBulkRequestLength(v int32) { - o.MaxBulkRequestLength = v -} - -// GetDrs returns the Drs field value if set, zero value otherwise. -func (o *GetServiceInfo200Response) GetDrs() DrsServiceDrs { - if o == nil || IsNil(o.Drs) { - var ret DrsServiceDrs - return ret - } - return *o.Drs -} - -// GetDrsOk returns a tuple with the Drs field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *GetServiceInfo200Response) GetDrsOk() (*DrsServiceDrs, bool) { - if o == nil || IsNil(o.Drs) { - return nil, false - } - return o.Drs, true -} - -// HasDrs returns a boolean if a field has been set. -func (o *GetServiceInfo200Response) HasDrs() bool { - if o != nil && !IsNil(o.Drs) { - return true - } - - return false -} - -// SetDrs gets a reference to the given DrsServiceDrs and assigns it to the Drs field. -func (o *GetServiceInfo200Response) SetDrs(v DrsServiceDrs) { - o.Drs = &v -} - -func (o GetServiceInfo200Response) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o GetServiceInfo200Response) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["id"] = o.Id - toSerialize["name"] = o.Name - toSerialize["type"] = o.Type - if !IsNil(o.Description) { - toSerialize["description"] = o.Description - } - toSerialize["organization"] = o.Organization - if !IsNil(o.ContactUrl) { - toSerialize["contactUrl"] = o.ContactUrl - } - if !IsNil(o.DocumentationUrl) { - toSerialize["documentationUrl"] = o.DocumentationUrl - } - if !IsNil(o.CreatedAt) { - toSerialize["createdAt"] = o.CreatedAt - } - if !IsNil(o.UpdatedAt) { - toSerialize["updatedAt"] = o.UpdatedAt - } - if !IsNil(o.Environment) { - toSerialize["environment"] = o.Environment - } - toSerialize["version"] = o.Version - toSerialize["maxBulkRequestLength"] = o.MaxBulkRequestLength - if !IsNil(o.Drs) { - toSerialize["drs"] = o.Drs - } - return toSerialize, nil -} - -func (o *GetServiceInfo200Response) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "id", - "name", - "type", - "organization", - "version", - "maxBulkRequestLength", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varGetServiceInfo200Response := _GetServiceInfo200Response{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varGetServiceInfo200Response) - - if err != nil { - return err - } - - *o = GetServiceInfo200Response(varGetServiceInfo200Response) - - return err -} - -type NullableGetServiceInfo200Response struct { - value *GetServiceInfo200Response - isSet bool -} - -func (v NullableGetServiceInfo200Response) Get() *GetServiceInfo200Response { - return v.value -} - -func (v *NullableGetServiceInfo200Response) Set(val *GetServiceInfo200Response) { - v.value = val - v.isSet = true -} - -func (v NullableGetServiceInfo200Response) IsSet() bool { - return v.isSet -} - -func (v *NullableGetServiceInfo200Response) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableGetServiceInfo200Response(val *GetServiceInfo200Response) *NullableGetServiceInfo200Response { - return &NullableGetServiceInfo200Response{value: val, isSet: true} -} - -func (v NullableGetServiceInfo200Response) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableGetServiceInfo200Response) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_options_bulk_object_200_response.go b/apigen/drs/model_options_bulk_object_200_response.go deleted file mode 100644 index 67df1c7..0000000 --- a/apigen/drs/model_options_bulk_object_200_response.go +++ /dev/null @@ -1,200 +0,0 @@ -/* 
-Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" -) - -// checks if the OptionsBulkObject200Response type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &OptionsBulkObject200Response{} - -// OptionsBulkObject200Response struct for OptionsBulkObject200Response -type OptionsBulkObject200Response struct { - Summary *Summary `json:"summary,omitempty"` - // Error codes for each unresolved drs objects. - UnresolvedDrsObjects []UnresolvedInner `json:"unresolved_drs_objects,omitempty"` - ResolvedDrsObject []Authorizations `json:"resolved_drs_object,omitempty"` -} - -// NewOptionsBulkObject200Response instantiates a new OptionsBulkObject200Response object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewOptionsBulkObject200Response() *OptionsBulkObject200Response { - this := OptionsBulkObject200Response{} - return &this -} - -// NewOptionsBulkObject200ResponseWithDefaults instantiates a new OptionsBulkObject200Response object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewOptionsBulkObject200ResponseWithDefaults() *OptionsBulkObject200Response { - this := OptionsBulkObject200Response{} - return &this -} - -// GetSummary returns the Summary field value if set, zero value otherwise. 
-func (o *OptionsBulkObject200Response) GetSummary() Summary { - if o == nil || IsNil(o.Summary) { - var ret Summary - return ret - } - return *o.Summary -} - -// GetSummaryOk returns a tuple with the Summary field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *OptionsBulkObject200Response) GetSummaryOk() (*Summary, bool) { - if o == nil || IsNil(o.Summary) { - return nil, false - } - return o.Summary, true -} - -// HasSummary returns a boolean if a field has been set. -func (o *OptionsBulkObject200Response) HasSummary() bool { - if o != nil && !IsNil(o.Summary) { - return true - } - - return false -} - -// SetSummary gets a reference to the given Summary and assigns it to the Summary field. -func (o *OptionsBulkObject200Response) SetSummary(v Summary) { - o.Summary = &v -} - -// GetUnresolvedDrsObjects returns the UnresolvedDrsObjects field value if set, zero value otherwise. -func (o *OptionsBulkObject200Response) GetUnresolvedDrsObjects() []UnresolvedInner { - if o == nil || IsNil(o.UnresolvedDrsObjects) { - var ret []UnresolvedInner - return ret - } - return o.UnresolvedDrsObjects -} - -// GetUnresolvedDrsObjectsOk returns a tuple with the UnresolvedDrsObjects field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *OptionsBulkObject200Response) GetUnresolvedDrsObjectsOk() ([]UnresolvedInner, bool) { - if o == nil || IsNil(o.UnresolvedDrsObjects) { - return nil, false - } - return o.UnresolvedDrsObjects, true -} - -// HasUnresolvedDrsObjects returns a boolean if a field has been set. -func (o *OptionsBulkObject200Response) HasUnresolvedDrsObjects() bool { - if o != nil && !IsNil(o.UnresolvedDrsObjects) { - return true - } - - return false -} - -// SetUnresolvedDrsObjects gets a reference to the given []UnresolvedInner and assigns it to the UnresolvedDrsObjects field. 
-func (o *OptionsBulkObject200Response) SetUnresolvedDrsObjects(v []UnresolvedInner) { - o.UnresolvedDrsObjects = v -} - -// GetResolvedDrsObject returns the ResolvedDrsObject field value if set, zero value otherwise. -func (o *OptionsBulkObject200Response) GetResolvedDrsObject() []Authorizations { - if o == nil || IsNil(o.ResolvedDrsObject) { - var ret []Authorizations - return ret - } - return o.ResolvedDrsObject -} - -// GetResolvedDrsObjectOk returns a tuple with the ResolvedDrsObject field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *OptionsBulkObject200Response) GetResolvedDrsObjectOk() ([]Authorizations, bool) { - if o == nil || IsNil(o.ResolvedDrsObject) { - return nil, false - } - return o.ResolvedDrsObject, true -} - -// HasResolvedDrsObject returns a boolean if a field has been set. -func (o *OptionsBulkObject200Response) HasResolvedDrsObject() bool { - if o != nil && !IsNil(o.ResolvedDrsObject) { - return true - } - - return false -} - -// SetResolvedDrsObject gets a reference to the given []Authorizations and assigns it to the ResolvedDrsObject field. 
-func (o *OptionsBulkObject200Response) SetResolvedDrsObject(v []Authorizations) { - o.ResolvedDrsObject = v -} - -func (o OptionsBulkObject200Response) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o OptionsBulkObject200Response) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Summary) { - toSerialize["summary"] = o.Summary - } - if !IsNil(o.UnresolvedDrsObjects) { - toSerialize["unresolved_drs_objects"] = o.UnresolvedDrsObjects - } - if !IsNil(o.ResolvedDrsObject) { - toSerialize["resolved_drs_object"] = o.ResolvedDrsObject - } - return toSerialize, nil -} - -type NullableOptionsBulkObject200Response struct { - value *OptionsBulkObject200Response - isSet bool -} - -func (v NullableOptionsBulkObject200Response) Get() *OptionsBulkObject200Response { - return v.value -} - -func (v *NullableOptionsBulkObject200Response) Set(val *OptionsBulkObject200Response) { - v.value = val - v.isSet = true -} - -func (v NullableOptionsBulkObject200Response) IsSet() bool { - return v.isSet -} - -func (v *NullableOptionsBulkObject200Response) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableOptionsBulkObject200Response(val *OptionsBulkObject200Response) *NullableOptionsBulkObject200Response { - return &NullableOptionsBulkObject200Response{value: val, isSet: true} -} - -func (v NullableOptionsBulkObject200Response) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableOptionsBulkObject200Response) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_post_access_url_request.go b/apigen/drs/model_post_access_url_request.go deleted file mode 100644 index 805a44c..0000000 --- a/apigen/drs/model_post_access_url_request.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Data Repository Service - -No description provided 
(generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" -) - -// checks if the PostAccessURLRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &PostAccessURLRequest{} - -// PostAccessURLRequest struct for PostAccessURLRequest -type PostAccessURLRequest struct { - // the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. - Passports []string `json:"passports,omitempty"` -} - -// NewPostAccessURLRequest instantiates a new PostAccessURLRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewPostAccessURLRequest() *PostAccessURLRequest { - this := PostAccessURLRequest{} - return &this -} - -// NewPostAccessURLRequestWithDefaults instantiates a new PostAccessURLRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewPostAccessURLRequestWithDefaults() *PostAccessURLRequest { - this := PostAccessURLRequest{} - return &this -} - -// GetPassports returns the Passports field value if set, zero value otherwise. -func (o *PostAccessURLRequest) GetPassports() []string { - if o == nil || IsNil(o.Passports) { - var ret []string - return ret - } - return o.Passports -} - -// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *PostAccessURLRequest) GetPassportsOk() ([]string, bool) { - if o == nil || IsNil(o.Passports) { - return nil, false - } - return o.Passports, true -} - -// HasPassports returns a boolean if a field has been set. -func (o *PostAccessURLRequest) HasPassports() bool { - if o != nil && !IsNil(o.Passports) { - return true - } - - return false -} - -// SetPassports gets a reference to the given []string and assigns it to the Passports field. -func (o *PostAccessURLRequest) SetPassports(v []string) { - o.Passports = v -} - -func (o PostAccessURLRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o PostAccessURLRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Passports) { - toSerialize["passports"] = o.Passports - } - return toSerialize, nil -} - -type NullablePostAccessURLRequest struct { - value *PostAccessURLRequest - isSet bool -} - -func (v NullablePostAccessURLRequest) Get() *PostAccessURLRequest { - return v.value -} - -func (v *NullablePostAccessURLRequest) Set(val *PostAccessURLRequest) { - v.value = val - v.isSet = true -} - -func (v NullablePostAccessURLRequest) IsSet() bool { - return v.isSet -} - -func (v *NullablePostAccessURLRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullablePostAccessURLRequest(val *PostAccessURLRequest) *NullablePostAccessURLRequest { - return &NullablePostAccessURLRequest{value: val, isSet: true} -} - -func (v NullablePostAccessURLRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullablePostAccessURLRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_post_object_request.go b/apigen/drs/model_post_object_request.go deleted file mode 100644 index bf21e0e..0000000 --- a/apigen/drs/model_post_object_request.go +++ 
/dev/null @@ -1,165 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" -) - -// checks if the PostObjectRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &PostObjectRequest{} - -// PostObjectRequest struct for PostObjectRequest -type PostObjectRequest struct { - // If false and the object_id refers to a bundle, then the ContentsObject array contains only those objects directly contained in the bundle. That is, if the bundle contains other bundles, those other bundles are not recursively included in the result. If true and the object_id refers to a bundle, then the entire set of objects in the bundle is expanded. That is, if the bundle contains other bundles, then those other bundles are recursively expanded and included in the result. Recursion continues through the entire sub-tree of the bundle. If the object_id refers to a blob, then the query parameter is ignored. - Expand *bool `json:"expand,omitempty"` - // the encoded JWT GA4GH Passport that contains embedded Visas. The overall JWT is signed as are the individual Passport Visas. 
- Passports []string `json:"passports,omitempty"` -} - -// NewPostObjectRequest instantiates a new PostObjectRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewPostObjectRequest() *PostObjectRequest { - this := PostObjectRequest{} - return &this -} - -// NewPostObjectRequestWithDefaults instantiates a new PostObjectRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewPostObjectRequestWithDefaults() *PostObjectRequest { - this := PostObjectRequest{} - return &this -} - -// GetExpand returns the Expand field value if set, zero value otherwise. -func (o *PostObjectRequest) GetExpand() bool { - if o == nil || IsNil(o.Expand) { - var ret bool - return ret - } - return *o.Expand -} - -// GetExpandOk returns a tuple with the Expand field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *PostObjectRequest) GetExpandOk() (*bool, bool) { - if o == nil || IsNil(o.Expand) { - return nil, false - } - return o.Expand, true -} - -// HasExpand returns a boolean if a field has been set. -func (o *PostObjectRequest) HasExpand() bool { - if o != nil && !IsNil(o.Expand) { - return true - } - - return false -} - -// SetExpand gets a reference to the given bool and assigns it to the Expand field. -func (o *PostObjectRequest) SetExpand(v bool) { - o.Expand = &v -} - -// GetPassports returns the Passports field value if set, zero value otherwise. 
-func (o *PostObjectRequest) GetPassports() []string { - if o == nil || IsNil(o.Passports) { - var ret []string - return ret - } - return o.Passports -} - -// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *PostObjectRequest) GetPassportsOk() ([]string, bool) { - if o == nil || IsNil(o.Passports) { - return nil, false - } - return o.Passports, true -} - -// HasPassports returns a boolean if a field has been set. -func (o *PostObjectRequest) HasPassports() bool { - if o != nil && !IsNil(o.Passports) { - return true - } - - return false -} - -// SetPassports gets a reference to the given []string and assigns it to the Passports field. -func (o *PostObjectRequest) SetPassports(v []string) { - o.Passports = v -} - -func (o PostObjectRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o PostObjectRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Expand) { - toSerialize["expand"] = o.Expand - } - if !IsNil(o.Passports) { - toSerialize["passports"] = o.Passports - } - return toSerialize, nil -} - -type NullablePostObjectRequest struct { - value *PostObjectRequest - isSet bool -} - -func (v NullablePostObjectRequest) Get() *PostObjectRequest { - return v.value -} - -func (v *NullablePostObjectRequest) Set(val *PostObjectRequest) { - v.value = val - v.isSet = true -} - -func (v NullablePostObjectRequest) IsSet() bool { - return v.isSet -} - -func (v *NullablePostObjectRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullablePostObjectRequest(val *PostObjectRequest) *NullablePostObjectRequest { - return &NullablePostObjectRequest{value: val, isSet: true} -} - -func (v NullablePostObjectRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v 
*NullablePostObjectRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_register_objects_201_response.go b/apigen/drs/model_register_objects_201_response.go deleted file mode 100644 index eeb7b27..0000000 --- a/apigen/drs/model_register_objects_201_response.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the RegisterObjects201Response type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &RegisterObjects201Response{} - -// RegisterObjects201Response struct for RegisterObjects201Response -type RegisterObjects201Response struct { - // Array of registered DRS objects in the same order as the candidates in the request - Objects []DrsObject `json:"objects"` -} - -type _RegisterObjects201Response RegisterObjects201Response - -// NewRegisterObjects201Response instantiates a new RegisterObjects201Response object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewRegisterObjects201Response(objects []DrsObject) *RegisterObjects201Response { - this := RegisterObjects201Response{} - this.Objects = objects - return &this -} - -// NewRegisterObjects201ResponseWithDefaults instantiates a new RegisterObjects201Response object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewRegisterObjects201ResponseWithDefaults() 
*RegisterObjects201Response { - this := RegisterObjects201Response{} - return &this -} - -// GetObjects returns the Objects field value -func (o *RegisterObjects201Response) GetObjects() []DrsObject { - if o == nil { - var ret []DrsObject - return ret - } - - return o.Objects -} - -// GetObjectsOk returns a tuple with the Objects field value -// and a boolean to check if the value has been set. -func (o *RegisterObjects201Response) GetObjectsOk() ([]DrsObject, bool) { - if o == nil { - return nil, false - } - return o.Objects, true -} - -// SetObjects sets field value -func (o *RegisterObjects201Response) SetObjects(v []DrsObject) { - o.Objects = v -} - -func (o RegisterObjects201Response) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o RegisterObjects201Response) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["objects"] = o.Objects - return toSerialize, nil -} - -func (o *RegisterObjects201Response) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "objects", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varRegisterObjects201Response := _RegisterObjects201Response{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varRegisterObjects201Response) - - if err != nil { - return err - } - - *o = RegisterObjects201Response(varRegisterObjects201Response) - - return err -} - -type NullableRegisterObjects201Response struct { - value *RegisterObjects201Response - isSet bool -} - -func (v NullableRegisterObjects201Response) Get() *RegisterObjects201Response { - return v.value -} - -func (v *NullableRegisterObjects201Response) Set(val *RegisterObjects201Response) { - v.value = val - v.isSet = true -} - -func (v NullableRegisterObjects201Response) IsSet() bool { - return v.isSet -} - -func (v *NullableRegisterObjects201Response) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableRegisterObjects201Response(val *RegisterObjects201Response) *NullableRegisterObjects201Response { - return &NullableRegisterObjects201Response{value: val, isSet: true} -} - -func (v NullableRegisterObjects201Response) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableRegisterObjects201Response) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_register_objects_request.go b/apigen/drs/model_register_objects_request.go deleted file mode 100644 index 85d332d..0000000 --- a/apigen/drs/model_register_objects_request.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi 
Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the RegisterObjectsRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &RegisterObjectsRequest{} - -// RegisterObjectsRequest struct for RegisterObjectsRequest -type RegisterObjectsRequest struct { - // Array of DRS object candidates to register (server will mint IDs and timestamps) - Candidates []DrsObjectCandidate `json:"candidates"` - // Optional array of GA4GH Passport JWTs for authorization - Passports []string `json:"passports,omitempty"` -} - -type _RegisterObjectsRequest RegisterObjectsRequest - -// NewRegisterObjectsRequest instantiates a new RegisterObjectsRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewRegisterObjectsRequest(candidates []DrsObjectCandidate) *RegisterObjectsRequest { - this := RegisterObjectsRequest{} - this.Candidates = candidates - return &this -} - -// NewRegisterObjectsRequestWithDefaults instantiates a new RegisterObjectsRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewRegisterObjectsRequestWithDefaults() *RegisterObjectsRequest { - this := RegisterObjectsRequest{} - return &this -} - -// GetCandidates returns the Candidates field value -func (o *RegisterObjectsRequest) GetCandidates() []DrsObjectCandidate { - if o == nil { - var ret []DrsObjectCandidate - return ret - } - - return o.Candidates -} - -// GetCandidatesOk returns a tuple with the Candidates field value -// and a 
boolean to check if the value has been set. -func (o *RegisterObjectsRequest) GetCandidatesOk() ([]DrsObjectCandidate, bool) { - if o == nil { - return nil, false - } - return o.Candidates, true -} - -// SetCandidates sets field value -func (o *RegisterObjectsRequest) SetCandidates(v []DrsObjectCandidate) { - o.Candidates = v -} - -// GetPassports returns the Passports field value if set, zero value otherwise. -func (o *RegisterObjectsRequest) GetPassports() []string { - if o == nil || IsNil(o.Passports) { - var ret []string - return ret - } - return o.Passports -} - -// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *RegisterObjectsRequest) GetPassportsOk() ([]string, bool) { - if o == nil || IsNil(o.Passports) { - return nil, false - } - return o.Passports, true -} - -// HasPassports returns a boolean if a field has been set. -func (o *RegisterObjectsRequest) HasPassports() bool { - if o != nil && !IsNil(o.Passports) { - return true - } - - return false -} - -// SetPassports gets a reference to the given []string and assigns it to the Passports field. 
-func (o *RegisterObjectsRequest) SetPassports(v []string) { - o.Passports = v -} - -func (o RegisterObjectsRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o RegisterObjectsRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["candidates"] = o.Candidates - if !IsNil(o.Passports) { - toSerialize["passports"] = o.Passports - } - return toSerialize, nil -} - -func (o *RegisterObjectsRequest) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. - requiredProperties := []string{ - "candidates", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varRegisterObjectsRequest := _RegisterObjectsRequest{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varRegisterObjectsRequest) - - if err != nil { - return err - } - - *o = RegisterObjectsRequest(varRegisterObjectsRequest) - - return err -} - -type NullableRegisterObjectsRequest struct { - value *RegisterObjectsRequest - isSet bool -} - -func (v NullableRegisterObjectsRequest) Get() *RegisterObjectsRequest { - return v.value -} - -func (v *NullableRegisterObjectsRequest) Set(val *RegisterObjectsRequest) { - v.value = val - v.isSet = true -} - -func (v NullableRegisterObjectsRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableRegisterObjectsRequest) Unset() { - v.value = nil - 
v.isSet = false -} - -func NewNullableRegisterObjectsRequest(val *RegisterObjectsRequest) *NullableRegisterObjectsRequest { - return &NullableRegisterObjectsRequest{value: val, isSet: true} -} - -func (v NullableRegisterObjectsRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableRegisterObjectsRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_service.go b/apigen/drs/model_service.go deleted file mode 100644 index 5b8a3fc..0000000 --- a/apigen/drs/model_service.go +++ /dev/null @@ -1,497 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "time" - "bytes" - "fmt" -) - -// checks if the Service type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &Service{} - -// Service GA4GH service -type Service struct { - // Unique ID of this service. Reverse domain name notation is recommended, though not required. The identifier should attempt to be globally unique so it can be used in downstream aggregator services e.g. Service Registry. - Id string `json:"id"` - // Name of this service. Should be human readable. - Name string `json:"name"` - Type ServiceType `json:"type"` - // Description of the service. Should be human readable and provide information about the service. - Description *string `json:"description,omitempty"` - Organization ServiceOrganization `json:"organization"` - // URL of the contact for the provider of this service, e.g. a link to a contact form (RFC 3986 format), or an email (RFC 2368 format). - ContactUrl *string `json:"contactUrl,omitempty"` - // URL of the documentation of this service (RFC 3986 format). 
This should help someone learn how to use your service, including any specifics required to access data, e.g. authentication. - DocumentationUrl *string `json:"documentationUrl,omitempty"` - // Timestamp describing when the service was first deployed and available (RFC 3339 format) - CreatedAt *time.Time `json:"createdAt,omitempty"` - // Timestamp describing when the service was last updated (RFC 3339 format) - UpdatedAt *time.Time `json:"updatedAt,omitempty"` - // Environment the service is running in. Use this to distinguish between production, development and testing/staging deployments. Suggested values are prod, test, dev, staging. However this is advised and not enforced. - Environment *string `json:"environment,omitempty"` - // Version of the service being described. Semantic versioning is recommended, but other identifiers, such as dates or commit hashes, are also allowed. The version should be changed whenever the service is updated. - Version string `json:"version"` -} - -type _Service Service - -// NewService instantiates a new Service object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewService(id string, name string, type_ ServiceType, organization ServiceOrganization, version string) *Service { - this := Service{} - this.Id = id - this.Name = name - this.Type = type_ - this.Organization = organization - this.Version = version - return &this -} - -// NewServiceWithDefaults instantiates a new Service object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewServiceWithDefaults() *Service { - this := Service{} - return &this -} - -// GetId returns the Id field value -func (o *Service) GetId() string { - if o == nil { - var ret string - return ret - } - - 
return o.Id -} - -// GetIdOk returns a tuple with the Id field value -// and a boolean to check if the value has been set. -func (o *Service) GetIdOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Id, true -} - -// SetId sets field value -func (o *Service) SetId(v string) { - o.Id = v -} - -// GetName returns the Name field value -func (o *Service) GetName() string { - if o == nil { - var ret string - return ret - } - - return o.Name -} - -// GetNameOk returns a tuple with the Name field value -// and a boolean to check if the value has been set. -func (o *Service) GetNameOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Name, true -} - -// SetName sets field value -func (o *Service) SetName(v string) { - o.Name = v -} - -// GetType returns the Type field value -func (o *Service) GetType() ServiceType { - if o == nil { - var ret ServiceType - return ret - } - - return o.Type -} - -// GetTypeOk returns a tuple with the Type field value -// and a boolean to check if the value has been set. -func (o *Service) GetTypeOk() (*ServiceType, bool) { - if o == nil { - return nil, false - } - return &o.Type, true -} - -// SetType sets field value -func (o *Service) SetType(v ServiceType) { - o.Type = v -} - -// GetDescription returns the Description field value if set, zero value otherwise. -func (o *Service) GetDescription() string { - if o == nil || IsNil(o.Description) { - var ret string - return ret - } - return *o.Description -} - -// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Service) GetDescriptionOk() (*string, bool) { - if o == nil || IsNil(o.Description) { - return nil, false - } - return o.Description, true -} - -// HasDescription returns a boolean if a field has been set. 
-func (o *Service) HasDescription() bool { - if o != nil && !IsNil(o.Description) { - return true - } - - return false -} - -// SetDescription gets a reference to the given string and assigns it to the Description field. -func (o *Service) SetDescription(v string) { - o.Description = &v -} - -// GetOrganization returns the Organization field value -func (o *Service) GetOrganization() ServiceOrganization { - if o == nil { - var ret ServiceOrganization - return ret - } - - return o.Organization -} - -// GetOrganizationOk returns a tuple with the Organization field value -// and a boolean to check if the value has been set. -func (o *Service) GetOrganizationOk() (*ServiceOrganization, bool) { - if o == nil { - return nil, false - } - return &o.Organization, true -} - -// SetOrganization sets field value -func (o *Service) SetOrganization(v ServiceOrganization) { - o.Organization = v -} - -// GetContactUrl returns the ContactUrl field value if set, zero value otherwise. -func (o *Service) GetContactUrl() string { - if o == nil || IsNil(o.ContactUrl) { - var ret string - return ret - } - return *o.ContactUrl -} - -// GetContactUrlOk returns a tuple with the ContactUrl field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Service) GetContactUrlOk() (*string, bool) { - if o == nil || IsNil(o.ContactUrl) { - return nil, false - } - return o.ContactUrl, true -} - -// HasContactUrl returns a boolean if a field has been set. -func (o *Service) HasContactUrl() bool { - if o != nil && !IsNil(o.ContactUrl) { - return true - } - - return false -} - -// SetContactUrl gets a reference to the given string and assigns it to the ContactUrl field. -func (o *Service) SetContactUrl(v string) { - o.ContactUrl = &v -} - -// GetDocumentationUrl returns the DocumentationUrl field value if set, zero value otherwise. 
-func (o *Service) GetDocumentationUrl() string { - if o == nil || IsNil(o.DocumentationUrl) { - var ret string - return ret - } - return *o.DocumentationUrl -} - -// GetDocumentationUrlOk returns a tuple with the DocumentationUrl field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Service) GetDocumentationUrlOk() (*string, bool) { - if o == nil || IsNil(o.DocumentationUrl) { - return nil, false - } - return o.DocumentationUrl, true -} - -// HasDocumentationUrl returns a boolean if a field has been set. -func (o *Service) HasDocumentationUrl() bool { - if o != nil && !IsNil(o.DocumentationUrl) { - return true - } - - return false -} - -// SetDocumentationUrl gets a reference to the given string and assigns it to the DocumentationUrl field. -func (o *Service) SetDocumentationUrl(v string) { - o.DocumentationUrl = &v -} - -// GetCreatedAt returns the CreatedAt field value if set, zero value otherwise. -func (o *Service) GetCreatedAt() time.Time { - if o == nil || IsNil(o.CreatedAt) { - var ret time.Time - return ret - } - return *o.CreatedAt -} - -// GetCreatedAtOk returns a tuple with the CreatedAt field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Service) GetCreatedAtOk() (*time.Time, bool) { - if o == nil || IsNil(o.CreatedAt) { - return nil, false - } - return o.CreatedAt, true -} - -// HasCreatedAt returns a boolean if a field has been set. -func (o *Service) HasCreatedAt() bool { - if o != nil && !IsNil(o.CreatedAt) { - return true - } - - return false -} - -// SetCreatedAt gets a reference to the given time.Time and assigns it to the CreatedAt field. -func (o *Service) SetCreatedAt(v time.Time) { - o.CreatedAt = &v -} - -// GetUpdatedAt returns the UpdatedAt field value if set, zero value otherwise. 
-func (o *Service) GetUpdatedAt() time.Time { - if o == nil || IsNil(o.UpdatedAt) { - var ret time.Time - return ret - } - return *o.UpdatedAt -} - -// GetUpdatedAtOk returns a tuple with the UpdatedAt field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Service) GetUpdatedAtOk() (*time.Time, bool) { - if o == nil || IsNil(o.UpdatedAt) { - return nil, false - } - return o.UpdatedAt, true -} - -// HasUpdatedAt returns a boolean if a field has been set. -func (o *Service) HasUpdatedAt() bool { - if o != nil && !IsNil(o.UpdatedAt) { - return true - } - - return false -} - -// SetUpdatedAt gets a reference to the given time.Time and assigns it to the UpdatedAt field. -func (o *Service) SetUpdatedAt(v time.Time) { - o.UpdatedAt = &v -} - -// GetEnvironment returns the Environment field value if set, zero value otherwise. -func (o *Service) GetEnvironment() string { - if o == nil || IsNil(o.Environment) { - var ret string - return ret - } - return *o.Environment -} - -// GetEnvironmentOk returns a tuple with the Environment field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Service) GetEnvironmentOk() (*string, bool) { - if o == nil || IsNil(o.Environment) { - return nil, false - } - return o.Environment, true -} - -// HasEnvironment returns a boolean if a field has been set. -func (o *Service) HasEnvironment() bool { - if o != nil && !IsNil(o.Environment) { - return true - } - - return false -} - -// SetEnvironment gets a reference to the given string and assigns it to the Environment field. -func (o *Service) SetEnvironment(v string) { - o.Environment = &v -} - -// GetVersion returns the Version field value -func (o *Service) GetVersion() string { - if o == nil { - var ret string - return ret - } - - return o.Version -} - -// GetVersionOk returns a tuple with the Version field value -// and a boolean to check if the value has been set. 
-func (o *Service) GetVersionOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Version, true -} - -// SetVersion sets field value -func (o *Service) SetVersion(v string) { - o.Version = v -} - -func (o Service) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o Service) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["id"] = o.Id - toSerialize["name"] = o.Name - toSerialize["type"] = o.Type - if !IsNil(o.Description) { - toSerialize["description"] = o.Description - } - toSerialize["organization"] = o.Organization - if !IsNil(o.ContactUrl) { - toSerialize["contactUrl"] = o.ContactUrl - } - if !IsNil(o.DocumentationUrl) { - toSerialize["documentationUrl"] = o.DocumentationUrl - } - if !IsNil(o.CreatedAt) { - toSerialize["createdAt"] = o.CreatedAt - } - if !IsNil(o.UpdatedAt) { - toSerialize["updatedAt"] = o.UpdatedAt - } - if !IsNil(o.Environment) { - toSerialize["environment"] = o.Environment - } - toSerialize["version"] = o.Version - return toSerialize, nil -} - -func (o *Service) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "id", - "name", - "type", - "organization", - "version", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varService := _Service{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varService) - - if err != nil { - return err - } - - *o = Service(varService) - - return err -} - -type NullableService struct { - value *Service - isSet bool -} - -func (v NullableService) Get() *Service { - return v.value -} - -func (v *NullableService) Set(val *Service) { - v.value = val - v.isSet = true -} - -func (v NullableService) IsSet() bool { - return v.isSet -} - -func (v *NullableService) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableService(val *Service) *NullableService { - return &NullableService{value: val, isSet: true} -} - -func (v NullableService) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableService) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_service_organization.go b/apigen/drs/model_service_organization.go deleted file mode 100644 index bc6eab9..0000000 --- a/apigen/drs/model_service_organization.go +++ /dev/null @@ -1,189 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the ServiceOrganization type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &ServiceOrganization{} - -// ServiceOrganization Organization providing the service -type ServiceOrganization struct { - // Name of the organization responsible for the service - Name string `json:"name"` - // URL of the website of the organization (RFC 3986 format) - Url string `json:"url"` -} - -type _ServiceOrganization ServiceOrganization - -// NewServiceOrganization instantiates a new ServiceOrganization object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewServiceOrganization(name string, url string) *ServiceOrganization { - this := ServiceOrganization{} - this.Name = name - this.Url = url - return &this -} - -// NewServiceOrganizationWithDefaults instantiates a new ServiceOrganization object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewServiceOrganizationWithDefaults() *ServiceOrganization { - this := ServiceOrganization{} - return &this -} - -// GetName returns the Name field value -func (o *ServiceOrganization) GetName() string { - if o == nil { - var ret string - return ret - } - - return o.Name -} - -// GetNameOk returns a tuple with the Name field value -// and a boolean to check if the value has been set. 
-func (o *ServiceOrganization) GetNameOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Name, true -} - -// SetName sets field value -func (o *ServiceOrganization) SetName(v string) { - o.Name = v -} - -// GetUrl returns the Url field value -func (o *ServiceOrganization) GetUrl() string { - if o == nil { - var ret string - return ret - } - - return o.Url -} - -// GetUrlOk returns a tuple with the Url field value -// and a boolean to check if the value has been set. -func (o *ServiceOrganization) GetUrlOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Url, true -} - -// SetUrl sets field value -func (o *ServiceOrganization) SetUrl(v string) { - o.Url = v -} - -func (o ServiceOrganization) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o ServiceOrganization) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["name"] = o.Name - toSerialize["url"] = o.Url - return toSerialize, nil -} - -func (o *ServiceOrganization) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "name", - "url", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varServiceOrganization := _ServiceOrganization{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varServiceOrganization) - - if err != nil { - return err - } - - *o = ServiceOrganization(varServiceOrganization) - - return err -} - -type NullableServiceOrganization struct { - value *ServiceOrganization - isSet bool -} - -func (v NullableServiceOrganization) Get() *ServiceOrganization { - return v.value -} - -func (v *NullableServiceOrganization) Set(val *ServiceOrganization) { - v.value = val - v.isSet = true -} - -func (v NullableServiceOrganization) IsSet() bool { - return v.isSet -} - -func (v *NullableServiceOrganization) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableServiceOrganization(val *ServiceOrganization) *NullableServiceOrganization { - return &NullableServiceOrganization{value: val, isSet: true} -} - -func (v NullableServiceOrganization) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableServiceOrganization) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_service_type.go b/apigen/drs/model_service_type.go deleted file mode 100644 index 2efc328..0000000 --- a/apigen/drs/model_service_type.go +++ /dev/null @@ -1,218 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator 
(https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the ServiceType type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &ServiceType{} - -// ServiceType Type of a GA4GH service -type ServiceType struct { - // Namespace in reverse domain name format. Use `org.ga4gh` for implementations compliant with official GA4GH specifications. For services with custom APIs not standardized by GA4GH, or implementations diverging from official GA4GH specifications, use a different namespace (e.g. your organization's reverse domain name). - Group string `json:"group"` - // Name of the API or GA4GH specification implemented. Official GA4GH types should be assigned as part of standards approval process. Custom artifacts are supported. - Artifact string `json:"artifact"` - // Version of the API or specification. GA4GH specifications use semantic versioning. - Version string `json:"version"` -} - -type _ServiceType ServiceType - -// NewServiceType instantiates a new ServiceType object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewServiceType(group string, artifact string, version string) *ServiceType { - this := ServiceType{} - this.Group = group - this.Artifact = artifact - this.Version = version - return &this -} - -// NewServiceTypeWithDefaults instantiates a new ServiceType object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewServiceTypeWithDefaults() *ServiceType { - this := ServiceType{} - return &this -} - -// GetGroup returns the Group field value -func (o *ServiceType) GetGroup() string { - if o == nil { - var ret string - return ret - } - - return o.Group -} - -// 
GetGroupOk returns a tuple with the Group field value -// and a boolean to check if the value has been set. -func (o *ServiceType) GetGroupOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Group, true -} - -// SetGroup sets field value -func (o *ServiceType) SetGroup(v string) { - o.Group = v -} - -// GetArtifact returns the Artifact field value -func (o *ServiceType) GetArtifact() string { - if o == nil { - var ret string - return ret - } - - return o.Artifact -} - -// GetArtifactOk returns a tuple with the Artifact field value -// and a boolean to check if the value has been set. -func (o *ServiceType) GetArtifactOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Artifact, true -} - -// SetArtifact sets field value -func (o *ServiceType) SetArtifact(v string) { - o.Artifact = v -} - -// GetVersion returns the Version field value -func (o *ServiceType) GetVersion() string { - if o == nil { - var ret string - return ret - } - - return o.Version -} - -// GetVersionOk returns a tuple with the Version field value -// and a boolean to check if the value has been set. 
-func (o *ServiceType) GetVersionOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Version, true -} - -// SetVersion sets field value -func (o *ServiceType) SetVersion(v string) { - o.Version = v -} - -func (o ServiceType) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o ServiceType) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["group"] = o.Group - toSerialize["artifact"] = o.Artifact - toSerialize["version"] = o.Version - return toSerialize, nil -} - -func (o *ServiceType) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. - requiredProperties := []string{ - "group", - "artifact", - "version", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varServiceType := _ServiceType{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varServiceType) - - if err != nil { - return err - } - - *o = ServiceType(varServiceType) - - return err -} - -type NullableServiceType struct { - value *ServiceType - isSet bool -} - -func (v NullableServiceType) Get() *ServiceType { - return v.value -} - -func (v *NullableServiceType) Set(val *ServiceType) { - v.value = val - v.isSet = true -} - -func (v NullableServiceType) IsSet() bool { - return v.isSet -} - -func (v *NullableServiceType) Unset() { - v.value = nil - v.isSet = false -} 
- -func NewNullableServiceType(val *ServiceType) *NullableServiceType { - return &NullableServiceType{value: val, isSet: true} -} - -func (v NullableServiceType) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableServiceType) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_summary.go b/apigen/drs/model_summary.go deleted file mode 100644 index 235923a..0000000 --- a/apigen/drs/model_summary.go +++ /dev/null @@ -1,202 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" -) - -// checks if the Summary type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &Summary{} - -// Summary A summary of what was resolved. -type Summary struct { - // Number of items requested. - Requested *int32 `json:"requested,omitempty"` - // Number of objects resolved. - Resolved *int32 `json:"resolved,omitempty"` - // Number of objects not resolved. 
- Unresolved *int32 `json:"unresolved,omitempty"` -} - -// NewSummary instantiates a new Summary object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewSummary() *Summary { - this := Summary{} - return &this -} - -// NewSummaryWithDefaults instantiates a new Summary object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewSummaryWithDefaults() *Summary { - this := Summary{} - return &this -} - -// GetRequested returns the Requested field value if set, zero value otherwise. -func (o *Summary) GetRequested() int32 { - if o == nil || IsNil(o.Requested) { - var ret int32 - return ret - } - return *o.Requested -} - -// GetRequestedOk returns a tuple with the Requested field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Summary) GetRequestedOk() (*int32, bool) { - if o == nil || IsNil(o.Requested) { - return nil, false - } - return o.Requested, true -} - -// HasRequested returns a boolean if a field has been set. -func (o *Summary) HasRequested() bool { - if o != nil && !IsNil(o.Requested) { - return true - } - - return false -} - -// SetRequested gets a reference to the given int32 and assigns it to the Requested field. -func (o *Summary) SetRequested(v int32) { - o.Requested = &v -} - -// GetResolved returns the Resolved field value if set, zero value otherwise. -func (o *Summary) GetResolved() int32 { - if o == nil || IsNil(o.Resolved) { - var ret int32 - return ret - } - return *o.Resolved -} - -// GetResolvedOk returns a tuple with the Resolved field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *Summary) GetResolvedOk() (*int32, bool) { - if o == nil || IsNil(o.Resolved) { - return nil, false - } - return o.Resolved, true -} - -// HasResolved returns a boolean if a field has been set. -func (o *Summary) HasResolved() bool { - if o != nil && !IsNil(o.Resolved) { - return true - } - - return false -} - -// SetResolved gets a reference to the given int32 and assigns it to the Resolved field. -func (o *Summary) SetResolved(v int32) { - o.Resolved = &v -} - -// GetUnresolved returns the Unresolved field value if set, zero value otherwise. -func (o *Summary) GetUnresolved() int32 { - if o == nil || IsNil(o.Unresolved) { - var ret int32 - return ret - } - return *o.Unresolved -} - -// GetUnresolvedOk returns a tuple with the Unresolved field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *Summary) GetUnresolvedOk() (*int32, bool) { - if o == nil || IsNil(o.Unresolved) { - return nil, false - } - return o.Unresolved, true -} - -// HasUnresolved returns a boolean if a field has been set. -func (o *Summary) HasUnresolved() bool { - if o != nil && !IsNil(o.Unresolved) { - return true - } - - return false -} - -// SetUnresolved gets a reference to the given int32 and assigns it to the Unresolved field. 
-func (o *Summary) SetUnresolved(v int32) { - o.Unresolved = &v -} - -func (o Summary) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o Summary) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Requested) { - toSerialize["requested"] = o.Requested - } - if !IsNil(o.Resolved) { - toSerialize["resolved"] = o.Resolved - } - if !IsNil(o.Unresolved) { - toSerialize["unresolved"] = o.Unresolved - } - return toSerialize, nil -} - -type NullableSummary struct { - value *Summary - isSet bool -} - -func (v NullableSummary) Get() *Summary { - return v.value -} - -func (v *NullableSummary) Set(val *Summary) { - v.value = val - v.isSet = true -} - -func (v NullableSummary) IsSet() bool { - return v.isSet -} - -func (v *NullableSummary) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableSummary(val *Summary) *NullableSummary { - return &NullableSummary{value: val, isSet: true} -} - -func (v NullableSummary) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableSummary) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_unresolved_inner.go b/apigen/drs/model_unresolved_inner.go deleted file mode 100644 index 564f4c3..0000000 --- a/apigen/drs/model_unresolved_inner.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" -) - -// checks if the UnresolvedInner type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &UnresolvedInner{} - -// UnresolvedInner struct for UnresolvedInner -type UnresolvedInner struct { - ErrorCode *int32 `json:"error_code,omitempty"` - ObjectIds []string `json:"object_ids,omitempty"` -} - -// NewUnresolvedInner instantiates a new UnresolvedInner object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewUnresolvedInner() *UnresolvedInner { - this := UnresolvedInner{} - return &this -} - -// NewUnresolvedInnerWithDefaults instantiates a new UnresolvedInner object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewUnresolvedInnerWithDefaults() *UnresolvedInner { - this := UnresolvedInner{} - return &this -} - -// GetErrorCode returns the ErrorCode field value if set, zero value otherwise. -func (o *UnresolvedInner) GetErrorCode() int32 { - if o == nil || IsNil(o.ErrorCode) { - var ret int32 - return ret - } - return *o.ErrorCode -} - -// GetErrorCodeOk returns a tuple with the ErrorCode field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *UnresolvedInner) GetErrorCodeOk() (*int32, bool) { - if o == nil || IsNil(o.ErrorCode) { - return nil, false - } - return o.ErrorCode, true -} - -// HasErrorCode returns a boolean if a field has been set. -func (o *UnresolvedInner) HasErrorCode() bool { - if o != nil && !IsNil(o.ErrorCode) { - return true - } - - return false -} - -// SetErrorCode gets a reference to the given int32 and assigns it to the ErrorCode field. 
-func (o *UnresolvedInner) SetErrorCode(v int32) { - o.ErrorCode = &v -} - -// GetObjectIds returns the ObjectIds field value if set, zero value otherwise. -func (o *UnresolvedInner) GetObjectIds() []string { - if o == nil || IsNil(o.ObjectIds) { - var ret []string - return ret - } - return o.ObjectIds -} - -// GetObjectIdsOk returns a tuple with the ObjectIds field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *UnresolvedInner) GetObjectIdsOk() ([]string, bool) { - if o == nil || IsNil(o.ObjectIds) { - return nil, false - } - return o.ObjectIds, true -} - -// HasObjectIds returns a boolean if a field has been set. -func (o *UnresolvedInner) HasObjectIds() bool { - if o != nil && !IsNil(o.ObjectIds) { - return true - } - - return false -} - -// SetObjectIds gets a reference to the given []string and assigns it to the ObjectIds field. -func (o *UnresolvedInner) SetObjectIds(v []string) { - o.ObjectIds = v -} - -func (o UnresolvedInner) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o UnresolvedInner) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.ErrorCode) { - toSerialize["error_code"] = o.ErrorCode - } - if !IsNil(o.ObjectIds) { - toSerialize["object_ids"] = o.ObjectIds - } - return toSerialize, nil -} - -type NullableUnresolvedInner struct { - value *UnresolvedInner - isSet bool -} - -func (v NullableUnresolvedInner) Get() *UnresolvedInner { - return v.value -} - -func (v *NullableUnresolvedInner) Set(val *UnresolvedInner) { - v.value = val - v.isSet = true -} - -func (v NullableUnresolvedInner) IsSet() bool { - return v.isSet -} - -func (v *NullableUnresolvedInner) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableUnresolvedInner(val *UnresolvedInner) *NullableUnresolvedInner { - return &NullableUnresolvedInner{value: val, isSet: true} -} - -func 
(v NullableUnresolvedInner) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableUnresolvedInner) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_upload_method.go b/apigen/drs/model_upload_method.go deleted file mode 100644 index 2bbdeea..0000000 --- a/apigen/drs/model_upload_method.go +++ /dev/null @@ -1,262 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the UploadMethod type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &UploadMethod{} - -// UploadMethod struct for UploadMethod -type UploadMethod struct { - // Type of upload method. Implementations MAY support any subset of these types. The 'https' type can be used to return a presigned POST URL and is expected to be the most common implementation for typical file uploads. This method provides a simple HTTP POST interface that works with standard web clients. The 's3' type is primarily intended to support uploads of large files that want to take advantage of multipart uploads and automatic retries implemented in AWS libraries. This method provides direct access to S3-specific upload capabilities. Other common implementations include 'gs' for Google Cloud Storage and 'sftp' for secure FTP uploads. - Type string `json:"type"` - AccessUrl UploadMethodAccessUrl `json:"access_url"` - // Cloud region for the upload location. Optional for non-cloud storage types. - Region *string `json:"region,omitempty"` - // A dictionary of upload-specific configuration details that vary by upload method type. 
The contents and structure depend on the specific upload method being used. - UploadDetails map[string]interface{} `json:"upload_details,omitempty"` -} - -type _UploadMethod UploadMethod - -// NewUploadMethod instantiates a new UploadMethod object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewUploadMethod(type_ string, accessUrl UploadMethodAccessUrl) *UploadMethod { - this := UploadMethod{} - this.Type = type_ - this.AccessUrl = accessUrl - return &this -} - -// NewUploadMethodWithDefaults instantiates a new UploadMethod object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewUploadMethodWithDefaults() *UploadMethod { - this := UploadMethod{} - return &this -} - -// GetType returns the Type field value -func (o *UploadMethod) GetType() string { - if o == nil { - var ret string - return ret - } - - return o.Type -} - -// GetTypeOk returns a tuple with the Type field value -// and a boolean to check if the value has been set. -func (o *UploadMethod) GetTypeOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Type, true -} - -// SetType sets field value -func (o *UploadMethod) SetType(v string) { - o.Type = v -} - -// GetAccessUrl returns the AccessUrl field value -func (o *UploadMethod) GetAccessUrl() UploadMethodAccessUrl { - if o == nil { - var ret UploadMethodAccessUrl - return ret - } - - return o.AccessUrl -} - -// GetAccessUrlOk returns a tuple with the AccessUrl field value -// and a boolean to check if the value has been set. 
-func (o *UploadMethod) GetAccessUrlOk() (*UploadMethodAccessUrl, bool) { - if o == nil { - return nil, false - } - return &o.AccessUrl, true -} - -// SetAccessUrl sets field value -func (o *UploadMethod) SetAccessUrl(v UploadMethodAccessUrl) { - o.AccessUrl = v -} - -// GetRegion returns the Region field value if set, zero value otherwise. -func (o *UploadMethod) GetRegion() string { - if o == nil || IsNil(o.Region) { - var ret string - return ret - } - return *o.Region -} - -// GetRegionOk returns a tuple with the Region field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *UploadMethod) GetRegionOk() (*string, bool) { - if o == nil || IsNil(o.Region) { - return nil, false - } - return o.Region, true -} - -// HasRegion returns a boolean if a field has been set. -func (o *UploadMethod) HasRegion() bool { - if o != nil && !IsNil(o.Region) { - return true - } - - return false -} - -// SetRegion gets a reference to the given string and assigns it to the Region field. -func (o *UploadMethod) SetRegion(v string) { - o.Region = &v -} - -// GetUploadDetails returns the UploadDetails field value if set, zero value otherwise. -func (o *UploadMethod) GetUploadDetails() map[string]interface{} { - if o == nil || IsNil(o.UploadDetails) { - var ret map[string]interface{} - return ret - } - return o.UploadDetails -} - -// GetUploadDetailsOk returns a tuple with the UploadDetails field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *UploadMethod) GetUploadDetailsOk() (map[string]interface{}, bool) { - if o == nil || IsNil(o.UploadDetails) { - return map[string]interface{}{}, false - } - return o.UploadDetails, true -} - -// HasUploadDetails returns a boolean if a field has been set. 
-func (o *UploadMethod) HasUploadDetails() bool { - if o != nil && !IsNil(o.UploadDetails) { - return true - } - - return false -} - -// SetUploadDetails gets a reference to the given map[string]interface{} and assigns it to the UploadDetails field. -func (o *UploadMethod) SetUploadDetails(v map[string]interface{}) { - o.UploadDetails = v -} - -func (o UploadMethod) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o UploadMethod) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["type"] = o.Type - toSerialize["access_url"] = o.AccessUrl - if !IsNil(o.Region) { - toSerialize["region"] = o.Region - } - if !IsNil(o.UploadDetails) { - toSerialize["upload_details"] = o.UploadDetails - } - return toSerialize, nil -} - -func (o *UploadMethod) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "type", - "access_url", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varUploadMethod := _UploadMethod{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varUploadMethod) - - if err != nil { - return err - } - - *o = UploadMethod(varUploadMethod) - - return err -} - -type NullableUploadMethod struct { - value *UploadMethod - isSet bool -} - -func (v NullableUploadMethod) Get() *UploadMethod { - return v.value -} - -func (v *NullableUploadMethod) Set(val *UploadMethod) { - v.value = val - v.isSet = true -} - -func (v NullableUploadMethod) IsSet() bool { - return v.isSet -} - -func (v *NullableUploadMethod) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableUploadMethod(val *UploadMethod) *NullableUploadMethod { - return &NullableUploadMethod{value: val, isSet: true} -} - -func (v NullableUploadMethod) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableUploadMethod) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_upload_method_access_url.go b/apigen/drs/model_upload_method_access_url.go deleted file mode 100644 index ef097f6..0000000 --- a/apigen/drs/model_upload_method_access_url.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the UploadMethodAccessUrl type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &UploadMethodAccessUrl{} - -// UploadMethodAccessUrl An `AccessURL` that specifies where the file will be accessible after upload. This URL will be used as the access_url in the eventual DRS object, ensuring consistency between upload and retrieval operations. -type UploadMethodAccessUrl struct { - // Inlined Upload URL context. - Url string `json:"url"` - // Inlined Upload Headers. - Headers []string `json:"headers,omitempty"` -} - -type _UploadMethodAccessUrl UploadMethodAccessUrl - -// NewUploadMethodAccessUrl instantiates a new UploadMethodAccessUrl object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewUploadMethodAccessUrl(url string) *UploadMethodAccessUrl { - this := UploadMethodAccessUrl{} - this.Url = url - return &this -} - -// NewUploadMethodAccessUrlWithDefaults instantiates a new UploadMethodAccessUrl object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewUploadMethodAccessUrlWithDefaults() *UploadMethodAccessUrl { - this := UploadMethodAccessUrl{} - return &this -} - -// GetUrl returns the Url field value -func (o *UploadMethodAccessUrl) GetUrl() string { - if o == nil { - var ret string - return ret - } - - return o.Url -} - -// GetUrlOk returns a tuple with the Url field value -// and a boolean to check if the value has been set. 
-func (o *UploadMethodAccessUrl) GetUrlOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Url, true -} - -// SetUrl sets field value -func (o *UploadMethodAccessUrl) SetUrl(v string) { - o.Url = v -} - -// GetHeaders returns the Headers field value if set, zero value otherwise. -func (o *UploadMethodAccessUrl) GetHeaders() []string { - if o == nil || IsNil(o.Headers) { - var ret []string - return ret - } - return o.Headers -} - -// GetHeadersOk returns a tuple with the Headers field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *UploadMethodAccessUrl) GetHeadersOk() ([]string, bool) { - if o == nil || IsNil(o.Headers) { - return nil, false - } - return o.Headers, true -} - -// HasHeaders returns a boolean if a field has been set. -func (o *UploadMethodAccessUrl) HasHeaders() bool { - if o != nil && !IsNil(o.Headers) { - return true - } - - return false -} - -// SetHeaders gets a reference to the given []string and assigns it to the Headers field. -func (o *UploadMethodAccessUrl) SetHeaders(v []string) { - o.Headers = v -} - -func (o UploadMethodAccessUrl) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o UploadMethodAccessUrl) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["url"] = o.Url - if !IsNil(o.Headers) { - toSerialize["headers"] = o.Headers - } - return toSerialize, nil -} - -func (o *UploadMethodAccessUrl) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "url", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varUploadMethodAccessUrl := _UploadMethodAccessUrl{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varUploadMethodAccessUrl) - - if err != nil { - return err - } - - *o = UploadMethodAccessUrl(varUploadMethodAccessUrl) - - return err -} - -type NullableUploadMethodAccessUrl struct { - value *UploadMethodAccessUrl - isSet bool -} - -func (v NullableUploadMethodAccessUrl) Get() *UploadMethodAccessUrl { - return v.value -} - -func (v *NullableUploadMethodAccessUrl) Set(val *UploadMethodAccessUrl) { - v.value = val - v.isSet = true -} - -func (v NullableUploadMethodAccessUrl) IsSet() bool { - return v.isSet -} - -func (v *NullableUploadMethodAccessUrl) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableUploadMethodAccessUrl(val *UploadMethodAccessUrl) *NullableUploadMethodAccessUrl { - return &NullableUploadMethodAccessUrl{value: val, isSet: true} -} - -func (v NullableUploadMethodAccessUrl) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableUploadMethodAccessUrl) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_upload_request.go b/apigen/drs/model_upload_request.go deleted file mode 100644 index 25e7dfc..0000000 --- a/apigen/drs/model_upload_request.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code 
generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the UploadRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &UploadRequest{} - -// UploadRequest struct for UploadRequest -type UploadRequest struct { - // Array of upload requests for files - Requests []UploadRequestObject `json:"requests"` - // Optional array of GA4GH Passport JWTs for authorization - Passports []string `json:"passports,omitempty"` -} - -type _UploadRequest UploadRequest - -// NewUploadRequest instantiates a new UploadRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewUploadRequest(requests []UploadRequestObject) *UploadRequest { - this := UploadRequest{} - this.Requests = requests - return &this -} - -// NewUploadRequestWithDefaults instantiates a new UploadRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewUploadRequestWithDefaults() *UploadRequest { - this := UploadRequest{} - return &this -} - -// GetRequests returns the Requests field value -func (o *UploadRequest) GetRequests() []UploadRequestObject { - if o == nil { - var ret []UploadRequestObject - return ret - } - - return o.Requests -} - -// GetRequestsOk returns a tuple with the Requests field value -// and a boolean to check if the value has been set. 
-func (o *UploadRequest) GetRequestsOk() ([]UploadRequestObject, bool) { - if o == nil { - return nil, false - } - return o.Requests, true -} - -// SetRequests sets field value -func (o *UploadRequest) SetRequests(v []UploadRequestObject) { - o.Requests = v -} - -// GetPassports returns the Passports field value if set, zero value otherwise. -func (o *UploadRequest) GetPassports() []string { - if o == nil || IsNil(o.Passports) { - var ret []string - return ret - } - return o.Passports -} - -// GetPassportsOk returns a tuple with the Passports field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *UploadRequest) GetPassportsOk() ([]string, bool) { - if o == nil || IsNil(o.Passports) { - return nil, false - } - return o.Passports, true -} - -// HasPassports returns a boolean if a field has been set. -func (o *UploadRequest) HasPassports() bool { - if o != nil && !IsNil(o.Passports) { - return true - } - - return false -} - -// SetPassports gets a reference to the given []string and assigns it to the Passports field. -func (o *UploadRequest) SetPassports(v []string) { - o.Passports = v -} - -func (o UploadRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o UploadRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["requests"] = o.Requests - if !IsNil(o.Passports) { - toSerialize["passports"] = o.Passports - } - return toSerialize, nil -} - -func (o *UploadRequest) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "requests", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varUploadRequest := _UploadRequest{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varUploadRequest) - - if err != nil { - return err - } - - *o = UploadRequest(varUploadRequest) - - return err -} - -type NullableUploadRequest struct { - value *UploadRequest - isSet bool -} - -func (v NullableUploadRequest) Get() *UploadRequest { - return v.value -} - -func (v *NullableUploadRequest) Set(val *UploadRequest) { - v.value = val - v.isSet = true -} - -func (v NullableUploadRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableUploadRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableUploadRequest(val *UploadRequest) *NullableUploadRequest { - return &NullableUploadRequest{value: val, isSet: true} -} - -func (v NullableUploadRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableUploadRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_upload_request_object.go b/apigen/drs/model_upload_request_object.go deleted file mode 100644 index 807ce4d..0000000 --- a/apigen/drs/model_upload_request_object.go +++ /dev/null @@ -1,321 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the UploadRequestObject type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &UploadRequestObject{} - -// UploadRequestObject struct for UploadRequestObject -type UploadRequestObject struct { - // The name of the file to upload - Name string `json:"name"` - // Size of the file in bytes - Size int64 `json:"size"` - // MIME type of the file - MimeType string `json:"mime_type"` - // Array of checksums for file integrity verification - Checksums []Checksum `json:"checksums"` - // Optional description of the file - Description *string `json:"description,omitempty"` - // Optional array of alternative names for the file - Aliases []string `json:"aliases,omitempty"` -} - -type _UploadRequestObject UploadRequestObject - -// NewUploadRequestObject instantiates a new UploadRequestObject object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewUploadRequestObject(name string, size int64, mimeType string, checksums []Checksum) *UploadRequestObject { - this := UploadRequestObject{} - this.Name = name - this.Size = size - this.MimeType = mimeType - this.Checksums = checksums - return &this -} - -// NewUploadRequestObjectWithDefaults instantiates a new UploadRequestObject object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewUploadRequestObjectWithDefaults() *UploadRequestObject { - this := UploadRequestObject{} - return &this -} - -// GetName returns the Name field value -func (o *UploadRequestObject) GetName() string { - if o == nil { - var ret string - return ret - } - - return o.Name -} - -// GetNameOk returns a tuple with the Name field value -// and a boolean to 
check if the value has been set. -func (o *UploadRequestObject) GetNameOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Name, true -} - -// SetName sets field value -func (o *UploadRequestObject) SetName(v string) { - o.Name = v -} - -// GetSize returns the Size field value -func (o *UploadRequestObject) GetSize() int64 { - if o == nil { - var ret int64 - return ret - } - - return o.Size -} - -// GetSizeOk returns a tuple with the Size field value -// and a boolean to check if the value has been set. -func (o *UploadRequestObject) GetSizeOk() (*int64, bool) { - if o == nil { - return nil, false - } - return &o.Size, true -} - -// SetSize sets field value -func (o *UploadRequestObject) SetSize(v int64) { - o.Size = v -} - -// GetMimeType returns the MimeType field value -func (o *UploadRequestObject) GetMimeType() string { - if o == nil { - var ret string - return ret - } - - return o.MimeType -} - -// GetMimeTypeOk returns a tuple with the MimeType field value -// and a boolean to check if the value has been set. -func (o *UploadRequestObject) GetMimeTypeOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.MimeType, true -} - -// SetMimeType sets field value -func (o *UploadRequestObject) SetMimeType(v string) { - o.MimeType = v -} - -// GetChecksums returns the Checksums field value -func (o *UploadRequestObject) GetChecksums() []Checksum { - if o == nil { - var ret []Checksum - return ret - } - - return o.Checksums -} - -// GetChecksumsOk returns a tuple with the Checksums field value -// and a boolean to check if the value has been set. -func (o *UploadRequestObject) GetChecksumsOk() ([]Checksum, bool) { - if o == nil { - return nil, false - } - return o.Checksums, true -} - -// SetChecksums sets field value -func (o *UploadRequestObject) SetChecksums(v []Checksum) { - o.Checksums = v -} - -// GetDescription returns the Description field value if set, zero value otherwise. 
-func (o *UploadRequestObject) GetDescription() string { - if o == nil || IsNil(o.Description) { - var ret string - return ret - } - return *o.Description -} - -// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *UploadRequestObject) GetDescriptionOk() (*string, bool) { - if o == nil || IsNil(o.Description) { - return nil, false - } - return o.Description, true -} - -// HasDescription returns a boolean if a field has been set. -func (o *UploadRequestObject) HasDescription() bool { - if o != nil && !IsNil(o.Description) { - return true - } - - return false -} - -// SetDescription gets a reference to the given string and assigns it to the Description field. -func (o *UploadRequestObject) SetDescription(v string) { - o.Description = &v -} - -// GetAliases returns the Aliases field value if set, zero value otherwise. -func (o *UploadRequestObject) GetAliases() []string { - if o == nil || IsNil(o.Aliases) { - var ret []string - return ret - } - return o.Aliases -} - -// GetAliasesOk returns a tuple with the Aliases field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *UploadRequestObject) GetAliasesOk() ([]string, bool) { - if o == nil || IsNil(o.Aliases) { - return nil, false - } - return o.Aliases, true -} - -// HasAliases returns a boolean if a field has been set. -func (o *UploadRequestObject) HasAliases() bool { - if o != nil && !IsNil(o.Aliases) { - return true - } - - return false -} - -// SetAliases gets a reference to the given []string and assigns it to the Aliases field. 
-func (o *UploadRequestObject) SetAliases(v []string) { - o.Aliases = v -} - -func (o UploadRequestObject) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o UploadRequestObject) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["name"] = o.Name - toSerialize["size"] = o.Size - toSerialize["mime_type"] = o.MimeType - toSerialize["checksums"] = o.Checksums - if !IsNil(o.Description) { - toSerialize["description"] = o.Description - } - if !IsNil(o.Aliases) { - toSerialize["aliases"] = o.Aliases - } - return toSerialize, nil -} - -func (o *UploadRequestObject) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. - requiredProperties := []string{ - "name", - "size", - "mime_type", - "checksums", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varUploadRequestObject := _UploadRequestObject{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varUploadRequestObject) - - if err != nil { - return err - } - - *o = UploadRequestObject(varUploadRequestObject) - - return err -} - -type NullableUploadRequestObject struct { - value *UploadRequestObject - isSet bool -} - -func (v NullableUploadRequestObject) Get() *UploadRequestObject { - return v.value -} - -func (v *NullableUploadRequestObject) Set(val *UploadRequestObject) { - v.value = val - v.isSet = 
true -} - -func (v NullableUploadRequestObject) IsSet() bool { - return v.isSet -} - -func (v *NullableUploadRequestObject) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableUploadRequestObject(val *UploadRequestObject) *NullableUploadRequestObject { - return &NullableUploadRequestObject{value: val, isSet: true} -} - -func (v NullableUploadRequestObject) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableUploadRequestObject) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_upload_response.go b/apigen/drs/model_upload_response.go deleted file mode 100644 index 10f39af..0000000 --- a/apigen/drs/model_upload_response.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the UploadResponse type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &UploadResponse{} - -// UploadResponse struct for UploadResponse -type UploadResponse struct { - // List of upload responses for the requested files - Responses []UploadResponseObject `json:"responses"` -} - -type _UploadResponse UploadResponse - -// NewUploadResponse instantiates a new UploadResponse object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewUploadResponse(responses []UploadResponseObject) *UploadResponse { - this := UploadResponse{} - this.Responses = responses - return &this -} - -// NewUploadResponseWithDefaults instantiates a new UploadResponse object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewUploadResponseWithDefaults() *UploadResponse { - this := UploadResponse{} - return &this -} - -// GetResponses returns the Responses field value -func (o *UploadResponse) GetResponses() []UploadResponseObject { - if o == nil { - var ret []UploadResponseObject - return ret - } - - return o.Responses -} - -// GetResponsesOk returns a tuple with the Responses field value -// and a boolean to check if the value has been set. 
-func (o *UploadResponse) GetResponsesOk() ([]UploadResponseObject, bool) { - if o == nil { - return nil, false - } - return o.Responses, true -} - -// SetResponses sets field value -func (o *UploadResponse) SetResponses(v []UploadResponseObject) { - o.Responses = v -} - -func (o UploadResponse) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o UploadResponse) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["responses"] = o.Responses - return toSerialize, nil -} - -func (o *UploadResponse) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. - requiredProperties := []string{ - "responses", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varUploadResponse := _UploadResponse{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varUploadResponse) - - if err != nil { - return err - } - - *o = UploadResponse(varUploadResponse) - - return err -} - -type NullableUploadResponse struct { - value *UploadResponse - isSet bool -} - -func (v NullableUploadResponse) Get() *UploadResponse { - return v.value -} - -func (v *NullableUploadResponse) Set(val *UploadResponse) { - v.value = val - v.isSet = true -} - -func (v NullableUploadResponse) IsSet() bool { - return v.isSet -} - -func (v *NullableUploadResponse) Unset() { - v.value = nil - v.isSet = false 
-} - -func NewNullableUploadResponse(val *UploadResponse) *NullableUploadResponse { - return &NullableUploadResponse{value: val, isSet: true} -} - -func (v NullableUploadResponse) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableUploadResponse) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/model_upload_response_object.go b/apigen/drs/model_upload_response_object.go deleted file mode 100644 index c63169e..0000000 --- a/apigen/drs/model_upload_response_object.go +++ /dev/null @@ -1,358 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the UploadResponseObject type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &UploadResponseObject{} - -// UploadResponseObject struct for UploadResponseObject -type UploadResponseObject struct { - // The name of the file - Name string `json:"name"` - // Size of the file in bytes - Size int64 `json:"size"` - // MIME type of the file - MimeType string `json:"mime_type"` - // Array of checksums for file integrity verification - Checksums []Checksum `json:"checksums"` - // Optional description of the file - Description *string `json:"description,omitempty"` - // Optional array of alternative names - Aliases []string `json:"aliases,omitempty"` - // Available methods for uploading this file - UploadMethods []UploadMethod `json:"upload_methods,omitempty"` -} - -type _UploadResponseObject UploadResponseObject - -// NewUploadResponseObject instantiates a new UploadResponseObject object -// This constructor will assign default values to properties that have it defined, -// and makes 
sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewUploadResponseObject(name string, size int64, mimeType string, checksums []Checksum) *UploadResponseObject { - this := UploadResponseObject{} - this.Name = name - this.Size = size - this.MimeType = mimeType - this.Checksums = checksums - return &this -} - -// NewUploadResponseObjectWithDefaults instantiates a new UploadResponseObject object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewUploadResponseObjectWithDefaults() *UploadResponseObject { - this := UploadResponseObject{} - return &this -} - -// GetName returns the Name field value -func (o *UploadResponseObject) GetName() string { - if o == nil { - var ret string - return ret - } - - return o.Name -} - -// GetNameOk returns a tuple with the Name field value -// and a boolean to check if the value has been set. -func (o *UploadResponseObject) GetNameOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Name, true -} - -// SetName sets field value -func (o *UploadResponseObject) SetName(v string) { - o.Name = v -} - -// GetSize returns the Size field value -func (o *UploadResponseObject) GetSize() int64 { - if o == nil { - var ret int64 - return ret - } - - return o.Size -} - -// GetSizeOk returns a tuple with the Size field value -// and a boolean to check if the value has been set. 
-func (o *UploadResponseObject) GetSizeOk() (*int64, bool) { - if o == nil { - return nil, false - } - return &o.Size, true -} - -// SetSize sets field value -func (o *UploadResponseObject) SetSize(v int64) { - o.Size = v -} - -// GetMimeType returns the MimeType field value -func (o *UploadResponseObject) GetMimeType() string { - if o == nil { - var ret string - return ret - } - - return o.MimeType -} - -// GetMimeTypeOk returns a tuple with the MimeType field value -// and a boolean to check if the value has been set. -func (o *UploadResponseObject) GetMimeTypeOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.MimeType, true -} - -// SetMimeType sets field value -func (o *UploadResponseObject) SetMimeType(v string) { - o.MimeType = v -} - -// GetChecksums returns the Checksums field value -func (o *UploadResponseObject) GetChecksums() []Checksum { - if o == nil { - var ret []Checksum - return ret - } - - return o.Checksums -} - -// GetChecksumsOk returns a tuple with the Checksums field value -// and a boolean to check if the value has been set. -func (o *UploadResponseObject) GetChecksumsOk() ([]Checksum, bool) { - if o == nil { - return nil, false - } - return o.Checksums, true -} - -// SetChecksums sets field value -func (o *UploadResponseObject) SetChecksums(v []Checksum) { - o.Checksums = v -} - -// GetDescription returns the Description field value if set, zero value otherwise. -func (o *UploadResponseObject) GetDescription() string { - if o == nil || IsNil(o.Description) { - var ret string - return ret - } - return *o.Description -} - -// GetDescriptionOk returns a tuple with the Description field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *UploadResponseObject) GetDescriptionOk() (*string, bool) { - if o == nil || IsNil(o.Description) { - return nil, false - } - return o.Description, true -} - -// HasDescription returns a boolean if a field has been set. 
-func (o *UploadResponseObject) HasDescription() bool { - if o != nil && !IsNil(o.Description) { - return true - } - - return false -} - -// SetDescription gets a reference to the given string and assigns it to the Description field. -func (o *UploadResponseObject) SetDescription(v string) { - o.Description = &v -} - -// GetAliases returns the Aliases field value if set, zero value otherwise. -func (o *UploadResponseObject) GetAliases() []string { - if o == nil || IsNil(o.Aliases) { - var ret []string - return ret - } - return o.Aliases -} - -// GetAliasesOk returns a tuple with the Aliases field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *UploadResponseObject) GetAliasesOk() ([]string, bool) { - if o == nil || IsNil(o.Aliases) { - return nil, false - } - return o.Aliases, true -} - -// HasAliases returns a boolean if a field has been set. -func (o *UploadResponseObject) HasAliases() bool { - if o != nil && !IsNil(o.Aliases) { - return true - } - - return false -} - -// SetAliases gets a reference to the given []string and assigns it to the Aliases field. -func (o *UploadResponseObject) SetAliases(v []string) { - o.Aliases = v -} - -// GetUploadMethods returns the UploadMethods field value if set, zero value otherwise. -func (o *UploadResponseObject) GetUploadMethods() []UploadMethod { - if o == nil || IsNil(o.UploadMethods) { - var ret []UploadMethod - return ret - } - return o.UploadMethods -} - -// GetUploadMethodsOk returns a tuple with the UploadMethods field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *UploadResponseObject) GetUploadMethodsOk() ([]UploadMethod, bool) { - if o == nil || IsNil(o.UploadMethods) { - return nil, false - } - return o.UploadMethods, true -} - -// HasUploadMethods returns a boolean if a field has been set. 
-func (o *UploadResponseObject) HasUploadMethods() bool { - if o != nil && !IsNil(o.UploadMethods) { - return true - } - - return false -} - -// SetUploadMethods gets a reference to the given []UploadMethod and assigns it to the UploadMethods field. -func (o *UploadResponseObject) SetUploadMethods(v []UploadMethod) { - o.UploadMethods = v -} - -func (o UploadResponseObject) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o UploadResponseObject) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["name"] = o.Name - toSerialize["size"] = o.Size - toSerialize["mime_type"] = o.MimeType - toSerialize["checksums"] = o.Checksums - if !IsNil(o.Description) { - toSerialize["description"] = o.Description - } - if !IsNil(o.Aliases) { - toSerialize["aliases"] = o.Aliases - } - if !IsNil(o.UploadMethods) { - toSerialize["upload_methods"] = o.UploadMethods - } - return toSerialize, nil -} - -func (o *UploadResponseObject) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "name", - "size", - "mime_type", - "checksums", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varUploadResponseObject := _UploadResponseObject{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varUploadResponseObject) - - if err != nil { - return err - } - - *o = UploadResponseObject(varUploadResponseObject) - - return err -} - -type NullableUploadResponseObject struct { - value *UploadResponseObject - isSet bool -} - -func (v NullableUploadResponseObject) Get() *UploadResponseObject { - return v.value -} - -func (v *NullableUploadResponseObject) Set(val *UploadResponseObject) { - v.value = val - v.isSet = true -} - -func (v NullableUploadResponseObject) IsSet() bool { - return v.isSet -} - -func (v *NullableUploadResponseObject) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableUploadResponseObject(val *UploadResponseObject) *NullableUploadResponseObject { - return &NullableUploadResponseObject{value: val, isSet: true} -} - -func (v NullableUploadResponseObject) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableUploadResponseObject) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/drs/utils.go b/apigen/drs/utils.go deleted file mode 100644 index 4b16c12..0000000 --- a/apigen/drs/utils.go +++ /dev/null @@ -1,362 +0,0 @@ -/* -Data Repository Service - -No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - -API version: 1.5.0 -Contact: ga4gh-cloud@ga4gh.org -*/ - -// Code generated by OpenAPI 
Generator (https://openapi-generator.tech); DO NOT EDIT. - -package drs - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "time" -) - -// PtrBool is a helper routine that returns a pointer to given boolean value. -func PtrBool(v bool) *bool { return &v } - -// PtrInt is a helper routine that returns a pointer to given integer value. -func PtrInt(v int) *int { return &v } - -// PtrInt32 is a helper routine that returns a pointer to given integer value. -func PtrInt32(v int32) *int32 { return &v } - -// PtrInt64 is a helper routine that returns a pointer to given integer value. -func PtrInt64(v int64) *int64 { return &v } - -// PtrFloat32 is a helper routine that returns a pointer to given float value. -func PtrFloat32(v float32) *float32 { return &v } - -// PtrFloat64 is a helper routine that returns a pointer to given float value. -func PtrFloat64(v float64) *float64 { return &v } - -// PtrString is a helper routine that returns a pointer to given string value. -func PtrString(v string) *string { return &v } - -// PtrTime is helper routine that returns a pointer to given Time value. 
-func PtrTime(v time.Time) *time.Time { return &v } - -type NullableBool struct { - value *bool - isSet bool -} - -func (v NullableBool) Get() *bool { - return v.value -} - -func (v *NullableBool) Set(val *bool) { - v.value = val - v.isSet = true -} - -func (v NullableBool) IsSet() bool { - return v.isSet -} - -func (v *NullableBool) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBool(val *bool) *NullableBool { - return &NullableBool{value: val, isSet: true} -} - -func (v NullableBool) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBool) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableInt struct { - value *int - isSet bool -} - -func (v NullableInt) Get() *int { - return v.value -} - -func (v *NullableInt) Set(val *int) { - v.value = val - v.isSet = true -} - -func (v NullableInt) IsSet() bool { - return v.isSet -} - -func (v *NullableInt) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInt(val *int) *NullableInt { - return &NullableInt{value: val, isSet: true} -} - -func (v NullableInt) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInt) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableInt32 struct { - value *int32 - isSet bool -} - -func (v NullableInt32) Get() *int32 { - return v.value -} - -func (v *NullableInt32) Set(val *int32) { - v.value = val - v.isSet = true -} - -func (v NullableInt32) IsSet() bool { - return v.isSet -} - -func (v *NullableInt32) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInt32(val *int32) *NullableInt32 { - return &NullableInt32{value: val, isSet: true} -} - -func (v NullableInt32) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInt32) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type 
NullableInt64 struct { - value *int64 - isSet bool -} - -func (v NullableInt64) Get() *int64 { - return v.value -} - -func (v *NullableInt64) Set(val *int64) { - v.value = val - v.isSet = true -} - -func (v NullableInt64) IsSet() bool { - return v.isSet -} - -func (v *NullableInt64) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInt64(val *int64) *NullableInt64 { - return &NullableInt64{value: val, isSet: true} -} - -func (v NullableInt64) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInt64) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableFloat32 struct { - value *float32 - isSet bool -} - -func (v NullableFloat32) Get() *float32 { - return v.value -} - -func (v *NullableFloat32) Set(val *float32) { - v.value = val - v.isSet = true -} - -func (v NullableFloat32) IsSet() bool { - return v.isSet -} - -func (v *NullableFloat32) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableFloat32(val *float32) *NullableFloat32 { - return &NullableFloat32{value: val, isSet: true} -} - -func (v NullableFloat32) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableFloat32) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableFloat64 struct { - value *float64 - isSet bool -} - -func (v NullableFloat64) Get() *float64 { - return v.value -} - -func (v *NullableFloat64) Set(val *float64) { - v.value = val - v.isSet = true -} - -func (v NullableFloat64) IsSet() bool { - return v.isSet -} - -func (v *NullableFloat64) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableFloat64(val *float64) *NullableFloat64 { - return &NullableFloat64{value: val, isSet: true} -} - -func (v NullableFloat64) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableFloat64) UnmarshalJSON(src []byte) error { - v.isSet = true - return 
json.Unmarshal(src, &v.value) -} - -type NullableString struct { - value *string - isSet bool -} - -func (v NullableString) Get() *string { - return v.value -} - -func (v *NullableString) Set(val *string) { - v.value = val - v.isSet = true -} - -func (v NullableString) IsSet() bool { - return v.isSet -} - -func (v *NullableString) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableString(val *string) *NullableString { - return &NullableString{value: val, isSet: true} -} - -func (v NullableString) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableString) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableTime struct { - value *time.Time - isSet bool -} - -func (v NullableTime) Get() *time.Time { - return v.value -} - -func (v *NullableTime) Set(val *time.Time) { - v.value = val - v.isSet = true -} - -func (v NullableTime) IsSet() bool { - return v.isSet -} - -func (v *NullableTime) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableTime(val *time.Time) *NullableTime { - return &NullableTime{value: val, isSet: true} -} - -func (v NullableTime) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableTime) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -// IsNil checks if an input is nil -func IsNil(i interface{}) bool { - if i == nil { - return true - } - switch reflect.TypeOf(i).Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: - return reflect.ValueOf(i).IsNil() - case reflect.Array: - return reflect.ValueOf(i).IsZero() - } - return false -} - -type MappedNullable interface { - ToMap() (map[string]interface{}, error) -} - -// A wrapper for strict JSON decoding -func newStrictDecoder(data []byte) *json.Decoder { - dec := json.NewDecoder(bytes.NewBuffer(data)) - dec.DisallowUnknownFields() - 
return dec -} - -// Prevent trying to import "fmt" -func reportError(format string, a ...interface{}) error { - return fmt.Errorf(format, a...) -} \ No newline at end of file diff --git a/apigen/internalapi/model_bulk_create_request.go b/apigen/internalapi/model_bulk_create_request.go deleted file mode 100644 index f405e25..0000000 --- a/apigen/internalapi/model_bulk_create_request.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the BulkCreateRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BulkCreateRequest{} - -// BulkCreateRequest struct for BulkCreateRequest -type BulkCreateRequest struct { - Records []InternalRecord `json:"records"` -} - -type _BulkCreateRequest BulkCreateRequest - -// NewBulkCreateRequest instantiates a new BulkCreateRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBulkCreateRequest(records []InternalRecord) *BulkCreateRequest { - this := BulkCreateRequest{} - this.Records = records - return &this -} - -// NewBulkCreateRequestWithDefaults instantiates a new BulkCreateRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewBulkCreateRequestWithDefaults() *BulkCreateRequest { - this := BulkCreateRequest{} - return &this -} - -// GetRecords returns the Records field value -func (o *BulkCreateRequest) GetRecords() []InternalRecord { - 
if o == nil { - var ret []InternalRecord - return ret - } - - return o.Records -} - -// GetRecordsOk returns a tuple with the Records field value -// and a boolean to check if the value has been set. -func (o *BulkCreateRequest) GetRecordsOk() ([]InternalRecord, bool) { - if o == nil { - return nil, false - } - return o.Records, true -} - -// SetRecords sets field value -func (o *BulkCreateRequest) SetRecords(v []InternalRecord) { - o.Records = v -} - -func (o BulkCreateRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BulkCreateRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["records"] = o.Records - return toSerialize, nil -} - -func (o *BulkCreateRequest) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "records", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varBulkCreateRequest := _BulkCreateRequest{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varBulkCreateRequest) - - if err != nil { - return err - } - - *o = BulkCreateRequest(varBulkCreateRequest) - - return err -} - -type NullableBulkCreateRequest struct { - value *BulkCreateRequest - isSet bool -} - -func (v NullableBulkCreateRequest) Get() *BulkCreateRequest { - return v.value -} - -func (v *NullableBulkCreateRequest) Set(val *BulkCreateRequest) { - v.value = val - v.isSet = true -} - -func (v NullableBulkCreateRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableBulkCreateRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBulkCreateRequest(val *BulkCreateRequest) *NullableBulkCreateRequest { - return &NullableBulkCreateRequest{value: val, isSet: true} -} - -func (v NullableBulkCreateRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkCreateRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_bulk_documents_request.go b/apigen/internalapi/model_bulk_documents_request.go deleted file mode 100644 index 219beb4..0000000 --- a/apigen/internalapi/model_bulk_documents_request.go +++ /dev/null @@ -1,171 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. 
- -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" - "fmt" - "gopkg.in/validator.v2" -) - -// BulkDocumentsRequest - struct for BulkDocumentsRequest -type BulkDocumentsRequest struct { - BulkDocumentsRequestOneOf *BulkDocumentsRequestOneOf - ArrayOfString *[]string -} - -// BulkDocumentsRequestOneOfAsBulkDocumentsRequest is a convenience function that returns BulkDocumentsRequestOneOf wrapped in BulkDocumentsRequest -func BulkDocumentsRequestOneOfAsBulkDocumentsRequest(v *BulkDocumentsRequestOneOf) BulkDocumentsRequest { - return BulkDocumentsRequest{ - BulkDocumentsRequestOneOf: v, - } -} - -// []stringAsBulkDocumentsRequest is a convenience function that returns []string wrapped in BulkDocumentsRequest -func ArrayOfStringAsBulkDocumentsRequest(v *[]string) BulkDocumentsRequest { - return BulkDocumentsRequest{ - ArrayOfString: v, - } -} - - -// Unmarshal JSON data into one of the pointers in the struct -func (dst *BulkDocumentsRequest) UnmarshalJSON(data []byte) error { - var err error - match := 0 - // try to unmarshal data into BulkDocumentsRequestOneOf - err = newStrictDecoder(data).Decode(&dst.BulkDocumentsRequestOneOf) - if err == nil { - jsonBulkDocumentsRequestOneOf, _ := json.Marshal(dst.BulkDocumentsRequestOneOf) - if string(jsonBulkDocumentsRequestOneOf) == "{}" { // empty struct - dst.BulkDocumentsRequestOneOf = nil - } else { - if err = validator.Validate(dst.BulkDocumentsRequestOneOf); err != nil { - dst.BulkDocumentsRequestOneOf = nil - } else { - match++ - } - } - } else { - dst.BulkDocumentsRequestOneOf = nil - } - - // try to unmarshal data into ArrayOfString - err = newStrictDecoder(data).Decode(&dst.ArrayOfString) - if err == nil { - jsonArrayOfString, _ := json.Marshal(dst.ArrayOfString) - if string(jsonArrayOfString) == "{}" { // empty struct - dst.ArrayOfString = nil - } else { - if err = validator.Validate(dst.ArrayOfString); 
err != nil { - dst.ArrayOfString = nil - } else { - match++ - } - } - } else { - dst.ArrayOfString = nil - } - - if match > 1 { // more than 1 match - // reset to nil - dst.BulkDocumentsRequestOneOf = nil - dst.ArrayOfString = nil - - return fmt.Errorf("data matches more than one schema in oneOf(BulkDocumentsRequest)") - } else if match == 1 { - return nil // exactly one match - } else { // no match - return fmt.Errorf("data failed to match schemas in oneOf(BulkDocumentsRequest)") - } -} - -// Marshal data from the first non-nil pointers in the struct to JSON -func (src BulkDocumentsRequest) MarshalJSON() ([]byte, error) { - if src.BulkDocumentsRequestOneOf != nil { - return json.Marshal(&src.BulkDocumentsRequestOneOf) - } - - if src.ArrayOfString != nil { - return json.Marshal(&src.ArrayOfString) - } - - return nil, nil // no data in oneOf schemas -} - -// Get the actual instance -func (obj *BulkDocumentsRequest) GetActualInstance() (interface{}) { - if obj == nil { - return nil - } - if obj.BulkDocumentsRequestOneOf != nil { - return obj.BulkDocumentsRequestOneOf - } - - if obj.ArrayOfString != nil { - return obj.ArrayOfString - } - - // all schemas are nil - return nil -} - -// Get the actual instance value -func (obj BulkDocumentsRequest) GetActualInstanceValue() (interface{}) { - if obj.BulkDocumentsRequestOneOf != nil { - return *obj.BulkDocumentsRequestOneOf - } - - if obj.ArrayOfString != nil { - return *obj.ArrayOfString - } - - // all schemas are nil - return nil -} - -type NullableBulkDocumentsRequest struct { - value *BulkDocumentsRequest - isSet bool -} - -func (v NullableBulkDocumentsRequest) Get() *BulkDocumentsRequest { - return v.value -} - -func (v *NullableBulkDocumentsRequest) Set(val *BulkDocumentsRequest) { - v.value = val - v.isSet = true -} - -func (v NullableBulkDocumentsRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableBulkDocumentsRequest) Unset() { - v.value = nil - v.isSet = false -} - -func 
NewNullableBulkDocumentsRequest(val *BulkDocumentsRequest) *NullableBulkDocumentsRequest { - return &NullableBulkDocumentsRequest{value: val, isSet: true} -} - -func (v NullableBulkDocumentsRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkDocumentsRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_bulk_documents_request_one_of.go b/apigen/internalapi/model_bulk_documents_request_one_of.go deleted file mode 100644 index 7c9327a..0000000 --- a/apigen/internalapi/model_bulk_documents_request_one_of.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" -) - -// checks if the BulkDocumentsRequestOneOf type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BulkDocumentsRequestOneOf{} - -// BulkDocumentsRequestOneOf struct for BulkDocumentsRequestOneOf -type BulkDocumentsRequestOneOf struct { - Ids []string `json:"ids,omitempty"` - Dids []string `json:"dids,omitempty"` -} - -// NewBulkDocumentsRequestOneOf instantiates a new BulkDocumentsRequestOneOf object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBulkDocumentsRequestOneOf() *BulkDocumentsRequestOneOf { - this := BulkDocumentsRequestOneOf{} - return &this -} - -// NewBulkDocumentsRequestOneOfWithDefaults instantiates a new BulkDocumentsRequestOneOf object -// This constructor will only assign default values to properties that have it defined, -// but it 
doesn't guarantee that properties required by API are set -func NewBulkDocumentsRequestOneOfWithDefaults() *BulkDocumentsRequestOneOf { - this := BulkDocumentsRequestOneOf{} - return &this -} - -// GetIds returns the Ids field value if set, zero value otherwise. -func (o *BulkDocumentsRequestOneOf) GetIds() []string { - if o == nil || IsNil(o.Ids) { - var ret []string - return ret - } - return o.Ids -} - -// GetIdsOk returns a tuple with the Ids field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkDocumentsRequestOneOf) GetIdsOk() ([]string, bool) { - if o == nil || IsNil(o.Ids) { - return nil, false - } - return o.Ids, true -} - -// HasIds returns a boolean if a field has been set. -func (o *BulkDocumentsRequestOneOf) HasIds() bool { - if o != nil && !IsNil(o.Ids) { - return true - } - - return false -} - -// SetIds gets a reference to the given []string and assigns it to the Ids field. -func (o *BulkDocumentsRequestOneOf) SetIds(v []string) { - o.Ids = v -} - -// GetDids returns the Dids field value if set, zero value otherwise. -func (o *BulkDocumentsRequestOneOf) GetDids() []string { - if o == nil || IsNil(o.Dids) { - var ret []string - return ret - } - return o.Dids -} - -// GetDidsOk returns a tuple with the Dids field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkDocumentsRequestOneOf) GetDidsOk() ([]string, bool) { - if o == nil || IsNil(o.Dids) { - return nil, false - } - return o.Dids, true -} - -// HasDids returns a boolean if a field has been set. -func (o *BulkDocumentsRequestOneOf) HasDids() bool { - if o != nil && !IsNil(o.Dids) { - return true - } - - return false -} - -// SetDids gets a reference to the given []string and assigns it to the Dids field. 
-func (o *BulkDocumentsRequestOneOf) SetDids(v []string) { - o.Dids = v -} - -func (o BulkDocumentsRequestOneOf) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BulkDocumentsRequestOneOf) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Ids) { - toSerialize["ids"] = o.Ids - } - if !IsNil(o.Dids) { - toSerialize["dids"] = o.Dids - } - return toSerialize, nil -} - -type NullableBulkDocumentsRequestOneOf struct { - value *BulkDocumentsRequestOneOf - isSet bool -} - -func (v NullableBulkDocumentsRequestOneOf) Get() *BulkDocumentsRequestOneOf { - return v.value -} - -func (v *NullableBulkDocumentsRequestOneOf) Set(val *BulkDocumentsRequestOneOf) { - v.value = val - v.isSet = true -} - -func (v NullableBulkDocumentsRequestOneOf) IsSet() bool { - return v.isSet -} - -func (v *NullableBulkDocumentsRequestOneOf) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBulkDocumentsRequestOneOf(val *BulkDocumentsRequestOneOf) *NullableBulkDocumentsRequestOneOf { - return &NullableBulkDocumentsRequestOneOf{value: val, isSet: true} -} - -func (v NullableBulkDocumentsRequestOneOf) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkDocumentsRequestOneOf) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_bulk_hashes_request.go b/apigen/internalapi/model_bulk_hashes_request.go deleted file mode 100644 index f987377..0000000 --- a/apigen/internalapi/model_bulk_hashes_request.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. 
- -package internalapi - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the BulkHashesRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BulkHashesRequest{} - -// BulkHashesRequest struct for BulkHashesRequest -type BulkHashesRequest struct { - Hashes []string `json:"hashes"` -} - -type _BulkHashesRequest BulkHashesRequest - -// NewBulkHashesRequest instantiates a new BulkHashesRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBulkHashesRequest(hashes []string) *BulkHashesRequest { - this := BulkHashesRequest{} - this.Hashes = hashes - return &this -} - -// NewBulkHashesRequestWithDefaults instantiates a new BulkHashesRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewBulkHashesRequestWithDefaults() *BulkHashesRequest { - this := BulkHashesRequest{} - return &this -} - -// GetHashes returns the Hashes field value -func (o *BulkHashesRequest) GetHashes() []string { - if o == nil { - var ret []string - return ret - } - - return o.Hashes -} - -// GetHashesOk returns a tuple with the Hashes field value -// and a boolean to check if the value has been set. 
-func (o *BulkHashesRequest) GetHashesOk() ([]string, bool) { - if o == nil { - return nil, false - } - return o.Hashes, true -} - -// SetHashes sets field value -func (o *BulkHashesRequest) SetHashes(v []string) { - o.Hashes = v -} - -func (o BulkHashesRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BulkHashesRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["hashes"] = o.Hashes - return toSerialize, nil -} - -func (o *BulkHashesRequest) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. - requiredProperties := []string{ - "hashes", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varBulkHashesRequest := _BulkHashesRequest{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varBulkHashesRequest) - - if err != nil { - return err - } - - *o = BulkHashesRequest(varBulkHashesRequest) - - return err -} - -type NullableBulkHashesRequest struct { - value *BulkHashesRequest - isSet bool -} - -func (v NullableBulkHashesRequest) Get() *BulkHashesRequest { - return v.value -} - -func (v *NullableBulkHashesRequest) Set(val *BulkHashesRequest) { - v.value = val - v.isSet = true -} - -func (v NullableBulkHashesRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableBulkHashesRequest) Unset() { - v.value = nil - v.isSet = 
false -} - -func NewNullableBulkHashesRequest(val *BulkHashesRequest) *NullableBulkHashesRequest { - return &NullableBulkHashesRequest{value: val, isSet: true} -} - -func (v NullableBulkHashesRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkHashesRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_bulk_sha256_validity_request.go b/apigen/internalapi/model_bulk_sha256_validity_request.go deleted file mode 100644 index 72275ae..0000000 --- a/apigen/internalapi/model_bulk_sha256_validity_request.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" -) - -// checks if the BulkSHA256ValidityRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &BulkSHA256ValidityRequest{} - -// BulkSHA256ValidityRequest struct for BulkSHA256ValidityRequest -type BulkSHA256ValidityRequest struct { - Sha256 []string `json:"sha256,omitempty"` - Hashes []string `json:"hashes,omitempty"` -} - -// NewBulkSHA256ValidityRequest instantiates a new BulkSHA256ValidityRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewBulkSHA256ValidityRequest() *BulkSHA256ValidityRequest { - this := BulkSHA256ValidityRequest{} - return &this -} - -// NewBulkSHA256ValidityRequestWithDefaults instantiates a new BulkSHA256ValidityRequest object -// This constructor will only assign default values to properties that have it defined, -// but 
it doesn't guarantee that properties required by API are set -func NewBulkSHA256ValidityRequestWithDefaults() *BulkSHA256ValidityRequest { - this := BulkSHA256ValidityRequest{} - return &this -} - -// GetSha256 returns the Sha256 field value if set, zero value otherwise. -func (o *BulkSHA256ValidityRequest) GetSha256() []string { - if o == nil || IsNil(o.Sha256) { - var ret []string - return ret - } - return o.Sha256 -} - -// GetSha256Ok returns a tuple with the Sha256 field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkSHA256ValidityRequest) GetSha256Ok() ([]string, bool) { - if o == nil || IsNil(o.Sha256) { - return nil, false - } - return o.Sha256, true -} - -// HasSha256 returns a boolean if a field has been set. -func (o *BulkSHA256ValidityRequest) HasSha256() bool { - if o != nil && !IsNil(o.Sha256) { - return true - } - - return false -} - -// SetSha256 gets a reference to the given []string and assigns it to the Sha256 field. -func (o *BulkSHA256ValidityRequest) SetSha256(v []string) { - o.Sha256 = v -} - -// GetHashes returns the Hashes field value if set, zero value otherwise. -func (o *BulkSHA256ValidityRequest) GetHashes() []string { - if o == nil || IsNil(o.Hashes) { - var ret []string - return ret - } - return o.Hashes -} - -// GetHashesOk returns a tuple with the Hashes field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *BulkSHA256ValidityRequest) GetHashesOk() ([]string, bool) { - if o == nil || IsNil(o.Hashes) { - return nil, false - } - return o.Hashes, true -} - -// HasHashes returns a boolean if a field has been set. -func (o *BulkSHA256ValidityRequest) HasHashes() bool { - if o != nil && !IsNil(o.Hashes) { - return true - } - - return false -} - -// SetHashes gets a reference to the given []string and assigns it to the Hashes field. 
-func (o *BulkSHA256ValidityRequest) SetHashes(v []string) { - o.Hashes = v -} - -func (o BulkSHA256ValidityRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o BulkSHA256ValidityRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Sha256) { - toSerialize["sha256"] = o.Sha256 - } - if !IsNil(o.Hashes) { - toSerialize["hashes"] = o.Hashes - } - return toSerialize, nil -} - -type NullableBulkSHA256ValidityRequest struct { - value *BulkSHA256ValidityRequest - isSet bool -} - -func (v NullableBulkSHA256ValidityRequest) Get() *BulkSHA256ValidityRequest { - return v.value -} - -func (v *NullableBulkSHA256ValidityRequest) Set(val *BulkSHA256ValidityRequest) { - v.value = val - v.isSet = true -} - -func (v NullableBulkSHA256ValidityRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableBulkSHA256ValidityRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBulkSHA256ValidityRequest(val *BulkSHA256ValidityRequest) *NullableBulkSHA256ValidityRequest { - return &NullableBulkSHA256ValidityRequest{value: val, isSet: true} -} - -func (v NullableBulkSHA256ValidityRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBulkSHA256ValidityRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_delete_by_query_response.go b/apigen/internalapi/model_delete_by_query_response.go deleted file mode 100644 index ec34fbc..0000000 --- a/apigen/internalapi/model_delete_by_query_response.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. 
- -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" -) - -// checks if the DeleteByQueryResponse type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &DeleteByQueryResponse{} - -// DeleteByQueryResponse struct for DeleteByQueryResponse -type DeleteByQueryResponse struct { - Deleted *int32 `json:"deleted,omitempty"` -} - -// NewDeleteByQueryResponse instantiates a new DeleteByQueryResponse object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewDeleteByQueryResponse() *DeleteByQueryResponse { - this := DeleteByQueryResponse{} - return &this -} - -// NewDeleteByQueryResponseWithDefaults instantiates a new DeleteByQueryResponse object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewDeleteByQueryResponseWithDefaults() *DeleteByQueryResponse { - this := DeleteByQueryResponse{} - return &this -} - -// GetDeleted returns the Deleted field value if set, zero value otherwise. -func (o *DeleteByQueryResponse) GetDeleted() int32 { - if o == nil || IsNil(o.Deleted) { - var ret int32 - return ret - } - return *o.Deleted -} - -// GetDeletedOk returns a tuple with the Deleted field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *DeleteByQueryResponse) GetDeletedOk() (*int32, bool) { - if o == nil || IsNil(o.Deleted) { - return nil, false - } - return o.Deleted, true -} - -// HasDeleted returns a boolean if a field has been set. 
-func (o *DeleteByQueryResponse) HasDeleted() bool { - if o != nil && !IsNil(o.Deleted) { - return true - } - - return false -} - -// SetDeleted gets a reference to the given int32 and assigns it to the Deleted field. -func (o *DeleteByQueryResponse) SetDeleted(v int32) { - o.Deleted = &v -} - -func (o DeleteByQueryResponse) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o DeleteByQueryResponse) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Deleted) { - toSerialize["deleted"] = o.Deleted - } - return toSerialize, nil -} - -type NullableDeleteByQueryResponse struct { - value *DeleteByQueryResponse - isSet bool -} - -func (v NullableDeleteByQueryResponse) Get() *DeleteByQueryResponse { - return v.value -} - -func (v *NullableDeleteByQueryResponse) Set(val *DeleteByQueryResponse) { - v.value = val - v.isSet = true -} - -func (v NullableDeleteByQueryResponse) IsSet() bool { - return v.isSet -} - -func (v *NullableDeleteByQueryResponse) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableDeleteByQueryResponse(val *DeleteByQueryResponse) *NullableDeleteByQueryResponse { - return &NullableDeleteByQueryResponse{value: val, isSet: true} -} - -func (v NullableDeleteByQueryResponse) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableDeleteByQueryResponse) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_internal_multipart_complete_request.go b/apigen/internalapi/model_internal_multipart_complete_request.go deleted file mode 100644 index 1646e30..0000000 --- a/apigen/internalapi/model_internal_multipart_complete_request.go +++ /dev/null @@ -1,250 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. 
This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the InternalMultipartCompleteRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &InternalMultipartCompleteRequest{} - -// InternalMultipartCompleteRequest struct for InternalMultipartCompleteRequest -type InternalMultipartCompleteRequest struct { - Key string `json:"key"` - Bucket *string `json:"bucket,omitempty"` - UploadId string `json:"uploadId"` - Parts []InternalMultipartPart `json:"parts"` -} - -type _InternalMultipartCompleteRequest InternalMultipartCompleteRequest - -// NewInternalMultipartCompleteRequest instantiates a new InternalMultipartCompleteRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewInternalMultipartCompleteRequest(key string, uploadId string, parts []InternalMultipartPart) *InternalMultipartCompleteRequest { - this := InternalMultipartCompleteRequest{} - this.Key = key - this.UploadId = uploadId - this.Parts = parts - return &this -} - -// NewInternalMultipartCompleteRequestWithDefaults instantiates a new InternalMultipartCompleteRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewInternalMultipartCompleteRequestWithDefaults() *InternalMultipartCompleteRequest { - this := InternalMultipartCompleteRequest{} - return &this -} - -// GetKey returns the Key field value -func (o *InternalMultipartCompleteRequest) GetKey() string { - if o == nil { - var ret string - return ret - } - - return o.Key -} - -// GetKeyOk 
returns a tuple with the Key field value -// and a boolean to check if the value has been set. -func (o *InternalMultipartCompleteRequest) GetKeyOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Key, true -} - -// SetKey sets field value -func (o *InternalMultipartCompleteRequest) SetKey(v string) { - o.Key = v -} - -// GetBucket returns the Bucket field value if set, zero value otherwise. -func (o *InternalMultipartCompleteRequest) GetBucket() string { - if o == nil || IsNil(o.Bucket) { - var ret string - return ret - } - return *o.Bucket -} - -// GetBucketOk returns a tuple with the Bucket field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalMultipartCompleteRequest) GetBucketOk() (*string, bool) { - if o == nil || IsNil(o.Bucket) { - return nil, false - } - return o.Bucket, true -} - -// HasBucket returns a boolean if a field has been set. -func (o *InternalMultipartCompleteRequest) HasBucket() bool { - if o != nil && !IsNil(o.Bucket) { - return true - } - - return false -} - -// SetBucket gets a reference to the given string and assigns it to the Bucket field. -func (o *InternalMultipartCompleteRequest) SetBucket(v string) { - o.Bucket = &v -} - -// GetUploadId returns the UploadId field value -func (o *InternalMultipartCompleteRequest) GetUploadId() string { - if o == nil { - var ret string - return ret - } - - return o.UploadId -} - -// GetUploadIdOk returns a tuple with the UploadId field value -// and a boolean to check if the value has been set. 
-func (o *InternalMultipartCompleteRequest) GetUploadIdOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.UploadId, true -} - -// SetUploadId sets field value -func (o *InternalMultipartCompleteRequest) SetUploadId(v string) { - o.UploadId = v -} - -// GetParts returns the Parts field value -func (o *InternalMultipartCompleteRequest) GetParts() []InternalMultipartPart { - if o == nil { - var ret []InternalMultipartPart - return ret - } - - return o.Parts -} - -// GetPartsOk returns a tuple with the Parts field value -// and a boolean to check if the value has been set. -func (o *InternalMultipartCompleteRequest) GetPartsOk() ([]InternalMultipartPart, bool) { - if o == nil { - return nil, false - } - return o.Parts, true -} - -// SetParts sets field value -func (o *InternalMultipartCompleteRequest) SetParts(v []InternalMultipartPart) { - o.Parts = v -} - -func (o InternalMultipartCompleteRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o InternalMultipartCompleteRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["key"] = o.Key - if !IsNil(o.Bucket) { - toSerialize["bucket"] = o.Bucket - } - toSerialize["uploadId"] = o.UploadId - toSerialize["parts"] = o.Parts - return toSerialize, nil -} - -func (o *InternalMultipartCompleteRequest) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "key", - "uploadId", - "parts", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varInternalMultipartCompleteRequest := _InternalMultipartCompleteRequest{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varInternalMultipartCompleteRequest) - - if err != nil { - return err - } - - *o = InternalMultipartCompleteRequest(varInternalMultipartCompleteRequest) - - return err -} - -type NullableInternalMultipartCompleteRequest struct { - value *InternalMultipartCompleteRequest - isSet bool -} - -func (v NullableInternalMultipartCompleteRequest) Get() *InternalMultipartCompleteRequest { - return v.value -} - -func (v *NullableInternalMultipartCompleteRequest) Set(val *InternalMultipartCompleteRequest) { - v.value = val - v.isSet = true -} - -func (v NullableInternalMultipartCompleteRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableInternalMultipartCompleteRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInternalMultipartCompleteRequest(val *InternalMultipartCompleteRequest) *NullableInternalMultipartCompleteRequest { - return &NullableInternalMultipartCompleteRequest{value: val, isSet: true} -} - -func (v NullableInternalMultipartCompleteRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInternalMultipartCompleteRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_internal_multipart_init_request.go b/apigen/internalapi/model_internal_multipart_init_request.go deleted file mode 100644 index ca7fba3..0000000 
--- a/apigen/internalapi/model_internal_multipart_init_request.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" -) - -// checks if the InternalMultipartInitRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &InternalMultipartInitRequest{} - -// InternalMultipartInitRequest struct for InternalMultipartInitRequest -type InternalMultipartInitRequest struct { - Guid *string `json:"guid,omitempty"` - FileName *string `json:"file_name,omitempty"` - Bucket *string `json:"bucket,omitempty"` -} - -// NewInternalMultipartInitRequest instantiates a new InternalMultipartInitRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewInternalMultipartInitRequest() *InternalMultipartInitRequest { - this := InternalMultipartInitRequest{} - return &this -} - -// NewInternalMultipartInitRequestWithDefaults instantiates a new InternalMultipartInitRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewInternalMultipartInitRequestWithDefaults() *InternalMultipartInitRequest { - this := InternalMultipartInitRequest{} - return &this -} - -// GetGuid returns the Guid field value if set, zero value otherwise. 
-func (o *InternalMultipartInitRequest) GetGuid() string { - if o == nil || IsNil(o.Guid) { - var ret string - return ret - } - return *o.Guid -} - -// GetGuidOk returns a tuple with the Guid field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalMultipartInitRequest) GetGuidOk() (*string, bool) { - if o == nil || IsNil(o.Guid) { - return nil, false - } - return o.Guid, true -} - -// HasGuid returns a boolean if a field has been set. -func (o *InternalMultipartInitRequest) HasGuid() bool { - if o != nil && !IsNil(o.Guid) { - return true - } - - return false -} - -// SetGuid gets a reference to the given string and assigns it to the Guid field. -func (o *InternalMultipartInitRequest) SetGuid(v string) { - o.Guid = &v -} - -// GetFileName returns the FileName field value if set, zero value otherwise. -func (o *InternalMultipartInitRequest) GetFileName() string { - if o == nil || IsNil(o.FileName) { - var ret string - return ret - } - return *o.FileName -} - -// GetFileNameOk returns a tuple with the FileName field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalMultipartInitRequest) GetFileNameOk() (*string, bool) { - if o == nil || IsNil(o.FileName) { - return nil, false - } - return o.FileName, true -} - -// HasFileName returns a boolean if a field has been set. -func (o *InternalMultipartInitRequest) HasFileName() bool { - if o != nil && !IsNil(o.FileName) { - return true - } - - return false -} - -// SetFileName gets a reference to the given string and assigns it to the FileName field. -func (o *InternalMultipartInitRequest) SetFileName(v string) { - o.FileName = &v -} - -// GetBucket returns the Bucket field value if set, zero value otherwise. 
-func (o *InternalMultipartInitRequest) GetBucket() string { - if o == nil || IsNil(o.Bucket) { - var ret string - return ret - } - return *o.Bucket -} - -// GetBucketOk returns a tuple with the Bucket field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalMultipartInitRequest) GetBucketOk() (*string, bool) { - if o == nil || IsNil(o.Bucket) { - return nil, false - } - return o.Bucket, true -} - -// HasBucket returns a boolean if a field has been set. -func (o *InternalMultipartInitRequest) HasBucket() bool { - if o != nil && !IsNil(o.Bucket) { - return true - } - - return false -} - -// SetBucket gets a reference to the given string and assigns it to the Bucket field. -func (o *InternalMultipartInitRequest) SetBucket(v string) { - o.Bucket = &v -} - -func (o InternalMultipartInitRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o InternalMultipartInitRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Guid) { - toSerialize["guid"] = o.Guid - } - if !IsNil(o.FileName) { - toSerialize["file_name"] = o.FileName - } - if !IsNil(o.Bucket) { - toSerialize["bucket"] = o.Bucket - } - return toSerialize, nil -} - -type NullableInternalMultipartInitRequest struct { - value *InternalMultipartInitRequest - isSet bool -} - -func (v NullableInternalMultipartInitRequest) Get() *InternalMultipartInitRequest { - return v.value -} - -func (v *NullableInternalMultipartInitRequest) Set(val *InternalMultipartInitRequest) { - v.value = val - v.isSet = true -} - -func (v NullableInternalMultipartInitRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableInternalMultipartInitRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInternalMultipartInitRequest(val *InternalMultipartInitRequest) *NullableInternalMultipartInitRequest { - return 
&NullableInternalMultipartInitRequest{value: val, isSet: true} -} - -func (v NullableInternalMultipartInitRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInternalMultipartInitRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_internal_multipart_init_response.go b/apigen/internalapi/model_internal_multipart_init_response.go deleted file mode 100644 index c2decac..0000000 --- a/apigen/internalapi/model_internal_multipart_init_response.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" -) - -// checks if the InternalMultipartInitResponse type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &InternalMultipartInitResponse{} - -// InternalMultipartInitResponse struct for InternalMultipartInitResponse -type InternalMultipartInitResponse struct { - Guid *string `json:"guid,omitempty"` - UploadId *string `json:"uploadId,omitempty"` -} - -// NewInternalMultipartInitResponse instantiates a new InternalMultipartInitResponse object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewInternalMultipartInitResponse() *InternalMultipartInitResponse { - this := InternalMultipartInitResponse{} - return &this -} - -// NewInternalMultipartInitResponseWithDefaults instantiates a new InternalMultipartInitResponse object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't 
guarantee that properties required by API are set -func NewInternalMultipartInitResponseWithDefaults() *InternalMultipartInitResponse { - this := InternalMultipartInitResponse{} - return &this -} - -// GetGuid returns the Guid field value if set, zero value otherwise. -func (o *InternalMultipartInitResponse) GetGuid() string { - if o == nil || IsNil(o.Guid) { - var ret string - return ret - } - return *o.Guid -} - -// GetGuidOk returns a tuple with the Guid field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalMultipartInitResponse) GetGuidOk() (*string, bool) { - if o == nil || IsNil(o.Guid) { - return nil, false - } - return o.Guid, true -} - -// HasGuid returns a boolean if a field has been set. -func (o *InternalMultipartInitResponse) HasGuid() bool { - if o != nil && !IsNil(o.Guid) { - return true - } - - return false -} - -// SetGuid gets a reference to the given string and assigns it to the Guid field. -func (o *InternalMultipartInitResponse) SetGuid(v string) { - o.Guid = &v -} - -// GetUploadId returns the UploadId field value if set, zero value otherwise. -func (o *InternalMultipartInitResponse) GetUploadId() string { - if o == nil || IsNil(o.UploadId) { - var ret string - return ret - } - return *o.UploadId -} - -// GetUploadIdOk returns a tuple with the UploadId field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalMultipartInitResponse) GetUploadIdOk() (*string, bool) { - if o == nil || IsNil(o.UploadId) { - return nil, false - } - return o.UploadId, true -} - -// HasUploadId returns a boolean if a field has been set. -func (o *InternalMultipartInitResponse) HasUploadId() bool { - if o != nil && !IsNil(o.UploadId) { - return true - } - - return false -} - -// SetUploadId gets a reference to the given string and assigns it to the UploadId field. 
-func (o *InternalMultipartInitResponse) SetUploadId(v string) { - o.UploadId = &v -} - -func (o InternalMultipartInitResponse) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o InternalMultipartInitResponse) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Guid) { - toSerialize["guid"] = o.Guid - } - if !IsNil(o.UploadId) { - toSerialize["uploadId"] = o.UploadId - } - return toSerialize, nil -} - -type NullableInternalMultipartInitResponse struct { - value *InternalMultipartInitResponse - isSet bool -} - -func (v NullableInternalMultipartInitResponse) Get() *InternalMultipartInitResponse { - return v.value -} - -func (v *NullableInternalMultipartInitResponse) Set(val *InternalMultipartInitResponse) { - v.value = val - v.isSet = true -} - -func (v NullableInternalMultipartInitResponse) IsSet() bool { - return v.isSet -} - -func (v *NullableInternalMultipartInitResponse) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInternalMultipartInitResponse(val *InternalMultipartInitResponse) *NullableInternalMultipartInitResponse { - return &NullableInternalMultipartInitResponse{value: val, isSet: true} -} - -func (v NullableInternalMultipartInitResponse) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInternalMultipartInitResponse) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_internal_multipart_part.go b/apigen/internalapi/model_internal_multipart_part.go deleted file mode 100644 index 6bc1c95..0000000 --- a/apigen/internalapi/model_internal_multipart_part.go +++ /dev/null @@ -1,186 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. 
- -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the InternalMultipartPart type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &InternalMultipartPart{} - -// InternalMultipartPart struct for InternalMultipartPart -type InternalMultipartPart struct { - PartNumber int32 `json:"PartNumber"` - ETag string `json:"ETag"` -} - -type _InternalMultipartPart InternalMultipartPart - -// NewInternalMultipartPart instantiates a new InternalMultipartPart object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewInternalMultipartPart(partNumber int32, eTag string) *InternalMultipartPart { - this := InternalMultipartPart{} - this.PartNumber = partNumber - this.ETag = eTag - return &this -} - -// NewInternalMultipartPartWithDefaults instantiates a new InternalMultipartPart object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewInternalMultipartPartWithDefaults() *InternalMultipartPart { - this := InternalMultipartPart{} - return &this -} - -// GetPartNumber returns the PartNumber field value -func (o *InternalMultipartPart) GetPartNumber() int32 { - if o == nil { - var ret int32 - return ret - } - - return o.PartNumber -} - -// GetPartNumberOk returns a tuple with the PartNumber field value -// and a boolean to check if the value has been set. 
-func (o *InternalMultipartPart) GetPartNumberOk() (*int32, bool) { - if o == nil { - return nil, false - } - return &o.PartNumber, true -} - -// SetPartNumber sets field value -func (o *InternalMultipartPart) SetPartNumber(v int32) { - o.PartNumber = v -} - -// GetETag returns the ETag field value -func (o *InternalMultipartPart) GetETag() string { - if o == nil { - var ret string - return ret - } - - return o.ETag -} - -// GetETagOk returns a tuple with the ETag field value -// and a boolean to check if the value has been set. -func (o *InternalMultipartPart) GetETagOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.ETag, true -} - -// SetETag sets field value -func (o *InternalMultipartPart) SetETag(v string) { - o.ETag = v -} - -func (o InternalMultipartPart) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o InternalMultipartPart) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["PartNumber"] = o.PartNumber - toSerialize["ETag"] = o.ETag - return toSerialize, nil -} - -func (o *InternalMultipartPart) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "PartNumber", - "ETag", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varInternalMultipartPart := _InternalMultipartPart{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varInternalMultipartPart) - - if err != nil { - return err - } - - *o = InternalMultipartPart(varInternalMultipartPart) - - return err -} - -type NullableInternalMultipartPart struct { - value *InternalMultipartPart - isSet bool -} - -func (v NullableInternalMultipartPart) Get() *InternalMultipartPart { - return v.value -} - -func (v *NullableInternalMultipartPart) Set(val *InternalMultipartPart) { - v.value = val - v.isSet = true -} - -func (v NullableInternalMultipartPart) IsSet() bool { - return v.isSet -} - -func (v *NullableInternalMultipartPart) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInternalMultipartPart(val *InternalMultipartPart) *NullableInternalMultipartPart { - return &NullableInternalMultipartPart{value: val, isSet: true} -} - -func (v NullableInternalMultipartPart) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInternalMultipartPart) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_internal_multipart_upload_request.go b/apigen/internalapi/model_internal_multipart_upload_request.go deleted file mode 100644 index 00a446b..0000000 --- a/apigen/internalapi/model_internal_multipart_upload_request.go +++ /dev/null @@ -1,250 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. 
This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" - "bytes" - "fmt" -) - -// checks if the InternalMultipartUploadRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &InternalMultipartUploadRequest{} - -// InternalMultipartUploadRequest struct for InternalMultipartUploadRequest -type InternalMultipartUploadRequest struct { - Key string `json:"key"` - Bucket *string `json:"bucket,omitempty"` - UploadId string `json:"uploadId"` - PartNumber int32 `json:"partNumber"` -} - -type _InternalMultipartUploadRequest InternalMultipartUploadRequest - -// NewInternalMultipartUploadRequest instantiates a new InternalMultipartUploadRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewInternalMultipartUploadRequest(key string, uploadId string, partNumber int32) *InternalMultipartUploadRequest { - this := InternalMultipartUploadRequest{} - this.Key = key - this.UploadId = uploadId - this.PartNumber = partNumber - return &this -} - -// NewInternalMultipartUploadRequestWithDefaults instantiates a new InternalMultipartUploadRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewInternalMultipartUploadRequestWithDefaults() *InternalMultipartUploadRequest { - this := InternalMultipartUploadRequest{} - return &this -} - -// GetKey returns the Key field value -func (o *InternalMultipartUploadRequest) GetKey() string { - if o == nil { - var ret string - return ret - } - - return o.Key -} - -// GetKeyOk returns a tuple with the Key field value -// and a 
boolean to check if the value has been set. -func (o *InternalMultipartUploadRequest) GetKeyOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.Key, true -} - -// SetKey sets field value -func (o *InternalMultipartUploadRequest) SetKey(v string) { - o.Key = v -} - -// GetBucket returns the Bucket field value if set, zero value otherwise. -func (o *InternalMultipartUploadRequest) GetBucket() string { - if o == nil || IsNil(o.Bucket) { - var ret string - return ret - } - return *o.Bucket -} - -// GetBucketOk returns a tuple with the Bucket field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalMultipartUploadRequest) GetBucketOk() (*string, bool) { - if o == nil || IsNil(o.Bucket) { - return nil, false - } - return o.Bucket, true -} - -// HasBucket returns a boolean if a field has been set. -func (o *InternalMultipartUploadRequest) HasBucket() bool { - if o != nil && !IsNil(o.Bucket) { - return true - } - - return false -} - -// SetBucket gets a reference to the given string and assigns it to the Bucket field. -func (o *InternalMultipartUploadRequest) SetBucket(v string) { - o.Bucket = &v -} - -// GetUploadId returns the UploadId field value -func (o *InternalMultipartUploadRequest) GetUploadId() string { - if o == nil { - var ret string - return ret - } - - return o.UploadId -} - -// GetUploadIdOk returns a tuple with the UploadId field value -// and a boolean to check if the value has been set. 
-func (o *InternalMultipartUploadRequest) GetUploadIdOk() (*string, bool) { - if o == nil { - return nil, false - } - return &o.UploadId, true -} - -// SetUploadId sets field value -func (o *InternalMultipartUploadRequest) SetUploadId(v string) { - o.UploadId = v -} - -// GetPartNumber returns the PartNumber field value -func (o *InternalMultipartUploadRequest) GetPartNumber() int32 { - if o == nil { - var ret int32 - return ret - } - - return o.PartNumber -} - -// GetPartNumberOk returns a tuple with the PartNumber field value -// and a boolean to check if the value has been set. -func (o *InternalMultipartUploadRequest) GetPartNumberOk() (*int32, bool) { - if o == nil { - return nil, false - } - return &o.PartNumber, true -} - -// SetPartNumber sets field value -func (o *InternalMultipartUploadRequest) SetPartNumber(v int32) { - o.PartNumber = v -} - -func (o InternalMultipartUploadRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o InternalMultipartUploadRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - toSerialize["key"] = o.Key - if !IsNil(o.Bucket) { - toSerialize["bucket"] = o.Bucket - } - toSerialize["uploadId"] = o.UploadId - toSerialize["partNumber"] = o.PartNumber - return toSerialize, nil -} - -func (o *InternalMultipartUploadRequest) UnmarshalJSON(data []byte) (err error) { - // This validates that all required properties are included in the JSON object - // by unmarshalling the object into a generic map with string keys and checking - // that every required field exists as a key in the generic map. 
- requiredProperties := []string{ - "key", - "uploadId", - "partNumber", - } - - allProperties := make(map[string]interface{}) - - err = json.Unmarshal(data, &allProperties) - - if err != nil { - return err; - } - - for _, requiredProperty := range(requiredProperties) { - if _, exists := allProperties[requiredProperty]; !exists { - return fmt.Errorf("no value given for required property %v", requiredProperty) - } - } - - varInternalMultipartUploadRequest := _InternalMultipartUploadRequest{} - - decoder := json.NewDecoder(bytes.NewReader(data)) - decoder.DisallowUnknownFields() - err = decoder.Decode(&varInternalMultipartUploadRequest) - - if err != nil { - return err - } - - *o = InternalMultipartUploadRequest(varInternalMultipartUploadRequest) - - return err -} - -type NullableInternalMultipartUploadRequest struct { - value *InternalMultipartUploadRequest - isSet bool -} - -func (v NullableInternalMultipartUploadRequest) Get() *InternalMultipartUploadRequest { - return v.value -} - -func (v *NullableInternalMultipartUploadRequest) Set(val *InternalMultipartUploadRequest) { - v.value = val - v.isSet = true -} - -func (v NullableInternalMultipartUploadRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableInternalMultipartUploadRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInternalMultipartUploadRequest(val *InternalMultipartUploadRequest) *NullableInternalMultipartUploadRequest { - return &NullableInternalMultipartUploadRequest{value: val, isSet: true} -} - -func (v NullableInternalMultipartUploadRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInternalMultipartUploadRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_internal_multipart_upload_response.go b/apigen/internalapi/model_internal_multipart_upload_response.go deleted file mode 100644 index 29b4d65..0000000 --- 
a/apigen/internalapi/model_internal_multipart_upload_response.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" -) - -// checks if the InternalMultipartUploadResponse type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &InternalMultipartUploadResponse{} - -// InternalMultipartUploadResponse struct for InternalMultipartUploadResponse -type InternalMultipartUploadResponse struct { - PresignedUrl *string `json:"presigned_url,omitempty"` -} - -// NewInternalMultipartUploadResponse instantiates a new InternalMultipartUploadResponse object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewInternalMultipartUploadResponse() *InternalMultipartUploadResponse { - this := InternalMultipartUploadResponse{} - return &this -} - -// NewInternalMultipartUploadResponseWithDefaults instantiates a new InternalMultipartUploadResponse object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewInternalMultipartUploadResponseWithDefaults() *InternalMultipartUploadResponse { - this := InternalMultipartUploadResponse{} - return &this -} - -// GetPresignedUrl returns the PresignedUrl field value if set, zero value otherwise. 
-func (o *InternalMultipartUploadResponse) GetPresignedUrl() string { - if o == nil || IsNil(o.PresignedUrl) { - var ret string - return ret - } - return *o.PresignedUrl -} - -// GetPresignedUrlOk returns a tuple with the PresignedUrl field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalMultipartUploadResponse) GetPresignedUrlOk() (*string, bool) { - if o == nil || IsNil(o.PresignedUrl) { - return nil, false - } - return o.PresignedUrl, true -} - -// HasPresignedUrl returns a boolean if a field has been set. -func (o *InternalMultipartUploadResponse) HasPresignedUrl() bool { - if o != nil && !IsNil(o.PresignedUrl) { - return true - } - - return false -} - -// SetPresignedUrl gets a reference to the given string and assigns it to the PresignedUrl field. -func (o *InternalMultipartUploadResponse) SetPresignedUrl(v string) { - o.PresignedUrl = &v -} - -func (o InternalMultipartUploadResponse) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o InternalMultipartUploadResponse) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.PresignedUrl) { - toSerialize["presigned_url"] = o.PresignedUrl - } - return toSerialize, nil -} - -type NullableInternalMultipartUploadResponse struct { - value *InternalMultipartUploadResponse - isSet bool -} - -func (v NullableInternalMultipartUploadResponse) Get() *InternalMultipartUploadResponse { - return v.value -} - -func (v *NullableInternalMultipartUploadResponse) Set(val *InternalMultipartUploadResponse) { - v.value = val - v.isSet = true -} - -func (v NullableInternalMultipartUploadResponse) IsSet() bool { - return v.isSet -} - -func (v *NullableInternalMultipartUploadResponse) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInternalMultipartUploadResponse(val *InternalMultipartUploadResponse) 
*NullableInternalMultipartUploadResponse { - return &NullableInternalMultipartUploadResponse{value: val, isSet: true} -} - -func (v NullableInternalMultipartUploadResponse) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInternalMultipartUploadResponse) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_internal_record.go b/apigen/internalapi/model_internal_record.go deleted file mode 100644 index 9794159..0000000 --- a/apigen/internalapi/model_internal_record.go +++ /dev/null @@ -1,379 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" -) - -// checks if the InternalRecord type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &InternalRecord{} - -// InternalRecord struct for InternalRecord -type InternalRecord struct { - Did *string `json:"did,omitempty"` - // Hash map, e.g. 
{\"sha256\":\"...\"} - Hashes *map[string]string `json:"hashes,omitempty"` - Size *int64 `json:"size,omitempty"` - Urls []string `json:"urls,omitempty"` - Authz []string `json:"authz,omitempty"` - FileName *string `json:"file_name,omitempty"` - Organization *string `json:"organization,omitempty"` - Project *string `json:"project,omitempty"` -} - -// NewInternalRecord instantiates a new InternalRecord object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewInternalRecord() *InternalRecord { - this := InternalRecord{} - return &this -} - -// NewInternalRecordWithDefaults instantiates a new InternalRecord object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewInternalRecordWithDefaults() *InternalRecord { - this := InternalRecord{} - return &this -} - -// GetDid returns the Did field value if set, zero value otherwise. -func (o *InternalRecord) GetDid() string { - if o == nil || IsNil(o.Did) { - var ret string - return ret - } - return *o.Did -} - -// GetDidOk returns a tuple with the Did field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecord) GetDidOk() (*string, bool) { - if o == nil || IsNil(o.Did) { - return nil, false - } - return o.Did, true -} - -// HasDid returns a boolean if a field has been set. -func (o *InternalRecord) HasDid() bool { - if o != nil && !IsNil(o.Did) { - return true - } - - return false -} - -// SetDid gets a reference to the given string and assigns it to the Did field. -func (o *InternalRecord) SetDid(v string) { - o.Did = &v -} - -// GetHashes returns the Hashes field value if set, zero value otherwise. 
-func (o *InternalRecord) GetHashes() map[string]string { - if o == nil || IsNil(o.Hashes) { - var ret map[string]string - return ret - } - return *o.Hashes -} - -// GetHashesOk returns a tuple with the Hashes field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecord) GetHashesOk() (*map[string]string, bool) { - if o == nil || IsNil(o.Hashes) { - return nil, false - } - return o.Hashes, true -} - -// HasHashes returns a boolean if a field has been set. -func (o *InternalRecord) HasHashes() bool { - if o != nil && !IsNil(o.Hashes) { - return true - } - - return false -} - -// SetHashes gets a reference to the given map[string]string and assigns it to the Hashes field. -func (o *InternalRecord) SetHashes(v map[string]string) { - o.Hashes = &v -} - -// GetSize returns the Size field value if set, zero value otherwise. -func (o *InternalRecord) GetSize() int64 { - if o == nil || IsNil(o.Size) { - var ret int64 - return ret - } - return *o.Size -} - -// GetSizeOk returns a tuple with the Size field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecord) GetSizeOk() (*int64, bool) { - if o == nil || IsNil(o.Size) { - return nil, false - } - return o.Size, true -} - -// HasSize returns a boolean if a field has been set. -func (o *InternalRecord) HasSize() bool { - if o != nil && !IsNil(o.Size) { - return true - } - - return false -} - -// SetSize gets a reference to the given int64 and assigns it to the Size field. -func (o *InternalRecord) SetSize(v int64) { - o.Size = &v -} - -// GetUrls returns the Urls field value if set, zero value otherwise. -func (o *InternalRecord) GetUrls() []string { - if o == nil || IsNil(o.Urls) { - var ret []string - return ret - } - return o.Urls -} - -// GetUrlsOk returns a tuple with the Urls field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *InternalRecord) GetUrlsOk() ([]string, bool) { - if o == nil || IsNil(o.Urls) { - return nil, false - } - return o.Urls, true -} - -// HasUrls returns a boolean if a field has been set. -func (o *InternalRecord) HasUrls() bool { - if o != nil && !IsNil(o.Urls) { - return true - } - - return false -} - -// SetUrls gets a reference to the given []string and assigns it to the Urls field. -func (o *InternalRecord) SetUrls(v []string) { - o.Urls = v -} - -// GetAuthz returns the Authz field value if set, zero value otherwise. -func (o *InternalRecord) GetAuthz() []string { - if o == nil || IsNil(o.Authz) { - var ret []string - return ret - } - return o.Authz -} - -// GetAuthzOk returns a tuple with the Authz field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecord) GetAuthzOk() ([]string, bool) { - if o == nil || IsNil(o.Authz) { - return nil, false - } - return o.Authz, true -} - -// HasAuthz returns a boolean if a field has been set. -func (o *InternalRecord) HasAuthz() bool { - if o != nil && !IsNil(o.Authz) { - return true - } - - return false -} - -// SetAuthz gets a reference to the given []string and assigns it to the Authz field. -func (o *InternalRecord) SetAuthz(v []string) { - o.Authz = v -} - -// GetFileName returns the FileName field value if set, zero value otherwise. -func (o *InternalRecord) GetFileName() string { - if o == nil || IsNil(o.FileName) { - var ret string - return ret - } - return *o.FileName -} - -// GetFileNameOk returns a tuple with the FileName field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecord) GetFileNameOk() (*string, bool) { - if o == nil || IsNil(o.FileName) { - return nil, false - } - return o.FileName, true -} - -// HasFileName returns a boolean if a field has been set. 
-func (o *InternalRecord) HasFileName() bool { - if o != nil && !IsNil(o.FileName) { - return true - } - - return false -} - -// SetFileName gets a reference to the given string and assigns it to the FileName field. -func (o *InternalRecord) SetFileName(v string) { - o.FileName = &v -} - -// GetOrganization returns the Organization field value if set, zero value otherwise. -func (o *InternalRecord) GetOrganization() string { - if o == nil || IsNil(o.Organization) { - var ret string - return ret - } - return *o.Organization -} - -// GetOrganizationOk returns a tuple with the Organization field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecord) GetOrganizationOk() (*string, bool) { - if o == nil || IsNil(o.Organization) { - return nil, false - } - return o.Organization, true -} - -// HasOrganization returns a boolean if a field has been set. -func (o *InternalRecord) HasOrganization() bool { - if o != nil && !IsNil(o.Organization) { - return true - } - - return false -} - -// SetOrganization gets a reference to the given string and assigns it to the Organization field. -func (o *InternalRecord) SetOrganization(v string) { - o.Organization = &v -} - -// GetProject returns the Project field value if set, zero value otherwise. -func (o *InternalRecord) GetProject() string { - if o == nil || IsNil(o.Project) { - var ret string - return ret - } - return *o.Project -} - -// GetProjectOk returns a tuple with the Project field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecord) GetProjectOk() (*string, bool) { - if o == nil || IsNil(o.Project) { - return nil, false - } - return o.Project, true -} - -// HasProject returns a boolean if a field has been set. -func (o *InternalRecord) HasProject() bool { - if o != nil && !IsNil(o.Project) { - return true - } - - return false -} - -// SetProject gets a reference to the given string and assigns it to the Project field. 
-func (o *InternalRecord) SetProject(v string) { - o.Project = &v -} - -func (o InternalRecord) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o InternalRecord) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Did) { - toSerialize["did"] = o.Did - } - if !IsNil(o.Hashes) { - toSerialize["hashes"] = o.Hashes - } - if !IsNil(o.Size) { - toSerialize["size"] = o.Size - } - if !IsNil(o.Urls) { - toSerialize["urls"] = o.Urls - } - if !IsNil(o.Authz) { - toSerialize["authz"] = o.Authz - } - if !IsNil(o.FileName) { - toSerialize["file_name"] = o.FileName - } - if !IsNil(o.Organization) { - toSerialize["organization"] = o.Organization - } - if !IsNil(o.Project) { - toSerialize["project"] = o.Project - } - return toSerialize, nil -} - -type NullableInternalRecord struct { - value *InternalRecord - isSet bool -} - -func (v NullableInternalRecord) Get() *InternalRecord { - return v.value -} - -func (v *NullableInternalRecord) Set(val *InternalRecord) { - v.value = val - v.isSet = true -} - -func (v NullableInternalRecord) IsSet() bool { - return v.isSet -} - -func (v *NullableInternalRecord) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInternalRecord(val *InternalRecord) *NullableInternalRecord { - return &NullableInternalRecord{value: val, isSet: true} -} - -func (v NullableInternalRecord) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInternalRecord) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_internal_record_response.go b/apigen/internalapi/model_internal_record_response.go deleted file mode 100644 index cb62021..0000000 --- a/apigen/internalapi/model_internal_record_response.go +++ /dev/null @@ -1,559 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated 
internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" -) - -// checks if the InternalRecordResponse type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &InternalRecordResponse{} - -// InternalRecordResponse struct for InternalRecordResponse -type InternalRecordResponse struct { - Did *string `json:"did,omitempty"` - // Hash map, e.g. {\"sha256\":\"...\"} - Hashes *map[string]string `json:"hashes,omitempty"` - Size *int64 `json:"size,omitempty"` - Urls []string `json:"urls,omitempty"` - Authz []string `json:"authz,omitempty"` - FileName *string `json:"file_name,omitempty"` - Organization *string `json:"organization,omitempty"` - Project *string `json:"project,omitempty"` - Baseid *string `json:"baseid,omitempty"` - Rev *string `json:"rev,omitempty"` - CreatedDate *string `json:"created_date,omitempty"` - UpdatedDate *string `json:"updated_date,omitempty"` - Uploader *string `json:"uploader,omitempty"` -} - -// NewInternalRecordResponse instantiates a new InternalRecordResponse object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewInternalRecordResponse() *InternalRecordResponse { - this := InternalRecordResponse{} - return &this -} - -// NewInternalRecordResponseWithDefaults instantiates a new InternalRecordResponse object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewInternalRecordResponseWithDefaults() *InternalRecordResponse { - this := InternalRecordResponse{} - return &this -} - -// GetDid returns the Did field value 
if set, zero value otherwise. -func (o *InternalRecordResponse) GetDid() string { - if o == nil || IsNil(o.Did) { - var ret string - return ret - } - return *o.Did -} - -// GetDidOk returns a tuple with the Did field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecordResponse) GetDidOk() (*string, bool) { - if o == nil || IsNil(o.Did) { - return nil, false - } - return o.Did, true -} - -// HasDid returns a boolean if a field has been set. -func (o *InternalRecordResponse) HasDid() bool { - if o != nil && !IsNil(o.Did) { - return true - } - - return false -} - -// SetDid gets a reference to the given string and assigns it to the Did field. -func (o *InternalRecordResponse) SetDid(v string) { - o.Did = &v -} - -// GetHashes returns the Hashes field value if set, zero value otherwise. -func (o *InternalRecordResponse) GetHashes() map[string]string { - if o == nil || IsNil(o.Hashes) { - var ret map[string]string - return ret - } - return *o.Hashes -} - -// GetHashesOk returns a tuple with the Hashes field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecordResponse) GetHashesOk() (*map[string]string, bool) { - if o == nil || IsNil(o.Hashes) { - return nil, false - } - return o.Hashes, true -} - -// HasHashes returns a boolean if a field has been set. -func (o *InternalRecordResponse) HasHashes() bool { - if o != nil && !IsNil(o.Hashes) { - return true - } - - return false -} - -// SetHashes gets a reference to the given map[string]string and assigns it to the Hashes field. -func (o *InternalRecordResponse) SetHashes(v map[string]string) { - o.Hashes = &v -} - -// GetSize returns the Size field value if set, zero value otherwise. 
-func (o *InternalRecordResponse) GetSize() int64 { - if o == nil || IsNil(o.Size) { - var ret int64 - return ret - } - return *o.Size -} - -// GetSizeOk returns a tuple with the Size field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecordResponse) GetSizeOk() (*int64, bool) { - if o == nil || IsNil(o.Size) { - return nil, false - } - return o.Size, true -} - -// HasSize returns a boolean if a field has been set. -func (o *InternalRecordResponse) HasSize() bool { - if o != nil && !IsNil(o.Size) { - return true - } - - return false -} - -// SetSize gets a reference to the given int64 and assigns it to the Size field. -func (o *InternalRecordResponse) SetSize(v int64) { - o.Size = &v -} - -// GetUrls returns the Urls field value if set, zero value otherwise. -func (o *InternalRecordResponse) GetUrls() []string { - if o == nil || IsNil(o.Urls) { - var ret []string - return ret - } - return o.Urls -} - -// GetUrlsOk returns a tuple with the Urls field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecordResponse) GetUrlsOk() ([]string, bool) { - if o == nil || IsNil(o.Urls) { - return nil, false - } - return o.Urls, true -} - -// HasUrls returns a boolean if a field has been set. -func (o *InternalRecordResponse) HasUrls() bool { - if o != nil && !IsNil(o.Urls) { - return true - } - - return false -} - -// SetUrls gets a reference to the given []string and assigns it to the Urls field. -func (o *InternalRecordResponse) SetUrls(v []string) { - o.Urls = v -} - -// GetAuthz returns the Authz field value if set, zero value otherwise. -func (o *InternalRecordResponse) GetAuthz() []string { - if o == nil || IsNil(o.Authz) { - var ret []string - return ret - } - return o.Authz -} - -// GetAuthzOk returns a tuple with the Authz field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *InternalRecordResponse) GetAuthzOk() ([]string, bool) { - if o == nil || IsNil(o.Authz) { - return nil, false - } - return o.Authz, true -} - -// HasAuthz returns a boolean if a field has been set. -func (o *InternalRecordResponse) HasAuthz() bool { - if o != nil && !IsNil(o.Authz) { - return true - } - - return false -} - -// SetAuthz gets a reference to the given []string and assigns it to the Authz field. -func (o *InternalRecordResponse) SetAuthz(v []string) { - o.Authz = v -} - -// GetFileName returns the FileName field value if set, zero value otherwise. -func (o *InternalRecordResponse) GetFileName() string { - if o == nil || IsNil(o.FileName) { - var ret string - return ret - } - return *o.FileName -} - -// GetFileNameOk returns a tuple with the FileName field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecordResponse) GetFileNameOk() (*string, bool) { - if o == nil || IsNil(o.FileName) { - return nil, false - } - return o.FileName, true -} - -// HasFileName returns a boolean if a field has been set. -func (o *InternalRecordResponse) HasFileName() bool { - if o != nil && !IsNil(o.FileName) { - return true - } - - return false -} - -// SetFileName gets a reference to the given string and assigns it to the FileName field. -func (o *InternalRecordResponse) SetFileName(v string) { - o.FileName = &v -} - -// GetOrganization returns the Organization field value if set, zero value otherwise. -func (o *InternalRecordResponse) GetOrganization() string { - if o == nil || IsNil(o.Organization) { - var ret string - return ret - } - return *o.Organization -} - -// GetOrganizationOk returns a tuple with the Organization field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *InternalRecordResponse) GetOrganizationOk() (*string, bool) { - if o == nil || IsNil(o.Organization) { - return nil, false - } - return o.Organization, true -} - -// HasOrganization returns a boolean if a field has been set. -func (o *InternalRecordResponse) HasOrganization() bool { - if o != nil && !IsNil(o.Organization) { - return true - } - - return false -} - -// SetOrganization gets a reference to the given string and assigns it to the Organization field. -func (o *InternalRecordResponse) SetOrganization(v string) { - o.Organization = &v -} - -// GetProject returns the Project field value if set, zero value otherwise. -func (o *InternalRecordResponse) GetProject() string { - if o == nil || IsNil(o.Project) { - var ret string - return ret - } - return *o.Project -} - -// GetProjectOk returns a tuple with the Project field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecordResponse) GetProjectOk() (*string, bool) { - if o == nil || IsNil(o.Project) { - return nil, false - } - return o.Project, true -} - -// HasProject returns a boolean if a field has been set. -func (o *InternalRecordResponse) HasProject() bool { - if o != nil && !IsNil(o.Project) { - return true - } - - return false -} - -// SetProject gets a reference to the given string and assigns it to the Project field. -func (o *InternalRecordResponse) SetProject(v string) { - o.Project = &v -} - -// GetBaseid returns the Baseid field value if set, zero value otherwise. -func (o *InternalRecordResponse) GetBaseid() string { - if o == nil || IsNil(o.Baseid) { - var ret string - return ret - } - return *o.Baseid -} - -// GetBaseidOk returns a tuple with the Baseid field value if set, nil otherwise -// and a boolean to check if the value has been set. 
-func (o *InternalRecordResponse) GetBaseidOk() (*string, bool) { - if o == nil || IsNil(o.Baseid) { - return nil, false - } - return o.Baseid, true -} - -// HasBaseid returns a boolean if a field has been set. -func (o *InternalRecordResponse) HasBaseid() bool { - if o != nil && !IsNil(o.Baseid) { - return true - } - - return false -} - -// SetBaseid gets a reference to the given string and assigns it to the Baseid field. -func (o *InternalRecordResponse) SetBaseid(v string) { - o.Baseid = &v -} - -// GetRev returns the Rev field value if set, zero value otherwise. -func (o *InternalRecordResponse) GetRev() string { - if o == nil || IsNil(o.Rev) { - var ret string - return ret - } - return *o.Rev -} - -// GetRevOk returns a tuple with the Rev field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecordResponse) GetRevOk() (*string, bool) { - if o == nil || IsNil(o.Rev) { - return nil, false - } - return o.Rev, true -} - -// HasRev returns a boolean if a field has been set. -func (o *InternalRecordResponse) HasRev() bool { - if o != nil && !IsNil(o.Rev) { - return true - } - - return false -} - -// SetRev gets a reference to the given string and assigns it to the Rev field. -func (o *InternalRecordResponse) SetRev(v string) { - o.Rev = &v -} - -// GetCreatedDate returns the CreatedDate field value if set, zero value otherwise. -func (o *InternalRecordResponse) GetCreatedDate() string { - if o == nil || IsNil(o.CreatedDate) { - var ret string - return ret - } - return *o.CreatedDate -} - -// GetCreatedDateOk returns a tuple with the CreatedDate field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecordResponse) GetCreatedDateOk() (*string, bool) { - if o == nil || IsNil(o.CreatedDate) { - return nil, false - } - return o.CreatedDate, true -} - -// HasCreatedDate returns a boolean if a field has been set. 
-func (o *InternalRecordResponse) HasCreatedDate() bool { - if o != nil && !IsNil(o.CreatedDate) { - return true - } - - return false -} - -// SetCreatedDate gets a reference to the given string and assigns it to the CreatedDate field. -func (o *InternalRecordResponse) SetCreatedDate(v string) { - o.CreatedDate = &v -} - -// GetUpdatedDate returns the UpdatedDate field value if set, zero value otherwise. -func (o *InternalRecordResponse) GetUpdatedDate() string { - if o == nil || IsNil(o.UpdatedDate) { - var ret string - return ret - } - return *o.UpdatedDate -} - -// GetUpdatedDateOk returns a tuple with the UpdatedDate field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecordResponse) GetUpdatedDateOk() (*string, bool) { - if o == nil || IsNil(o.UpdatedDate) { - return nil, false - } - return o.UpdatedDate, true -} - -// HasUpdatedDate returns a boolean if a field has been set. -func (o *InternalRecordResponse) HasUpdatedDate() bool { - if o != nil && !IsNil(o.UpdatedDate) { - return true - } - - return false -} - -// SetUpdatedDate gets a reference to the given string and assigns it to the UpdatedDate field. -func (o *InternalRecordResponse) SetUpdatedDate(v string) { - o.UpdatedDate = &v -} - -// GetUploader returns the Uploader field value if set, zero value otherwise. -func (o *InternalRecordResponse) GetUploader() string { - if o == nil || IsNil(o.Uploader) { - var ret string - return ret - } - return *o.Uploader -} - -// GetUploaderOk returns a tuple with the Uploader field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalRecordResponse) GetUploaderOk() (*string, bool) { - if o == nil || IsNil(o.Uploader) { - return nil, false - } - return o.Uploader, true -} - -// HasUploader returns a boolean if a field has been set. 
-func (o *InternalRecordResponse) HasUploader() bool { - if o != nil && !IsNil(o.Uploader) { - return true - } - - return false -} - -// SetUploader gets a reference to the given string and assigns it to the Uploader field. -func (o *InternalRecordResponse) SetUploader(v string) { - o.Uploader = &v -} - -func (o InternalRecordResponse) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o InternalRecordResponse) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Did) { - toSerialize["did"] = o.Did - } - if !IsNil(o.Hashes) { - toSerialize["hashes"] = o.Hashes - } - if !IsNil(o.Size) { - toSerialize["size"] = o.Size - } - if !IsNil(o.Urls) { - toSerialize["urls"] = o.Urls - } - if !IsNil(o.Authz) { - toSerialize["authz"] = o.Authz - } - if !IsNil(o.FileName) { - toSerialize["file_name"] = o.FileName - } - if !IsNil(o.Organization) { - toSerialize["organization"] = o.Organization - } - if !IsNil(o.Project) { - toSerialize["project"] = o.Project - } - if !IsNil(o.Baseid) { - toSerialize["baseid"] = o.Baseid - } - if !IsNil(o.Rev) { - toSerialize["rev"] = o.Rev - } - if !IsNil(o.CreatedDate) { - toSerialize["created_date"] = o.CreatedDate - } - if !IsNil(o.UpdatedDate) { - toSerialize["updated_date"] = o.UpdatedDate - } - if !IsNil(o.Uploader) { - toSerialize["uploader"] = o.Uploader - } - return toSerialize, nil -} - -type NullableInternalRecordResponse struct { - value *InternalRecordResponse - isSet bool -} - -func (v NullableInternalRecordResponse) Get() *InternalRecordResponse { - return v.value -} - -func (v *NullableInternalRecordResponse) Set(val *InternalRecordResponse) { - v.value = val - v.isSet = true -} - -func (v NullableInternalRecordResponse) IsSet() bool { - return v.isSet -} - -func (v *NullableInternalRecordResponse) Unset() { - v.value = nil - v.isSet = false -} - -func 
NewNullableInternalRecordResponse(val *InternalRecordResponse) *NullableInternalRecordResponse { - return &NullableInternalRecordResponse{value: val, isSet: true} -} - -func (v NullableInternalRecordResponse) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInternalRecordResponse) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_internal_signed_url.go b/apigen/internalapi/model_internal_signed_url.go deleted file mode 100644 index 74cd66f..0000000 --- a/apigen/internalapi/model_internal_signed_url.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" -) - -// checks if the InternalSignedURL type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &InternalSignedURL{} - -// InternalSignedURL struct for InternalSignedURL -type InternalSignedURL struct { - Url *string `json:"url,omitempty"` -} - -// NewInternalSignedURL instantiates a new InternalSignedURL object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewInternalSignedURL() *InternalSignedURL { - this := InternalSignedURL{} - return &this -} - -// NewInternalSignedURLWithDefaults instantiates a new InternalSignedURL object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewInternalSignedURLWithDefaults() *InternalSignedURL { - this := InternalSignedURL{} - return 
&this -} - -// GetUrl returns the Url field value if set, zero value otherwise. -func (o *InternalSignedURL) GetUrl() string { - if o == nil || IsNil(o.Url) { - var ret string - return ret - } - return *o.Url -} - -// GetUrlOk returns a tuple with the Url field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalSignedURL) GetUrlOk() (*string, bool) { - if o == nil || IsNil(o.Url) { - return nil, false - } - return o.Url, true -} - -// HasUrl returns a boolean if a field has been set. -func (o *InternalSignedURL) HasUrl() bool { - if o != nil && !IsNil(o.Url) { - return true - } - - return false -} - -// SetUrl gets a reference to the given string and assigns it to the Url field. -func (o *InternalSignedURL) SetUrl(v string) { - o.Url = &v -} - -func (o InternalSignedURL) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o InternalSignedURL) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Url) { - toSerialize["url"] = o.Url - } - return toSerialize, nil -} - -type NullableInternalSignedURL struct { - value *InternalSignedURL - isSet bool -} - -func (v NullableInternalSignedURL) Get() *InternalSignedURL { - return v.value -} - -func (v *NullableInternalSignedURL) Set(val *InternalSignedURL) { - v.value = val - v.isSet = true -} - -func (v NullableInternalSignedURL) IsSet() bool { - return v.isSet -} - -func (v *NullableInternalSignedURL) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInternalSignedURL(val *InternalSignedURL) *NullableInternalSignedURL { - return &NullableInternalSignedURL{value: val, isSet: true} -} - -func (v NullableInternalSignedURL) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInternalSignedURL) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} 
- - diff --git a/apigen/internalapi/model_internal_upload_blank_request.go b/apigen/internalapi/model_internal_upload_blank_request.go deleted file mode 100644 index 7de0aa0..0000000 --- a/apigen/internalapi/model_internal_upload_blank_request.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. - -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" -) - -// checks if the InternalUploadBlankRequest type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &InternalUploadBlankRequest{} - -// InternalUploadBlankRequest struct for InternalUploadBlankRequest -type InternalUploadBlankRequest struct { - Guid *string `json:"guid,omitempty"` - Authz []string `json:"authz,omitempty"` -} - -// NewInternalUploadBlankRequest instantiates a new InternalUploadBlankRequest object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewInternalUploadBlankRequest() *InternalUploadBlankRequest { - this := InternalUploadBlankRequest{} - return &this -} - -// NewInternalUploadBlankRequestWithDefaults instantiates a new InternalUploadBlankRequest object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewInternalUploadBlankRequestWithDefaults() *InternalUploadBlankRequest { - this := InternalUploadBlankRequest{} - return &this -} - -// GetGuid returns the Guid field value if set, zero value otherwise. 
-func (o *InternalUploadBlankRequest) GetGuid() string { - if o == nil || IsNil(o.Guid) { - var ret string - return ret - } - return *o.Guid -} - -// GetGuidOk returns a tuple with the Guid field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalUploadBlankRequest) GetGuidOk() (*string, bool) { - if o == nil || IsNil(o.Guid) { - return nil, false - } - return o.Guid, true -} - -// HasGuid returns a boolean if a field has been set. -func (o *InternalUploadBlankRequest) HasGuid() bool { - if o != nil && !IsNil(o.Guid) { - return true - } - - return false -} - -// SetGuid gets a reference to the given string and assigns it to the Guid field. -func (o *InternalUploadBlankRequest) SetGuid(v string) { - o.Guid = &v -} - -// GetAuthz returns the Authz field value if set, zero value otherwise. -func (o *InternalUploadBlankRequest) GetAuthz() []string { - if o == nil || IsNil(o.Authz) { - var ret []string - return ret - } - return o.Authz -} - -// GetAuthzOk returns a tuple with the Authz field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalUploadBlankRequest) GetAuthzOk() ([]string, bool) { - if o == nil || IsNil(o.Authz) { - return nil, false - } - return o.Authz, true -} - -// HasAuthz returns a boolean if a field has been set. -func (o *InternalUploadBlankRequest) HasAuthz() bool { - if o != nil && !IsNil(o.Authz) { - return true - } - - return false -} - -// SetAuthz gets a reference to the given []string and assigns it to the Authz field. 
-func (o *InternalUploadBlankRequest) SetAuthz(v []string) { - o.Authz = v -} - -func (o InternalUploadBlankRequest) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o InternalUploadBlankRequest) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Guid) { - toSerialize["guid"] = o.Guid - } - if !IsNil(o.Authz) { - toSerialize["authz"] = o.Authz - } - return toSerialize, nil -} - -type NullableInternalUploadBlankRequest struct { - value *InternalUploadBlankRequest - isSet bool -} - -func (v NullableInternalUploadBlankRequest) Get() *InternalUploadBlankRequest { - return v.value -} - -func (v *NullableInternalUploadBlankRequest) Set(val *InternalUploadBlankRequest) { - v.value = val - v.isSet = true -} - -func (v NullableInternalUploadBlankRequest) IsSet() bool { - return v.isSet -} - -func (v *NullableInternalUploadBlankRequest) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInternalUploadBlankRequest(val *InternalUploadBlankRequest) *NullableInternalUploadBlankRequest { - return &NullableInternalUploadBlankRequest{value: val, isSet: true} -} - -func (v NullableInternalUploadBlankRequest) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInternalUploadBlankRequest) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_internal_upload_blank_response.go b/apigen/internalapi/model_internal_upload_blank_response.go deleted file mode 100644 index fe414fd..0000000 --- a/apigen/internalapi/model_internal_upload_blank_response.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. 
- -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" -) - -// checks if the InternalUploadBlankResponse type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &InternalUploadBlankResponse{} - -// InternalUploadBlankResponse struct for InternalUploadBlankResponse -type InternalUploadBlankResponse struct { - Guid *string `json:"guid,omitempty"` - Url *string `json:"url,omitempty"` -} - -// NewInternalUploadBlankResponse instantiates a new InternalUploadBlankResponse object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewInternalUploadBlankResponse() *InternalUploadBlankResponse { - this := InternalUploadBlankResponse{} - return &this -} - -// NewInternalUploadBlankResponseWithDefaults instantiates a new InternalUploadBlankResponse object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewInternalUploadBlankResponseWithDefaults() *InternalUploadBlankResponse { - this := InternalUploadBlankResponse{} - return &this -} - -// GetGuid returns the Guid field value if set, zero value otherwise. -func (o *InternalUploadBlankResponse) GetGuid() string { - if o == nil || IsNil(o.Guid) { - var ret string - return ret - } - return *o.Guid -} - -// GetGuidOk returns a tuple with the Guid field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalUploadBlankResponse) GetGuidOk() (*string, bool) { - if o == nil || IsNil(o.Guid) { - return nil, false - } - return o.Guid, true -} - -// HasGuid returns a boolean if a field has been set. 
-func (o *InternalUploadBlankResponse) HasGuid() bool { - if o != nil && !IsNil(o.Guid) { - return true - } - - return false -} - -// SetGuid gets a reference to the given string and assigns it to the Guid field. -func (o *InternalUploadBlankResponse) SetGuid(v string) { - o.Guid = &v -} - -// GetUrl returns the Url field value if set, zero value otherwise. -func (o *InternalUploadBlankResponse) GetUrl() string { - if o == nil || IsNil(o.Url) { - var ret string - return ret - } - return *o.Url -} - -// GetUrlOk returns a tuple with the Url field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *InternalUploadBlankResponse) GetUrlOk() (*string, bool) { - if o == nil || IsNil(o.Url) { - return nil, false - } - return o.Url, true -} - -// HasUrl returns a boolean if a field has been set. -func (o *InternalUploadBlankResponse) HasUrl() bool { - if o != nil && !IsNil(o.Url) { - return true - } - - return false -} - -// SetUrl gets a reference to the given string and assigns it to the Url field. 
-func (o *InternalUploadBlankResponse) SetUrl(v string) { - o.Url = &v -} - -func (o InternalUploadBlankResponse) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o InternalUploadBlankResponse) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Guid) { - toSerialize["guid"] = o.Guid - } - if !IsNil(o.Url) { - toSerialize["url"] = o.Url - } - return toSerialize, nil -} - -type NullableInternalUploadBlankResponse struct { - value *InternalUploadBlankResponse - isSet bool -} - -func (v NullableInternalUploadBlankResponse) Get() *InternalUploadBlankResponse { - return v.value -} - -func (v *NullableInternalUploadBlankResponse) Set(val *InternalUploadBlankResponse) { - v.value = val - v.isSet = true -} - -func (v NullableInternalUploadBlankResponse) IsSet() bool { - return v.isSet -} - -func (v *NullableInternalUploadBlankResponse) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInternalUploadBlankResponse(val *InternalUploadBlankResponse) *NullableInternalUploadBlankResponse { - return &NullableInternalUploadBlankResponse{value: val, isSet: true} -} - -func (v NullableInternalUploadBlankResponse) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInternalUploadBlankResponse) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/model_list_records_response.go b/apigen/internalapi/model_list_records_response.go deleted file mode 100644 index 21f51ce..0000000 --- a/apigen/internalapi/model_list_records_response.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. 
- -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "encoding/json" -) - -// checks if the ListRecordsResponse type satisfies the MappedNullable interface at compile time -var _ MappedNullable = &ListRecordsResponse{} - -// ListRecordsResponse struct for ListRecordsResponse -type ListRecordsResponse struct { - Records []InternalRecordResponse `json:"records,omitempty"` -} - -// NewListRecordsResponse instantiates a new ListRecordsResponse object -// This constructor will assign default values to properties that have it defined, -// and makes sure properties required by API are set, but the set of arguments -// will change when the set of required properties is changed -func NewListRecordsResponse() *ListRecordsResponse { - this := ListRecordsResponse{} - return &this -} - -// NewListRecordsResponseWithDefaults instantiates a new ListRecordsResponse object -// This constructor will only assign default values to properties that have it defined, -// but it doesn't guarantee that properties required by API are set -func NewListRecordsResponseWithDefaults() *ListRecordsResponse { - this := ListRecordsResponse{} - return &this -} - -// GetRecords returns the Records field value if set, zero value otherwise. -func (o *ListRecordsResponse) GetRecords() []InternalRecordResponse { - if o == nil || IsNil(o.Records) { - var ret []InternalRecordResponse - return ret - } - return o.Records -} - -// GetRecordsOk returns a tuple with the Records field value if set, nil otherwise -// and a boolean to check if the value has been set. -func (o *ListRecordsResponse) GetRecordsOk() ([]InternalRecordResponse, bool) { - if o == nil || IsNil(o.Records) { - return nil, false - } - return o.Records, true -} - -// HasRecords returns a boolean if a field has been set. 
-func (o *ListRecordsResponse) HasRecords() bool { - if o != nil && !IsNil(o.Records) { - return true - } - - return false -} - -// SetRecords gets a reference to the given []InternalRecordResponse and assigns it to the Records field. -func (o *ListRecordsResponse) SetRecords(v []InternalRecordResponse) { - o.Records = v -} - -func (o ListRecordsResponse) MarshalJSON() ([]byte, error) { - toSerialize,err := o.ToMap() - if err != nil { - return []byte{}, err - } - return json.Marshal(toSerialize) -} - -func (o ListRecordsResponse) ToMap() (map[string]interface{}, error) { - toSerialize := map[string]interface{}{} - if !IsNil(o.Records) { - toSerialize["records"] = o.Records - } - return toSerialize, nil -} - -type NullableListRecordsResponse struct { - value *ListRecordsResponse - isSet bool -} - -func (v NullableListRecordsResponse) Get() *ListRecordsResponse { - return v.value -} - -func (v *NullableListRecordsResponse) Set(val *ListRecordsResponse) { - v.value = val - v.isSet = true -} - -func (v NullableListRecordsResponse) IsSet() bool { - return v.isSet -} - -func (v *NullableListRecordsResponse) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableListRecordsResponse(val *ListRecordsResponse) *NullableListRecordsResponse { - return &NullableListRecordsResponse{value: val, isSet: true} -} - -func (v NullableListRecordsResponse) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableListRecordsResponse) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - - diff --git a/apigen/internalapi/utils.go b/apigen/internalapi/utils.go deleted file mode 100644 index d0fb557..0000000 --- a/apigen/internalapi/utils.go +++ /dev/null @@ -1,361 +0,0 @@ -/* -Internal Compatibility API (DRS Server) - -Consolidated internal API delta for drs-server. This spec captures non-GA4GH DRS internal/compatibility routes. 
- -API version: 1.0.0 -*/ - -// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT. - -package internalapi - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "time" -) - -// PtrBool is a helper routine that returns a pointer to given boolean value. -func PtrBool(v bool) *bool { return &v } - -// PtrInt is a helper routine that returns a pointer to given integer value. -func PtrInt(v int) *int { return &v } - -// PtrInt32 is a helper routine that returns a pointer to given integer value. -func PtrInt32(v int32) *int32 { return &v } - -// PtrInt64 is a helper routine that returns a pointer to given integer value. -func PtrInt64(v int64) *int64 { return &v } - -// PtrFloat32 is a helper routine that returns a pointer to given float value. -func PtrFloat32(v float32) *float32 { return &v } - -// PtrFloat64 is a helper routine that returns a pointer to given float value. -func PtrFloat64(v float64) *float64 { return &v } - -// PtrString is a helper routine that returns a pointer to given string value. -func PtrString(v string) *string { return &v } - -// PtrTime is helper routine that returns a pointer to given Time value. 
-func PtrTime(v time.Time) *time.Time { return &v } - -type NullableBool struct { - value *bool - isSet bool -} - -func (v NullableBool) Get() *bool { - return v.value -} - -func (v *NullableBool) Set(val *bool) { - v.value = val - v.isSet = true -} - -func (v NullableBool) IsSet() bool { - return v.isSet -} - -func (v *NullableBool) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableBool(val *bool) *NullableBool { - return &NullableBool{value: val, isSet: true} -} - -func (v NullableBool) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableBool) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableInt struct { - value *int - isSet bool -} - -func (v NullableInt) Get() *int { - return v.value -} - -func (v *NullableInt) Set(val *int) { - v.value = val - v.isSet = true -} - -func (v NullableInt) IsSet() bool { - return v.isSet -} - -func (v *NullableInt) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInt(val *int) *NullableInt { - return &NullableInt{value: val, isSet: true} -} - -func (v NullableInt) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInt) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableInt32 struct { - value *int32 - isSet bool -} - -func (v NullableInt32) Get() *int32 { - return v.value -} - -func (v *NullableInt32) Set(val *int32) { - v.value = val - v.isSet = true -} - -func (v NullableInt32) IsSet() bool { - return v.isSet -} - -func (v *NullableInt32) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInt32(val *int32) *NullableInt32 { - return &NullableInt32{value: val, isSet: true} -} - -func (v NullableInt32) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInt32) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type 
NullableInt64 struct { - value *int64 - isSet bool -} - -func (v NullableInt64) Get() *int64 { - return v.value -} - -func (v *NullableInt64) Set(val *int64) { - v.value = val - v.isSet = true -} - -func (v NullableInt64) IsSet() bool { - return v.isSet -} - -func (v *NullableInt64) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableInt64(val *int64) *NullableInt64 { - return &NullableInt64{value: val, isSet: true} -} - -func (v NullableInt64) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableInt64) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableFloat32 struct { - value *float32 - isSet bool -} - -func (v NullableFloat32) Get() *float32 { - return v.value -} - -func (v *NullableFloat32) Set(val *float32) { - v.value = val - v.isSet = true -} - -func (v NullableFloat32) IsSet() bool { - return v.isSet -} - -func (v *NullableFloat32) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableFloat32(val *float32) *NullableFloat32 { - return &NullableFloat32{value: val, isSet: true} -} - -func (v NullableFloat32) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableFloat32) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableFloat64 struct { - value *float64 - isSet bool -} - -func (v NullableFloat64) Get() *float64 { - return v.value -} - -func (v *NullableFloat64) Set(val *float64) { - v.value = val - v.isSet = true -} - -func (v NullableFloat64) IsSet() bool { - return v.isSet -} - -func (v *NullableFloat64) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableFloat64(val *float64) *NullableFloat64 { - return &NullableFloat64{value: val, isSet: true} -} - -func (v NullableFloat64) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableFloat64) UnmarshalJSON(src []byte) error { - v.isSet = true - return 
json.Unmarshal(src, &v.value) -} - -type NullableString struct { - value *string - isSet bool -} - -func (v NullableString) Get() *string { - return v.value -} - -func (v *NullableString) Set(val *string) { - v.value = val - v.isSet = true -} - -func (v NullableString) IsSet() bool { - return v.isSet -} - -func (v *NullableString) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableString(val *string) *NullableString { - return &NullableString{value: val, isSet: true} -} - -func (v NullableString) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableString) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -type NullableTime struct { - value *time.Time - isSet bool -} - -func (v NullableTime) Get() *time.Time { - return v.value -} - -func (v *NullableTime) Set(val *time.Time) { - v.value = val - v.isSet = true -} - -func (v NullableTime) IsSet() bool { - return v.isSet -} - -func (v *NullableTime) Unset() { - v.value = nil - v.isSet = false -} - -func NewNullableTime(val *time.Time) *NullableTime { - return &NullableTime{value: val, isSet: true} -} - -func (v NullableTime) MarshalJSON() ([]byte, error) { - return json.Marshal(v.value) -} - -func (v *NullableTime) UnmarshalJSON(src []byte) error { - v.isSet = true - return json.Unmarshal(src, &v.value) -} - -// IsNil checks if an input is nil -func IsNil(i interface{}) bool { - if i == nil { - return true - } - switch reflect.TypeOf(i).Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: - return reflect.ValueOf(i).IsNil() - case reflect.Array: - return reflect.ValueOf(i).IsZero() - } - return false -} - -type MappedNullable interface { - ToMap() (map[string]interface{}, error) -} - -// A wrapper for strict JSON decoding -func newStrictDecoder(data []byte) *json.Decoder { - dec := json.NewDecoder(bytes.NewBuffer(data)) - dec.DisallowUnknownFields() - 
return dec -} - -// Prevent trying to import "fmt" -func reportError(format string, a ...interface{}) error { - return fmt.Errorf(format, a...) -} \ No newline at end of file diff --git a/apigen/specs/drs-extensions-overlay.yaml b/apigen/specs/drs-extensions-overlay.yaml deleted file mode 100644 index d6426a2..0000000 --- a/apigen/specs/drs-extensions-overlay.yaml +++ /dev/null @@ -1,25 +0,0 @@ -components: - schemas: - AccessMethod: - properties: - access_url: - type: object - required: [url] - properties: - url: { type: string, description: "A fully resolvable URL that can be used to fetch the actual object bytes." } - headers: { type: array, items: { type: string }, description: "GA4GH-compatible list of HTTP headers." } - authorizations: - type: object - properties: - drs_object_id: { type: string } - supported_types: { type: array, items: { type: string } } - passport_auth_issuers: { type: array, items: { type: string } } - bearer_auth_issuers: { type: array, items: { type: string } } - UploadMethod: - properties: - access_url: - type: object - required: [url] - properties: - url: { type: string, description: "Inlined Upload URL context." } - headers: { type: array, items: { type: string }, description: "Inlined Upload Headers." 
} diff --git a/download/file_info.go b/download/file_info.go index 54cf92b..1b15afe 100644 --- a/download/file_info.go +++ b/download/file_info.go @@ -25,9 +25,7 @@ func GetFileInfo( } name := "" - if drsObj.Name != nil { - name = *drsObj.Name - } + name = drsObj.Name if name == "" { // If name is empty (some DRS servers might not return it?), use GUID name = guid diff --git a/download/transfer_test.go b/download/transfer_test.go index 40132cf..a1dd9ec 100644 --- a/download/transfer_test.go +++ b/download/transfer_test.go @@ -40,10 +40,10 @@ func (f *fakeBackend) fileDetails(guid string) *drs.DRSObject { name := "payload.bin" accessID := "s3" return &drs.DRSObject{ - Name: &name, + Name: name, Size: size, AccessMethods: []drs.AccessMethod{ - {AccessId: &accessID, Type: "s3"}, + {AccessId: accessID, Type: "s3"}, }, } } diff --git a/drs/access_method_conversions.go b/drs/access_method_conversions.go new file mode 100644 index 0000000..22cb0bb --- /dev/null +++ b/drs/access_method_conversions.go @@ -0,0 +1,53 @@ +package drs + +import ( + "fmt" + "net/url" +) + +func DRSAccessMethodsFromInternalURLs(urls []string, authz []string) ([]AccessMethod, error) { + accessMethods := make([]AccessMethod, 0, len(urls)) + for _, urlString := range urls { + method := AccessMethod{ + AccessUrl: AccessURL{Url: urlString}, + } + + parsed, err := url.Parse(urlString) + if err != nil { + return nil, fmt.Errorf("failed to parse url %q: %v", urlString, err) + } + if parsed.Scheme == "" { + method.Type = "https" + } else { + method.Type = parsed.Scheme + } + + if len(authz) > 0 { + method.Authorizations = Authorizations{BearerAuthIssuers: []string{authz[0]}} + } + accessMethods = append(accessMethods, method) + } + return accessMethods, nil +} + +// InternalAuthzFromDrsAccessMethods extracts authz values from DRS access methods. 
+func InternalAuthzFromDrsAccessMethods(accessMethods []AccessMethod) []string { + authz := make([]string, 0, len(accessMethods)) + for _, drsURL := range accessMethods { + if len(drsURL.Authorizations.BearerAuthIssuers) > 0 { + authz = append(authz, drsURL.Authorizations.BearerAuthIssuers[0]) + } + } + return authz +} + +func InternalURLFromDrsAccessURLs(accessMethods []AccessMethod) []string { + urls := make([]string, 0, len(accessMethods)) + for _, drsURL := range accessMethods { + if drsURL.AccessUrl.Url == "" { + continue + } + urls = append(urls, drsURL.AccessUrl.Url) + } + return urls +} diff --git a/drs/client.go b/drs/client.go index df96d07..5a4fe2b 100644 --- a/drs/client.go +++ b/drs/client.go @@ -1,23 +1,21 @@ package drs import ( - "bytes" "context" - "encoding/json" "fmt" - "io" "log/slog" - "net/http" "strings" "github.com/calypr/data-client/conf" "github.com/calypr/data-client/hash" "github.com/calypr/data-client/request" + syclient "github.com/calypr/syfon/client" ) type DrsClient struct { request.RequestInterface provider endpointProvider + syfon *syclient.Client logger *slog.Logger projectId string organization string @@ -43,11 +41,13 @@ type localProvider struct { func (p localProvider) APIEndpoint() string { return p.endpoint } func (p localProvider) AccessToken() string { return "" } -// NewDrsClient creates a new DrsClient +// NewDrsClient creates a new DrsClient. func NewDrsClient(req request.RequestInterface, cred *conf.Credential, logger *slog.Logger) Client { + provider := gen3Provider{cred: cred} return &DrsClient{ RequestInterface: req, - provider: gen3Provider{cred: cred}, + provider: provider, + syfon: buildSyfonClient(req, provider.APIEndpoint(), provider.AccessToken()), logger: logger, } } @@ -55,15 +55,25 @@ func NewDrsClient(req request.RequestInterface, cred *conf.Credential, logger *s // NewLocalDrsClient creates a DRS client for local/non-Gen3 mode. // It intentionally carries no bearer token. 
func NewLocalDrsClient(req request.RequestInterface, endpoint string, logger *slog.Logger) Client { + provider := localProvider{endpoint: endpoint} return &DrsClient{ RequestInterface: req, - provider: localProvider{endpoint: endpoint}, + provider: provider, + syfon: buildSyfonClient(req, provider.APIEndpoint(), ""), logger: logger, } } -func (c *DrsClient) apiEndpoint() string { return c.provider.APIEndpoint() } -func (c *DrsClient) token() string { return c.provider.AccessToken() } +func buildSyfonClient(req request.RequestInterface, endpoint, token string) *syclient.Client { + opts := make([]syclient.Option, 0, 2) + if strings.TrimSpace(token) != "" { + opts = append(opts, syclient.WithBearerToken(token)) + } + if r, ok := req.(*request.Request); ok && r.RetryClient != nil { + opts = append(opts, syclient.WithHTTPClient(r.RetryClient.StandardClient())) + } + return syclient.New(endpoint, opts...) +} func (c *DrsClient) GetProjectId() string { return c.projectId @@ -93,60 +103,27 @@ func (c *DrsClient) WithBucket(bucketName string) Client { } func (c *DrsClient) GetObject(ctx context.Context, id string) (*DRSObject, error) { - url := fmt.Sprintf("%s/ga4gh/drs/v1/objects/%s", c.apiEndpoint(), id) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodGet, - Url: url, - Token: c.token(), - }) + obj, err := c.syfon.DRS().GetObject(ctx, id) if err != nil { return nil, err } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound { - return nil, fmt.Errorf("object %s not found", id) - } - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to get object %s: %s (status: %d)", id, string(body), resp.StatusCode) - } - - var out OutputObject - if err := json.NewDecoder(resp.Body).Decode(&out); err != nil { - return nil, err - } - return ConvertOutputObjectToDRSObject(&out), nil + return &obj, nil } func (c *DrsClient) GetObjectByHash(ctx context.Context, checksum *hash.Checksum) 
([]DRSObject, error) { - url := fmt.Sprintf("%s/index?hash=%s:%s", c.apiEndpoint(), string(checksum.Type), checksum.Checksum) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodGet, - Url: url, - Headers: map[string]string{ - "Accept": "application/json", - }, - Token: c.token(), + if checksum == nil { + return nil, fmt.Errorf("checksum is required") + } + resp, err := c.syfon.Index().List(ctx, syclient.ListRecordsOptions{ + Hash: fmt.Sprintf("%s:%s", string(checksum.Type), checksum.Checksum), }) if err != nil { return nil, err } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to query by hash %s:%s: %s (status: %d)", checksum.Type, checksum.Checksum, string(body), resp.StatusCode) - } - var records ListRecords - if err := json.NewDecoder(resp.Body).Decode(&records); err != nil { - return nil, err - } - - out := make([]DRSObject, 0, len(records.Records)) - for _, r := range records.Records { - drsObj, err := r.ToDrsObject() + out := make([]DRSObject, 0, len(resp.Records)) + for _, rec := range resp.Records { + drsObj, err := syfonInternalRecordToDRSObject(rec) if err != nil { return nil, err } @@ -156,146 +133,58 @@ func (c *DrsClient) GetObjectByHash(ctx context.Context, checksum *hash.Checksum } func (c *DrsClient) GetDownloadURL(ctx context.Context, id string, accessType string) (*AccessURL, error) { - url := fmt.Sprintf("%s/ga4gh/drs/v1/objects/%s/access/%s", c.apiEndpoint(), id, accessType) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodGet, - Url: url, - Token: c.token(), - }) + access, err := c.syfon.DRS().GetAccessURL(ctx, id, accessType) if err != nil { return nil, err } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to get download URL for %s: %s (status: %d)", id, string(body), resp.StatusCode) - } - - var accessURL AccessURL - if err := 
json.NewDecoder(resp.Body).Decode(&accessURL); err != nil { - return nil, err - } - return &accessURL, nil + return &AccessURL{Url: access.Url}, nil } func (c *DrsClient) ListObjectsByProject(ctx context.Context, projectId string) (chan DRSObjectResult, error) { - const PAGESIZE = 50 - - resourcePath, err := ProjectToResource("", projectId) + resourcePath, err := ProjectToResource(c.organization, projectId) if err != nil { return nil, err } - out := make(chan DRSObjectResult, PAGESIZE) + resp, err := c.syfon.Index().List(ctx, syclient.ListRecordsOptions{Authz: resourcePath}) + if err != nil { + return nil, err + } + out := make(chan DRSObjectResult, len(resp.Records)) go func() { defer close(out) - pageNum := 0 - active := true - - for active { - url := fmt.Sprintf("%s/index?authz=%s&limit=%d&page=%d", - c.apiEndpoint(), resourcePath, PAGESIZE, pageNum) - - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodGet, - Url: url, - Headers: map[string]string{ - "Accept": "application/json", - }, - Token: c.token(), - }) - - if err != nil { - out <- DRSObjectResult{Error: err} - break - } - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - resp.Body.Close() - out <- DRSObjectResult{Error: fmt.Errorf("api error %d: %s", resp.StatusCode, string(body))} - break - } - - var page ListRecords - err = json.NewDecoder(resp.Body).Decode(&page) - resp.Body.Close() - + for _, elem := range resp.Records { + drsObj, err := syfonInternalRecordToDRSObject(elem) if err != nil { out <- DRSObjectResult{Error: err} - break - } - - if len(page.Records) == 0 { - active = false - break - } - - for _, elem := range page.Records { - drsObj, err := elem.ToDrsObject() - if err != nil { - out <- DRSObjectResult{Error: err} - continue - } - out <- DRSObjectResult{Object: drsObj} + continue } - pageNum++ + out <- DRSObjectResult{Object: drsObj} } }() - return out, nil } func (c *DrsClient) ListObjects(ctx context.Context) (chan DRSObjectResult, error) { - 
url := fmt.Sprintf("%s/ga4gh/drs/v1/objects", c.apiEndpoint()) - const PAGESIZE = 50 - out := make(chan DRSObjectResult, 10) + const pageSize = 50 + out := make(chan DRSObjectResult, pageSize) go func() { defer close(out) - pageNum := 0 - active := true - for active { - fullURL := fmt.Sprintf("%s?limit=%d&page=%d", url, PAGESIZE, pageNum) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodGet, - Url: fullURL, - Token: c.token(), - }) - + for page := 0; ; page++ { + resp, err := c.syfon.DRS().ListObjects(ctx, pageSize, page) if err != nil { out <- DRSObjectResult{Error: err} return } - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - resp.Body.Close() - out <- DRSObjectResult{Error: fmt.Errorf("api error %d: %s", resp.StatusCode, string(body))} - return - } - - var page DRSPage - err = json.NewDecoder(resp.Body).Decode(&page) - resp.Body.Close() - - if err != nil { - out <- DRSObjectResult{Error: err} + if len(resp.DrsObjects) == 0 { return } - - if len(page.DRSObjects) == 0 { - active = false - break + for _, elem := range resp.DrsObjects { + obj := elem + out <- DRSObjectResult{Object: &obj} } - - for _, elem := range page.DRSObjects { - elemCopy := elem - out <- DRSObjectResult{Object: &elemCopy} - } - pageNum++ } }() return out, nil @@ -330,47 +219,15 @@ func (c *DrsClient) GetProjectSample(ctx context.Context, projectId string, limi } func (c *DrsClient) RegisterRecord(ctx context.Context, record *DRSObject) (*DRSObject, error) { - indexdRecord, err := InternalRecordFromDrsObject(record) - if err != nil { - return nil, fmt.Errorf("error converting DRS object to internal record: %v", err) - } - - indexdObjForm := InternalRecordForm{ - InternalRecord: *indexdRecord, - Form: "object", - } - - jsonBytes, err := json.Marshal(indexdObjForm) + internalRecord, err := drsObjectToSyfonInternalRecord(record) if err != nil { return nil, err } - - url := fmt.Sprintf("%s/index", c.apiEndpoint()) - resp, err := c.Do(ctx, 
&request.RequestBuilder{ - Method: http.MethodPost, - Url: url, - Body: bytes.NewBuffer(jsonBytes), - Headers: map[string]string{ - "Content-Type": "application/json", - "Accept": "application/json", - }, - Token: c.token(), - }) + created, err := c.syfon.Index().Create(ctx, internalRecord) if err != nil { return nil, err } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { - body, _ := io.ReadAll(resp.Body) - did := "" - if indexdRecord.Did != nil { - did = *indexdRecord.Did - } - return nil, fmt.Errorf("failed to register record %s: %s (status: %d)", did, string(body), resp.StatusCode) - } - - return InternalRecordToDrsObject(indexdRecord) + return syfonInternalRecordToDRSObject(created) } func (c *DrsClient) RegisterRecords(ctx context.Context, records []*DRSObject) ([]*DRSObject, error) { @@ -378,226 +235,77 @@ func (c *DrsClient) RegisterRecords(ctx context.Context, records []*DRSObject) ( return nil, nil } - candidates := make([]DRSObjectCandidate, len(records)) + candidates := make([]syclient.DRSObjectCandidate, len(records)) for i, r := range records { candidates[i] = ConvertToCandidate(r) } - reqBody := RegisterObjectsRequest{ - Candidates: candidates, - } - - jsonBytes, err := json.Marshal(reqBody) + resp, err := c.syfon.DRS().RegisterObjects(ctx, syclient.RegisterObjectsRequest{Candidates: candidates}) if err != nil { return nil, err } - - url := fmt.Sprintf("%s/ga4gh/drs/v1/objects/register", c.apiEndpoint()) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodPost, - Url: url, - Body: bytes.NewBuffer(jsonBytes), - Headers: map[string]string{ - "Content-Type": "application/json", - "Accept": "application/json", - }, - Token: c.token(), - }) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to register records: %s 
(status: %d)", string(body), resp.StatusCode) - } - - body, err := io.ReadAll(resp.Body) - if err != nil { - return nil, fmt.Errorf("error reading registered objects response: %v", err) + if len(resp.Objects) == 0 { + return nil, fmt.Errorf("register response did not include objects") } - // Canonical shape from DRS register API. - var wrapped struct { - Objects []*DRSObject `json:"objects"` + out := make([]*DRSObject, 0, len(resp.Objects)) + for _, obj := range resp.Objects { + o := obj + out = append(out, &o) } - if err := json.Unmarshal(body, &wrapped); err != nil { - return nil, fmt.Errorf("unsupported response payload: %s", string(body)) - } - if len(wrapped.Objects) == 0 { - return nil, fmt.Errorf("register response did not include objects") - } - return wrapped.Objects, nil + return out, nil } func (c *DrsClient) UpdateRecord(ctx context.Context, updateInfo *DRSObject, did string) (*DRSObject, error) { - // Get current revision from existing record - record, err := c.getInternalRecordByDID(ctx, did) + existing, err := c.syfon.Index().Get(ctx, did) if err != nil { - return nil, fmt.Errorf("could not retrieve existing record for DID %s: %v", did, err) - } - - // Build update payload starting with existing record values - updatePayload := UpdateInputInfo{ - URLs: record.Urls, - FileName: record.FileName, - Authz: record.Authz, + return nil, err } - // Apply updates from updateInfo - if len(updateInfo.AccessMethods) > 0 { - newURLs := make([]string, 0, len(updateInfo.AccessMethods)) - for _, a := range updateInfo.AccessMethods { - if a.AccessUrl != nil { + updated := existing + updated.SetDid(did) + if len(updateInfo.AccessMethods) > 0 { + newURLs := make([]string, 0, len(updateInfo.AccessMethods)) + for _, a := range updateInfo.AccessMethods { + if a.AccessUrl.Url == "" { + continue + } newURLs = append(newURLs, a.AccessUrl.Url) } - } - updatePayload.URLs = appendUnique(updatePayload.URLs, newURLs) + updated.SetUrls(appendUnique(updated.GetUrls(), newURLs)) 
authz := InternalAuthzFromDrsAccessMethods(updateInfo.AccessMethods) - updatePayload.Authz = appendUnique(updatePayload.Authz, authz) + updated.SetAuthz(appendUnique(updated.GetAuthz(), authz)) } - if updateInfo.Name != nil && *updateInfo.Name != "" { - updatePayload.FileName = updateInfo.Name + if updateInfo.Name != "" { + updated.SetFileName(updateInfo.Name) } - - if updateInfo.Version != nil && *updateInfo.Version != "" { - updatePayload.Version = updateInfo.Version + if updateInfo.Size > 0 { + updated.SetSize(updateInfo.Size) } - - if updateInfo.Description != nil && *updateInfo.Description != "" { - if updatePayload.Metadata == nil { - updatePayload.Metadata = make(map[string]any) - } - updatePayload.Metadata["description"] = *updateInfo.Description + if len(updateInfo.Checksums) > 0 { + updated.SetHashes(hash.ConvertDrsChecksumsToMap(updateInfo.Checksums)) } - jsonBytes, err := json.Marshal(updatePayload) - if err != nil { - return nil, fmt.Errorf("error marshaling indexd update payload: %v", err) - } - - rev := "" - if record.Rev != nil { - rev = *record.Rev - } - url := fmt.Sprintf("%s/index/%s?rev=%s", c.apiEndpoint(), did, rev) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodPut, - Url: url, - Body: bytes.NewBuffer(jsonBytes), - Headers: map[string]string{ - "Content-Type": "application/json", - "Accept": "application/json", - }, - Token: c.token(), - }) + res, err := c.syfon.Index().Update(ctx, did, updated) if err != nil { return nil, err } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to update record %s: %s (status: %d)", did, string(body), resp.StatusCode) - } - - return c.GetObject(ctx, did) + return syfonInternalRecordToDRSObject(res) } func (c *DrsClient) DeleteRecord(ctx context.Context, did string) error { - // First get the record to get the revision (rev) - record, err := c.getInternalRecordByDID(ctx, did) - if err != nil { - return 
err - } - - rev := "" - if record.Rev != nil { - rev = *record.Rev - } - url := fmt.Sprintf("%s/index/%s?rev=%s", c.apiEndpoint(), did, rev) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodDelete, - Url: url, - Headers: map[string]string{ - "Accept": "application/json", - }, - Token: c.token(), - }) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - body, _ := io.ReadAll(resp.Body) - return fmt.Errorf("failed to delete record %s: %s (status: %d)", did, string(body), resp.StatusCode) - } - - return nil + return c.syfon.Index().Delete(ctx, did) } func (c *DrsClient) DeleteRecordsByProject(ctx context.Context, projectId string) error { - recs, err := c.ListObjectsByProject(ctx, projectId) + resourcePath, err := ProjectToResource(c.organization, projectId) if err != nil { return err } - - ids := make([]string, 0, 128) - seen := make(map[string]struct{}) - for rec := range recs { - if rec.Error != nil { - return rec.Error - } - - if rec.Object == nil || rec.Object.Id == "" { - continue - } - if _, ok := seen[rec.Object.Id]; ok { - continue - } - seen[rec.Object.Id] = struct{}{} - ids = append(ids, rec.Object.Id) - } - - for _, id := range ids { - err := c.DeleteRecord(ctx, id) - if err != nil { - if strings.Contains(err.Error(), "404") { - continue - } - c.logger.Error(fmt.Sprintf("DeleteRecordsByProject Error for %s: %v", id, err)) - continue - } - } - return nil -} - -func (c *DrsClient) getInternalRecordByDID(ctx context.Context, did string) (*OutputInfo, error) { - url := fmt.Sprintf("%s/index/%s", c.apiEndpoint(), did) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodGet, - Url: url, - Token: c.token(), - }) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to get internal record %s: %s (status: %d)", 
did, string(body), resp.StatusCode) - } - - var info OutputInfo - if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { - return nil, err - } - return &info, nil + _, err = c.syfon.Index().DeleteByQuery(ctx, syclient.DeleteByQueryOptions{Authz: resourcePath}) + return err } func (c *DrsClient) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]DRSObject, error) { @@ -605,46 +313,14 @@ func (c *DrsClient) BatchGetObjectsByHash(ctx context.Context, hashes []string) return nil, nil } - reqBody := struct { - Hashes []string `json:"hashes"` - }{ - Hashes: hashes, - } - - jsonBytes, err := json.Marshal(reqBody) - if err != nil { - return nil, err - } - - url := fmt.Sprintf("%s/index/bulk/hashes", c.apiEndpoint()) - resp, err := c.Do(ctx, &request.RequestBuilder{ - Method: http.MethodPost, - Url: url, - Body: bytes.NewBuffer(jsonBytes), - Headers: map[string]string{ - "Content-Type": "application/json", - "Accept": "application/json", - }, - Token: c.token(), - }) + resp, err := c.syfon.Index().BulkHashes(ctx, syclient.BulkHashesRequest{Hashes: hashes}) if err != nil { return nil, err } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("failed to bulk lookup hashes: %s (status: %d)", string(body), resp.StatusCode) - } - - var list ListRecords - if err := json.NewDecoder(resp.Body).Decode(&list); err != nil { - return nil, err - } result := make(map[string][]DRSObject) - for _, r := range list.Records { - drsObj, err := r.ToDrsObject() + for _, rec := range resp.Records { + drsObj, err := syfonInternalRecordToDRSObject(rec) if err != nil { continue } @@ -653,7 +329,6 @@ func (c *DrsClient) BatchGetObjectsByHash(ctx context.Context, hashes []string) result[hInfo.SHA256] = append(result[hInfo.SHA256], *drsObj) } } - return result, nil } @@ -671,14 +346,14 @@ func appendUnique(existing []string, toAdd []string) []string { return existing } -// BuildDrsObj matches 
git-drs behavior but moved to core +// BuildDrsObj matches git-drs behavior but moved to core. func (c *DrsClient) BuildDrsObj(fileName string, checksum string, size int64, drsId string) (*DRSObject, error) { return BuildDrsObj(fileName, checksum, size, drsId, c.GetBucketName(), c.GetOrganization(), c.GetProjectId()) } -// RegisterFile matches git-drs behavior but moved to core +// RegisterFile matches git-drs behavior but moved to core. func (c *DrsClient) RegisterFile(ctx context.Context, oid string, path string) (*DRSObject, error) { - // Base implementation without LFS specifics + // Base implementation without LFS specifics. return nil, fmt.Errorf("RegisterFile needs specific implementation (e.g. for LFS or cloud)") } diff --git a/drs/convert.go b/drs/convert.go deleted file mode 100644 index 6a45938..0000000 --- a/drs/convert.go +++ /dev/null @@ -1,107 +0,0 @@ -package drs - -import ( - "fmt" - "net/url" - - "github.com/calypr/data-client/apigen/internalapi" - "github.com/calypr/data-client/hash" -) - -// InternalRecordFromDrsObject conversion purposes -func InternalRecordFromDrsObject(drsObj *DRSObject) (*InternalRecord, error) { - hashesMap := hash.ConvertDrsChecksumsToMap(drsObj.Checksums) - - indexdObj := &InternalRecord{ - InternalRecord: internalapi.InternalRecord{ - Did: internalapi.PtrString(drsObj.Id), - Size: internalapi.PtrInt64(drsObj.Size), - FileName: drsObj.Name, - Urls: InternalURLFromDrsAccessURLs(drsObj.AccessMethods), - Authz: InternalAuthzFromDrsAccessMethods(drsObj.AccessMethods), - Hashes: &hashesMap, - }, - } - return indexdObj, nil -} - -func getVal[T any](p *T) T { - if p == nil { - var zero T - return zero - } - return *p -} - -func InternalRecordToDrsObject(indexdObj *InternalRecord) (*DRSObject, error) { - authz := indexdObj.Authz - urls := indexdObj.Urls - - accessMethods, err := DRSAccessMethodsFromInternalURLs(urls, authz) - if err != nil { - return nil, err - } - - res := &DRSObject{ - Id: getVal(indexdObj.Did), - Size: 
getVal(indexdObj.Size), - Name: indexdObj.FileName, - AccessMethods: accessMethods, - } - - if indexdObj.Hashes != nil { - res.Checksums = hash.ConvertMapToDrsChecksums(*indexdObj.Hashes) - } - - return res, nil -} - -func DRSAccessMethodsFromInternalURLs(urls []string, authz []string) ([]AccessMethod, error) { - var accessMethods []AccessMethod - for _, urlString := range urls { - var method AccessMethod - method.AccessUrl = &AccessURL{Url: urlString} - - parsed, err := url.Parse(urlString) - if err != nil { - return nil, fmt.Errorf("failed to parse url %q: %v", urlString, err) - } - if parsed.Scheme == "" { - method.Type = "https" - } else { - method.Type = parsed.Scheme - } - - if len(authz) > 0 { - method.Authorizations = &Authorizations{BearerAuthIssuers: []string{authz[0]}} - } - accessMethods = append(accessMethods, method) - } - return accessMethods, nil -} - -// InternalAuthzFromDrsAccessMethods extracts authz values from DRS access methods -func InternalAuthzFromDrsAccessMethods(accessMethods []AccessMethod) []string { - var authz []string - for _, drsURL := range accessMethods { - if drsURL.Authorizations != nil && len(drsURL.Authorizations.BearerAuthIssuers) > 0 { - authz = append(authz, drsURL.Authorizations.BearerAuthIssuers[0]) - } - } - return authz -} - -func InternalURLFromDrsAccessURLs(accessMethods []AccessMethod) []string { - var urls []string - for _, drsURL := range accessMethods { - if drsURL.AccessUrl != nil { - urls = append(urls, drsURL.AccessUrl.Url) - } - } - return urls -} - -// ToDrsObject converts an InternalRecordResponse (OutputInfo) to a DRSObject -func (outputInfo *OutputInfo) ToDrsObject() (*DRSObject, error) { - return InternalRecordToDrsObject(outputInfo.ToInternalRecord()) -} diff --git a/drs/drs.go b/drs/drs.go index d86f4d9..9bdd5b4 100644 --- a/drs/drs.go +++ b/drs/drs.go @@ -72,7 +72,7 @@ func FindMatchingRecord(records []DRSObject, organization, projectId string) (*D for _, record := range records { for _, access := 
range record.AccessMethods { - if access.Authorizations == nil { + if len(access.Authorizations.BearerAuthIssuers) == 0 { continue } @@ -128,13 +128,13 @@ func BuildDrsObjWithPrefix(fileName string, checksum string, size int64, drsId s drsObj := DRSObject{ Id: drsId, - Name: &fileName, + Name: fileName, AccessMethods: []AccessMethod{{ Type: "s3", - AccessUrl: &AccessURL{ + AccessUrl: AccessURL{ Url: fileURL, }, - Authorizations: &authorizations, + Authorizations: authorizations, }}, Checksums: []Checksum{{ Type: "sha256", @@ -150,32 +150,15 @@ func BuildDrsObjWithPrefix(fileName string, checksum string, size int64, drsId s // This is needed because the server expects checksums as an array of Checksum objects, // while DRSObject uses HashInfo (which marshals to the correct format but has different Go types). func ConvertToCandidate(obj *DRSObject) DRSObjectCandidate { - var name string - if obj.Name != nil { - name = *obj.Name - } - var version string - if obj.Version != nil { - version = *obj.Version - } - var mimeType string - if obj.MimeType != nil { - mimeType = *obj.MimeType - } - var description string - if obj.Description != nil { - description = *obj.Description - } - return DRSObjectCandidate{ - Name: name, + Name: obj.Name, Size: obj.Size, - Version: version, - MimeType: mimeType, + Version: obj.Version, + MimeType: obj.MimeType, Checksums: obj.Checksums, AccessMethods: obj.AccessMethods, - Contents: nil, // ContentsObject in gen is different - Description: description, + Contents: obj.Contents, + Description: obj.Description, Aliases: obj.Aliases, } } diff --git a/drs/internal_types.go b/drs/internal_types.go deleted file mode 100644 index 906b5c3..0000000 --- a/drs/internal_types.go +++ /dev/null @@ -1,107 +0,0 @@ -package drs - -import ( - "time" - - "github.com/calypr/data-client/apigen/internalapi" - "github.com/calypr/data-client/hash" -) - -// Internal compatibility types for Internal DRS servers. 
-// These are used internally by DrsClient to communicate with the server's /index and /ga4gh endpoints. - -type OutputObject struct { - Id string `json:"id"` - Name string `json:"name"` - SelfURI string `json:"self_uri,omitempty"` - Size int64 `json:"size"` - CreatedTime string `json:"created_time,omitempty"` - UpdatedTime string `json:"updated_time,omitempty"` - Version string `json:"version,omitempty"` - MimeType string `json:"mime_type,omitempty"` - Checksums []hash.Checksum `json:"checksums"` - AccessMethods []AccessMethod `json:"access_methods"` - Contents []Contents `json:"contents,omitempty"` - Description string `json:"description,omitempty"` - Aliases []string `json:"aliases,omitempty"` -} - -func ConvertOutputObjectToDRSObject(in *OutputObject) *DRSObject { - if in == nil { - return nil - } - - drsChecksums := make([]Checksum, len(in.Checksums)) - for i, c := range in.Checksums { - drsChecksums[i] = Checksum{ - Checksum: c.Checksum, - Type: string(c.Type), - } - } - - createdTime, _ := time.Parse(time.RFC3339, in.CreatedTime) - var updatedTimePtr *time.Time - if ut, err := time.Parse(time.RFC3339, in.UpdatedTime); err == nil { - updatedTimePtr = &ut - } - - return &DRSObject{ - Id: in.Id, - Name: internalapi.PtrString(in.Name), - SelfUri: in.SelfURI, - Size: in.Size, - CreatedTime: createdTime, - UpdatedTime: updatedTimePtr, - Version: internalapi.PtrString(in.Version), - MimeType: internalapi.PtrString(in.MimeType), - Checksums: drsChecksums, - AccessMethods: in.AccessMethods, - Contents: in.Contents, - Description: internalapi.PtrString(in.Description), - Aliases: in.Aliases, - } -} - -// InternalRecord embeds InternalRecord for backward compatibility -type InternalRecord struct { - internalapi.InternalRecord -} - -type ListRecords struct { - Records []OutputInfo `json:"records"` -} - -type OutputInfo struct { - internalapi.InternalRecordResponse -} - -// InternalRecordForm is used for legacy /index registration -type InternalRecordForm struct { - 
InternalRecord - Form string `json:"form"` - Rev string `json:"rev,omitempty"` -} - -func (outputInfo *OutputInfo) ToInternalRecord() *InternalRecord { - return &InternalRecord{ - InternalRecord: internalapi.InternalRecord{ - Did: outputInfo.Did, - Size: outputInfo.Size, - FileName: outputInfo.FileName, - Urls: outputInfo.Urls, - Authz: outputInfo.Authz, - Hashes: outputInfo.Hashes, - }, - } -} - -// UpdateInputInfo is the put object for index records -type UpdateInputInfo struct { - FileName *string `json:"file_name,omitempty"` - Metadata map[string]any `json:"metadata,omitempty"` - URLsMetadata map[string]any `json:"urls_metadata,omitempty"` - Version *string `json:"version,omitempty"` - URLs []string `json:"urls,omitempty"` - ACL []string `json:"acl,omitempty"` - Authz []string `json:"authz,omitempty"` -} diff --git a/drs/object_builder.go b/drs/object_builder.go index a653085..25144c1 100644 --- a/drs/object_builder.go +++ b/drs/object_builder.go @@ -59,13 +59,13 @@ func (b ObjectBuilder) Build(fileName string, checksum string, size int64, drsID drsObj := DRSObject{ Id: drsID, - Name: &fileName, + Name: fileName, AccessMethods: []AccessMethod{{ Type: accessType, - AccessUrl: &AccessURL{ + AccessUrl: AccessURL{ Url: fileURL, }, - Authorizations: &authorizations, + Authorizations: authorizations, }}, Checksums: []Checksum{{ Type: "sha256", diff --git a/drs/object_builder_test.go b/drs/object_builder_test.go index 4b7136e..2b15c9d 100644 --- a/drs/object_builder_test.go +++ b/drs/object_builder_test.go @@ -18,8 +18,8 @@ func TestObjectBuilderBuildSuccess(t *testing.T) { if obj.Id != "did-1" { t.Fatalf("unexpected Id: %s", obj.Id) } - if *obj.Name != "file.txt" { - t.Fatalf("unexpected Name: %s", *obj.Name) + if obj.Name != "file.txt" { + t.Fatalf("unexpected Name: %s", obj.Name) } if obj.Checksums[0].Checksum != "sha-256" { t.Fatalf("unexpected checksum: %v", obj.Checksums) diff --git a/drs/resolve.go b/drs/resolve.go index 44ff31f..4a28e94 100644 --- 
a/drs/resolve.go +++ b/drs/resolve.go @@ -36,14 +36,14 @@ func ResolveDownloadURL(ctx context.Context, client Client, guid string, accessI if accessID == "" { for _, am := range obj.AccessMethods { - if am.AccessId != nil && *am.AccessId != "" { - accessID = *am.AccessId + if am.AccessId != "" { + accessID = am.AccessId break } } if accessID == "" { for _, am := range obj.AccessMethods { - if am.AccessUrl != nil && am.AccessUrl.Url != "" { + if am.AccessUrl.Url != "" { return am.AccessUrl.Url, nil } } diff --git a/drs/syfon_adapter.go b/drs/syfon_adapter.go new file mode 100644 index 0000000..ce23bde --- /dev/null +++ b/drs/syfon_adapter.go @@ -0,0 +1,61 @@ +package drs + +import ( + "time" + + "github.com/calypr/data-client/hash" + syclient "github.com/calypr/syfon/client" +) + +func drsObjectToSyfonInternalRecord(obj *DRSObject) (syclient.InternalRecord, error) { + if obj == nil { + return syclient.InternalRecord{}, nil + } + out := syclient.InternalRecord{} + out.SetDid(obj.Id) + if obj.Name != "" { + out.SetFileName(obj.Name) + } + out.SetSize(obj.Size) + out.SetUrls(InternalURLFromDrsAccessURLs(obj.AccessMethods)) + out.SetAuthz(InternalAuthzFromDrsAccessMethods(obj.AccessMethods)) + out.SetHashes(hash.ConvertDrsChecksumsToMap(obj.Checksums)) + return out, nil +} + +func syfonInternalRecordToDRSObject(rec syclient.InternalRecord) (*DRSObject, error) { + accessMethods, err := DRSAccessMethodsFromInternalURLs(rec.GetUrls(), rec.GetAuthz()) + if err != nil { + return nil, err + } + checksums := hash.ConvertMapToDrsChecksums(rec.GetHashes()) + did := rec.GetDid() + obj := &DRSObject{ + Id: did, + SelfUri: "drs://" + did, + Size: rec.GetSize(), + AccessMethods: accessMethods, + Checksums: checksums, + } + if rec.GetFileName() != "" { + obj.Name = rec.GetFileName() + } + if t, ok := parseRFC3339(rec.GetCreatedDate()); ok { + obj.CreatedTime = t + } + if t, ok := parseRFC3339(rec.GetUpdatedDate()); ok { + obj.UpdatedTime = t + } + return obj, nil +} + +func 
parseRFC3339(v string) (time.Time, bool) { + if v == "" { + return time.Time{}, false + } + t, err := time.Parse(time.RFC3339, v) + if err != nil { + return time.Time{}, false + } + return t, true +} diff --git a/drs/types.go b/drs/types.go index a41e1d6..c8b66e1 100644 --- a/drs/types.go +++ b/drs/types.go @@ -1,50 +1,28 @@ package drs import ( - "github.com/calypr/data-client/apigen/drs" "github.com/calypr/data-client/hash" + syclient "github.com/calypr/syfon/client" ) type ChecksumType = string -type Checksum = drs.Checksum +type Checksum = syclient.Checksum type HashInfo = hash.HashInfo -type AccessURL = drs.AccessMethodAccessUrl -type Authorizations = drs.AccessMethodAuthorizations -type AccessMethod = drs.AccessMethod +type AccessURL = syclient.AccessMethodAccessURL +type Authorizations = syclient.AccessMethodAuthorizations +type AccessMethod = syclient.AccessMethod -type Contents = drs.ContentsObject +type Contents = syclient.ContentsObject -type DRSPage struct { - DRSObjects []DRSObject `json:"drs_objects"` -} +type DRSPage = syclient.DRSPage type DRSObjectResult struct { Object *DRSObject Error error } -type DRSObject = drs.DrsObject - -// DRSObjectCandidate represents a DRS object candidate for registration. -// This matches the server's expected format where checksums is an array of Checksum objects. -// Server-generated fields (id, created_time, updated_time, self_uri) are not included. 
-type DRSObjectCandidate struct { - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Size int64 `json:"size"` - Version string `json:"version,omitempty"` - MimeType string `json:"mime_type,omitempty"` - Checksums []Checksum `json:"checksums"` - AccessMethods []AccessMethod `json:"access_methods,omitempty"` - Contents []Contents `json:"contents,omitempty"` - Description string `json:"description,omitempty"` - Aliases []string `json:"aliases,omitempty"` -} +type DRSObject = syclient.DRSObject -// RegisterObjectsRequest is the request body for registering objects in some DRS implementations. -// This matches the server's API specification. -type RegisterObjectsRequest struct { - Candidates []DRSObjectCandidate `json:"candidates"` - Passports []string `json:"passports,omitempty"` -} +type DRSObjectCandidate = syclient.DRSObjectCandidate +type RegisterObjectsRequest = syclient.RegisterObjectsRequest diff --git a/drs/upsert.go b/drs/upsert.go index 70cf0a6..1254f9c 100644 --- a/drs/upsert.go +++ b/drs/upsert.go @@ -34,7 +34,7 @@ func (c *DrsClient) UpsertRecord(ctx context.Context, url string, sha256 string, } c.logger.Debug("updating existing record with new url") - updatedRecord := DRSObject{AccessMethods: []AccessMethod{{AccessUrl: &AccessURL{Url: url}}}} + updatedRecord := DRSObject{AccessMethods: []AccessMethod{{AccessUrl: AccessURL{Url: url}}}} return c.UpdateRecord(ctx, &updatedRecord, matchingRecord.Id) } diff --git a/go.mod b/go.mod index e418828..9b0bcef 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,8 @@ go 1.26.1 require ( github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1 - github.com/golang-jwt/jwt/v5 v5.3.0 + github.com/calypr/syfon v0.0.0-20260402213132-2a2c82c5a4cd + github.com/golang-jwt/jwt/v5 v5.3.1 github.com/google/uuid v1.6.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-retryablehttp v0.7.8 @@ -15,10 +16,11 @@ require ( gocloud.dev v0.45.0 golang.org/x/sync v0.19.0 gopkg.in/ini.v1 v1.67.0 - 
gopkg.in/validator.v2 v2.0.1 gopkg.in/yaml.v3 v3.0.1 ) +require github.com/gorilla/mux v1.8.0 // indirect + require ( cel.dev/expr v0.25.1 // indirect cloud.google.com/go v0.123.0 // indirect @@ -40,14 +42,14 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect - github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect + github.com/aws/aws-sdk-go-v2 v1.41.5 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.12 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect @@ -58,7 +60,8 @@ require ( github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect - github.com/aws/smithy-go v1.24.0 // indirect + github.com/aws/smithy-go v1.24.2 // indirect + github.com/calypr/syfon/apigen v0.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/clipperhouse/stringish v0.1.1 // indirect github.com/clipperhouse/uax29/v2 v2.3.0 // indirect @@ -104,4 +107,9 @@ require ( google.golang.org/genproto/googleapis/rpc v0.0.0-20251124214823-79d6a2a48846 
// indirect google.golang.org/grpc v1.77.0 // indirect google.golang.org/protobuf v1.36.10 // indirect + gopkg.in/validator.v2 v2.0.1 // indirect ) + +replace github.com/calypr/syfon => ../syfon + +replace github.com/calypr/syfon/apigen => ../syfon/apigen diff --git a/go.sum b/go.sum index 15f6d6d..2cfae66 100644 --- a/go.sum +++ b/go.sum @@ -52,8 +52,8 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= -github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU= -github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= +github.com/aws/aws-sdk-go-v2 v1.41.5 h1:dj5kopbwUsVUVFgO4Fi5BIT3t4WyqIDjGKCangnV/yY= +github.com/aws/aws-sdk-go-v2 v1.41.5/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4= github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY= @@ -64,10 +64,10 @@ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBc github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.12 h1:Zy6Tme1AA13kX8x3CnkHx5cqdGWGaj/anwOiWGnA0Xo= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.12/go.mod h1:ql4uXYKoTM9WUAUSmthY4AtPVrlTBZOvnBJTiCUdPxI= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U= 
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 h1:Rgg6wvjjtX8bNHcvi9OnXWwcE0a2vGpbwmtICOsvcf4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21/go.mod h1:A/kJFst/nm//cyqonihbdpQZwiUhhzpqTsdbhDdRF9c= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 h1:PEgGVtPoB6NTpPrBgqSE5hE/o47Ij9qk/SEZFbUOe9A= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21/go.mod h1:p+hz+PRAYlY3zcpJhPwXlLC4C+kqn70WIHwnzAfs6ps= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k= @@ -90,8 +90,8 @@ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLz github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo= github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ= github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ= -github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= -github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= +github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod 
h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= @@ -122,8 +122,8 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= -github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY= +github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -144,6 +144,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAV github.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= +github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= diff --git a/hash/hash.go b/hash/hash.go index eaf2250..008cd1f 100644 --- 
a/hash/hash.go +++ b/hash/hash.go @@ -3,7 +3,7 @@ package hash import ( "encoding/json" "fmt" - "github.com/calypr/data-client/apigen/drs" + syclient "github.com/calypr/syfon/client" ) // ChecksumType represents the digest method used to create the checksum @@ -144,7 +144,7 @@ func ConvertChecksumsToHashInfo(checksums []Checksum) HashInfo { return ConvertStringMapToHashInfo(checksumMap) } -func ConvertDrsChecksumsToMap(checksums []drs.Checksum) map[string]string { +func ConvertDrsChecksumsToMap(checksums []syclient.Checksum) map[string]string { result := make(map[string]string, len(checksums)) for _, c := range checksums { result[c.Type] = c.Checksum @@ -152,15 +152,15 @@ func ConvertDrsChecksumsToMap(checksums []drs.Checksum) map[string]string { return result } -func ConvertDrsChecksumsToHashInfo(checksums []drs.Checksum) HashInfo { +func ConvertDrsChecksumsToHashInfo(checksums []syclient.Checksum) HashInfo { checksumMap := ConvertDrsChecksumsToMap(checksums) return ConvertStringMapToHashInfo(checksumMap) } -func ConvertMapToDrsChecksums(hashes map[string]string) []drs.Checksum { - result := make([]drs.Checksum, 0, len(hashes)) +func ConvertMapToDrsChecksums(hashes map[string]string) []syclient.Checksum { + result := make([]syclient.Checksum, 0, len(hashes)) for t, c := range hashes { - result = append(result, drs.Checksum{ + result = append(result, syclient.Checksum{ Type: t, Checksum: c, }) diff --git a/localclient/client.go b/localclient/client.go index a38cb86..7542ef6 100644 --- a/localclient/client.go +++ b/localclient/client.go @@ -41,7 +41,7 @@ func NewLocalInterfaceFromCredential(cred *conf.Credential, logger *logs.Gen3Log config := conf.NewConfigure(logger.Logger) req := request.NewRequestInterface(logger, cred, config) dc := drs.NewLocalDrsClient(req, cred.APIEndpoint, logger.Logger) - tb := transfer.New(req, logger, localsigner.New(cred.APIEndpoint, req, dc)) + tb := transfer.New(req, logger, localsigner.New(cred.APIEndpoint, cred, dc)) return 
&LocalClient{ RequestInterface: req, diff --git a/mocks/mock_drs_client.go b/mocks/mock_drs_client.go index e58e5bd..8b8aa9e 100644 --- a/mocks/mock_drs_client.go +++ b/mocks/mock_drs_client.go @@ -195,29 +195,41 @@ func (mr *MockDrsClientMockRecorder) New(method, url any) *gomock.Call { } func (m *MockDrsClient) WithProject(projectId string) drs.Client { - return m + return m } func (m *MockDrsClient) WithOrganization(organization string) drs.Client { - return m + return m } func (m *MockDrsClient) WithBucket(bucketName string) drs.Client { - return m + return m } -func (m *MockDrsClient) GetProjectId() string { return "" } -func (m *MockDrsClient) GetBucketName() string { return "" } +func (m *MockDrsClient) GetProjectId() string { return "" } +func (m *MockDrsClient) GetBucketName() string { return "" } func (m *MockDrsClient) GetOrganization() string { return "" } -func (m *MockDrsClient) RegisterFile(ctx context.Context, oid, path string) (*drs.DRSObject, error) { return nil, nil } -func (m *MockDrsClient) AddURL(ctx context.Context, blobURL, sha256 string, opts ...drs.AddURLOption) (*drs.DRSObject, error) { return nil, nil } -func (m *MockDrsClient) ListObjectsByProject(ctx context.Context, projectId string) (chan drs.DRSObjectResult, error) { return nil, nil } -func (m *MockDrsClient) GetProjectSample(ctx context.Context, projectId string, limit int) ([]drs.DRSObject, error) { return nil, nil } -func (m *MockDrsClient) DeleteRecordsByProject(ctx context.Context, projectId string) error { return nil } -func (m *MockDrsClient) BuildDrsObj(fileName string, checksum string, size int64, drsId string) (*drs.DRSObject, error) { return nil, nil } +func (m *MockDrsClient) RegisterFile(ctx context.Context, oid, path string) (*drs.DRSObject, error) { + return nil, nil +} +func (m *MockDrsClient) AddURL(ctx context.Context, blobURL, sha256 string, opts ...drs.AddURLOption) (*drs.DRSObject, error) { + return nil, nil +} +func (m *MockDrsClient) ListObjectsByProject(ctx 
context.Context, projectId string) (chan drs.DRSObjectResult, error) { + return nil, nil +} +func (m *MockDrsClient) GetProjectSample(ctx context.Context, projectId string, limit int) ([]drs.DRSObject, error) { + return nil, nil +} +func (m *MockDrsClient) DeleteRecordsByProject(ctx context.Context, projectId string) error { + return nil +} +func (m *MockDrsClient) BuildDrsObj(fileName string, checksum string, size int64, drsId string) (*drs.DRSObject, error) { + return nil, nil +} func (m *MockDrsClient) DownloadFile(ctx context.Context, oid, destPath string) error { return nil } -func (m *MockDrsClient) Name() string { return "mock-drs-client" } -func (m *MockDrsClient) Logger() *logs.Gen3Logger { return nil } +func (m *MockDrsClient) Name() string { return "mock-drs-client" } +func (m *MockDrsClient) Logger() *logs.Gen3Logger { return nil } func (m *MockDrsClient) ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { return "", nil } @@ -227,6 +239,9 @@ func (m *MockDrsClient) Download(ctx context.Context, fdr *common.FileDownloadRe func (m *MockDrsClient) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { return "", nil } +func (m *MockDrsClient) ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) { + return nil, nil +} func (m *MockDrsClient) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { return nil, nil } @@ -236,7 +251,9 @@ func (m *MockDrsClient) GetMultipartUploadURL(ctx context.Context, key string, u func (m *MockDrsClient) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { return nil } -func (m *MockDrsClient) Upload(ctx context.Context, url string, body io.Reader, size int64) error { return nil } +func (m 
*MockDrsClient) Upload(ctx context.Context, url string, body io.Reader, size int64) error { + return nil +} func (m *MockDrsClient) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { return "", nil } diff --git a/tests/download-multiple_test.go b/tests/download-multiple_test.go index f04fca0..3de9b5c 100644 --- a/tests/download-multiple_test.go +++ b/tests/download-multiple_test.go @@ -29,7 +29,7 @@ func Test_askGen3ForFileInfo_withShepherd(t *testing.T) { mockIndexd.EXPECT(). GetObject(gomock.Any(), testGUID). - Return(&drs.DRSObject{Id: testGUID, Name: &testFileName, Size: testFileSize}, nil) + Return(&drs.DRSObject{Id: testGUID, Name: testFileName, Size: testFileSize}, nil) logger := logs.NewGen3Logger(nil, "", "test") @@ -107,7 +107,7 @@ func Test_askGen3ForFileInfo_noShepherd(t *testing.T) { mockIndexd.EXPECT(). GetObject(gomock.Any(), testGUID). - Return(&drs.DRSObject{Id: testGUID, Name: &testFileName, Size: testFileSize}, nil) + Return(&drs.DRSObject{Id: testGUID, Name: testFileName, Size: testFileSize}, nil) logger := logs.NewGen3Logger(nil, "", "test") diff --git a/transfer/signer/gen3/signer.go b/transfer/signer/gen3/signer.go index b2c45bf..876e6a5 100644 --- a/transfer/signer/gen3/signer.go +++ b/transfer/signer/gen3/signer.go @@ -4,7 +4,6 @@ import ( "context" "encoding/json" "fmt" - "io" "net/http" "strings" @@ -13,6 +12,7 @@ import ( "github.com/calypr/data-client/drs" "github.com/calypr/data-client/fence" "github.com/calypr/data-client/request" + syclient "github.com/calypr/syfon/client" ) type Signer struct { @@ -20,14 +20,24 @@ type Signer struct { cred *conf.Credential drs drs.Client fence fence.FenceInterface + sy *syclient.Client } func New(req request.RequestInterface, cred *conf.Credential, dc drs.Client, fc fence.FenceInterface) *Signer { + opts := make([]syclient.Option, 0, 1) + baseURL := "" + if cred != nil { + baseURL = cred.APIEndpoint + if token := strings.TrimSpace(cred.AccessToken); token != "" 
{ + opts = append(opts, syclient.WithBearerToken(token)) + } + } return &Signer{ req: req, cred: cred, drs: dc, fence: fc, + sy: syclient.New(baseURL, opts...), } } @@ -112,65 +122,24 @@ func (g *Signer) ResolveUploadURLs(ctx context.Context, requests []common.Upload return []common.UploadURLResolveResponse{}, nil } - type bulkUploadRequest struct { - Requests []struct { - FileID string `json:"file_id"` - Bucket string `json:"bucket,omitempty"` - FileName string `json:"file_name,omitempty"` - } `json:"requests"` - } - type bulkUploadResponse struct { - Results []struct { - FileID string `json:"file_id"` - Bucket string `json:"bucket,omitempty"` - FileName string `json:"file_name,omitempty"` - URL string `json:"url,omitempty"` - Status int `json:"status"` - Error string `json:"error,omitempty"` - } `json:"results"` - } - - payload := bulkUploadRequest{ - Requests: make([]struct { - FileID string `json:"file_id"` - Bucket string `json:"bucket,omitempty"` - FileName string `json:"file_name,omitempty"` - }, 0, len(requests)), - } + items := make([]syclient.UploadBulkItem, 0, len(requests)) for _, req := range requests { fileID := strings.TrimSpace(req.GUID) if fileID == "" { fileID = strings.TrimSpace(req.Filename) } - payload.Requests = append(payload.Requests, struct { - FileID string `json:"file_id"` - Bucket string `json:"bucket,omitempty"` - FileName string `json:"file_name,omitempty"` - }{ - FileID: fileID, - Bucket: req.Bucket, - FileName: req.Filename, - }) - } - - endpoint := strings.TrimRight(strings.TrimSpace(g.cred.APIEndpoint), "/") + "/data/upload/bulk" - rb := g.req.New(http.MethodPost, endpoint) - if _, err := rb.WithJSONBody(payload); err != nil { - return nil, err - } - resp, err := g.req.Do(ctx, rb) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("bulk upload URL request failed with status %d: %s", resp.StatusCode, 
strings.TrimSpace(string(body))) + item := syclient.UploadBulkItem{FileId: fileID} + if req.Bucket != "" { + item.SetBucket(req.Bucket) + } + if req.Filename != "" { + item.SetFileName(req.Filename) + } + items = append(items, item) } - var out bulkUploadResponse - if err := json.NewDecoder(resp.Body).Decode(&out); err != nil { + out, err := g.sy.Data().UploadBulk(ctx, syclient.UploadBulkRequest{Requests: items}) + if err != nil { return nil, err } @@ -184,14 +153,14 @@ func (g *Signer) ResolveUploadURLs(ctx context.Context, requests []common.Upload Error: "missing result for request", } } - for i := range out.Results { + for i := range out.GetResults() { if i >= len(results) { break } - r := out.Results[i] - results[i].URL = r.URL - results[i].Status = r.Status - results[i].Error = r.Error + r := out.GetResults()[i] + results[i].URL = r.GetUrl() + results[i].Status = int(r.GetStatus()) + results[i].Error = r.GetError() if results[i].Status == 0 { results[i].Status = http.StatusOK } diff --git a/transfer/signer/local/signer.go b/transfer/signer/local/signer.go index 1452e6e..21976c4 100644 --- a/transfer/signer/local/signer.go +++ b/transfer/signer/local/signer.go @@ -2,52 +2,31 @@ package local import ( "context" - "encoding/json" "fmt" - "io" "net/http" - "net/url" "strings" "github.com/calypr/data-client/common" + "github.com/calypr/data-client/conf" drs "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/request" + syclient "github.com/calypr/syfon/client" ) type Signer struct { - baseURL string - req request.RequestInterface - client drs.Client + client *syclient.Client + drsClient drs.Client } -type bulkUploadRequest struct { - Requests []bulkUploadItem `json:"requests"` -} - -type bulkUploadItem struct { - FileID string `json:"file_id"` - Bucket string `json:"bucket,omitempty"` - FileName string `json:"file_name,omitempty"` -} - -type bulkUploadResponse struct { - Results []bulkUploadResult `json:"results"` -} - -type bulkUploadResult struct 
{ - FileID string `json:"file_id"` - Bucket string `json:"bucket,omitempty"` - FileName string `json:"file_name,omitempty"` - URL string `json:"url,omitempty"` - Status int `json:"status"` - Error string `json:"error,omitempty"` -} - -func New(baseURL string, req request.RequestInterface, dc drs.Client) *Signer { +func New(baseURL string, cred *conf.Credential, dc drs.Client) *Signer { + opts := make([]syclient.Option, 0, 1) + if cred != nil { + if token := strings.TrimSpace(cred.AccessToken); token != "" { + opts = append(opts, syclient.WithBearerToken(token)) + } + } return &Signer{ - baseURL: baseURL, - req: req, - client: dc, + client: syclient.New(baseURL, opts...), + drsClient: dc, } } @@ -57,76 +36,19 @@ func (d *Signer) DeleteFile(ctx context.Context, guid string) (string, error) { return "", fmt.Errorf("DeleteFile not implemented for local DRS signer") } -func (d *Signer) buildURL(paths ...string) (string, error) { - u, err := url.Parse(d.baseURL) - if err != nil { - return "", err - } - fullPath := u.Path - for _, p := range paths { - if p == "" { - continue - } - if !strings.HasSuffix(fullPath, "/") && !strings.HasPrefix(p, "/") { - fullPath += "/" - } - fullPath += p - } - u.Path = fullPath - return u.String(), nil -} - -func (d *Signer) doJSONRequest(ctx context.Context, method, url string, body interface{}, dst interface{}) error { - rb := d.req.New(method, url) - if body != nil { - if _, err := rb.WithJSONBody(body); err != nil { - return err - } - } - - resp, err := d.req.Do(ctx, rb) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - bodyBytes, _ := io.ReadAll(resp.Body) - return fmt.Errorf("request to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) - } - - if dst != nil { - return json.NewDecoder(resp.Body).Decode(dst) - } - return nil -} - func (d *Signer) ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { - return drs.ResolveDownloadURL(ctx, 
d.client, guid, accessID) + return drs.ResolveDownloadURL(ctx, d.drsClient, guid, accessID) } func (d *Signer) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { - u, err := d.buildURL("data/upload", guid) + res, err := d.client.Data().UploadURL(ctx, syclient.UploadURLRequest{ + FileID: guid, + Bucket: bucket, + FileName: filename, + }) if err != nil { return "", err } - q := url.Values{} - if strings.TrimSpace(filename) != "" { - q.Set("file_name", filename) - } - if bucket != "" { - q.Set("bucket", bucket) - } - if encoded := q.Encode(); encoded != "" { - u += "?" + encoded - } - - var res struct { - URL string `json:"url"` - } - if err := d.doJSONRequest(ctx, http.MethodGet, u, nil, &res); err != nil { - return "", err - } return res.URL, nil } @@ -135,49 +57,28 @@ func (d *Signer) ResolveUploadURLs(ctx context.Context, requests []common.Upload return []common.UploadURLResolveResponse{}, nil } - u, err := d.buildURL("data/upload/bulk") - if err != nil { - return nil, err - } - - payload := bulkUploadRequest{Requests: make([]bulkUploadItem, 0, len(requests))} + items := make([]syclient.UploadBulkItem, 0, len(requests)) for _, req := range requests { fileID := strings.TrimSpace(req.GUID) if fileID == "" { fileID = strings.TrimSpace(req.Filename) } - payload.Requests = append(payload.Requests, bulkUploadItem{ - FileID: fileID, - Bucket: req.Bucket, - FileName: req.Filename, - }) + item := syclient.UploadBulkItem{FileId: fileID} + if req.Bucket != "" { + item.SetBucket(req.Bucket) + } + if req.Filename != "" { + item.SetFileName(req.Filename) + } + items = append(items, item) } - var out bulkUploadResponse - if err := d.doJSONRequest(ctx, http.MethodPost, u, payload, &out); err != nil { + out, err := d.client.Data().UploadBulk(ctx, syclient.UploadBulkRequest{Requests: items}) + if err != nil { return nil, err } results := make([]common.UploadURLResolveResponse, len(requests)) - if 
len(out.Results) == len(requests) { - for i := range requests { - r := out.Results[i] - results[i] = common.UploadURLResolveResponse{ - GUID: requests[i].GUID, - Filename: requests[i].Filename, - Bucket: requests[i].Bucket, - URL: r.URL, - Status: r.Status, - Error: r.Error, - } - if results[i].Status == 0 { - results[i].Status = http.StatusOK - } - } - return results, nil - } - - // If response count mismatches, align by request order and mark unresolved entries. for i := range requests { results[i] = common.UploadURLResolveResponse{ GUID: requests[i].GUID, @@ -187,14 +88,14 @@ func (d *Signer) ResolveUploadURLs(ctx context.Context, requests []common.Upload Error: "missing result for request", } } - for i := range out.Results { + for i := range out.GetResults() { if i >= len(results) { break } - r := out.Results[i] - results[i].URL = r.URL - results[i].Status = r.Status - results[i].Error = r.Error + r := out.GetResults()[i] + results[i].URL = r.GetUrl() + results[i].Status = int(r.GetStatus()) + results[i].Error = r.GetError() if results[i].Status == 0 { results[i].Status = http.StatusOK } @@ -203,32 +104,17 @@ func (d *Signer) ResolveUploadURLs(ctx context.Context, requests []common.Upload } func (d *Signer) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { - u, err := d.buildURL("data/multipart/init") - if err != nil { - return nil, err - } - - req := struct { - GUID string `json:"guid,omitempty"` - FileName string `json:"file_name,omitempty"` - Bucket string `json:"bucket,omitempty"` - }{ + res, err := d.client.Data().MultipartInit(ctx, syclient.MultipartInitRequest{ GUID: guid, FileName: filename, Bucket: bucket, - } - - var res struct { - GUID string `json:"guid"` - UploadID string `json:"uploadId"` - } - if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &res); err != nil { + }) + if err != nil { return nil, err } if res.UploadID == "" { return nil, fmt.Errorf("server did not 
return uploadId") } - return &common.MultipartUploadInit{ GUID: res.GUID, UploadID: res.UploadID, @@ -236,27 +122,13 @@ func (d *Signer) InitMultipartUpload(ctx context.Context, guid string, filename } func (d *Signer) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { - u, err := d.buildURL("data/multipart/upload") - if err != nil { - return "", err - } - - req := struct { - Key string `json:"key"` - Bucket string `json:"bucket,omitempty"` - UploadID string `json:"uploadId"` - PartNumber int32 `json:"partNumber"` - }{ + res, err := d.client.Data().MultipartUpload(ctx, syclient.MultipartUploadRequest{ Key: key, Bucket: bucket, UploadID: uploadID, PartNumber: partNumber, - } - - var res struct { - PresignedURL string `json:"presigned_url"` - } - if err := d.doJSONRequest(ctx, http.MethodPost, u, req, &res); err != nil { + }) + if err != nil { return "", err } if res.PresignedURL == "" { @@ -266,36 +138,17 @@ func (d *Signer) GetMultipartUploadURL(ctx context.Context, key string, uploadID } func (d *Signer) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { - u, err := d.buildURL("data/multipart/complete") - if err != nil { - return err - } - - reqParts := make([]struct { - PartNumber int32 `json:"PartNumber"` - ETag string `json:"ETag"` - }, len(parts)) + reqParts := make([]syclient.MultipartPart, len(parts)) for i, p := range parts { - reqParts[i] = struct { - PartNumber int32 `json:"PartNumber"` - ETag string `json:"ETag"` - }{ + reqParts[i] = syclient.MultipartPart{ PartNumber: p.PartNumber, ETag: p.ETag, } } - - req := struct { - Key string `json:"key"` - Bucket string `json:"bucket,omitempty"` - UploadID string `json:"uploadId"` - Parts any `json:"parts"` - }{ + return d.client.Data().MultipartComplete(ctx, syclient.MultipartCompleteRequest{ Key: key, Bucket: bucket, UploadID: uploadID, Parts: reqParts, - } - 
- return d.doJSONRequest(ctx, http.MethodPost, u, req, nil) + }) } diff --git a/transfer/signer/local/signer_test.go b/transfer/signer/local/signer_test.go index 1fc9190..9e3a594 100644 --- a/transfer/signer/local/signer_test.go +++ b/transfer/signer/local/signer_test.go @@ -58,7 +58,7 @@ func TestResolveUploadURLsBatch(t *testing.T) { logger := slog.New(slog.NewTextHandler(io.Discard, nil)) req := &testRequestClient{client: srv.Client()} dc := drs.NewLocalDrsClient(req, srv.URL, logger) - signer := New(srv.URL, req, dc) + signer := New(srv.URL, nil, dc) out, err := signer.ResolveUploadURLs(context.Background(), []common.UploadURLResolveRequest{ {GUID: "did-1", Filename: "one.bin", Bucket: "b1"}, diff --git a/upload/multipart.go b/upload/multipart.go index 277c367..4f71a0b 100644 --- a/upload/multipart.go +++ b/upload/multipart.go @@ -19,6 +19,7 @@ import ( "github.com/calypr/data-client/common" "github.com/calypr/data-client/fence" "github.com/calypr/data-client/transfer" + syxfer "github.com/calypr/syfon/xfer" "github.com/vbauerster/mpb/v8" "github.com/vbauerster/mpb/v8/decor" ) @@ -273,30 +274,15 @@ func CompleteMultipartUpload(ctx context.Context, bk transfer.Uploader, key stri // uploadPart now returns the ETag and error directly. // It accepts a Context to allow for cancellation (e.g., if another part fails). 
func uploadPart(ctx context.Context, url string, data io.Reader, partSize int64) (string, error) { - req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, data) + etag, err := syxfer.Upload(ctx, http.DefaultClient, url, data, partSize) if err != nil { return "", err } - - req.ContentLength = partSize - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024)) - return "", fmt.Errorf("upload failed (%d): %s", resp.StatusCode, body) - } - - etag := resp.Header.Get("ETag") if etag == "" { return "", errors.New("no ETag returned") } - return strings.Trim(etag, `"`), nil + return etag, nil } func (s *multipartResumeState) matches(req common.FileUploadRequestObject, info os.FileInfo, chunkSize int64) bool { diff --git a/upload/orchestrator.go b/upload/orchestrator.go index 8da56f4..8112ecc 100644 --- a/upload/orchestrator.go +++ b/upload/orchestrator.go @@ -40,12 +40,13 @@ func RegisterFile(ctx context.Context, bk UploadBackend, dc drs.Client, drsObjec if len(drsObject.AccessMethods) > 0 { for _, am := range drsObject.AccessMethods { if am.Type == "s3" || am.Type == "gs" { - if am.AccessUrl != nil && am.AccessUrl.Url != "" { - parts := strings.Split(am.AccessUrl.Url, "/") - if candidate := parts[len(parts)-1]; candidate != "" { - uploadFilename = candidate - break - } + if am.AccessUrl.Url == "" { + continue + } + parts := strings.Split(am.AccessUrl.Url, "/") + if candidate := parts[len(parts)-1]; candidate != "" { + uploadFilename = candidate + break } } } diff --git a/upload/upload.go b/upload/upload.go index 924c9b1..1591d74 100644 --- a/upload/upload.go +++ b/upload/upload.go @@ -118,16 +118,20 @@ func RegisterAndUploadFile(ctx context.Context, dc drs.Client, bk transfer.Uploa uploadFilename := filepath.Base(filePath) if res != nil && len(res.AccessMethods) > 0 { for _, am := range res.AccessMethods { - if 
(am.Type == "s3" || am.Type == "gs") && am.AccessUrl != nil && am.AccessUrl.Url != "" { - parts := strings.Split(am.AccessUrl.Url, "/") - if len(parts) > 0 { - candidate := parts[len(parts)-1] - if candidate != "" { - uploadFilename = candidate - } + if am.Type != "s3" && am.Type != "gs" { + continue + } + if am.AccessUrl.Url == "" { + continue + } + parts := strings.Split(am.AccessUrl.Url, "/") + if len(parts) > 0 { + candidate := parts[len(parts)-1] + if candidate != "" { + uploadFilename = candidate } - break } + break } } From 8f6810960fda200f3a14c7bf2755ebdaf4b889ec Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Thu, 2 Apr 2026 16:00:03 -0700 Subject: [PATCH 10/13] fix imports --- go.mod | 4 ++-- logs/tee_logger.go | 2 +- upload/singleFile.go | 9 +++++++-- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 9b0bcef..d5b216b 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.26.1 require ( github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1 - github.com/calypr/syfon v0.0.0-20260402213132-2a2c82c5a4cd + github.com/calypr/syfon v0.1.0 github.com/golang-jwt/jwt/v5 v5.3.1 github.com/google/uuid v1.6.0 github.com/hashicorp/go-multierror v1.1.1 @@ -61,7 +61,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect github.com/aws/smithy-go v1.24.2 // indirect - github.com/calypr/syfon/apigen v0.0.0 // indirect + github.com/calypr/syfon/apigen v0.1.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/clipperhouse/stringish v0.1.1 // indirect github.com/clipperhouse/uax29/v2 v2.3.0 // indirect diff --git a/logs/tee_logger.go b/logs/tee_logger.go index 228e06b..9861187 100644 --- a/logs/tee_logger.go +++ b/logs/tee_logger.go @@ -210,7 +210,7 @@ func (t *Gen3Logger) SucceededContext(ctx context.Context, filePath, guid string func (t *Gen3Logger) succeededHelper(ctx context.Context, filePath, guid string, skip int) { msg := 
fmt.Sprintf("Succeeded: %s (GUID: %s)", filePath, guid) - t.logWithSkip(ctx, slog.LevelInfo, skip, msg) + t.logWithSkip(ctx, slog.LevelDebug, skip, msg) if t.succeededPath != "" { t.writeSucceededSync(filePath, guid) } diff --git a/upload/singleFile.go b/upload/singleFile.go index a531aa1..48e7031 100644 --- a/upload/singleFile.go +++ b/upload/singleFile.go @@ -12,7 +12,12 @@ import ( ) func UploadSingle(ctx context.Context, bk transfer.Uploader, logger *logs.Gen3Logger, req common.FileUploadRequestObject, showProgress bool) error { - logger.InfoContext(ctx, "File Upload Request", "request", req) + logger.DebugContext(ctx, "File upload request", + "source_path", req.SourcePath, + "object_key", req.ObjectKey, + "guid", req.GUID, + "bucket", req.Bucket, + ) // Helper to handle * in path if it was passed, though optimally caller handles this. // We will trust the SourcePath in the request object mostly, but for safety we can check existence. @@ -79,7 +84,7 @@ func UploadSingle(ctx context.Context, bk transfer.Uploader, logger *logs.Gen3Lo return err } - logger.InfoContext(ctx, "Successfully uploaded", "file", req.ObjectKey) + logger.DebugContext(ctx, "Successfully uploaded", "file", req.ObjectKey) logger.Succeeded(req.SourcePath, req.GUID) if showProgress { From 5ef59a2028302da0e5372def7cd114814d7a8a24 Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Tue, 7 Apr 2026 15:41:47 -0700 Subject: [PATCH 11/13] move sifon client out of data-client into its own package in syfon repo --- cmd/delete.go | 42 --- cmd/download-multiple.go | 51 ++- cmd/download-single.go | 50 ++- cmd/generate-tsv.go | 17 - cmd/retry-upload.go | 11 +- cmd/upload-multipart.go | 28 +- cmd/upload-multiple.go | 55 +-- cmd/upload-single.go | 12 +- cmd/upload.go | 89 +---- common/types.go | 122 +------ conf/config.go | 15 +- credentials/interface.go | 18 - download/batch.go | 189 ----------- download/downloader.go | 171 ---------- download/file_info.go | 60 ---- download/orchestrator.go | 18 - 
download/progress_writer.go | 74 ----- download/progress_writer_test.go | 46 --- download/transfer.go | 460 ------------------------- download/transfer_test.go | 462 -------------------------- download/types.go | 60 ---- download/url_resolution.go | 48 --- download/utils.go | 77 ----- drs/access_method_conversions.go | 53 --- drs/client.go | 362 -------------------- drs/drs.go | 171 ---------- drs/interface.go | 54 --- drs/object_builder.go | 78 ----- drs/object_builder_test.go | 53 --- drs/prefetch_context.go | 24 -- drs/resolve.go | 62 ---- drs/server_client.go | 23 -- drs/storage_prefix_test.go | 25 -- drs/syfon_adapter.go | 61 ---- drs/types.go | 28 -- drs/upsert.go | 61 ---- fence/client.go | 2 +- g3client/client.go | 52 ++- ga4gh/data-repository-service-schemas | 1 - go.mod | 29 +- go.sum | 41 ++- hash/hash.go | 169 ---------- hash/hash_test.go | 53 --- localclient/client.go | 63 ---- logs/logger_test.go | 14 +- mocks/mock_drs_client.go | 260 --------------- mocks/mock_functions.go | 161 --------- mocks/mock_gen3interface.go | 82 ++--- mocks/mock_request.go | 4 +- request/builder.go | 5 + request/request.go | 2 +- runtime/client.go | 48 --- s3utils/s3_utils.go | 111 ------- tests/download-multiple_test.go | 14 +- tests/utils_test.go | 278 +++++----------- transfer/http.go | 69 ---- transfer/interface.go | 42 --- transfer/service.go | 81 ----- transfer/signer/gen3/signer.go | 192 ----------- transfer/signer/gen3/signer_test.go | 78 ----- transfer/signer/local/signer.go | 154 --------- transfer/signer/local/signer_test.go | 79 ----- transfer/storage/gocloud.go | 89 ----- upload/batch.go | 142 -------- upload/multipart.go | 346 ------------------- upload/multipart_test.go | 329 ------------------ upload/orchestrator.go | 122 ------- upload/progress_reader.go | 81 ----- upload/progress_reader_test.go | 46 --- upload/request.go | 58 ---- upload/retry.go | 170 ---------- upload/singleFile.go | 98 ------ upload/types.go | 27 -- upload/upload.go | 178 ---------- 
upload/utils.go | 185 ----------- upload/utils_test.go | 124 ------- 76 files changed, 339 insertions(+), 6970 deletions(-) delete mode 100644 cmd/delete.go delete mode 100644 cmd/generate-tsv.go delete mode 100644 credentials/interface.go delete mode 100644 download/batch.go delete mode 100644 download/downloader.go delete mode 100644 download/file_info.go delete mode 100644 download/orchestrator.go delete mode 100644 download/progress_writer.go delete mode 100644 download/progress_writer_test.go delete mode 100644 download/transfer.go delete mode 100644 download/transfer_test.go delete mode 100644 download/types.go delete mode 100644 download/url_resolution.go delete mode 100644 download/utils.go delete mode 100644 drs/access_method_conversions.go delete mode 100644 drs/client.go delete mode 100644 drs/drs.go delete mode 100644 drs/interface.go delete mode 100644 drs/object_builder.go delete mode 100644 drs/object_builder_test.go delete mode 100644 drs/prefetch_context.go delete mode 100644 drs/resolve.go delete mode 100644 drs/server_client.go delete mode 100644 drs/storage_prefix_test.go delete mode 100644 drs/syfon_adapter.go delete mode 100644 drs/types.go delete mode 100644 drs/upsert.go delete mode 160000 ga4gh/data-repository-service-schemas delete mode 100644 hash/hash.go delete mode 100644 hash/hash_test.go delete mode 100644 localclient/client.go delete mode 100644 mocks/mock_drs_client.go delete mode 100644 mocks/mock_functions.go delete mode 100644 runtime/client.go delete mode 100644 s3utils/s3_utils.go delete mode 100644 transfer/http.go delete mode 100644 transfer/interface.go delete mode 100644 transfer/service.go delete mode 100644 transfer/signer/gen3/signer.go delete mode 100644 transfer/signer/gen3/signer_test.go delete mode 100644 transfer/signer/local/signer.go delete mode 100644 transfer/signer/local/signer_test.go delete mode 100644 transfer/storage/gocloud.go delete mode 100644 upload/batch.go delete mode 100644 upload/multipart.go delete 
mode 100644 upload/multipart_test.go delete mode 100644 upload/orchestrator.go delete mode 100644 upload/progress_reader.go delete mode 100644 upload/progress_reader_test.go delete mode 100644 upload/request.go delete mode 100644 upload/retry.go delete mode 100644 upload/singleFile.go delete mode 100644 upload/types.go delete mode 100644 upload/upload.go delete mode 100644 upload/utils.go delete mode 100644 upload/utils_test.go diff --git a/cmd/delete.go b/cmd/delete.go deleted file mode 100644 index dc9171e..0000000 --- a/cmd/delete.go +++ /dev/null @@ -1,42 +0,0 @@ -package cmd - -import ( - "context" - - "github.com/calypr/data-client/g3client" - "github.com/calypr/data-client/logs" - "github.com/spf13/cobra" -) - -//Not support yet, place holder only - -func init() { - var guid string - var deleteCmd = &cobra.Command{ // nolint:deadcode,unused,varcheck - Use: "delete", - Short: "Send DELETE HTTP Request for given URI", - Long: `Deletes a given URI from the database. -If no profile is specified, "default" profile is used for authentication.`, - Example: `./data-client delete --uri=v0/submission/bpa/test/entities/example_id - ./data-client delete --profile=user1 --uri=v0/submission/bpa/test/entities/1af1d0ab-efec-4049-98f0-ae0f4bb1bc64`, - Run: func(cmd *cobra.Command, args []string) { - - logger, logCloser := logs.New(profile, logs.WithConsole()) - defer logCloser() - - g3i, err := g3client.NewGen3Interface(profile, logger) - if err != nil { - logger.Fatalf("Fatal NewGen3Interface error: %s\n", err) - } - - msg, err := g3i.FenceClient().DeleteRecord(context.Background(), guid) - if err != nil { - logger.Fatal(err) - } - logger.Println(msg) - }, - } - - deleteCmd.Flags().StringVar(&profile, "guid", "", "Specify the profile to check your access privileges") - RootCmd.AddCommand(deleteCmd) -} diff --git a/cmd/download-multiple.go b/cmd/download-multiple.go index ffdb3aa..7f390dc 100644 --- a/cmd/download-multiple.go +++ b/cmd/download-multiple.go @@ -8,12 +8,13 @@ 
import ( "os" "github.com/calypr/data-client/common" - "github.com/calypr/data-client/download" - "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/conf" "github.com/calypr/data-client/g3client" - "github.com/calypr/data-client/localclient" "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/transfer" + sydrs "github.com/calypr/syfon/client/drs" + sylogs "github.com/calypr/syfon/client/pkg/logs" + syrequest "github.com/calypr/syfon/client/pkg/request" + sydownload "github.com/calypr/syfon/client/xfer/download" "github.com/vbauerster/mpb/v8" "github.com/vbauerster/mpb/v8/decor" @@ -23,12 +24,8 @@ import ( func init() { var manifestPath string var downloadPath string - var filenameFormat string - var rename bool - var noPrompt bool - var protocol string var numParallel int - var skipCompleted bool + var profile string var downloadMultipleCmd = &cobra.Command{ Use: "download-multiple", @@ -36,27 +33,28 @@ func init() { Long: `Get presigned URLs for multiple of files specified in a manifest file and then download all of them.`, Example: `./data-client download-multiple --profile --manifest --download-path `, Run: func(cmd *cobra.Command, args []string) { - // don't initialize transmission logs for non-uploading related commands - logger, logCloser := logs.New(profile, logs.WithConsole(), logs.WithFailedLog(), logs.WithScoreboard(), logs.WithSucceededLog()) defer logCloser() - var dc drs.Client - var bk transfer.Backend + var dc sydrs.Client if backendType == "drs" { - lc, err := localclient.NewLocalInterface(profile, logger) + config := conf.NewConfigure(logger.Logger) + cred, err := config.Load(profile) if err != nil { log.Fatalf("Failed to parse config on profile %s, %v", profile, err) } - dc = lc.DRSClient() - bk = lc.DRSClient() + req := syrequest.NewRequestInterface( + sylogs.NewGen3Logger(logger.Logger, "", ""), + cred, + config, + ) + dc = sydrs.NewLocalDrsClient(req, cred.APIEndpoint, sylogs.NewGen3Logger(logger.Logger, "", 
"")) } else { g3i, err := g3client.NewGen3Interface(profile, logger) if err != nil { log.Fatalf("Failed to parse config on profile %s, %v", profile, err) } dc = g3i.DRSClient() - bk = g3i.DRSClient() } manifestPath, _ = common.GetAbsolutePath(manifestPath) @@ -94,18 +92,18 @@ func init() { logger.Fatalf("Error has occurred during unmarshalling manifest object: %v\n", err) } - err = download.DownloadMultiple( + err = sydownload.DownloadMultiple( context.Background(), dc, - bk, + dc, objects, downloadPath, - filenameFormat, - rename, - noPrompt, - protocol, + "original", + true, + false, + "", numParallel, - skipCompleted, + false, ) if err != nil { logger.Fatal(err.Error()) @@ -118,11 +116,6 @@ func init() { downloadMultipleCmd.Flags().StringVar(&manifestPath, "manifest", "", "The manifest file to read from. A valid manifest can be acquired by using the \"Download Manifest\" button in Data Explorer from a data common's portal") downloadMultipleCmd.MarkFlagRequired("manifest") //nolint:errcheck downloadMultipleCmd.Flags().StringVar(&downloadPath, "download-path", ".", "The directory in which to store the downloaded files") - downloadMultipleCmd.Flags().StringVar(&filenameFormat, "filename-format", "original", "The format of filename to be used, including \"original\", \"guid\" and \"combined\"") - downloadMultipleCmd.Flags().BoolVar(&rename, "rename", false, "Only useful when \"--filename-format=original\", will rename file by appending a counter value to its filename if set to true, otherwise the same filename will be used") - downloadMultipleCmd.Flags().BoolVar(&noPrompt, "no-prompt", false, "If set to true, will not display user prompt message for confirmation") - downloadMultipleCmd.Flags().StringVar(&protocol, "protocol", "", "Specify the preferred protocol with --protocol=s3") downloadMultipleCmd.Flags().IntVar(&numParallel, "numparallel", 1, "Number of downloads to run in parallel") - downloadMultipleCmd.Flags().BoolVar(&skipCompleted, "skip-completed", false, 
"If set to true, will check for filename and size before download and skip any files in \"download-path\" that matches both") RootCmd.AddCommand(downloadMultipleCmd) } diff --git a/cmd/download-single.go b/cmd/download-single.go index 69bf48b..1c9b485 100644 --- a/cmd/download-single.go +++ b/cmd/download-single.go @@ -5,23 +5,19 @@ import ( "log" "github.com/calypr/data-client/common" - "github.com/calypr/data-client/download" - "github.com/calypr/data-client/drs" + "github.com/calypr/data-client/conf" "github.com/calypr/data-client/g3client" - "github.com/calypr/data-client/localclient" "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/transfer" + sydrs "github.com/calypr/syfon/client/drs" + sylogs "github.com/calypr/syfon/client/pkg/logs" + syrequest "github.com/calypr/syfon/client/pkg/request" + sydownload "github.com/calypr/syfon/client/xfer/download" "github.com/spf13/cobra" ) func init() { var guid string var downloadPath string - var protocol string - var filenameFormat string - var rename bool - var noPrompt bool - var skipCompleted bool var profile string var downloadSingleCmd = &cobra.Command{ @@ -30,27 +26,28 @@ func init() { Long: `Gets a presigned URL for a file from a GUID and then downloads the specified file.`, Example: `./data-client download-single --profile= --guid=206dfaa6-bcf1-4bc9-b2d0-77179f0f48fc`, Run: func(cmd *cobra.Command, args []string) { - // don't initialize transmission logs for non-uploading related commands - logger, logCloser := logs.New(profile, logs.WithConsole(), logs.WithFailedLog(), logs.WithSucceededLog(), logs.WithScoreboard()) defer logCloser() - var dc drs.Client - var bk transfer.Backend + var dc sydrs.Client if backendType == "drs" { - lc, err := localclient.NewLocalInterface(profile, logger) + config := conf.NewConfigure(logger.Logger) + cred, err := config.Load(profile) if err != nil { log.Fatalf("Failed to parse config on profile %s, %v", profile, err) } - dc = lc.DRSClient() - bk = 
lc.DRSClient() + req := syrequest.NewRequestInterface( + sylogs.NewGen3Logger(logger.Logger, "", ""), + cred, + config, + ) + dc = sydrs.NewLocalDrsClient(req, cred.APIEndpoint, sylogs.NewGen3Logger(logger.Logger, "", "")) } else { g3I, err := g3client.NewGen3Interface(profile, logger) if err != nil { log.Fatalf("Failed to parse config on profile %s, %v", profile, err) } dc = g3I.DRSClient() - bk = g3I.DRSClient() } objects := []common.ManifestObject{ @@ -58,18 +55,18 @@ func init() { GUID: guid, }, } - err := download.DownloadMultiple( + err := sydownload.DownloadMultiple( context.Background(), dc, - bk, + dc, objects, downloadPath, - filenameFormat, - rename, - noPrompt, - protocol, + "original", + true, + false, + "", 1, - skipCompleted, + false, ) if err != nil { logger.Println(err.Error()) @@ -82,10 +79,5 @@ func init() { downloadSingleCmd.Flags().StringVar(&guid, "guid", "", "Specify the guid for the data you would like to work with") downloadSingleCmd.MarkFlagRequired("guid") //nolint:errcheck downloadSingleCmd.Flags().StringVar(&downloadPath, "download-path", ".", "The directory in which to store the downloaded files") - downloadSingleCmd.Flags().StringVar(&filenameFormat, "filename-format", "original", "The format of filename to be used, including \"original\", \"guid\" and \"combined\"") - downloadSingleCmd.Flags().BoolVar(&rename, "rename", false, "Only useful when \"--filename-format=original\", will rename file by appending a counter value to its filename if set to true, otherwise the same filename will be used") - downloadSingleCmd.Flags().BoolVar(&noPrompt, "no-prompt", false, "If set to true, will not display user prompt message for confirmation") - downloadSingleCmd.Flags().StringVar(&protocol, "protocol", "", "Specify the preferred protocol with --protocol=gs") - downloadSingleCmd.Flags().BoolVar(&skipCompleted, "skip-completed", false, "If set to true, will check for filename and size before download and skip any files in \"download-path\" that 
matches both") RootCmd.AddCommand(downloadSingleCmd) } diff --git a/cmd/generate-tsv.go b/cmd/generate-tsv.go deleted file mode 100644 index 47d92c4..0000000 --- a/cmd/generate-tsv.go +++ /dev/null @@ -1,17 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" -) - -func init() { - var generateTSVCmd = &cobra.Command{ - Use: "generate-tsv", - Short: "Generate a file upload tsv from a template", - Long: `Fills in a Gen3 data file template with information from a directory of files.`, - Deprecated: "please use an older version of data-client", - Run: func(cmd *cobra.Command, args []string) {}, - } - - RootCmd.AddCommand(generateTSVCmd) -} diff --git a/cmd/retry-upload.go b/cmd/retry-upload.go index bdc73c3..672e9ce 100644 --- a/cmd/retry-upload.go +++ b/cmd/retry-upload.go @@ -6,7 +6,9 @@ import ( "github.com/calypr/data-client/common" "github.com/calypr/data-client/g3client" "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/upload" + sylogs "github.com/calypr/syfon/client/pkg/logs" + sytransfer "github.com/calypr/syfon/client/transfer" + syupload "github.com/calypr/syfon/client/xfer/upload" "github.com/spf13/cobra" ) @@ -33,6 +35,10 @@ func init() { Logger.Fatalf("Failed to initialize client: %v", err) } bk := g3.DRSClient() + uploader, ok := bk.(sytransfer.Uploader) + if !ok { + Logger.Fatalf("DRS client does not implement transfer.Uploader") + } logger := g3.Logger() @@ -45,7 +51,8 @@ func init() { logger.Fatalf("Cannot read failed log: %v", err) } - upload.RetryFailedUploads(context.Background(), bk, logger, failedMap) + // The unified DRS client serves as both the logical resolver and the transfer writer across S3, GCS, and Azure.
+ syupload.RetryFailedUploads(context.Background(), uploader, sylogs.NewGen3Logger(Logger.Logger, "", ""), failedMap) sb.PrintSB() }, } diff --git a/cmd/upload-multipart.go b/cmd/upload-multipart.go index e80983f..d65a927 100644 --- a/cmd/upload-multipart.go +++ b/cmd/upload-multipart.go @@ -8,7 +8,8 @@ import ( "github.com/calypr/data-client/common" "github.com/calypr/data-client/g3client" "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/upload" + sytransfer "github.com/calypr/syfon/client/transfer" + syupload "github.com/calypr/syfon/client/xfer/upload" "github.com/spf13/cobra" ) @@ -22,9 +23,9 @@ func init() { var uploadMultipartCmd = &cobra.Command{ Use: "upload-multipart", - Short: "Upload a single file using multipart upload", - Long: `Uploads a large file to object storage using multipart upload. -This method is resilient to network interruptions and supports resume capability.`, + Short: "Upload a single file using managed multipart upload", + Long: `Uploads a file to object storage using managed multipart upload +(init -> presigned part URLs -> complete).`, Example: `./data-client upload-multipart --profile=myprofile --file-path=./large.bam ./data-client upload-multipart --profile=myprofile --file-path=./data.bam --guid=existing-guid`, Run: func(cmd *cobra.Command, args []string) { @@ -44,6 +45,10 @@ This method is resilient to network interruptions and supports resume capability logger.Fatalf("failed to initialize Gen3 interface: %v", err) } bk := g3.DRSClient() + uploader, ok := bk.(sytransfer.Uploader) + if !ok { + logger.Fatal("DRS client does not implement transfer.Uploader") + } absPath, err := common.GetAbsolutePath(filePath) if err != nil { @@ -57,13 +62,20 @@ This method is resilient to network interruptions and supports resume capability FileMetadata: common.FileMetadata{}, } - file, err := os.Open(absPath) + if fileInfo.Bucket == "" { + fileInfo.Bucket = bucketName + } + if fileInfo.Bucket == "" { + fileInfo.Bucket = 
bk.GetBucketName() + } + + // Force multipart path by using direct multipart entrypoint. + file, err := os.Open(fileInfo.SourcePath) if err != nil { - logger.Fatalf("cannot open file %s: %v", absPath, err) + logger.Fatal(err) } defer file.Close() - - err = upload.MultipartUpload(context.Background(), bk, fileInfo, file, true) + err = syupload.MultipartUpload(context.Background(), uploader, fileInfo, file, true) if err != nil { logger.Fatal(err) } diff --git a/cmd/upload-multiple.go b/cmd/upload-multiple.go index 24aff9b..93e74c6 100644 --- a/cmd/upload-multiple.go +++ b/cmd/upload-multiple.go @@ -12,7 +12,9 @@ import ( "github.com/calypr/data-client/common" "github.com/calypr/data-client/g3client" "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/upload" + sylogs "github.com/calypr/syfon/client/pkg/logs" + sytransfer "github.com/calypr/syfon/client/transfer" + syupload "github.com/calypr/syfon/client/xfer/upload" "github.com/spf13/cobra" ) @@ -46,6 +48,10 @@ Options to run multipart uploads for large files and parallel batch uploading ar logger.Fatalf("Failed to parse config on profile %s: %v", profile, err) } bk := g3i.DRSClient() + uploader, ok := bk.(sytransfer.Uploader) + if !ok { + logger.Fatalf("DRS client does not implement transfer.Uploader") + } // Basic config validation profileConfig := g3i.Credentials().Current() @@ -80,7 +86,7 @@ Options to run multipart uploads for large files and parallel batch uploading ar for _, obj := range objects { localFilePath := filepath.Join(absUploadPath, obj.Title) - fur, err := upload.ProcessFilename(logger, absUploadPath, localFilePath, obj.GUID, includeSubDirName, false) + fur, err := syupload.ProcessFilename(sylogs.NewGen3Logger(logger.Logger, "", ""), absUploadPath, localFilePath, obj.GUID, includeSubDirName, false) if err != nil { logger.Printf("Skipping %s: %v\n", localFilePath, err) logger.Failed(localFilePath, filepath.Base(localFilePath), common.FileMetadata{}, obj.GUID, 0, false) @@ -100,50 
+106,23 @@ Options to run multipart uploads for large files and parallel batch uploading ar return } - // Classify single vs multipart - single, multi := upload.SeparateSingleAndMultipartUploads(bk, requests) - - // Upload single-part files if batch { - workers, respCh, errCh, batchFURObjects := upload.InitBatchUploadChannels(numParallel, len(single)) - for i, furObject := range single { - // FileInfo processing and path normalization are already done, so we use the object directly - if len(batchFURObjects) < workers { - batchFURObjects = append(batchFURObjects, furObject) - } else { - upload.BatchUpload(ctx, bk, logger, batchFURObjects, workers, respCh, errCh, bucketName) - batchFURObjects = []common.FileUploadRequestObject{furObject} - } - if i == len(single)-1 && len(batchFURObjects) > 0 { - upload.BatchUpload(ctx, bk, logger, batchFURObjects, workers, respCh, errCh, bucketName) - } - } + workers, respCh, errCh, _ := syupload.InitBatchUploadChannels(numParallel, len(requests)) + syupload.BatchUpload(ctx, uploader, sylogs.NewGen3Logger(logger.Logger, "", ""), requests, workers, respCh, errCh, bucketName) } else { - for _, req := range single { - upload.UploadSingle(ctx, bk, logger, req, true) - } - } - - // Upload multipart files - for _, req := range multi { - - file, err := os.Open(req.SourcePath) - if err != nil { - g3i.Logger().Printf("Error opening file %s : %v", req.SourcePath, err) - continue - } - - err = upload.MultipartUpload(ctx, bk, req, file, true) - if err != nil { - logger.Println("Multipart upload failed:", err) + for _, req := range requests { + err = syupload.Upload(ctx, uploader, req, true) + if err != nil { + logger.Println("Upload failed:", err) + } } } - // Retry logic (only if nothing succeeded initially) + // Retry logic if len(logger.GetSucceededLogMap()) == 0 { failed := logger.GetFailedLogMap() if len(failed) > 0 { - upload.RetryFailedUploads(ctx, bk, logger, failed) + syupload.RetryFailedUploads(ctx, uploader, 
sylogs.NewGen3Logger(logger.Logger, "", ""), failed) } } diff --git a/cmd/upload-single.go b/cmd/upload-single.go index d1cb0e1..6d9600d 100644 --- a/cmd/upload-single.go +++ b/cmd/upload-single.go @@ -9,7 +9,8 @@ import ( "github.com/calypr/data-client/common" "github.com/calypr/data-client/g3client" "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/upload" + sytransfer "github.com/calypr/syfon/client/transfer" + syupload "github.com/calypr/syfon/client/xfer/upload" "github.com/spf13/cobra" ) @@ -32,14 +33,19 @@ func init() { log.Fatalf("Failed to parse config on profile %s: %v", profile, err) } bk := g3i.DRSClient() + uploader, ok := bk.(sytransfer.Uploader) + if !ok { + log.Fatalln("DRS client does not implement transfer.Uploader") + } - req := common.FileUploadRequestObject{ + fur := common.FileUploadRequestObject{ SourcePath: filePath, ObjectKey: filepath.Base(filePath), Bucket: bucketName, GUID: guid, } - err = upload.UploadSingle(context.Background(), bk, logger, req, true) + // The unified DRS client serves as its own transport writer across S3, GCS, and Azure.
+ err = syupload.Upload(context.Background(), uploader, fur, true) if err != nil { log.Fatalln(err.Error()) } diff --git a/cmd/upload.go b/cmd/upload.go index 6d123f8..a86583d 100644 --- a/cmd/upload.go +++ b/cmd/upload.go @@ -9,7 +9,9 @@ import ( "github.com/calypr/data-client/common" "github.com/calypr/data-client/g3client" "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/upload" + sylogs "github.com/calypr/syfon/client/pkg/logs" + sytransfer "github.com/calypr/syfon/client/transfer" + syupload "github.com/calypr/syfon/client/xfer/upload" "github.com/spf13/cobra" ) @@ -41,7 +43,6 @@ func init() { if err != nil { log.Fatalf("Failed to parse config on profile %s, %v", profile, err) } - bk := g3i.DRSClient() logger := g3i.Logger() if hasMetadata { @@ -64,20 +65,16 @@ func init() { logger.Println("\nThe following file(s) has been found in path \"" + uploadPath + "\" and will be uploaded:") for _, filePath := range filePaths { - // Use ProcessFilename to create the unified object (GUID is empty here, as this command requests a new GUID) - // ProcessFilename signature: (uploadPath, filePath, objectId, includeSubDirName, includeMetadata) - furObject, err := upload.ProcessFilename(g3i.Logger(), uploadPath, filePath, "", includeSubDirName, hasMetadata) + syLogger := sylogs.NewGen3Logger(g3i.Logger().Logger, "", "") + furObject, err := syupload.ProcessFilename(syLogger, uploadPath, filePath, "", includeSubDirName, hasMetadata) furObject.Bucket = bucketName - // Handle case where ProcessFilename fails (e.g., metadata parsing error) if err != nil { - // Use the data available for logging the failure g3i.Logger().Failed(filePath, filepath.Base(filePath), common.FileMetadata{}, "", 0, false) logger.Println("Error processing file path or metadata: " + err.Error()) continue } - // Optional: Display file path before proceeding file, _ := os.Open(filePath) if fi, _ := file.Stat(); !fi.IsDir() { logger.Println("\t" + filePath) @@ -86,83 +83,29 @@ func init() { 
uploadRequestObjects = append(uploadRequestObjects, furObject) } - // fmt.Fprintln(os.Stderr) logger.Println() - if len(uploadRequestObjects) == 0 { - logger.Println("No valid file upload requests were created.") - return + // The unified DRS client serves as both the logical resolver and the transfer writer across S3, GCS, and Azure. + drsClient := g3i.DRSClient() + uploader, ok := drsClient.(sytransfer.Uploader) + if !ok { + logger.Fatal("DRS client does not implement transfer.Uploader") } - singlePartObjects, multipartObjects := upload.SeparateSingleAndMultipartUploads(bk, uploadRequestObjects) if batch { - workers, respCh, errCh, batchFURObjects := upload.InitBatchUploadChannels(numParallel, len(singlePartObjects)) - - for _, furObject := range singlePartObjects { - if len(batchFURObjects) < workers { - batchFURObjects = append(batchFURObjects, furObject) - } else { - upload.BatchUpload(ctx, bk, Logger, batchFURObjects, workers, respCh, errCh, bucketName) - batchFURObjects = []common.FileUploadRequestObject{furObject} - } - } - if len(batchFURObjects) > 0 { - upload.BatchUpload(ctx, bk, Logger, batchFURObjects, workers, respCh, errCh, bucketName) - } - - if len(errCh) > 0 { - close(errCh) - for err := range errCh { - if err != nil { - logger.Printf("Error occurred during uploading: %s\n", err.Error()) - } - } - } + workers, respCh, errCh, _ := syupload.InitBatchUploadChannels(numParallel, len(uploadRequestObjects)) + syupload.BatchUpload(ctx, uploader, sylogs.NewGen3Logger(Logger.Logger, "", ""), uploadRequestObjects, workers, respCh, errCh, bucketName) } else { - for _, furObject := range singlePartObjects { - file, err := os.Open(furObject.SourcePath) + for _, furObject := range uploadRequestObjects { + err := syupload.Upload(ctx, uploader, furObject, true) if err != nil { - logger.Failed(furObject.SourcePath, furObject.ObjectKey, furObject.FileMetadata, furObject.GUID, 0, false) - logger.Println("File open error: " + err.Error()) - continue +
logger.Error("Upload failed", "path", furObject.SourcePath, "error", err) } - defer file.Close() - fi, err := file.Stat() - if err != nil { - logger.Failed(furObject.SourcePath, furObject.ObjectKey, furObject.FileMetadata, furObject.GUID, 0, false) - logger.Println("File stat error for file" + fi.Name() + ", file may be missing or unreadable because of permissions.\n") - continue - } - upload.UploadSingle(ctx, bk, Logger, furObject, true) } } - if len(multipartObjects) > 0 { - cred := g3i.Credentials().Current() - if cred.UseShepherd == "true" || - cred.UseShepherd == "" && common.DefaultUseShepherd == true { - logger.Printf("error: Shepherd currently does not support multipart uploads. For the moment, please disable Shepherd with\n $ data-client configure --profile=%v --use-shepherd=false\nand try again", cred.Profile) - return - } - g3i.Logger().Println("Multipart uploading...") - for _, furObject := range multipartObjects { - file, err := os.Open(furObject.SourcePath) - if err != nil { - logger.Failed(furObject.SourcePath, furObject.ObjectKey, furObject.FileMetadata, furObject.GUID, 0, false) - logger.Println("File open error: " + err.Error()) - continue - } - err = upload.MultipartUpload(ctx, bk, furObject, file, true) - if err != nil { - g3i.Logger().Println(err.Error()) - } else { - g3i.Logger().Scoreboard().IncrementSB(0) - } - } - } if len(g3i.Logger().GetSucceededLogMap()) == 0 { - upload.RetryFailedUploads(ctx, bk, Logger, g3i.Logger().GetFailedLogMap()) + syupload.RetryFailedUploads(ctx, uploader, sylogs.NewGen3Logger(Logger.Logger, "", ""), g3i.Logger().GetFailedLogMap()) } g3i.Logger().Scoreboard().PrintSB() }, diff --git a/common/types.go b/common/types.go index c52a744..64b6e6e 100644 --- a/common/types.go +++ b/common/types.go @@ -1,109 +1,17 @@ package common -import ( - "io" - "net/http" -) - -type AccessTokenStruct struct { - AccessToken string `json:"access_token"` -} - -// FileUploadRequestObject defines a object for file upload -type 
FileUploadRequestObject struct { - SourcePath string - ObjectKey string - FileMetadata FileMetadata - GUID string - PresignedURL string - Bucket string `json:"bucket,omitempty"` -} - -// FileDownloadResponseObject defines a object for file download -type FileDownloadResponseObject struct { - DownloadPath string - Filename string - GUID string - PresignedURL string - // Range is kept for backward compatibility with resume-download semantics (start offset). - Range int64 - // RangeStart/RangeEnd provide explicit byte range requests (inclusive). - RangeStart *int64 - RangeEnd *int64 - Overwrite bool - Skip bool - Response *http.Response - Writer io.Writer -} - -// FileMetadata defines the metadata accepted by the new object management API, Shepherd -type FileMetadata struct { - Authz []string `json:"authz"` - Aliases []string `json:"aliases"` - // Metadata is an encoded JSON string of any arbitrary metadata the user wishes to upload. - Metadata map[string]any `json:"metadata"` -} - -// RetryObject defines a object for retry upload -type RetryObject struct { - SourcePath string - ObjectKey string - FileMetadata FileMetadata - GUID string - RetryCount int - Multipart bool - Bucket string -} - -// MultipartUploadInit captures the response needed to upload multipart parts. -type MultipartUploadInit struct { - GUID string - UploadID string -} - -// MultipartUploadPart represents an uploaded part that must be completed. 
-type MultipartUploadPart struct { - PartNumber int32 - ETag string -} - -type ManifestObject struct { - GUID string `json:"object_id"` - SubjectID string `json:"subject_id"` - Title string `json:"title"` - Size int64 `json:"size"` -} - -// ShepherdInitRequestObject represents the payload sent to Shepherd -type ShepherdInitRequestObject struct { - Filename string `json:"file_name"` - Authz ShepherdAuthz `json:"authz"` - Aliases []string `json:"aliases"` - Metadata map[string]any `json:"metadata"` -} - -type ShepherdAuthz struct { - Version string `json:"version"` - ResourcePaths []string `json:"resource_paths"` -} - -type PresignedURLResponse struct { - GUID string `json:"guid"` - URL string `json:"upload_url"` -} - -type UploadURLResolveRequest struct { - GUID string - Filename string - Metadata FileMetadata - Bucket string -} - -type UploadURLResolveResponse struct { - GUID string - Filename string - Bucket string - URL string - Status int - Error string -} +import sycommon "github.com/calypr/syfon/client/pkg/common" + +type AccessTokenStruct = sycommon.AccessTokenStruct +type FileUploadRequestObject = sycommon.FileUploadRequestObject +type FileDownloadResponseObject = sycommon.FileDownloadResponseObject +type FileMetadata = sycommon.FileMetadata +type RetryObject = sycommon.RetryObject +type MultipartUploadInit = sycommon.MultipartUploadInit +type MultipartUploadPart = sycommon.MultipartUploadPart +type ManifestObject = sycommon.ManifestObject +type ShepherdInitRequestObject = sycommon.ShepherdInitRequestObject +type ShepherdAuthz = sycommon.ShepherdAuthz +type PresignedURLResponse = sycommon.PresignedURLResponse +type UploadURLResolveRequest = sycommon.UploadURLResolveRequest +type UploadURLResolveResponse = sycommon.UploadURLResolveResponse diff --git a/conf/config.go b/conf/config.go index 9e59eea..ba5f700 100644 --- a/conf/config.go +++ b/conf/config.go @@ -1,6 +1,6 @@ package conf -//go:generate mockgen -destination=../mocks/mock_configure.go -package=mocks 
github.com/calypr/data-client/conf ManagerInterface +//go:generate go run go.uber.org/mock/mockgen@v0.6.0 -destination=../mocks/mock_configure.go -package=mocks github.com/calypr/data-client/conf ManagerInterface import ( "encoding/json" @@ -12,22 +12,13 @@ import ( "strings" "github.com/calypr/data-client/common" + syconf "github.com/calypr/syfon/client/conf" "gopkg.in/ini.v1" ) var ErrProfileNotFound = errors.New("profile not found in config file") -type Credential struct { - Profile string - KeyID string - APIKey string - AccessToken string - APIEndpoint string - UseShepherd string - MinShepherdVersion string - Bucket string - ProjectID string -} +type Credential = syconf.Credential type Manager struct { Logger *slog.Logger diff --git a/credentials/interface.go b/credentials/interface.go deleted file mode 100644 index e827c0a..0000000 --- a/credentials/interface.go +++ /dev/null @@ -1,18 +0,0 @@ -package credentials - -import ( - "context" - - "github.com/calypr/data-client/conf" -) - -// Reader exposes current in-memory credential state. -type Reader interface { - Current() *conf.Credential -} - -// Manager exposes read and export operations for credentials. 
-type Manager interface { - Reader - Export(ctx context.Context, cred *conf.Credential) error -} diff --git a/download/batch.go b/download/batch.go deleted file mode 100644 index bdd6605..0000000 --- a/download/batch.go +++ /dev/null @@ -1,189 +0,0 @@ -package download - -import ( - "context" - "fmt" - "io" - "os" - "path/filepath" - "sync" - "sync/atomic" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/transfer" - "github.com/hashicorp/go-multierror" - "github.com/vbauerster/mpb/v8" - "github.com/vbauerster/mpb/v8/decor" - "golang.org/x/sync/errgroup" -) - -// downloadFiles performs bounded parallel downloads and collects ALL errors -func downloadFiles( - ctx context.Context, - bk transfer.Downloader, - files []common.FileDownloadResponseObject, - numParallel int, - protocol string, -) (int, error) { - if len(files) == 0 { - return 0, nil - } - - logger := bk.Logger() - - protocolText := "" - if protocol != "" { - protocolText = "?protocol=" + protocol - } - - // Scoreboard: maxRetries = 0 for now (no retry logic yet) - sb := logs.NewSB(0, logger.Logger) - - progress := common.GetProgress(ctx) - useProgressBars := (progress == nil) - - var p *mpb.Progress - if useProgressBars { - p = mpb.New(mpb.WithOutput(os.Stdout)) - } - - var eg errgroup.Group - eg.SetLimit(numParallel) - - var success atomic.Int64 - var mu sync.Mutex - var allErrors []*multierror.Error - - for i := range files { - fdr := &files[i] // capture loop variable - - eg.Go(func() error { - var err error - - defer func() { - if err != nil { - // Final failure bucket - sb.IncrementSB(len(sb.Counts) - 1) - - mu.Lock() - allErrors = append(allErrors, multierror.Append(nil, err)) - mu.Unlock() - } else { - success.Add(1) - sb.IncrementSB(0) // success, no retries - } - }() - - // Get presigned URL - if err = GetDownloadResponse(ctx, bk, fdr, protocolText); err != nil { - err = fmt.Errorf("get URL for %s (GUID: %s): %w", fdr.Filename, 
fdr.GUID, err) - return err - } - - // Prepare directories - fullPath := filepath.Join(fdr.DownloadPath, fdr.Filename) - if dir := filepath.Dir(fullPath); dir != "." { - if err = os.MkdirAll(dir, 0766); err != nil { - _ = fdr.Response.Body.Close() - err = fmt.Errorf("mkdir for %s: %w", fullPath, err) - return err - } - } - - flags := os.O_CREATE | os.O_WRONLY - if fdr.Range > 0 { - flags |= os.O_APPEND - } else if fdr.Overwrite { - flags |= os.O_TRUNC - } - - file, err := os.OpenFile(fullPath, flags, 0666) - if err != nil { - _ = fdr.Response.Body.Close() - err = fmt.Errorf("open local file %s: %w", fullPath, err) - return err - } - - // Progress bar for this file - total := fdr.Response.ContentLength + fdr.Range - var writer io.Writer = file - var bar *mpb.Bar - var tracker *progressWriter - - if useProgressBars { - bar = p.AddBar(total, - mpb.PrependDecorators( - decor.Name(truncateFilename(fdr.Filename, 40)+" "), - decor.CountersKibiByte("% .1f / % .1f"), - ), - mpb.AppendDecorators( - decor.Percentage(), - decor.AverageSpeed(decor.SizeB1024(0), "% .1f"), - ), - ) - - if fdr.Range > 0 { - bar.SetCurrent(fdr.Range) - } - - writer = bar.ProxyWriter(file) - } else if progress != nil { - tracker = newProgressWriter(file, progress, fdr.GUID, total) - writer = tracker - } - - _, copyErr := io.Copy(writer, fdr.Response.Body) - _ = fdr.Response.Body.Close() - _ = file.Close() - - if tracker != nil { - if finalizeErr := tracker.Finalize(); finalizeErr != nil && copyErr == nil { - copyErr = finalizeErr - } - } - - if copyErr != nil { - if bar != nil { - bar.Abort(true) - } - err = fmt.Errorf("download failed for %s: %w", fdr.Filename, copyErr) - return err - } - - return nil - }) - } - - // Wait for all downloads - _ = eg.Wait() - if p != nil { - p.Wait() - } - - // Combine errors - var combinedError error - mu.Lock() - if len(allErrors) > 0 { - multiErr := multierror.Append(nil, nil) - for _, e := range allErrors { - multiErr = multierror.Append(multiErr, e.Errors...) 
- } - combinedError = multiErr.ErrorOrNil() - } - mu.Unlock() - - downloaded := int(success.Load()) - - // Print scoreboard summary - sb.PrintSB() - - if combinedError != nil { - logger.Info(fmt.Sprintf("%d files downloaded, but %d failed:", downloaded, len(allErrors))) - logger.Info(combinedError.Error()) - } else { - logger.Info(fmt.Sprintf("%d files downloaded successfully.", downloaded)) - } - - return downloaded, combinedError -} diff --git a/download/downloader.go b/download/downloader.go deleted file mode 100644 index d7fcbef..0000000 --- a/download/downloader.go +++ /dev/null @@ -1,171 +0,0 @@ -package download - -import ( - "context" - "fmt" - "log/slog" - "os" - "strings" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/transfer" - "github.com/vbauerster/mpb/v8" - "github.com/vbauerster/mpb/v8/decor" -) - -// DownloadMultiple is the public entry point called from g3cmd -func DownloadMultiple( - ctx context.Context, - dc drs.Client, - bk transfer.Downloader, - objects []common.ManifestObject, - downloadPath string, - filenameFormat string, - rename bool, - noPrompt bool, - protocol string, - numParallel int, - skipCompleted bool, -) error { - logger := bk.Logger() - - // === Input validation === - if numParallel < 1 { - return fmt.Errorf("numparallel must be a positive integer") - } - - var err error - downloadPath, err = common.ParseRootPath(downloadPath) - if err != nil { - return fmt.Errorf("invalid download path: %w", err) - } - if !strings.HasSuffix(downloadPath, "/") { - downloadPath += "/" - } - - filenameFormat = strings.ToLower(strings.TrimSpace(filenameFormat)) - if filenameFormat != "original" && filenameFormat != "guid" && filenameFormat != "combined" { - return fmt.Errorf("filename-format must be one of: original, guid, combined") - } - if (filenameFormat == "guid" || filenameFormat == "combined") && rename { - logger.WarnContext(ctx, "NOTICE: rename flag is ignored in 
guid/combined mode") - rename = false - } - - // === Warnings and user confirmation === - if err := handleWarningsAndConfirmation(ctx, logger.Logger, downloadPath, filenameFormat, rename, noPrompt); err != nil { - return err // aborted by user - } - - // === Create download directory === - if err := os.MkdirAll(downloadPath, 0766); err != nil { - return fmt.Errorf("cannot create directory %s: %w", downloadPath, err) - } - - // === Prepare files (metadata + local validation) === - toDownload, skipped, renamed, err := prepareFiles(ctx, dc, bk, objects, downloadPath, filenameFormat, rename, skipCompleted, protocol) - if err != nil { - return err - } - - logger.InfoContext(ctx, "Summary", - "Total objects", len(objects), - "To download", len(toDownload), - "Skipped", len(skipped)) - - // === Download phase === - downloaded, downloadErr := downloadFiles(ctx, bk, toDownload, numParallel, protocol) - - // === Final summary === - logger.InfoContext(ctx, fmt.Sprintf("%d files downloaded successfully.", downloaded)) - printRenamed(ctx, logger.Logger, renamed) - printSkipped(ctx, logger.Logger, skipped) - - if downloadErr != nil { - logger.WarnContext(ctx, "Some downloads failed. 
See errors above.") - } - - return nil // we log failures but don't fail the whole command unless critical -} - -// handleWarningsAndConfirmation prints warnings and asks for confirmation if needed -func handleWarningsAndConfirmation(ctx context.Context, logger *slog.Logger, downloadPath, filenameFormat string, rename, noPrompt bool) error { - if filenameFormat == "guid" || filenameFormat == "combined" { - logger.WarnContext(ctx, fmt.Sprintf("WARNING: in %q mode, duplicate files in %q will be overwritten", filenameFormat, downloadPath)) - } else if !rename { - logger.WarnContext(ctx, fmt.Sprintf("WARNING: rename=false in original mode – duplicates in %q will be overwritten", downloadPath)) - } else { - logger.InfoContext(ctx, fmt.Sprintf("NOTICE: rename=true in original mode – duplicates in %q will be renamed with a counter", downloadPath)) - } - - if noPrompt { - return nil - } - if !AskForConfirmation(logger, "Proceed? (y/N)") { - return fmt.Errorf("aborted by user") - } - return nil -} - -// prepareFiles gathers metadata, checks local files, collects skips/renames -func prepareFiles( - ctx context.Context, - dc drs.Client, - bk transfer.Downloader, - objects []common.ManifestObject, - downloadPath, filenameFormat string, - rename, skipCompleted bool, - protocol string, -) ([]common.FileDownloadResponseObject, []RenamedOrSkippedFileInfo, []RenamedOrSkippedFileInfo, error) { - logger := bk.Logger() - renamed := make([]RenamedOrSkippedFileInfo, 0) - skipped := make([]RenamedOrSkippedFileInfo, 0) - toDownload := make([]common.FileDownloadResponseObject, 0, len(objects)) - - p := mpb.New(mpb.WithOutput(os.Stdout)) - bar := p.AddBar(int64(len(objects)), - mpb.PrependDecorators(decor.Name("Preparing "), decor.CountersNoUnit("%d / %d")), - mpb.AppendDecorators(decor.Percentage()), - ) - - for _, obj := range objects { - if obj.GUID == "" { - logger.WarnContext(ctx, "Empty GUID, skipping entry") - bar.Increment() - continue - } - - info := &IndexdResponse{Name: 
obj.Title, Size: obj.Size} - var err error - if info.Name == "" || info.Size == 0 { - // Very strict object id checking - info, err = GetFileInfo(ctx, dc, logger, obj.GUID, protocol, downloadPath, filenameFormat, rename, &renamed) - if err != nil { - return nil, nil, nil, err - } - } - - fdr := common.FileDownloadResponseObject{ - DownloadPath: downloadPath, - Filename: info.Name, - GUID: obj.GUID, - } - - if !rename { - validateLocalFileStat(logger.Logger, &fdr, int64(info.Size), skipCompleted) - } - - if fdr.Skip { - logger.InfoContext(ctx, fmt.Sprintf("Skipping %q (GUID: %s) – complete local copy exists", fdr.Filename, fdr.GUID)) - skipped = append(skipped, RenamedOrSkippedFileInfo{GUID: fdr.GUID, OldFilename: fdr.Filename}) - } else { - toDownload = append(toDownload, fdr) - } - - bar.Increment() - } - p.Wait() - logger.InfoContext(ctx, "Preparation complete") - return toDownload, skipped, renamed, nil -} diff --git a/download/file_info.go b/download/file_info.go deleted file mode 100644 index 1b15afe..0000000 --- a/download/file_info.go +++ /dev/null @@ -1,60 +0,0 @@ -package download - -import ( - "context" - - "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/logs" -) - -func GetFileInfo( - ctx context.Context, - dc drs.Client, - logger *logs.Gen3Logger, - guid, protocol, downloadPath, filenameFormat string, - rename bool, - renamedFiles *[]RenamedOrSkippedFileInfo, -) (*IndexdResponse, error) { - drsObj, err := drs.ResolveObject(ctx, dc, guid) - if err != nil { - logger.Warn("Failed to get file details", "guid", guid, "error", err) - // Fallback: use GUID as filename if failed? - // Original code: "All meta-data lookups failed... 
Using GUID as default" - *renamedFiles = append(*renamedFiles, RenamedOrSkippedFileInfo{GUID: guid, OldFilename: guid, NewFilename: guid}) - return &IndexdResponse{Name: guid, Size: 0}, nil - } - - name := "" - name = drsObj.Name - if name == "" { - // If name is empty (some DRS servers might not return it?), use GUID - name = guid - } - - finalName := applyFilenameFormat(name, guid, downloadPath, filenameFormat, rename, renamedFiles) - return &IndexdResponse{Name: finalName, Size: drsObj.Size}, nil -} - -func applyFilenameFormat(baseName, guid, downloadPath, format string, rename bool, renamedFiles *[]RenamedOrSkippedFileInfo) string { - switch format { - case "guid": - return guid - case "combined": - return guid + "_" + baseName - case "original": - if !rename { - return baseName - } - newName := processOriginalFilename(downloadPath, baseName) - if newName != baseName { - *renamedFiles = append(*renamedFiles, RenamedOrSkippedFileInfo{ - GUID: guid, - OldFilename: baseName, - NewFilename: newName, - }) - } - return newName - default: - return baseName - } -} diff --git a/download/orchestrator.go b/download/orchestrator.go deleted file mode 100644 index 47634e7..0000000 --- a/download/orchestrator.go +++ /dev/null @@ -1,18 +0,0 @@ -package download - -import ( - "context" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/transfer" -) - -// DownloadFile is a high-level orchestrator that downloads a file using the provided backend. 
-func DownloadFile(ctx context.Context, dc drs.Client, bk transfer.Downloader, guid, destPath string) error { - opts := DownloadOptions{ - MultipartThreshold: int64(5 * common.GB), - } - // Note: We could expose more options here if needed - return DownloadToPathWithOptions(ctx, dc, bk, bk.Logger().Logger, guid, destPath, "", opts) -} diff --git a/download/progress_writer.go b/download/progress_writer.go deleted file mode 100644 index 3917234..0000000 --- a/download/progress_writer.go +++ /dev/null @@ -1,74 +0,0 @@ -package download - -import ( - "fmt" - "io" - - "github.com/calypr/data-client/common" -) - -type progressWriter struct { - writer io.Writer - onProgress common.ProgressCallback - hash string - total int64 - bytesSoFar int64 - bytesSinceReport int64 -} - -func newProgressWriter(writer io.Writer, onProgress common.ProgressCallback, hash string, total int64) *progressWriter { - return &progressWriter{ - writer: writer, - onProgress: onProgress, - hash: hash, - total: total, - } -} - -func (pw *progressWriter) Write(p []byte) (int, error) { - n, err := pw.writer.Write(p) - if n > 0 && pw.onProgress != nil { - delta := int64(n) - pw.bytesSoFar += delta - pw.bytesSinceReport += delta - - if pw.bytesSinceReport >= common.OnProgressThreshold { - if progressErr := pw.onProgress(common.ProgressEvent{ - Event: "progress", - Oid: pw.hash, - BytesSoFar: pw.bytesSoFar, - BytesSinceLast: pw.bytesSinceReport, - }); progressErr != nil { - return n, progressErr - } - pw.bytesSinceReport = 0 - } - } - return n, err -} - -func (pw *progressWriter) Finalize() error { - if pw.onProgress != nil && pw.bytesSinceReport > 0 { - _ = pw.onProgress(common.ProgressEvent{ - Event: "progress", - Oid: pw.hash, - BytesSoFar: pw.bytesSoFar, - BytesSinceLast: pw.bytesSinceReport, - }) - pw.bytesSinceReport = 0 - } - if pw.total > 0 && pw.bytesSoFar < pw.total { - delta := pw.total - pw.bytesSoFar - pw.bytesSoFar = pw.total - if pw.onProgress != nil { - _ = 
pw.onProgress(common.ProgressEvent{ - Event: "progress", - Oid: pw.hash, - BytesSoFar: pw.bytesSoFar, - BytesSinceLast: delta, - }) - } - return fmt.Errorf("download incomplete: %d/%d bytes", pw.bytesSoFar-delta, pw.total) - } - return nil -} diff --git a/download/progress_writer_test.go b/download/progress_writer_test.go deleted file mode 100644 index b11af3d..0000000 --- a/download/progress_writer_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package download - -import ( - "bytes" - "io" - "testing" - - "github.com/calypr/data-client/common" -) - -func TestProgressWriterFinalizes(t *testing.T) { - payload := bytes.Repeat([]byte("b"), 20) - var events []common.ProgressEvent - - writer := newProgressWriter(io.Discard, func(event common.ProgressEvent) error { - events = append(events, event) - return nil - }, "oid-456", int64(len(payload))) - - if _, err := writer.Write(payload); err != nil { - t.Fatalf("write failed: %v", err) - } - if err := writer.Finalize(); err != nil { - t.Fatalf("finalize failed: %v", err) - } - - if len(events) == 0 { - t.Fatal("expected progress events, got none") - } - - var total int64 - for _, event := range events { - if event.Event != "progress" { - t.Fatalf("unexpected event type: %s", event.Event) - } - total += event.BytesSinceLast - } - - last := events[len(events)-1] - if last.BytesSoFar != int64(len(payload)) { - t.Fatalf("expected final bytesSoFar %d, got %d", len(payload), last.BytesSoFar) - } - if total != int64(len(payload)) { - t.Fatalf("expected bytesSinceLast sum %d, got %d", len(payload), total) - } -} diff --git a/download/transfer.go b/download/transfer.go deleted file mode 100644 index 8c15bd8..0000000 --- a/download/transfer.go +++ /dev/null @@ -1,460 +0,0 @@ -package download - -import ( - "context" - "errors" - "fmt" - "io" - "log/slog" - "net/http" - "os" - "path/filepath" - "strconv" - "strings" - "sync/atomic" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/drs" - 
"github.com/calypr/data-client/transfer" - "golang.org/x/sync/errgroup" -) - -type DownloadOptions struct { - MultipartThreshold int64 - ChunkSize int64 - Concurrency int -} - -func defaultDownloadOptions() DownloadOptions { - return DownloadOptions{ - MultipartThreshold: common.GB, - ChunkSize: 64 * common.MB, - Concurrency: 8, - } -} - -// DownloadSingleWithProgress downloads a single object while emitting progress events. -func DownloadSingleWithProgress( - ctx context.Context, - dc drs.Client, - bk transfer.Downloader, - guid string, - downloadPath string, - protocol string, -) error { - progress := common.GetProgress(ctx) - var err error - downloadPath, err = common.ParseRootPath(downloadPath) - if err != nil { - return fmt.Errorf("invalid download path: %w", err) - } - if !strings.HasSuffix(downloadPath, "/") { - downloadPath += "/" - } - - renamed := make([]RenamedOrSkippedFileInfo, 0) - info, err := GetFileInfo(ctx, dc, bk.Logger(), guid, protocol, downloadPath, "original", false, &renamed) - if err != nil { - return err - } - - fdr := common.FileDownloadResponseObject{ - DownloadPath: downloadPath, - Filename: info.Name, - GUID: guid, - } - - protocolText := "" - if protocol != "" { - protocolText = "?protocol=" + protocol - } - if err := GetDownloadResponse(ctx, bk, &fdr, protocolText); err != nil { - return err - } - - fullPath := filepath.Join(fdr.DownloadPath, fdr.Filename) - if dir := filepath.Dir(fullPath); dir != "." 
{ - if err = os.MkdirAll(dir, 0766); err != nil { - _ = fdr.Response.Body.Close() - return fmt.Errorf("mkdir for %s: %w", fullPath, err) - } - } - - flags := os.O_CREATE | os.O_WRONLY - if fdr.Range > 0 { - flags |= os.O_APPEND - } else if fdr.Overwrite { - flags |= os.O_TRUNC - } - - file, err := os.OpenFile(fullPath, flags, 0666) - if err != nil { - _ = fdr.Response.Body.Close() - return fmt.Errorf("open local file %s: %w", fullPath, err) - } - - total := info.Size - var writer io.Writer = file - var tracker *progressWriter - if progress != nil { - tracker = newProgressWriter(file, progress, guid, total) - writer = tracker - } - - _, copyErr := io.Copy(writer, fdr.Response.Body) - _ = fdr.Response.Body.Close() - _ = file.Close() - if tracker != nil { - if finalizeErr := tracker.Finalize(); finalizeErr != nil && copyErr == nil { - copyErr = finalizeErr - } - } - if copyErr != nil { - return fmt.Errorf("download failed for %s: %w", fdr.Filename, copyErr) - } - return nil -} - -// DownloadToPath downloads a single object using the provided backend -func DownloadToPath( - ctx context.Context, - dc drs.Client, - bk transfer.Downloader, - logger *slog.Logger, - guid string, - dstPath string, - protocol string, -) error { - opts := defaultDownloadOptions() - return DownloadToPathWithOptions(ctx, dc, bk, logger, guid, dstPath, protocol, opts) -} - -func DownloadToPathWithOptions( - ctx context.Context, - dc drs.Client, - bk transfer.Downloader, - logger *slog.Logger, - guid string, - dstPath string, - protocol string, - opts DownloadOptions, -) error { - if opts.MultipartThreshold <= 0 { - opts.MultipartThreshold = defaultDownloadOptions().MultipartThreshold - } - if opts.ChunkSize <= 0 { - opts.ChunkSize = defaultDownloadOptions().ChunkSize - } - if opts.Concurrency <= 0 { - opts.Concurrency = defaultDownloadOptions().Concurrency - } - - info, err := drs.ResolveObject(ctx, dc, guid) - if err != nil { - return fmt.Errorf("get file details failed: %w", err) - } - - // If 
size is unknown or small, single stream is safest. - if info.Size <= 0 || info.Size < opts.MultipartThreshold { - return downloadToPathSingle(ctx, bk, logger, guid, dstPath, protocol, info.Size) - } - - // If a partial file already exists, resumable single-stream download is safer than - // parallel range writes and avoids restarting from zero. - if st, statErr := os.Stat(dstPath); statErr == nil { - if st.Size() == info.Size { - return nil - } - if st.Size() > 0 && st.Size() < info.Size { - return downloadToPathSingle(ctx, bk, logger, guid, dstPath, protocol, info.Size) - } - } - - if err := downloadToPathMultipart(ctx, bk, logger, guid, dstPath, protocol, info.Size, opts); err != nil { - return err - } - - return nil -} - -func downloadToPathSingle( - ctx context.Context, - bk transfer.Downloader, - logger *slog.Logger, - guid string, - dstPath string, - protocol string, - expectedSize int64, -) error { - progress := common.GetProgress(ctx) - hash := common.GetOid(ctx) - - var existingSize int64 - if st, err := os.Stat(dstPath); err == nil { - existingSize = st.Size() - if expectedSize > 0 && existingSize == expectedSize { - return nil - } - } - - fdr := common.FileDownloadResponseObject{ - GUID: guid, - } - if existingSize > 0 { - fdr.Range = existingSize - } - - protocolText := "" - if protocol != "" { - protocolText = "?protocol=" + protocol - } - - if err := GetDownloadResponse(ctx, bk, &fdr, protocolText); err != nil { - // Mimic failed context logging from original - // We'd need to reconstruct the "logger.FailedContext" logic if using raw slog - // For now, simple error logging or rely on caller to log context? - // The original code used g3i.Logger().FailedContext... - // Let's just log error - logger.Error("Download failed", "error", err, "path", dstPath, "guid", guid) - return err - } - defer fdr.Response.Body.Close() - - if existingSize > 0 && fdr.Response.StatusCode == http.StatusOK { - // Server ignored range; restart from zero. 
- existingSize = 0 - } - - if dir := filepath.Dir(dstPath); dir != "." { - if err := os.MkdirAll(dir, 0766); err != nil { - logger.Error("Mkdir failed", "error", err, "path", dstPath) - return fmt.Errorf("mkdir for %s: %w", dstPath, err) - } - } - - flags := os.O_CREATE | os.O_WRONLY - if existingSize > 0 { - flags |= os.O_APPEND - } else { - flags |= os.O_TRUNC - } - file, err := os.OpenFile(dstPath, flags, 0666) - if err != nil { - logger.Error("Create file failed", "error", err, "path", dstPath) - return fmt.Errorf("create local file %s: %w", dstPath, err) - } - defer file.Close() - - var writer io.Writer = file - if progress != nil { - total := fdr.Response.ContentLength + existingSize - tracker := newProgressWriter(file, progress, hash, total) - if existingSize > 0 { - tracker.bytesSoFar = existingSize - } - writer = tracker - defer tracker.Finalize() - } - - reader := io.Reader(fdr.Response.Body) - if failAfter := parseInjectedDownloadFailureBytes(); failAfter > 0 { - reader = &failAfterReader{ - r: reader, - remaining: failAfter, - } - } - - if _, err := io.Copy(writer, reader); err != nil { - logger.Error("Copy failed", "error", err, "path", dstPath) - return fmt.Errorf("copy to %s: %w", dstPath, err) - } - if expectedSize > 0 { - if st, err := os.Stat(dstPath); err == nil && st.Size() != expectedSize { - return fmt.Errorf("download incomplete for %s: expected %d bytes, got %d", dstPath, expectedSize, st.Size()) - } - } - - // Success logging is up to caller or we can do simple info - // logger.Info("Download succeeded", "path", dstPath, "guid", guid) - return nil -} - -func parseInjectedDownloadFailureBytes() int64 { - raw := strings.TrimSpace(os.Getenv("DATA_CLIENT_TEST_FAIL_DOWNLOAD_AFTER_BYTES")) - if raw == "" { - return 0 - } - n, err := strconv.ParseInt(raw, 10, 64) - if err != nil || n <= 0 { - return 0 - } - return n -} - -type failAfterReader struct { - r io.Reader - remaining int64 - failed bool -} - -func (f *failAfterReader) Read(p []byte) 
(int, error) { - if f.failed { - return 0, errors.New("injected test interruption during download") - } - if f.remaining <= 0 { - f.failed = true - return 0, errors.New("injected test interruption during download") - } - if int64(len(p)) > f.remaining { - p = p[:f.remaining] - } - n, err := f.r.Read(p) - f.remaining -= int64(n) - if err != nil { - return n, err - } - if f.remaining <= 0 { - f.failed = true - return n, errors.New("injected test interruption during download") - } - return n, nil -} - -func downloadToPathMultipart( - ctx context.Context, - bk transfer.Downloader, - logger *slog.Logger, - guid string, - dstPath string, - protocol string, - totalSize int64, - opts DownloadOptions, -) error { - protocolText := "" - if protocol != "" { - protocolText = "?protocol=" + protocol - } - - signedURL, err := bk.ResolveDownloadURL(ctx, guid, protocolText) - if err != nil { - return fmt.Errorf("failed to resolve download URL for %s: %w", guid, err) - } - - // Preflight first ranged read to verify server honors ranges. - rangeStart := int64(0) - rangeEnd := opts.ChunkSize - 1 - if rangeEnd >= totalSize { - rangeEnd = totalSize - 1 - } - preflight := &common.FileDownloadResponseObject{ - GUID: guid, - PresignedURL: signedURL, - RangeStart: &rangeStart, - RangeEnd: &rangeEnd, - } - - resp, err := bk.Download(ctx, preflight) - if err != nil { - return fmt.Errorf("multipart preflight request failed: %w", err) - } - _ = resp.Body.Close() - if resp.StatusCode != 206 { - return fmt.Errorf("range requests not supported (status %d)", resp.StatusCode) - } - - if dir := filepath.Dir(dstPath); dir != "." 
{ - if err := os.MkdirAll(dir, 0766); err != nil { - return fmt.Errorf("mkdir for %s: %w", dstPath, err) - } - } - - file, err := os.OpenFile(dstPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) - if err != nil { - return fmt.Errorf("create local file %s: %w", dstPath, err) - } - defer file.Close() - - if err := file.Truncate(totalSize); err != nil { - return fmt.Errorf("pre-allocate %s: %w", dstPath, err) - } - - progress := common.GetProgress(ctx) - hash := common.GetOid(ctx) - if hash == "" { - hash = guid - } - var soFar atomic.Int64 - - totalParts := int((totalSize + opts.ChunkSize - 1) / opts.ChunkSize) - g, gctx := errgroup.WithContext(ctx) - g.SetLimit(opts.Concurrency) - - for i := 0; i < totalParts; i++ { - partStart := int64(i) * opts.ChunkSize - partEnd := partStart + opts.ChunkSize - 1 - if partEnd >= totalSize { - partEnd = totalSize - 1 - } - ps := partStart - pe := partEnd - - g.Go(func() error { - fdr := &common.FileDownloadResponseObject{ - GUID: guid, - PresignedURL: signedURL, - RangeStart: &ps, - RangeEnd: &pe, - } - - partResp, err := bk.Download(gctx, fdr) - if err != nil { - return fmt.Errorf("range download %d-%d failed: %w", ps, pe, err) - } - defer partResp.Body.Close() - - if partResp.StatusCode != 206 { - return fmt.Errorf("range download %d-%d returned status %d", ps, pe, partResp.StatusCode) - } - - buf, err := io.ReadAll(partResp.Body) - if err != nil { - return fmt.Errorf("range read %d-%d failed: %w", ps, pe, err) - } - - if _, err := file.WriteAt(buf, ps); err != nil { - return fmt.Errorf("range write %d-%d failed: %w", ps, pe, err) - } - - if progress != nil { - current := soFar.Add(int64(len(buf))) - _ = progress(common.ProgressEvent{ - Event: "progress", - Oid: hash, - BytesSinceLast: int64(len(buf)), - BytesSoFar: current, - }) - } - - return nil - }) - } - - if err := g.Wait(); err != nil { - return err - } - - if progress != nil { - final := soFar.Load() - if final < totalSize { - _ = progress(common.ProgressEvent{ - Event: 
"progress", - Oid: hash, - BytesSinceLast: totalSize - final, - BytesSoFar: totalSize, - }) - } - } - - logger.Info("multipart download completed", "guid", guid, "size", totalSize) - return nil -} diff --git a/download/transfer_test.go b/download/transfer_test.go deleted file mode 100644 index a1dd9ec..0000000 --- a/download/transfer_test.go +++ /dev/null @@ -1,462 +0,0 @@ -package download - -import ( - "bytes" - "context" - "errors" - "io" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/hash" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/request" -) - -type fakeBackend struct { - logger *logs.Gen3Logger - doFunc func(context.Context, *common.FileDownloadResponseObject) (*http.Response, error) - data []byte - size int64 -} - -func (f *fakeBackend) Name() string { return "Fake" } -func (f *fakeBackend) Logger() *logs.Gen3Logger { return f.logger } - -func (f *fakeBackend) fileDetails(guid string) *drs.DRSObject { - size := f.size - if size == 0 && len(f.data) > 0 { - size = int64(len(f.data)) - } - if size == 0 { - size = 64 - } - name := "payload.bin" - accessID := "s3" - return &drs.DRSObject{ - Name: name, - Size: size, - AccessMethods: []drs.AccessMethod{ - {AccessId: accessID, Type: "s3"}, - }, - } -} - -func (f *fakeBackend) ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { - if guid == "test-fallback" { - return "", errors.New("fallback") - } - return "https://download.example.com/object", nil -} - -func (f *fakeBackend) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { - return "", errors.New("not implemented") -} - -func (f *fakeBackend) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { - return nil, 
errors.New("not implemented") -} - -func (f *fakeBackend) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { - return "", errors.New("not implemented") -} - -func (f *fakeBackend) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { - return errors.New("not implemented") -} - -func (f *fakeBackend) Upload(ctx context.Context, url string, body io.Reader, size int64) error { - return errors.New("not implemented") -} - -func (f *fakeBackend) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { - return "", errors.New("not implemented") -} - -func (f *fakeBackend) DeleteFile(ctx context.Context, guid string) (string, error) { - return "", errors.New("not implemented") -} - -func (f *fakeBackend) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { - if f.doFunc != nil { - return f.doFunc(ctx, fdr) - } - if fdr.Range > 0 { - start := fdr.Range - if start < 0 || start > int64(len(f.data)) { - return nil, errors.New("invalid resume range") - } - if start == int64(len(f.data)) { - return newDownloadResponse(fdr.PresignedURL, []byte{}, http.StatusPartialContent), nil - } - return newDownloadResponse(fdr.PresignedURL, f.data[start:], http.StatusPartialContent), nil - } - if fdr.RangeStart != nil && fdr.RangeEnd != nil { - start, end := *fdr.RangeStart, *fdr.RangeEnd - if start < 0 || end >= int64(len(f.data)) || start > end { - return nil, errors.New("invalid range") - } - return newDownloadResponse(fdr.PresignedURL, f.data[start:end+1], http.StatusPartialContent), nil - } - return newDownloadResponse(fdr.PresignedURL, f.data, http.StatusOK), nil -} - -type fakeDrsClient struct { - backend *fakeBackend -} - -func (f *fakeDrsClient) GetObject(ctx context.Context, id string) (*drs.DRSObject, error) { - return f.backend.fileDetails(id), nil -} - 
-func (f *fakeDrsClient) GetObjectByHash(ctx context.Context, checksum *hash.Checksum) ([]drs.DRSObject, error) { - obj := f.backend.fileDetails(checksum.Checksum) - return []drs.DRSObject{*obj}, nil -} - -func (f *fakeDrsClient) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { - return map[string][]drs.DRSObject{}, nil -} - -func (f *fakeDrsClient) ListObjects(ctx context.Context) (chan drs.DRSObjectResult, error) { - return nil, errors.New("not implemented") -} - -func (f *fakeDrsClient) ListObjectsByProject(ctx context.Context, projectId string) (chan drs.DRSObjectResult, error) { - return nil, errors.New("not implemented") -} - -func (f *fakeDrsClient) GetProjectSample(ctx context.Context, projectId string, limit int) ([]drs.DRSObject, error) { - return nil, errors.New("not implemented") -} - -func (f *fakeDrsClient) RegisterRecord(ctx context.Context, record *drs.DRSObject) (*drs.DRSObject, error) { - return nil, errors.New("not implemented") -} - -func (f *fakeDrsClient) RegisterRecords(ctx context.Context, records []*drs.DRSObject) ([]*drs.DRSObject, error) { - return nil, errors.New("not implemented") -} - -func (f *fakeDrsClient) UpdateRecord(ctx context.Context, updateInfo *drs.DRSObject, did string) (*drs.DRSObject, error) { - return nil, errors.New("not implemented") -} - -func (f *fakeDrsClient) DeleteRecord(ctx context.Context, did string) error { - return errors.New("not implemented") -} - -func (f *fakeDrsClient) DeleteRecordsByProject(ctx context.Context, projectId string) error { - return errors.New("not implemented") -} - -func (f *fakeDrsClient) GetDownloadURL(ctx context.Context, id string, accessType string) (*drs.AccessURL, error) { - return nil, errors.New("not implemented") -} - -func (f *fakeDrsClient) AddURL(ctx context.Context, blobURL, sha256 string, opts ...drs.AddURLOption) (*drs.DRSObject, error) { - return nil, errors.New("not implemented") -} - -func (f *fakeDrsClient) 
UpsertRecord(ctx context.Context, url string, sha256 string, fileSize int64, projectId string) (*drs.DRSObject, error) { - return nil, errors.New("not implemented") -} - -func (f *fakeDrsClient) BuildDrsObj(fileName string, checksum string, size int64, drsId string) (*drs.DRSObject, error) { - return nil, errors.New("not implemented") -} - -func (f *fakeDrsClient) GetProjectId() string { return "" } -func (f *fakeDrsClient) GetBucketName() string { return "" } -func (f *fakeDrsClient) GetOrganization() string { return "" } -func (f *fakeDrsClient) WithProject(projectId string) drs.Client { return f } -func (f *fakeDrsClient) WithOrganization(organization string) drs.Client { return f } -func (f *fakeDrsClient) WithBucket(bucketName string) drs.Client { return f } -func (f *fakeDrsClient) New(method, url string) *request.RequestBuilder { - return &request.RequestBuilder{Method: method, Url: url} -} -func (f *fakeDrsClient) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { - return nil, errors.New("not implemented") -} - -func TestDownloadSingleWithProgressEmitsEvents(t *testing.T) { - payload := bytes.Repeat([]byte("d"), 64) - downloadDir := t.TempDir() - downloadPath := downloadDir + string(os.PathSeparator) - - var events []common.ProgressEvent - progress := func(event common.ProgressEvent) error { - events = append(events, event) - return nil - } - - fake := &fakeBackend{ - logger: logs.NewGen3Logger(nil, "", ""), - data: payload, - } - dc := &fakeDrsClient{backend: fake} - - ctx := common.WithProgress(context.Background(), progress) - err := DownloadSingleWithProgress(ctx, dc, fake, "guid-123", downloadPath, "") - if err != nil { - t.Fatalf("download failed: %v", err) - } - - if len(events) == 0 { - t.Fatal("expected progress events") - } - for i := 1; i < len(events); i++ { - if events[i].BytesSoFar < events[i-1].BytesSoFar { - t.Fatalf("bytesSoFar not monotonic: %d then %d", events[i-1].BytesSoFar, events[i].BytesSoFar) - } - } - 
last := events[len(events)-1] - if last.BytesSoFar != int64(len(payload)) { - t.Fatalf("expected final bytesSoFar %d, got %d", len(payload), last.BytesSoFar) - } - fullPath := filepath.Join(downloadPath, "payload.bin") - if _, err := os.Stat(fullPath); err != nil { - t.Fatalf("expected file to exist: %v", err) - } -} - -func TestDownloadSingleWithProgressFinalizeOnError(t *testing.T) { - downloadDir := t.TempDir() - downloadPath := downloadDir + string(os.PathSeparator) - - var events []common.ProgressEvent - progress := func(event common.ProgressEvent) error { - events = append(events, event) - return nil - } - - fake := &fakeBackend{ - logger: logs.NewGen3Logger(nil, "", ""), - data: []byte("short"), - size: 64, - } - dc := &fakeDrsClient{backend: fake} - - ctx := common.WithProgress(context.Background(), progress) - err := DownloadSingleWithProgress(ctx, dc, fake, "guid-123", downloadPath, "") - if err == nil { - t.Fatal("expected download error") - } - - if len(events) == 0 { - t.Fatal("expected progress events") - } - last := events[len(events)-1] - if last.BytesSoFar != 64 { - t.Fatalf("expected finalize bytesSoFar 64, got %d", last.BytesSoFar) - } -} - -func newDownloadJSONResponse(rawURL, body string) *http.Response { - parsedURL, err := url.Parse(rawURL) - if err != nil { - parsedURL = &url.URL{} - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: io.NopCloser(strings.NewReader(body)), - Request: &http.Request{URL: parsedURL}, - Header: make(http.Header), - } -} - -func TestDownloadToPathMultipart(t *testing.T) { - payload := bytes.Repeat([]byte("z"), 2*1024*1024) // 2MB - tmpDir := t.TempDir() - dst := filepath.Join(tmpDir, "multipart.bin") - - fake := &fakeBackend{ - logger: logs.NewGen3Logger(nil, "", ""), - data: payload, - size: int64(len(payload)), - } - dc := &fakeDrsClient{backend: fake} - - err := DownloadToPathWithOptions( - context.Background(), - dc, - fake, - fake.Logger().Logger, - "guid-789", - dst, - "", - DownloadOptions{ - 
MultipartThreshold: 1 * 1024 * 1024, - ChunkSize: 256 * 1024, - Concurrency: 4, - }, - ) - if err != nil { - t.Fatalf("multipart download failed: %v", err) - } - - got, err := os.ReadFile(dst) - if err != nil { - t.Fatalf("read failed: %v", err) - } - if !bytes.Equal(payload, got) { - t.Fatal("downloaded payload mismatch") - } -} - -func TestDownloadToPathSingleResumeFromPartial(t *testing.T) { - payload := bytes.Repeat([]byte("r"), 1024) - tmpDir := t.TempDir() - dst := filepath.Join(tmpDir, "resume.bin") - prefix := payload[:300] - if err := os.WriteFile(dst, prefix, 0o666); err != nil { - t.Fatalf("write partial file: %v", err) - } - - var gotRange int64 = -1 - fake := &fakeBackend{ - logger: logs.NewGen3Logger(nil, "", ""), - data: payload, - size: int64(len(payload)), - doFunc: func(_ context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { - gotRange = fdr.Range - if fdr.Range <= 0 { - return nil, errors.New("expected resume range") - } - return newDownloadResponse(fdr.PresignedURL, payload[fdr.Range:], http.StatusPartialContent), nil - }, - } - dc := &fakeDrsClient{backend: fake} - - err := DownloadToPathWithOptions( - context.Background(), - dc, - fake, - fake.Logger().Logger, - "guid-resume", - dst, - "", - DownloadOptions{ - MultipartThreshold: 1 * common.GB, // force single-stream path - ChunkSize: 64 * common.MB, - Concurrency: 2, - }, - ) - if err != nil { - t.Fatalf("resume download failed: %v", err) - } - if gotRange != int64(len(prefix)) { - t.Fatalf("expected range %d, got %d", len(prefix), gotRange) - } - - got, err := os.ReadFile(dst) - if err != nil { - t.Fatalf("read result: %v", err) - } - if !bytes.Equal(got, payload) { - t.Fatal("resumed file mismatch") - } -} - -func TestDownloadToPathSingleRangeIgnoredRestarts(t *testing.T) { - payload := bytes.Repeat([]byte("k"), 2048) - tmpDir := t.TempDir() - dst := filepath.Join(tmpDir, "range-ignored.bin") - if err := os.WriteFile(dst, payload[:500], 0o666); err != nil { - 
t.Fatalf("write partial: %v", err) - } - - fake := &fakeBackend{ - logger: logs.NewGen3Logger(nil, "", ""), - data: payload, - size: int64(len(payload)), - doFunc: func(_ context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { - // Simulate server ignoring Range and returning full body with 200. - if fdr.Range <= 0 { - return nil, errors.New("expected range request") - } - return newDownloadResponse(fdr.PresignedURL, payload, http.StatusOK), nil - }, - } - dc := &fakeDrsClient{backend: fake} - - err := DownloadToPathWithOptions( - context.Background(), - dc, - fake, - fake.Logger().Logger, - "guid-range-ignored", - dst, - "", - DownloadOptions{MultipartThreshold: 1 * common.GB, ChunkSize: 64 * common.MB, Concurrency: 2}, - ) - if err != nil { - t.Fatalf("download failed: %v", err) - } - - got, err := os.ReadFile(dst) - if err != nil { - t.Fatalf("read result: %v", err) - } - if !bytes.Equal(got, payload) { - t.Fatal("range-ignored restart did not produce full file") - } -} - -func TestDownloadToPathAlreadyCompleteSkipsDownload(t *testing.T) { - payload := bytes.Repeat([]byte("c"), 512) - tmpDir := t.TempDir() - dst := filepath.Join(tmpDir, "complete.bin") - if err := os.WriteFile(dst, payload, 0o666); err != nil { - t.Fatalf("write complete file: %v", err) - } - - calls := 0 - fake := &fakeBackend{ - logger: logs.NewGen3Logger(nil, "", ""), - data: payload, - size: int64(len(payload)), - doFunc: func(_ context.Context, _ *common.FileDownloadResponseObject) (*http.Response, error) { - calls++ - return newDownloadResponse("https://download.example.com/object", payload, http.StatusOK), nil - }, - } - dc := &fakeDrsClient{backend: fake} - - err := DownloadToPathWithOptions( - context.Background(), - dc, - fake, - fake.Logger().Logger, - "guid-complete", - dst, - "", - DownloadOptions{MultipartThreshold: 1 * common.GB, ChunkSize: 64 * common.MB, Concurrency: 2}, - ) - if err != nil { - t.Fatalf("download call failed: %v", err) - } - if calls 
!= 0 { - t.Fatalf("expected no backend download calls, got %d", calls) - } -} - -func newDownloadResponse(rawURL string, payload []byte, status int) *http.Response { - parsedURL, err := url.Parse(rawURL) - if err != nil { - parsedURL = &url.URL{} - } - return &http.Response{ - StatusCode: status, - Body: io.NopCloser(bytes.NewReader(payload)), - ContentLength: int64(len(payload)), - Request: &http.Request{URL: parsedURL}, - Header: make(http.Header), - } -} diff --git a/download/types.go b/download/types.go deleted file mode 100644 index 9758cb7..0000000 --- a/download/types.go +++ /dev/null @@ -1,60 +0,0 @@ -package download - -import ( - "log/slog" - "os" - - "github.com/calypr/data-client/common" -) - -type IndexdResponse struct { - Name string - Size int64 -} -type RenamedOrSkippedFileInfo struct { - GUID string - OldFilename string - NewFilename string -} - -func validateLocalFileStat( - logger *slog.Logger, - fdr *common.FileDownloadResponseObject, - filesize int64, - skipCompleted bool, -) { - fullPath := fdr.DownloadPath + fdr.Filename - - fi, err := os.Stat(fullPath) - if err != nil { - if os.IsNotExist(err) { - // No local file → full download, nothing special - return - } - logger.Error("Error statting local file", "path", fullPath, "error", err) - logger.Info("Will attempt full download anyway") - return - } - - localSize := fi.Size() - - // User doesn't want to skip completed files → force full overwrite - if !skipCompleted { - fdr.Overwrite = true - return - } - - // Exact match → skip entirely - if localSize == filesize { - fdr.Skip = true - return - } - - // Local file larger than expected → overwrite fully (corrupted or different file) - if localSize > filesize { - fdr.Overwrite = true - return - } - - fdr.Range = localSize -} diff --git a/download/url_resolution.go b/download/url_resolution.go deleted file mode 100644 index e18ba5a..0000000 --- a/download/url_resolution.go +++ /dev/null @@ -1,48 +0,0 @@ -package download - -import ( - "context" - 
"errors" - "fmt" - "io" - "net/http" - "strings" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/transfer" -) - -// GetDownloadResponse gets presigned URL and prepares HTTP response -func GetDownloadResponse(ctx context.Context, bk transfer.Downloader, fdr *common.FileDownloadResponseObject, protocolText string) error { - url, err := bk.ResolveDownloadURL(ctx, fdr.GUID, protocolText) - if err != nil { - return fmt.Errorf("failed to resolve download URL for %s: %w", fdr.GUID, err) - } - fdr.PresignedURL = url - - return makeDownloadRequest(ctx, bk, fdr) -} - -func makeDownloadRequest(ctx context.Context, bk transfer.Downloader, fdr *common.FileDownloadResponseObject) error { - resp, err := bk.Download(ctx, fdr) - - if err != nil { - return errors.New("Request failed: " + strings.ReplaceAll(err.Error(), fdr.PresignedURL, "")) - } - - // Check for non-success status codes - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - defer resp.Body.Close() // Ensure the body is closed - - bodyBytes, err := io.ReadAll(resp.Body) - bodyString := "" - if err == nil { - bodyString = string(bodyBytes) - } - - return fmt.Errorf("non-OK response: %d, body: %s", resp.StatusCode, bodyString) - } - - fdr.Response = resp - return nil -} diff --git a/download/utils.go b/download/utils.go deleted file mode 100644 index 7209c44..0000000 --- a/download/utils.go +++ /dev/null @@ -1,77 +0,0 @@ -package download - -import ( - "bufio" - "os" - "path/filepath" - "strconv" - "strings" - - "context" - "fmt" - "log/slog" -) - -// AskForConfirmation asks user for confirmation before proceed, will wait if user entered garbage -func AskForConfirmation(logger *slog.Logger, s string) bool { - reader := bufio.NewReader(os.Stdin) - - for { - logger.Info(fmt.Sprintf("%s [y/n]: ", s)) - - response, err := reader.ReadString('\n') - if err != nil { - logger.Error("Error occurred during parsing user's confirmation: " + err.Error()) - os.Exit(1) - 
} - - switch strings.ToLower(strings.TrimSpace(response)) { - case "y", "yes": - return true - case "n", "no": - return false - default: - return false // Example of defaulting to false - } - } -} - -func processOriginalFilename(downloadPath string, actualFilename string) string { - _, err := os.Stat(downloadPath + actualFilename) - if os.IsNotExist(err) { - return actualFilename - } - extension := filepath.Ext(actualFilename) - filename := strings.TrimSuffix(actualFilename, extension) - counter := 2 - for { - newFilename := filename + "_" + strconv.Itoa(counter) + extension - _, err := os.Stat(downloadPath + newFilename) - if os.IsNotExist(err) { - return newFilename - } - counter++ - } -} - -// truncateFilename shortens long filenames for progress bar display -func truncateFilename(name string, max int) string { - if len(name) <= max { - return name - } - return "..." + name[len(name)-max+3:] -} - -// printRenamed shows renamed files in final summary -func printRenamed(ctx context.Context, logger *slog.Logger, renamed []RenamedOrSkippedFileInfo) { - for _, r := range renamed { - logger.InfoContext(ctx, fmt.Sprintf("Renamed %q to %q (GUID: %s)", r.OldFilename, r.NewFilename, r.GUID)) - } -} - -// printSkipped shows skipped files in final summary -func printSkipped(ctx context.Context, logger *slog.Logger, skipped []RenamedOrSkippedFileInfo) { - for _, s := range skipped { - logger.InfoContext(ctx, fmt.Sprintf("Skipped %q (GUID: %s)", s.OldFilename, s.GUID)) - } -} diff --git a/drs/access_method_conversions.go b/drs/access_method_conversions.go deleted file mode 100644 index 22cb0bb..0000000 --- a/drs/access_method_conversions.go +++ /dev/null @@ -1,53 +0,0 @@ -package drs - -import ( - "fmt" - "net/url" -) - -func DRSAccessMethodsFromInternalURLs(urls []string, authz []string) ([]AccessMethod, error) { - accessMethods := make([]AccessMethod, 0, len(urls)) - for _, urlString := range urls { - method := AccessMethod{ - AccessUrl: AccessURL{Url: urlString}, - } - - 
parsed, err := url.Parse(urlString) - if err != nil { - return nil, fmt.Errorf("failed to parse url %q: %v", urlString, err) - } - if parsed.Scheme == "" { - method.Type = "https" - } else { - method.Type = parsed.Scheme - } - - if len(authz) > 0 { - method.Authorizations = Authorizations{BearerAuthIssuers: []string{authz[0]}} - } - accessMethods = append(accessMethods, method) - } - return accessMethods, nil -} - -// InternalAuthzFromDrsAccessMethods extracts authz values from DRS access methods. -func InternalAuthzFromDrsAccessMethods(accessMethods []AccessMethod) []string { - authz := make([]string, 0, len(accessMethods)) - for _, drsURL := range accessMethods { - if len(drsURL.Authorizations.BearerAuthIssuers) > 0 { - authz = append(authz, drsURL.Authorizations.BearerAuthIssuers[0]) - } - } - return authz -} - -func InternalURLFromDrsAccessURLs(accessMethods []AccessMethod) []string { - urls := make([]string, 0, len(accessMethods)) - for _, drsURL := range accessMethods { - if drsURL.AccessUrl.Url == "" { - continue - } - urls = append(urls, drsURL.AccessUrl.Url) - } - return urls -} diff --git a/drs/client.go b/drs/client.go deleted file mode 100644 index 5a4fe2b..0000000 --- a/drs/client.go +++ /dev/null @@ -1,362 +0,0 @@ -package drs - -import ( - "context" - "fmt" - "log/slog" - "strings" - - "github.com/calypr/data-client/conf" - "github.com/calypr/data-client/hash" - "github.com/calypr/data-client/request" - syclient "github.com/calypr/syfon/client" -) - -type DrsClient struct { - request.RequestInterface - provider endpointProvider - syfon *syclient.Client - logger *slog.Logger - projectId string - organization string - bucketName string -} - -type endpointProvider interface { - APIEndpoint() string - AccessToken() string -} - -type gen3Provider struct { - cred *conf.Credential -} - -func (p gen3Provider) APIEndpoint() string { return p.cred.APIEndpoint } -func (p gen3Provider) AccessToken() string { return p.cred.AccessToken } - -type localProvider 
struct { - endpoint string -} - -func (p localProvider) APIEndpoint() string { return p.endpoint } -func (p localProvider) AccessToken() string { return "" } - -// NewDrsClient creates a new DrsClient. -func NewDrsClient(req request.RequestInterface, cred *conf.Credential, logger *slog.Logger) Client { - provider := gen3Provider{cred: cred} - return &DrsClient{ - RequestInterface: req, - provider: provider, - syfon: buildSyfonClient(req, provider.APIEndpoint(), provider.AccessToken()), - logger: logger, - } -} - -// NewLocalDrsClient creates a DRS client for local/non-Gen3 mode. -// It intentionally carries no bearer token. -func NewLocalDrsClient(req request.RequestInterface, endpoint string, logger *slog.Logger) Client { - provider := localProvider{endpoint: endpoint} - return &DrsClient{ - RequestInterface: req, - provider: provider, - syfon: buildSyfonClient(req, provider.APIEndpoint(), ""), - logger: logger, - } -} - -func buildSyfonClient(req request.RequestInterface, endpoint, token string) *syclient.Client { - opts := make([]syclient.Option, 0, 2) - if strings.TrimSpace(token) != "" { - opts = append(opts, syclient.WithBearerToken(token)) - } - if r, ok := req.(*request.Request); ok && r.RetryClient != nil { - opts = append(opts, syclient.WithHTTPClient(r.RetryClient.StandardClient())) - } - return syclient.New(endpoint, opts...) 
-} - -func (c *DrsClient) GetProjectId() string { - return c.projectId -} - -func (c *DrsClient) GetBucketName() string { - return c.bucketName -} - -func (c *DrsClient) GetOrganization() string { - return c.organization -} - -func (c *DrsClient) WithProject(projectId string) Client { - c.projectId = projectId - return c -} - -func (c *DrsClient) WithOrganization(organization string) Client { - c.organization = organization - return c -} - -func (c *DrsClient) WithBucket(bucketName string) Client { - c.bucketName = bucketName - return c -} - -func (c *DrsClient) GetObject(ctx context.Context, id string) (*DRSObject, error) { - obj, err := c.syfon.DRS().GetObject(ctx, id) - if err != nil { - return nil, err - } - return &obj, nil -} - -func (c *DrsClient) GetObjectByHash(ctx context.Context, checksum *hash.Checksum) ([]DRSObject, error) { - if checksum == nil { - return nil, fmt.Errorf("checksum is required") - } - resp, err := c.syfon.Index().List(ctx, syclient.ListRecordsOptions{ - Hash: fmt.Sprintf("%s:%s", string(checksum.Type), checksum.Checksum), - }) - if err != nil { - return nil, err - } - - out := make([]DRSObject, 0, len(resp.Records)) - for _, rec := range resp.Records { - drsObj, err := syfonInternalRecordToDRSObject(rec) - if err != nil { - return nil, err - } - out = append(out, *drsObj) - } - return out, nil -} - -func (c *DrsClient) GetDownloadURL(ctx context.Context, id string, accessType string) (*AccessURL, error) { - access, err := c.syfon.DRS().GetAccessURL(ctx, id, accessType) - if err != nil { - return nil, err - } - return &AccessURL{Url: access.Url}, nil -} - -func (c *DrsClient) ListObjectsByProject(ctx context.Context, projectId string) (chan DRSObjectResult, error) { - resourcePath, err := ProjectToResource(c.organization, projectId) - if err != nil { - return nil, err - } - - resp, err := c.syfon.Index().List(ctx, syclient.ListRecordsOptions{Authz: resourcePath}) - if err != nil { - return nil, err - } - - out := make(chan 
DRSObjectResult, len(resp.Records)) - go func() { - defer close(out) - for _, elem := range resp.Records { - drsObj, err := syfonInternalRecordToDRSObject(elem) - if err != nil { - out <- DRSObjectResult{Error: err} - continue - } - out <- DRSObjectResult{Object: drsObj} - } - }() - return out, nil -} - -func (c *DrsClient) ListObjects(ctx context.Context) (chan DRSObjectResult, error) { - const pageSize = 50 - out := make(chan DRSObjectResult, pageSize) - - go func() { - defer close(out) - for page := 0; ; page++ { - resp, err := c.syfon.DRS().ListObjects(ctx, pageSize, page) - if err != nil { - out <- DRSObjectResult{Error: err} - return - } - if len(resp.DrsObjects) == 0 { - return - } - for _, elem := range resp.DrsObjects { - obj := elem - out <- DRSObjectResult{Object: &obj} - } - } - }() - return out, nil -} - -func (c *DrsClient) GetProjectSample(ctx context.Context, projectId string, limit int) ([]DRSObject, error) { - if limit <= 0 { - limit = 1 - } - - objChan, err := c.ListObjectsByProject(ctx, projectId) - if err != nil { - return nil, err - } - - result := make([]DRSObject, 0, limit) - for objResult := range objChan { - if objResult.Error != nil { - return nil, objResult.Error - } - result = append(result, *objResult.Object) - - if len(result) >= limit { - go func() { - for range objChan { - } - }() - break - } - } - return result, nil -} - -func (c *DrsClient) RegisterRecord(ctx context.Context, record *DRSObject) (*DRSObject, error) { - internalRecord, err := drsObjectToSyfonInternalRecord(record) - if err != nil { - return nil, err - } - created, err := c.syfon.Index().Create(ctx, internalRecord) - if err != nil { - return nil, err - } - return syfonInternalRecordToDRSObject(created) -} - -func (c *DrsClient) RegisterRecords(ctx context.Context, records []*DRSObject) ([]*DRSObject, error) { - if len(records) == 0 { - return nil, nil - } - - candidates := make([]syclient.DRSObjectCandidate, len(records)) - for i, r := range records { - candidates[i] 
= ConvertToCandidate(r) - } - - resp, err := c.syfon.DRS().RegisterObjects(ctx, syclient.RegisterObjectsRequest{Candidates: candidates}) - if err != nil { - return nil, err - } - if len(resp.Objects) == 0 { - return nil, fmt.Errorf("register response did not include objects") - } - - out := make([]*DRSObject, 0, len(resp.Objects)) - for _, obj := range resp.Objects { - o := obj - out = append(out, &o) - } - return out, nil -} - -func (c *DrsClient) UpdateRecord(ctx context.Context, updateInfo *DRSObject, did string) (*DRSObject, error) { - existing, err := c.syfon.Index().Get(ctx, did) - if err != nil { - return nil, err - } - - updated := existing - updated.SetDid(did) - if len(updateInfo.AccessMethods) > 0 { - newURLs := make([]string, 0, len(updateInfo.AccessMethods)) - for _, a := range updateInfo.AccessMethods { - if a.AccessUrl.Url == "" { - continue - } - newURLs = append(newURLs, a.AccessUrl.Url) - } - updated.SetUrls(appendUnique(updated.GetUrls(), newURLs)) - - authz := InternalAuthzFromDrsAccessMethods(updateInfo.AccessMethods) - updated.SetAuthz(appendUnique(updated.GetAuthz(), authz)) - } - - if updateInfo.Name != "" { - updated.SetFileName(updateInfo.Name) - } - if updateInfo.Size > 0 { - updated.SetSize(updateInfo.Size) - } - if len(updateInfo.Checksums) > 0 { - updated.SetHashes(hash.ConvertDrsChecksumsToMap(updateInfo.Checksums)) - } - - res, err := c.syfon.Index().Update(ctx, did, updated) - if err != nil { - return nil, err - } - return syfonInternalRecordToDRSObject(res) -} - -func (c *DrsClient) DeleteRecord(ctx context.Context, did string) error { - return c.syfon.Index().Delete(ctx, did) -} - -func (c *DrsClient) DeleteRecordsByProject(ctx context.Context, projectId string) error { - resourcePath, err := ProjectToResource(c.organization, projectId) - if err != nil { - return err - } - _, err = c.syfon.Index().DeleteByQuery(ctx, syclient.DeleteByQueryOptions{Authz: resourcePath}) - return err -} - -func (c *DrsClient) BatchGetObjectsByHash(ctx 
context.Context, hashes []string) (map[string][]DRSObject, error) { - if len(hashes) == 0 { - return nil, nil - } - - resp, err := c.syfon.Index().BulkHashes(ctx, syclient.BulkHashesRequest{Hashes: hashes}) - if err != nil { - return nil, err - } - - result := make(map[string][]DRSObject) - for _, rec := range resp.Records { - drsObj, err := syfonInternalRecordToDRSObject(rec) - if err != nil { - continue - } - hInfo := hash.ConvertDrsChecksumsToHashInfo(drsObj.Checksums) - if hInfo.SHA256 != "" { - result[hInfo.SHA256] = append(result[hInfo.SHA256], *drsObj) - } - } - return result, nil -} - -func appendUnique(existing []string, toAdd []string) []string { - seen := make(map[string]bool) - for _, v := range existing { - seen[v] = true - } - for _, v := range toAdd { - if !seen[v] { - existing = append(existing, v) - seen[v] = true - } - } - return existing -} - -// BuildDrsObj matches git-drs behavior but moved to core. -func (c *DrsClient) BuildDrsObj(fileName string, checksum string, size int64, drsId string) (*DRSObject, error) { - return BuildDrsObj(fileName, checksum, size, drsId, c.GetBucketName(), c.GetOrganization(), c.GetProjectId()) -} - -// RegisterFile matches git-drs behavior but moved to core. -func (c *DrsClient) RegisterFile(ctx context.Context, oid string, path string) (*DRSObject, error) { - // Base implementation without LFS specifics. - return nil, fmt.Errorf("RegisterFile needs specific implementation (e.g. 
for LFS or cloud)") -} - -func (c *DrsClient) DownloadFile(ctx context.Context, oid string, destPath string) error { - return fmt.Errorf("DownloadFile implementation moved to high-level client") -} diff --git a/drs/drs.go b/drs/drs.go deleted file mode 100644 index 9bdd5b4..0000000 --- a/drs/drs.go +++ /dev/null @@ -1,171 +0,0 @@ -package drs - -import ( - "fmt" - "strings" - - "github.com/google/uuid" -) - -// NAMESPACE is the UUID namespace used for generating DRS UUIDs -var NAMESPACE = uuid.NewMD5(uuid.NameSpaceURL, []byte("calypr.org")) - -func sanitizePathComponent(v string) string { - v = strings.TrimSpace(v) - v = strings.Trim(v, "/") - v = strings.ReplaceAll(v, "\\", "/") - return strings.ReplaceAll(v, " ", "_") -} - -// StoragePrefix returns the bucket key prefix used for object placement. -// Preferred layout is "/" when organization is provided. -// When organization is empty, it falls back to "/" for hyphenated -// project IDs or "default/" otherwise. -func StoragePrefix(org, project string) string { - org = sanitizePathComponent(org) - project = sanitizePathComponent(project) - if project == "" { - return "" - } - if org != "" { - return org + "/" + project - } - if strings.Contains(project, "-") { - parts := strings.SplitN(project, "-", 2) - return sanitizePathComponent(parts[0]) + "/" + sanitizePathComponent(parts[1]) - } - return "default/" + project -} - -func ProjectToResource(org, project string) (string, error) { - if org != "" { - return "/programs/" + org + "/projects/" + project, nil - } - if project == "" { - return "", fmt.Errorf("error: project ID is empty") - } - if !strings.Contains(project, "-") { - return "/programs/default/projects/" + project, nil - } - projectIdArr := strings.SplitN(project, "-", 2) - return "/programs/" + projectIdArr[0] + "/projects/" + projectIdArr[1], nil -} - -// From git-drs/drsmap/drs_map.go - -func DrsUUID(projectId string, hash string) string { - // create UUID based on project ID and hash - hashStr := 
fmt.Sprintf("%s:%s", projectId, hash) - return uuid.NewSHA1(NAMESPACE, []byte(hashStr)).String() -} - -func FindMatchingRecord(records []DRSObject, organization, projectId string) (*DRSObject, error) { - if len(records) == 0 { - return nil, nil - } - - // Convert project ID to resource path format for comparison - expectedAuthz, err := ProjectToResource(organization, projectId) - if err != nil { - return nil, fmt.Errorf("error converting project ID to resource format: %v", err) - } - - for _, record := range records { - for _, access := range record.AccessMethods { - if len(access.Authorizations.BearerAuthIssuers) == 0 { - continue - } - - // Check BearerAuthIssuers using a map for O(1) lookup (ref: "lists suck") - issuersMap := make(map[string]struct{}, len(access.Authorizations.BearerAuthIssuers)) - for _, issuer := range access.Authorizations.BearerAuthIssuers { - issuersMap[issuer] = struct{}{} - } - - if _, ok := issuersMap[expectedAuthz]; ok { - return &record, nil - } - } - } - return nil, nil -} - -// DRS UUID generation using SHA1 (compatible with git-drs) -func GenerateDrsID(projectId, hash string) string { - return DrsUUID(projectId, hash) -} - -func BuildDrsObj(fileName string, checksum string, size int64, drsId string, bucketName string, org string, projectId string) (*DRSObject, error) { - return BuildDrsObjWithPrefix(fileName, checksum, size, drsId, bucketName, org, projectId, "") -} - -func BuildDrsObjWithPrefix(fileName string, checksum string, size int64, drsId string, bucketName string, org string, projectId string, storagePrefix string) (*DRSObject, error) { - if bucketName == "" { - return nil, fmt.Errorf("error: bucket name is empty") - } - - checksum = NormalizeOid(checksum) - prefix := strings.Trim(strings.TrimSpace(storagePrefix), "/") - if prefix == "" { - prefix = StoragePrefix(org, projectId) - } - var fileURL string - // Canonical CAS-style storage path: - // s3://bucket/{org}/{project}/sha256 - if prefix != "" { - fileURL = 
fmt.Sprintf("s3://%s/%s/%s", bucketName, prefix, checksum) - } else { - fileURL = fmt.Sprintf("s3://%s/%s", bucketName, checksum) - } - - authzStr, err := ProjectToResource(org, projectId) - if err != nil { - return nil, err - } - authorizations := Authorizations{ - BearerAuthIssuers: []string{authzStr}, - } - - drsObj := DRSObject{ - Id: drsId, - Name: fileName, - AccessMethods: []AccessMethod{{ - Type: "s3", - AccessUrl: AccessURL{ - Url: fileURL, - }, - Authorizations: authorizations, - }}, - Checksums: []Checksum{{ - Type: "sha256", - Checksum: checksum, - }}, - Size: size, - } - - return &drsObj, nil -} - -// ConvertToCandidate converts a DRSObject to a DRSObjectCandidate for registration. -// This is needed because the server expects checksums as an array of Checksum objects, -// while DRSObject uses HashInfo (which marshals to the correct format but has different Go types). -func ConvertToCandidate(obj *DRSObject) DRSObjectCandidate { - return DRSObjectCandidate{ - Name: obj.Name, - Size: obj.Size, - Version: obj.Version, - MimeType: obj.MimeType, - Checksums: obj.Checksums, - AccessMethods: obj.AccessMethods, - Contents: obj.Contents, - Description: obj.Description, - Aliases: obj.Aliases, - } -} - -func NormalizeOid(oid string) string { - if strings.HasPrefix(oid, "sha256:") { - return strings.TrimPrefix(oid, "sha256:") - } - return oid -} diff --git a/drs/interface.go b/drs/interface.go deleted file mode 100644 index babcf4d..0000000 --- a/drs/interface.go +++ /dev/null @@ -1,54 +0,0 @@ -package drs - -import ( - "context" - - "github.com/calypr/data-client/hash" - "github.com/calypr/data-client/request" -) - -// Client is the primary interface for interacting with a Calypr DRS server. -// It replaces the legacy IndexdInterface with a modern DRS-first API. 
-type Client interface { - request.RequestInterface - - // Metadata retrieval - GetObject(ctx context.Context, id string) (*DRSObject, error) - GetObjectByHash(ctx context.Context, checksum *hash.Checksum) ([]DRSObject, error) - BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]DRSObject, error) - - // Listing - ListObjects(ctx context.Context) (chan DRSObjectResult, error) - ListObjectsByProject(ctx context.Context, projectId string) (chan DRSObjectResult, error) - GetProjectSample(ctx context.Context, projectId string, limit int) ([]DRSObject, error) - - // Mutations - RegisterRecord(ctx context.Context, record *DRSObject) (*DRSObject, error) - RegisterRecords(ctx context.Context, records []*DRSObject) ([]*DRSObject, error) - UpdateRecord(ctx context.Context, updateInfo *DRSObject, did string) (*DRSObject, error) - DeleteRecord(ctx context.Context, did string) error - DeleteRecordsByProject(ctx context.Context, projectId string) error - - // Download/URL resolution - GetDownloadURL(ctx context.Context, id string, accessType string) (*AccessURL, error) - - // Extensions - // Add an object storage URL to an existing record - AddURL(ctx context.Context, blobURL, sha256 string, opts ...AddURLOption) (*DRSObject, error) - - // Utility operations - UpsertRecord(ctx context.Context, url string, sha256 string, fileSize int64, projectId string) (*DRSObject, error) - BuildDrsObj(fileName string, checksum string, size int64, drsId string) (*DRSObject, error) - - // Runtime context info - GetProjectId() string - GetBucketName() string - GetOrganization() string - - // Fluent configuration - WithProject(projectId string) Client - WithOrganization(organization string) Client - WithBucket(bucketName string) Client -} - -type AddURLOption func(map[string]any) diff --git a/drs/object_builder.go b/drs/object_builder.go deleted file mode 100644 index 25144c1..0000000 --- a/drs/object_builder.go +++ /dev/null @@ -1,78 +0,0 @@ -package drs - -import ( - "fmt" 
- "strings" - -) - -type ObjectBuilder struct { - Bucket string - ProjectID string - Organization string - StoragePrefix string - AccessType string - PathStyle string // "CAS" or "" (Gen3 default) -} - -func NewObjectBuilder(bucket, projectID string) ObjectBuilder { - return ObjectBuilder{ - Bucket: bucket, - ProjectID: projectID, - AccessType: "s3", - PathStyle: "Gen3", // Defaults to Gen3 behavior - } -} - -func (b ObjectBuilder) Build(fileName string, checksum string, size int64, drsID string) (*DRSObject, error) { - if b.Bucket == "" { - return nil, fmt.Errorf("error: bucket name is empty in config file") - } - accessType := b.AccessType - if accessType == "" { - accessType = "s3" - } - - // Remove sha256: prefix if present for clean S3 key. - checksum = strings.TrimPrefix(checksum, "sha256:") - prefix := strings.Trim(strings.TrimSpace(b.StoragePrefix), "/") - if prefix == "" { - prefix = StoragePrefix(b.Organization, b.ProjectID) - } - - var fileURL string - // Canonical CAS-style (s3://bucket/{org}/{project}/sha256). - // PathStyle is kept for compatibility, but object identity is checksum-first. 
- if prefix != "" { - fileURL = fmt.Sprintf("s3://%s/%s/%s", b.Bucket, prefix, checksum) - } else { - fileURL = fmt.Sprintf("s3://%s/%s", b.Bucket, checksum) - } - - authzStr, err := ProjectToResource(b.Organization, b.ProjectID) - if err != nil { - return nil, err - } - authorizations := Authorizations{ - BearerAuthIssuers: []string{authzStr}, - } - - drsObj := DRSObject{ - Id: drsID, - Name: fileName, - AccessMethods: []AccessMethod{{ - Type: accessType, - AccessUrl: AccessURL{ - Url: fileURL, - }, - Authorizations: authorizations, - }}, - Checksums: []Checksum{{ - Type: "sha256", - Checksum: checksum, - }}, - Size: size, - } - - return &drsObj, nil -} diff --git a/drs/object_builder_test.go b/drs/object_builder_test.go deleted file mode 100644 index 2b15c9d..0000000 --- a/drs/object_builder_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package drs - -import ( - "strings" - "testing" -) - -func TestObjectBuilderBuildSuccess(t *testing.T) { - builder := ObjectBuilder{ - ProjectID: "test-project", - Bucket: "bucket", - } - - obj, err := builder.Build("file.txt", "sha-256", 12, "did-1") - if err != nil { - t.Fatalf("Build error: %v", err) - } - if obj.Id != "did-1" { - t.Fatalf("unexpected Id: %s", obj.Id) - } - if obj.Name != "file.txt" { - t.Fatalf("unexpected Name: %s", obj.Name) - } - if obj.Checksums[0].Checksum != "sha-256" { - t.Fatalf("unexpected checksum: %v", obj.Checksums) - } - if obj.Size != 12 { - t.Fatalf("unexpected size: %d", obj.Size) - } - if len(obj.AccessMethods) != 1 { - t.Fatalf("expected 1 access method, got %d", len(obj.AccessMethods)) - } - if !strings.Contains(obj.AccessMethods[0].AccessUrl.Url, "bucket/test/project/sha-256") { - t.Fatalf("unexpected access URL: %s", obj.AccessMethods[0].AccessUrl.Url) - } - if len(obj.Aliases) != 0 { - t.Fatalf("expected no aliases, got: %#v", obj.Aliases) - } - if obj.AccessMethods[0].Type != "s3" { - t.Fatalf("unexpected access method type: %s", obj.AccessMethods[0].Type) - } -} - -func 
TestObjectBuilderBuildEmptyBucket(t *testing.T) { - builder := ObjectBuilder{ - ProjectID: "test-project", - Bucket: "", - } - - if _, err := builder.Build("file.txt", "sha-256", 12, "did-1"); err == nil { - t.Fatalf("expected error when Bucket is empty") - } -} diff --git a/drs/prefetch_context.go b/drs/prefetch_context.go deleted file mode 100644 index 316a565..0000000 --- a/drs/prefetch_context.go +++ /dev/null @@ -1,24 +0,0 @@ -package drs - -import "context" - -type prefetchedBySHAContextKey struct{} - -// WithPrefetchedBySHA stores pre-resolved DRS records keyed by normalized sha256. -func WithPrefetchedBySHA(ctx context.Context, bySHA map[string]DRSObject) context.Context { - if len(bySHA) == 0 { - return ctx - } - return context.WithValue(ctx, prefetchedBySHAContextKey{}, bySHA) -} - -// PrefetchedBySHA returns a pre-resolved DRS record for a normalized sha256. -func PrefetchedBySHA(ctx context.Context, sha256 string) (DRSObject, bool) { - m, ok := ctx.Value(prefetchedBySHAContextKey{}).(map[string]DRSObject) - if !ok || len(m) == 0 { - return DRSObject{}, false - } - obj, exists := m[sha256] - return obj, exists -} - diff --git a/drs/resolve.go b/drs/resolve.go deleted file mode 100644 index 4a28e94..0000000 --- a/drs/resolve.go +++ /dev/null @@ -1,62 +0,0 @@ -package drs - -import ( - "context" - "fmt" - "strings" - - "github.com/calypr/data-client/hash" -) - -// ResolveObject centralizes object-id vs checksum resolution logic. 
-func ResolveObject(ctx context.Context, client Client, guid string) (*DRSObject, error) { - if oid := NormalizeOid(guid); oid != "" { - if cached, ok := PrefetchedBySHA(ctx, oid); ok { - obj := cached - return &obj, nil - } - if recs, err := client.GetObjectByHash(ctx, &hash.Checksum{Type: "sha256", Checksum: oid}); err == nil && len(recs) > 0 { - return &recs[0], nil - } - } - return client.GetObject(ctx, guid) -} - -// ResolveDownloadURL resolves access method and object id when caller does not already provide a concrete access id. -func ResolveDownloadURL(ctx context.Context, client Client, guid string, accessID string) (string, error) { - obj, err := ResolveObject(ctx, client, guid) - if err != nil { - return "", err - } - - resolvedID := strings.TrimSpace(obj.Id) - if resolvedID == "" { - resolvedID = guid - } - - if accessID == "" { - for _, am := range obj.AccessMethods { - if am.AccessId != "" { - accessID = am.AccessId - break - } - } - if accessID == "" { - for _, am := range obj.AccessMethods { - if am.AccessUrl.Url != "" { - return am.AccessUrl.Url, nil - } - } - return "", fmt.Errorf("no suitable access method found for object %s", guid) - } - } - - accessURL, err := client.GetDownloadURL(ctx, resolvedID, accessID) - if err != nil { - return "", err - } - if accessURL == nil || accessURL.Url == "" { - return "", fmt.Errorf("empty access URL for object %s", guid) - } - return accessURL.Url, nil -} diff --git a/drs/server_client.go b/drs/server_client.go deleted file mode 100644 index 2e6df3c..0000000 --- a/drs/server_client.go +++ /dev/null @@ -1,23 +0,0 @@ -package drs - -import "github.com/calypr/data-client/transfer" - -// ServerClient composes DRS metadata operations and transfer operations -// against the same server endpoint/runtime mode. 
-type ServerClient interface { - Client - transfer.Backend -} - -type composedServerClient struct { - Client - transfer.Backend -} - -func ComposeServerClient(c Client, b transfer.Backend) ServerClient { - return &composedServerClient{ - Client: c, - Backend: b, - } -} - diff --git a/drs/storage_prefix_test.go b/drs/storage_prefix_test.go deleted file mode 100644 index 4276a70..0000000 --- a/drs/storage_prefix_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package drs - -import "testing" - -func TestStoragePrefix(t *testing.T) { - tests := []struct { - name string - org string - project string - want string - }{ - {name: "org + project", org: "cbdsTest", project: "git_drs_e2e_test", want: "cbdsTest/git_drs_e2e_test"}, - {name: "hyphenated project", org: "", project: "prog-proj", want: "prog/proj"}, - {name: "plain project", org: "", project: "projonly", want: "default/projonly"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := StoragePrefix(tt.org, tt.project) - if got != tt.want { - t.Fatalf("StoragePrefix(%q,%q)=%q want=%q", tt.org, tt.project, got, tt.want) - } - }) - } -} diff --git a/drs/syfon_adapter.go b/drs/syfon_adapter.go deleted file mode 100644 index ce23bde..0000000 --- a/drs/syfon_adapter.go +++ /dev/null @@ -1,61 +0,0 @@ -package drs - -import ( - "time" - - "github.com/calypr/data-client/hash" - syclient "github.com/calypr/syfon/client" -) - -func drsObjectToSyfonInternalRecord(obj *DRSObject) (syclient.InternalRecord, error) { - if obj == nil { - return syclient.InternalRecord{}, nil - } - out := syclient.InternalRecord{} - out.SetDid(obj.Id) - if obj.Name != "" { - out.SetFileName(obj.Name) - } - out.SetSize(obj.Size) - out.SetUrls(InternalURLFromDrsAccessURLs(obj.AccessMethods)) - out.SetAuthz(InternalAuthzFromDrsAccessMethods(obj.AccessMethods)) - out.SetHashes(hash.ConvertDrsChecksumsToMap(obj.Checksums)) - return out, nil -} - -func syfonInternalRecordToDRSObject(rec syclient.InternalRecord) (*DRSObject, error) { - 
accessMethods, err := DRSAccessMethodsFromInternalURLs(rec.GetUrls(), rec.GetAuthz()) - if err != nil { - return nil, err - } - checksums := hash.ConvertMapToDrsChecksums(rec.GetHashes()) - did := rec.GetDid() - obj := &DRSObject{ - Id: did, - SelfUri: "drs://" + did, - Size: rec.GetSize(), - AccessMethods: accessMethods, - Checksums: checksums, - } - if rec.GetFileName() != "" { - obj.Name = rec.GetFileName() - } - if t, ok := parseRFC3339(rec.GetCreatedDate()); ok { - obj.CreatedTime = t - } - if t, ok := parseRFC3339(rec.GetUpdatedDate()); ok { - obj.UpdatedTime = t - } - return obj, nil -} - -func parseRFC3339(v string) (time.Time, bool) { - if v == "" { - return time.Time{}, false - } - t, err := time.Parse(time.RFC3339, v) - if err != nil { - return time.Time{}, false - } - return t, true -} diff --git a/drs/types.go b/drs/types.go deleted file mode 100644 index c8b66e1..0000000 --- a/drs/types.go +++ /dev/null @@ -1,28 +0,0 @@ -package drs - -import ( - "github.com/calypr/data-client/hash" - syclient "github.com/calypr/syfon/client" -) - -type ChecksumType = string -type Checksum = syclient.Checksum -type HashInfo = hash.HashInfo - -type AccessURL = syclient.AccessMethodAccessURL -type Authorizations = syclient.AccessMethodAuthorizations -type AccessMethod = syclient.AccessMethod - -type Contents = syclient.ContentsObject - -type DRSPage = syclient.DRSPage - -type DRSObjectResult struct { - Object *DRSObject - Error error -} - -type DRSObject = syclient.DRSObject - -type DRSObjectCandidate = syclient.DRSObjectCandidate -type RegisterObjectsRequest = syclient.RegisterObjectsRequest diff --git a/drs/upsert.go b/drs/upsert.go deleted file mode 100644 index 1254f9c..0000000 --- a/drs/upsert.go +++ /dev/null @@ -1,61 +0,0 @@ -package drs - -import ( - "context" - "fmt" - "slices" - - "github.com/calypr/data-client/hash" -) - -// UpsertRecord creates or updates a record with a new URL. 
-func (c *DrsClient) UpsertRecord(ctx context.Context, url string, sha256 string, fileSize int64, projectId string) (*DRSObject, error) { - sha256 = NormalizeOid(sha256) - - // Query current state - records, err := c.GetObjectByHash(ctx, &hash.Checksum{Type: hash.ChecksumTypeSHA256, Checksum: sha256}) - if err != nil { - return nil, fmt.Errorf("error querying DRS server: %v", err) - } - - var matchingRecord *DRSObject - for i := range records { - // Match by checksum content identity - if hash.ConvertDrsChecksumsToHashInfo(records[i].Checksums).SHA256 == sha256 { - matchingRecord = &records[i] - break - } - } - - if matchingRecord != nil { - existingURLs := InternalURLFromDrsAccessURLs(matchingRecord.AccessMethods) - if slices.Contains(existingURLs, url) { - return matchingRecord, nil - } - - c.logger.Debug("updating existing record with new url") - updatedRecord := DRSObject{AccessMethods: []AccessMethod{{AccessUrl: AccessURL{Url: url}}}} - return c.UpdateRecord(ctx, &updatedRecord, matchingRecord.Id) - } - - // If no record exists, create one - c.logger.Debug("creating new record") - uuid := GenerateDrsID(projectId, sha256) - - // Use simplified BuildDrsObj (helper in same package) - drsObj, err := BuildDrsObj("", sha256, fileSize, uuid, c.GetBucketName(), c.GetOrganization(), projectId) - if err != nil { - return nil, err - } - - return c.RegisterRecord(ctx, drsObj) -} - -// Internal methods to support specialized behaviors from git-drs -// (These can be overridden or extended) - -func (c *DrsClient) AddURL(ctx context.Context, blobURL, sha256 string, opts ...AddURLOption) (*DRSObject, error) { - // Simple wrapper for UpsertRecord for now, but allows for more complex logic if needed - // In a real implementation, this would handle cloud inspection too. 
- return c.UpsertRecord(ctx, blobURL, sha256, 0, c.GetProjectId()) -} diff --git a/fence/client.go b/fence/client.go index 6e9a71c..fc6b73d 100644 --- a/fence/client.go +++ b/fence/client.go @@ -23,7 +23,7 @@ import ( // FenceBucketEndpoint is the endpoint postfix for FENCE bucket list const FenceBucketEndpoint = "/data/buckets" -//go:generate mockgen -destination=../mocks/mock_fence.go -package=mocks github.com/calypr/data-client/fence FenceInterface +//go:generate go run go.uber.org/mock/mockgen@v0.6.0 -destination=../mocks/mock_fence.go -package=mocks github.com/calypr/data-client/fence FenceInterface // FenceInterface defines the interface for Fence client type FenceInterface interface { diff --git a/g3client/client.go b/g3client/client.go index 115374b..4a95d36 100644 --- a/g3client/client.go +++ b/g3client/client.go @@ -7,25 +7,25 @@ import ( "strings" "github.com/calypr/data-client/conf" - "github.com/calypr/data-client/credentials" - "github.com/calypr/data-client/drs" "github.com/calypr/data-client/fence" "github.com/calypr/data-client/logs" "github.com/calypr/data-client/request" "github.com/calypr/data-client/requestor" "github.com/calypr/data-client/sower" - "github.com/calypr/data-client/transfer" - gen3signer "github.com/calypr/data-client/transfer/signer/gen3" + "github.com/calypr/syfon/client/credentials" + "github.com/calypr/syfon/client/drs" + sylogs "github.com/calypr/syfon/client/pkg/logs" + syrequest "github.com/calypr/syfon/client/pkg/request" version "github.com/hashicorp/go-version" ) -//go:generate mockgen -destination=../mocks/mock_gen3interface.go -package=mocks github.com/calypr/data-client/g3client Gen3Interface +//go:generate go run go.uber.org/mock/mockgen@v0.6.0 -destination=../mocks/mock_gen3interface.go -package=mocks github.com/calypr/data-client/g3client Gen3Interface type Gen3Interface interface { request.RequestInterface Logger() *logs.Gen3Logger Credentials() credentials.Manager - DRSClient() drs.ServerClient + DRSClient() 
drs.Client FenceClient() fence.FenceInterface RequestorClient() requestor.RequestorInterface SowerClient() sower.SowerInterface @@ -67,8 +67,13 @@ func (g *Gen3Client) initializeClients() { if shouldInit(FenceClient) { g.fence = fence.NewFenceClient(g.RequestInterface, g.credential, g.logger.Logger) } - if shouldInit(IndexdClient) { - g.indexd = drs.NewDrsClient(g.RequestInterface, g.credential, g.logger.Logger) + if shouldInit(SyfonClient) { + syReq := syrequest.NewRequestInterface( + sylogs.NewGen3Logger(g.logger.Logger, "", ""), + g.credential, + g.config, + ) + g.syfon = drs.NewDrsClient(syReq, g.credential, sylogs.NewGen3Logger(g.logger.Logger, "", "")) } if shouldInit(SowerClient) { g.sower = sower.NewSowerClient(g.RequestInterface, g.credential.APIEndpoint) @@ -81,8 +86,7 @@ func (g *Gen3Client) initializeClients() { type Gen3Client struct { Ctx context.Context fence fence.FenceInterface - indexd drs.Client - server drs.ServerClient + syfon drs.Client sower sower.SowerInterface requestor requestor.RequestorInterface config conf.ManagerInterface @@ -91,7 +95,6 @@ type Gen3Client struct { credential *conf.Credential creds credentials.Manager logger *logs.Gen3Logger - transfer transfer.Backend requestedClients []ClientType } @@ -100,7 +103,7 @@ type ClientType string const ( FenceClient ClientType = "fence" - IndexdClient ClientType = "indexd" + SyfonClient ClientType = "syfon" SowerClient ClientType = "sower" RequestorClient ClientType = "requestor" ) @@ -113,23 +116,16 @@ func WithClients(clients ...ClientType) Option { } } -func (g *Gen3Client) DRSClient() drs.ServerClient { - if g.server != nil { - return g.server +func (g *Gen3Client) DRSClient() drs.Client { + if g.syfon == nil { + syReq := syrequest.NewRequestInterface( + sylogs.NewGen3Logger(g.logger.Logger, "", ""), + g.credential, + g.config, + ) + g.syfon = drs.NewDrsClient(syReq, g.credential, sylogs.NewGen3Logger(g.logger.Logger, "", "")) } - if g.transfer != nil { - g.server = 
drs.ComposeServerClient(g.indexd, g.transfer) - return g.server - } - if g.fence == nil { - g.fence = fence.NewFenceClient(g.RequestInterface, g.credential, g.logger.Logger) - } - if g.indexd == nil { - g.indexd = drs.NewDrsClient(g.RequestInterface, g.credential, g.logger.Logger) - } - g.transfer = transfer.New(g.RequestInterface, g.logger, gen3signer.New(g.RequestInterface, g.credential, g.indexd, g.fence)) - g.server = drs.ComposeServerClient(g.indexd, g.transfer) - return g.server + return g.syfon } func (g *Gen3Client) FenceClient() fence.FenceInterface { diff --git a/ga4gh/data-repository-service-schemas b/ga4gh/data-repository-service-schemas deleted file mode 160000 index 935a209..0000000 --- a/ga4gh/data-repository-service-schemas +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 935a20952e1071421c28d569b8c8e0e940bc001f diff --git a/go.mod b/go.mod index d5b216b..793a137 100644 --- a/go.mod +++ b/go.mod @@ -3,19 +3,15 @@ module github.com/calypr/data-client go 1.26.1 require ( - github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1 - github.com/calypr/syfon v0.1.0 + github.com/calypr/syfon/client v0.1.2 github.com/golang-jwt/jwt/v5 v5.3.1 - github.com/google/uuid v1.6.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-retryablehttp v0.7.8 - github.com/hashicorp/go-version v1.8.0 + github.com/hashicorp/go-version v1.9.0 github.com/spf13/cobra v1.10.2 - github.com/vbauerster/mpb/v8 v8.11.2 + github.com/vbauerster/mpb/v8 v8.12.0 go.uber.org/mock v0.6.0 - gocloud.dev v0.45.0 - golang.org/x/sync v0.19.0 - gopkg.in/ini.v1 v1.67.0 + gopkg.in/ini.v1 v1.67.1 gopkg.in/yaml.v3 v3.0.1 ) @@ -56,15 +52,15 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.95.1 // indirect github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // 
indirect github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect github.com/aws/smithy-go v1.24.2 // indirect - github.com/calypr/syfon/apigen v0.1.0 // indirect + github.com/calypr/syfon/apigen v0.1.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/clipperhouse/stringish v0.1.1 // indirect - github.com/clipperhouse/uax29/v2 v2.3.0 // indirect + github.com/clipperhouse/uax29/v2 v2.7.0 // indirect github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e // indirect github.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect @@ -73,6 +69,7 @@ require ( github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/s2a-go v0.1.9 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/google/wire v0.7.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect github.com/googleapis/gax-go/v2 v2.15.0 // indirect @@ -80,7 +77,7 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mattn/go-runewidth v0.0.19 // indirect + github.com/mattn/go-runewidth v0.0.20 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/spf13/pflag v1.0.10 // indirect @@ -94,10 +91,12 @@ require ( go.opentelemetry.io/otel/sdk v1.40.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect go.opentelemetry.io/otel/trace v1.40.0 // indirect + gocloud.dev v0.45.0 // indirect golang.org/x/crypto v0.45.0 // indirect golang.org/x/net v0.47.0 // indirect golang.org/x/oauth2 v0.33.0 // indirect - golang.org/x/sys v0.40.0 // indirect + 
golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.41.0 // indirect golang.org/x/text v0.31.0 // indirect golang.org/x/time v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect @@ -109,7 +108,3 @@ require ( google.golang.org/protobuf v1.36.10 // indirect gopkg.in/validator.v2 v2.0.1 // indirect ) - -replace github.com/calypr/syfon => ../syfon - -replace github.com/calypr/syfon/apigen => ../syfon/apigen diff --git a/go.sum b/go.sum index 2cfae66..6e197fa 100644 --- a/go.sum +++ b/go.sum @@ -92,15 +92,19 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/ github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ= github.com/aws/smithy-go v1.24.2 h1:FzA3bu/nt/vDvmnkg+R8Xl46gmzEDam6mZ1hzmwXFng= github.com/aws/smithy-go v1.24.2/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc= +github.com/calypr/syfon/apigen v0.1.2 h1:/9IJcrS655DW4W/cPW0QoqBOhZrgTnRfUAlYOZ3q1e8= +github.com/calypr/syfon/apigen v0.1.2/go.mod h1:2oqNkseovHqbU8kYu4nU+w5VrwhzAj6KfPL7YTBQEGk= +github.com/calypr/syfon/client v0.1.2 h1:MdfV/bLBf0ZkLREuKOVfEKMRiDfpFgLuYSW8GiYOxdk= +github.com/calypr/syfon/client v0.1.2/go.mod h1:xh7ndhABQz7UlM/tq80ZfYKDs2vBuwgfyrAhs3ghg7Y= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= -github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= -github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= -github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= +github.com/clipperhouse/uax29/v2 v2.7.0 h1:+gs4oBZ2gPfVrKPthwbMzWZDaAFPGYK72F0NJv2v7Vk= +github.com/clipperhouse/uax29/v2 v2.7.0/go.mod 
h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM= github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e h1:gt7U1Igw0xbJdyaCM5H2CnlAlPSkzrhsebQB6WQWjLA= github.com/cncf/xds/go v0.0.0-20251110193048-8bfbf64dc13e/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= @@ -157,8 +161,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48= github.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw= -github.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4= -github.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.9.0 h1:CeOIz6k+LoN3qX9Z0tyQrPtiB1DFYRPfCIBtaXPSCnA= +github.com/hashicorp/go-version v1.9.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= @@ -173,12 +177,13 @@ github.com/mattn/go-colorable v0.1.13 
h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= -github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= +github.com/mattn/go-runewidth v0.0.20 h1:WcT52H91ZUAwy8+HUkdM3THM6gXqXuLJi9O3rjcQQaQ= +github.com/mattn/go-runewidth v0.0.20/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -191,10 +196,17 @@ github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo= github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs= +github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= -github.com/vbauerster/mpb/v8 v8.11.2 h1:OqLoHznUVU7SKS/WV+1dB5/hm20YLheYupiHhL5+M1Y= -github.com/vbauerster/mpb/v8 v8.11.2/go.mod h1:mEB/M353al1a7wMUNtiymmPsEkGlJgeJmtlbY5adCJ8= +github.com/vbauerster/mpb/v8 v8.12.0 h1:+gneY3ifzc88tKDzOtfG8k8gfngCx615S2ZmFM4liWg= +github.com/vbauerster/mpb/v8 v8.12.0/go.mod h1:V02YIuMVo301Y1VE9VtZlD8s84OMsk+EKN6mwvf/588= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs= @@ -229,8 +241,8 @@ golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwE golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= -golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k= +golang.org/x/sys v0.41.0/go.mod 
h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= @@ -254,9 +266,10 @@ google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.1 h1:tVBILHy0R6e4wkYOn3XmiITt/hEVH4TFMYvAX2Ytz6k= +gopkg.in/ini.v1 v1.67.1/go.mod h1:x/cyOwCgZqOkJoDIJ3c1KNHMo10+nLGAhh+kn3Zizss= gopkg.in/validator.v2 v2.0.1 h1:xF0KWyGWXm/LM2G1TrEjqOu4pa6coO9AlWSf3msVfDY= gopkg.in/validator.v2 v2.0.1/go.mod h1:lIUZBlB3Im4s/eYp39Ry/wkR02yOPhZ9IwIRBjuPuG8= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/hash/hash.go b/hash/hash.go deleted file mode 100644 index 008cd1f..0000000 --- a/hash/hash.go +++ /dev/null @@ -1,169 +0,0 @@ -package hash - -import ( - "encoding/json" - "fmt" - syclient "github.com/calypr/syfon/client" -) - -// ChecksumType represents the digest method used to create the checksum -type ChecksumType string - -// IANA Named Information Hash Algorithm Registry values and other common types -const ( - ChecksumTypeSHA1 ChecksumType = "sha1" - ChecksumTypeSHA256 ChecksumType = "sha256" - ChecksumTypeSHA512 ChecksumType = "sha512" - 
ChecksumTypeMD5 ChecksumType = "md5" - ChecksumTypeETag ChecksumType = "etag" - ChecksumTypeCRC32C ChecksumType = "crc32c" - ChecksumTypeTrunc512 ChecksumType = "trunc512" -) - -// IsValid checks if the checksum type is a known/recommended value -func (ct ChecksumType) IsValid() bool { - switch ct { - case ChecksumTypeSHA256, ChecksumTypeSHA512, ChecksumTypeSHA1, ChecksumTypeMD5, - ChecksumTypeETag, ChecksumTypeCRC32C: - return true - default: - return false - } -} - -// String returns the string representation of the checksum type -func (ct ChecksumType) String() string { - return string(ct) -} - -var SupportedChecksums = map[string]bool{ - string(ChecksumTypeSHA1): true, - string(ChecksumTypeSHA256): true, - string(ChecksumTypeSHA512): true, - string(ChecksumTypeMD5): true, - string(ChecksumTypeETag): true, - string(ChecksumTypeCRC32C): true, - string(ChecksumTypeTrunc512): true, -} - -type Checksum struct { - Checksum string `json:"checksum"` - Type ChecksumType `json:"type"` -} - -type HashInfo struct { - MD5 string `json:"md5,omitempty"` - SHA string `json:"sha,omitempty"` - SHA256 string `json:"sha256,omitempty"` - SHA512 string `json:"sha512,omitempty"` - CRC string `json:"crc,omitempty"` - ETag string `json:"etag,omitempty"` -} - -// UnmarshalJSON accepts both the DRS map-based schema (Indexd) and the array-of-checksums schema (GA4GH). 
-func (h *HashInfo) UnmarshalJSON(data []byte) error { - if string(data) == "null" { - *h = HashInfo{} - return nil - } - - var mapPayload map[string]string - if err := json.Unmarshal(data, &mapPayload); err == nil { - *h = ConvertStringMapToHashInfo(mapPayload) - return nil - } - - var checksumPayload []Checksum - if err := json.Unmarshal(data, &checksumPayload); err == nil { - *h = ConvertChecksumsToHashInfo(checksumPayload) - return nil - } - - return fmt.Errorf("unsupported HashInfo payload: %s", string(data)) -} - -func ConvertStringMapToHashInfo(inputHashes map[string]string) HashInfo { - hashInfo := HashInfo{} - - for key, value := range inputHashes { - if !SupportedChecksums[key] { - continue // Disregard unsupported types - } - switch key { - case string(ChecksumTypeMD5): - hashInfo.MD5 = value - case string(ChecksumTypeSHA1): - hashInfo.SHA = value - case string(ChecksumTypeSHA256): - hashInfo.SHA256 = value - case string(ChecksumTypeSHA512): - hashInfo.SHA512 = value - case string(ChecksumTypeCRC32C): - hashInfo.CRC = value - case string(ChecksumTypeETag): - hashInfo.ETag = value - } - } - - return hashInfo -} - -func ConvertHashInfoToMap(hashes HashInfo) map[string]string { - result := make(map[string]string) - if hashes.MD5 != "" { - result["md5"] = hashes.MD5 - } - if hashes.SHA != "" { - result["sha"] = hashes.SHA - } - if hashes.SHA256 != "" { - result["sha256"] = hashes.SHA256 - } - if hashes.SHA512 != "" { - result["sha512"] = hashes.SHA512 - } - if hashes.CRC != "" { - result["crc"] = hashes.CRC - } - if hashes.ETag != "" { - result["etag"] = hashes.ETag - } - return result -} - -func ConvertChecksumsToMap(checksums []Checksum) map[string]string { - result := make(map[string]string, len(checksums)) - for _, c := range checksums { - result[string(c.Type)] = c.Checksum - } - return result -} - -func ConvertChecksumsToHashInfo(checksums []Checksum) HashInfo { - checksumMap := ConvertChecksumsToMap(checksums) - return 
ConvertStringMapToHashInfo(checksumMap) -} - -func ConvertDrsChecksumsToMap(checksums []syclient.Checksum) map[string]string { - result := make(map[string]string, len(checksums)) - for _, c := range checksums { - result[c.Type] = c.Checksum - } - return result -} - -func ConvertDrsChecksumsToHashInfo(checksums []syclient.Checksum) HashInfo { - checksumMap := ConvertDrsChecksumsToMap(checksums) - return ConvertStringMapToHashInfo(checksumMap) -} - -func ConvertMapToDrsChecksums(hashes map[string]string) []syclient.Checksum { - result := make([]syclient.Checksum, 0, len(hashes)) - for t, c := range hashes { - result = append(result, syclient.Checksum{ - Type: t, - Checksum: c, - }) - } - return result -} diff --git a/hash/hash_test.go b/hash/hash_test.go deleted file mode 100644 index f08c7ea..0000000 --- a/hash/hash_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package hash - -import ( - "encoding/json" - "testing" -) - -func TestChecksumType_IsValid(t *testing.T) { - tests := []struct { - name string - ct ChecksumType - want bool - }{ - {"valid sha256", ChecksumTypeSHA256, true}, - {"valid md5", ChecksumTypeMD5, true}, - {"invalid type", "invalid", false}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := tt.ct.IsValid(); got != tt.want { - t.Errorf("ChecksumType.IsValid() = %v, want %v", got, tt.want) - } - }) - } -} - -func TestHashInfo_UnmarshalJSON_Map(t *testing.T) { - jsonMap := `{"sha256": "hash-val", "md5": "md5-val"}` - var h HashInfo - if err := json.Unmarshal([]byte(jsonMap), &h); err != nil { - t.Fatalf("UnmarshalJSON failed: %v", err) - } - if h.SHA256 != "hash-val" { - t.Errorf("expected SHA256 hash-val, got %s", h.SHA256) - } - if h.MD5 != "md5-val" { - t.Errorf("expected MD5 md5-val, got %s", h.MD5) - } -} - -func TestHashInfo_UnmarshalJSON_List(t *testing.T) { - jsonList := `[{"type": "sha256", "checksum": "hash-val"}, {"type": "md5", "checksum": "md5-val"}]` - var h HashInfo - if err := json.Unmarshal([]byte(jsonList), 
&h); err != nil { - t.Fatalf("UnmarshalJSON failed: %v", err) - } - if h.SHA256 != "hash-val" { - t.Errorf("expected SHA256 hash-val, got %s", h.SHA256) - } - if h.MD5 != "md5-val" { - t.Errorf("expected MD5 md5-val, got %s", h.MD5) - } -} diff --git a/localclient/client.go b/localclient/client.go deleted file mode 100644 index 7542ef6..0000000 --- a/localclient/client.go +++ /dev/null @@ -1,63 +0,0 @@ -package localclient - -import ( - "github.com/calypr/data-client/conf" - "github.com/calypr/data-client/credentials" - "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/request" - "github.com/calypr/data-client/transfer" - localsigner "github.com/calypr/data-client/transfer/signer/local" -) - -// LocalInterface is the local-mode top-level facade. -// It mirrors the Gen3 facade shape for indexing and transfer operations. -type LocalInterface interface { - request.RequestInterface - Logger() *logs.Gen3Logger - Credentials() credentials.Reader - DRSClient() drs.ServerClient -} - -type LocalClient struct { - request.RequestInterface - - credential *conf.Credential - creds credentials.Reader - logger *logs.Gen3Logger - server drs.ServerClient -} - -func NewLocalInterface(profile string, logger *logs.Gen3Logger) (LocalInterface, error) { - config := conf.NewConfigure(logger.Logger) - cred, err := config.Load(profile) - if err != nil { - return nil, err - } - return NewLocalInterfaceFromCredential(cred, logger), nil -} - -func NewLocalInterfaceFromCredential(cred *conf.Credential, logger *logs.Gen3Logger) LocalInterface { - config := conf.NewConfigure(logger.Logger) - req := request.NewRequestInterface(logger, cred, config) - dc := drs.NewLocalDrsClient(req, cred.APIEndpoint, logger.Logger) - tb := transfer.New(req, logger, localsigner.New(cred.APIEndpoint, cred, dc)) - - return &LocalClient{ - RequestInterface: req, - credential: cred, - creds: &staticCredentials{cred: cred}, - logger: logger, - server: 
drs.ComposeServerClient(dc, tb), - } -} - -type staticCredentials struct { - cred *conf.Credential -} - -func (c *staticCredentials) Current() *conf.Credential { return c.cred } - -func (l *LocalClient) Logger() *logs.Gen3Logger { return l.logger } -func (l *LocalClient) Credentials() credentials.Reader { return l.creds } -func (l *LocalClient) DRSClient() drs.ServerClient { return l.server } diff --git a/logs/logger_test.go b/logs/logger_test.go index 7e689f8..382d2b5 100644 --- a/logs/logger_test.go +++ b/logs/logger_test.go @@ -68,7 +68,7 @@ func TestNew_WithScoreboardOption(t *testing.T) { t.Fatal("Expected non-nil logger") } - if logger.scoreboard == nil { + if logger.Scoreboard() == nil { t.Error("Expected non-nil scoreboard when WithScoreboard option is used") } } @@ -82,9 +82,8 @@ func TestNew_WithFailedLogOption(t *testing.T) { t.Fatal("Expected non-nil logger") } - if logger.failedPath == "" { - t.Error("Expected non-empty failed path when WithFailedLog option is used") - } + // Ensure failed-log helpers remain callable with syfon-backed logger. + _ = logger.GetFailedLogMap() } func TestNew_WithSucceededLogOption(t *testing.T) { @@ -96,9 +95,8 @@ func TestNew_WithSucceededLogOption(t *testing.T) { t.Fatal("Expected non-nil logger") } - if logger.succeededPath == "" { - t.Error("Expected non-empty succeeded path when WithSucceededLog option is used") - } + // Ensure succeeded-log helpers remain callable with syfon-backed logger. + _ = logger.GetSucceededLogMap() } func TestNew_WithBaseLogger(t *testing.T) { @@ -136,7 +134,7 @@ func TestNew_WithMultipleOptions(t *testing.T) { t.Error("Expected non-nil embedded slog logger") } - if logger.scoreboard == nil { + if logger.Scoreboard() == nil { t.Error("Expected non-nil scoreboard") } diff --git a/mocks/mock_drs_client.go b/mocks/mock_drs_client.go deleted file mode 100644 index 8b8aa9e..0000000 --- a/mocks/mock_drs_client.go +++ /dev/null @@ -1,260 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. 
-// Source: github.com/calypr/data-client/drs (interfaces: Client) - -package mocks - -import ( - context "context" - io "io" - http "net/http" - reflect "reflect" - - common "github.com/calypr/data-client/common" - drs "github.com/calypr/data-client/drs" - hash "github.com/calypr/data-client/hash" - logs "github.com/calypr/data-client/logs" - request "github.com/calypr/data-client/request" - gomock "go.uber.org/mock/gomock" -) - -// MockDrsClient is a mock of Client interface. -type MockDrsClient struct { - ctrl *gomock.Controller - recorder *MockDrsClientMockRecorder -} - -// MockDrsClientMockRecorder is the mock recorder for MockDrsClient. -type MockDrsClientMockRecorder struct { - mock *MockDrsClient -} - -// NewMockDrsClient creates a new mock instance. -func NewMockDrsClient(ctrl *gomock.Controller) *MockDrsClient { - mock := &MockDrsClient{ctrl: ctrl} - mock.recorder = &MockDrsClientMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *MockDrsClient) EXPECT() *MockDrsClientMockRecorder { - return m.recorder -} - -func (m *MockDrsClient) BatchGetObjectsByHash(ctx context.Context, hashes []string) (map[string][]drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BatchGetObjectsByHash", ctx, hashes) - ret0, _ := ret[0].(map[string][]drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (mr *MockDrsClientMockRecorder) BatchGetObjectsByHash(ctx, hashes any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BatchGetObjectsByHash", reflect.TypeOf((*MockDrsClient)(nil).BatchGetObjectsByHash), ctx, hashes) -} - -func (m *MockDrsClient) DeleteRecord(ctx context.Context, did string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteRecord", ctx, did) - ret0, _ := ret[0].(error) - return ret0 -} - -func (mr *MockDrsClientMockRecorder) DeleteRecord(ctx, did any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRecord", reflect.TypeOf((*MockDrsClient)(nil).DeleteRecord), ctx, did) -} - -func (m *MockDrsClient) GetDownloadURL(ctx context.Context, id string, accessType string) (*drs.AccessURL, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDownloadURL", ctx, id, accessType) - ret0, _ := ret[0].(*drs.AccessURL) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (mr *MockDrsClientMockRecorder) GetDownloadURL(ctx, id, accessType any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDownloadURL", reflect.TypeOf((*MockDrsClient)(nil).GetDownloadURL), ctx, id, accessType) -} - -func (m *MockDrsClient) GetObject(ctx context.Context, id string) (*drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObject", ctx, id) - ret0, _ := ret[0].(*drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (mr *MockDrsClientMockRecorder) GetObject(ctx, id any) *gomock.Call 
{ - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockDrsClient)(nil).GetObject), ctx, id) -} - -func (m *MockDrsClient) GetObjectByHash(ctx context.Context, checksum *hash.Checksum) ([]drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectByHash", ctx, checksum) - ret0, _ := ret[0].([]drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (mr *MockDrsClientMockRecorder) GetObjectByHash(ctx, checksum any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectByHash", reflect.TypeOf((*MockDrsClient)(nil).GetObjectByHash), ctx, checksum) -} - -func (m *MockDrsClient) RegisterRecord(ctx context.Context, record *drs.DRSObject) (*drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterRecord", ctx, record) - ret0, _ := ret[0].(*drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (mr *MockDrsClientMockRecorder) RegisterRecord(ctx, record any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRecord", reflect.TypeOf((*MockDrsClient)(nil).RegisterRecord), ctx, record) -} - -func (m *MockDrsClient) UpdateRecord(ctx context.Context, updateInfo *drs.DRSObject, did string) (*drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateRecord", ctx, updateInfo, did) - ret0, _ := ret[0].(*drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (mr *MockDrsClientMockRecorder) UpdateRecord(ctx, updateInfo, did any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRecord", reflect.TypeOf((*MockDrsClient)(nil).UpdateRecord), ctx, updateInfo, did) -} - -func (m *MockDrsClient) UpsertRecord(ctx context.Context, url string, sha256 string, fileSize int64, projectId string) (*drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, 
"UpsertRecord", ctx, url, sha256, fileSize, projectId) - ret0, _ := ret[0].(*drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (mr *MockDrsClientMockRecorder) UpsertRecord(ctx, url, sha256, fileSize, projectId any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpsertRecord", reflect.TypeOf((*MockDrsClient)(nil).UpsertRecord), ctx, url, sha256, fileSize, projectId) -} - -func (m *MockDrsClient) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Do", ctx, req) - ret0, _ := ret[0].(*http.Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (mr *MockDrsClientMockRecorder) Do(ctx, req any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockDrsClient)(nil).Do), ctx, req) -} - -func (m *MockDrsClient) RegisterRecords(ctx context.Context, records []*drs.DRSObject) ([]*drs.DRSObject, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterRecords", ctx, records) - ret0, _ := ret[0].([]*drs.DRSObject) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (mr *MockDrsClientMockRecorder) RegisterRecords(ctx, records any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRecords", reflect.TypeOf((*MockDrsClient)(nil).RegisterRecords), ctx, records) -} - -func (m *MockDrsClient) ListObjects(ctx context.Context) (chan drs.DRSObjectResult, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjects", ctx) - ret0, _ := ret[0].(chan drs.DRSObjectResult) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (mr *MockDrsClientMockRecorder) ListObjects(ctx any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjects", reflect.TypeOf((*MockDrsClient)(nil).ListObjects), ctx) -} - -func (m *MockDrsClient) New(method, url 
string) *request.RequestBuilder { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "New", method, url) - ret0, _ := ret[0].(*request.RequestBuilder) - return ret0 -} - -func (mr *MockDrsClientMockRecorder) New(method, url any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockDrsClient)(nil).New), method, url) -} - -func (m *MockDrsClient) WithProject(projectId string) drs.Client { - return m -} - -func (m *MockDrsClient) WithOrganization(organization string) drs.Client { - return m -} - -func (m *MockDrsClient) WithBucket(bucketName string) drs.Client { - return m -} - -func (m *MockDrsClient) GetProjectId() string { return "" } -func (m *MockDrsClient) GetBucketName() string { return "" } -func (m *MockDrsClient) GetOrganization() string { return "" } -func (m *MockDrsClient) RegisterFile(ctx context.Context, oid, path string) (*drs.DRSObject, error) { - return nil, nil -} -func (m *MockDrsClient) AddURL(ctx context.Context, blobURL, sha256 string, opts ...drs.AddURLOption) (*drs.DRSObject, error) { - return nil, nil -} -func (m *MockDrsClient) ListObjectsByProject(ctx context.Context, projectId string) (chan drs.DRSObjectResult, error) { - return nil, nil -} -func (m *MockDrsClient) GetProjectSample(ctx context.Context, projectId string, limit int) ([]drs.DRSObject, error) { - return nil, nil -} -func (m *MockDrsClient) DeleteRecordsByProject(ctx context.Context, projectId string) error { - return nil -} -func (m *MockDrsClient) BuildDrsObj(fileName string, checksum string, size int64, drsId string) (*drs.DRSObject, error) { - return nil, nil -} -func (m *MockDrsClient) DownloadFile(ctx context.Context, oid, destPath string) error { return nil } -func (m *MockDrsClient) Name() string { return "mock-drs-client" } -func (m *MockDrsClient) Logger() *logs.Gen3Logger { return nil } -func (m *MockDrsClient) ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { - 
return "", nil -} -func (m *MockDrsClient) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { - return nil, nil -} -func (m *MockDrsClient) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { - return "", nil -} -func (m *MockDrsClient) ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) { - return nil, nil -} -func (m *MockDrsClient) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { - return nil, nil -} -func (m *MockDrsClient) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { - return "", nil -} -func (m *MockDrsClient) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { - return nil -} -func (m *MockDrsClient) Upload(ctx context.Context, url string, body io.Reader, size int64) error { - return nil -} -func (m *MockDrsClient) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { - return "", nil -} -func (m *MockDrsClient) DeleteFile(ctx context.Context, guid string) (string, error) { return "", nil } diff --git a/mocks/mock_functions.go b/mocks/mock_functions.go deleted file mode 100644 index 9f905fd..0000000 --- a/mocks/mock_functions.go +++ /dev/null @@ -1,161 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/calypr/data-client/api (interfaces: FunctionInterface) -// -// Generated by this command: -// -// mockgen -destination=../mocks/mock_functions.go -package=mocks github.com/calypr/data-client/api FunctionInterface -// - -// Package mocks is a generated GoMock package. 
-package mocks - -import ( - context "context" - http "net/http" - reflect "reflect" - - conf "github.com/calypr/data-client/conf" - request "github.com/calypr/data-client/request" - gomock "go.uber.org/mock/gomock" -) - -// MockFunctionInterface is a mock of FunctionInterface interface. -type MockFunctionInterface struct { - ctrl *gomock.Controller - recorder *MockFunctionInterfaceMockRecorder - isgomock struct{} -} - -// MockFunctionInterfaceMockRecorder is the mock recorder for MockFunctionInterface. -type MockFunctionInterfaceMockRecorder struct { - mock *MockFunctionInterface -} - -// NewMockFunctionInterface creates a new mock instance. -func NewMockFunctionInterface(ctrl *gomock.Controller) *MockFunctionInterface { - mock := &MockFunctionInterface{ctrl: ctrl} - mock.recorder = &MockFunctionInterfaceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockFunctionInterface) EXPECT() *MockFunctionInterfaceMockRecorder { - return m.recorder -} - -// CheckForShepherdAPI mocks base method. -func (m *MockFunctionInterface) CheckForShepherdAPI(ctx context.Context) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CheckForShepherdAPI", ctx) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CheckForShepherdAPI indicates an expected call of CheckForShepherdAPI. -func (mr *MockFunctionInterfaceMockRecorder) CheckForShepherdAPI(ctx any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckForShepherdAPI", reflect.TypeOf((*MockFunctionInterface)(nil).CheckForShepherdAPI), ctx) -} - -// CheckPrivileges mocks base method. 
-func (m *MockFunctionInterface) CheckPrivileges(ctx context.Context) (map[string]any, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CheckPrivileges", ctx) - ret0, _ := ret[0].(map[string]any) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CheckPrivileges indicates an expected call of CheckPrivileges. -func (mr *MockFunctionInterfaceMockRecorder) CheckPrivileges(ctx any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckPrivileges", reflect.TypeOf((*MockFunctionInterface)(nil).CheckPrivileges), ctx) -} - -// DeleteRecord mocks base method. -func (m *MockFunctionInterface) DeleteRecord(ctx context.Context, guid string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteRecord", ctx, guid) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteRecord indicates an expected call of DeleteRecord. -func (mr *MockFunctionInterfaceMockRecorder) DeleteRecord(ctx, guid any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteRecord", reflect.TypeOf((*MockFunctionInterface)(nil).DeleteRecord), ctx, guid) -} - -// Do mocks base method. -func (m *MockFunctionInterface) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Do", ctx, req) - ret0, _ := ret[0].(*http.Response) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Do indicates an expected call of Do. -func (mr *MockFunctionInterfaceMockRecorder) Do(ctx, req any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockFunctionInterface)(nil).Do), ctx, req) -} - -// ExportCredential mocks base method. 
-func (m *MockFunctionInterface) ExportCredential(ctx context.Context, cred *conf.Credential) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ExportCredential", ctx, cred) - ret0, _ := ret[0].(error) - return ret0 -} - -// ExportCredential indicates an expected call of ExportCredential. -func (mr *MockFunctionInterfaceMockRecorder) ExportCredential(ctx, cred any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExportCredential", reflect.TypeOf((*MockFunctionInterface)(nil).ExportCredential), ctx, cred) -} - -// GetDownloadPresignedUrl mocks base method. -func (m *MockFunctionInterface) GetDownloadPresignedUrl(ctx context.Context, guid, protocolText string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetDownloadPresignedUrl", ctx, guid, protocolText) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetDownloadPresignedUrl indicates an expected call of GetDownloadPresignedUrl. -func (mr *MockFunctionInterfaceMockRecorder) GetDownloadPresignedUrl(ctx, guid, protocolText any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDownloadPresignedUrl", reflect.TypeOf((*MockFunctionInterface)(nil).GetDownloadPresignedUrl), ctx, guid, protocolText) -} - -// New mocks base method. -func (m *MockFunctionInterface) New(method, url string) *request.RequestBuilder { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "New", method, url) - ret0, _ := ret[0].(*request.RequestBuilder) - return ret0 -} - -// New indicates an expected call of New. -func (mr *MockFunctionInterfaceMockRecorder) New(method, url any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockFunctionInterface)(nil).New), method, url) -} - -// NewAccessToken mocks base method. 
-func (m *MockFunctionInterface) NewAccessToken(ctx context.Context) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewAccessToken", ctx) - ret0, _ := ret[0].(error) - return ret0 -} - -// NewAccessToken indicates an expected call of NewAccessToken. -func (mr *MockFunctionInterfaceMockRecorder) NewAccessToken(ctx any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAccessToken", reflect.TypeOf((*MockFunctionInterface)(nil).NewAccessToken), ctx) -} diff --git a/mocks/mock_gen3interface.go b/mocks/mock_gen3interface.go index 4364179..2d7cd69 100644 --- a/mocks/mock_gen3interface.go +++ b/mocks/mock_gen3interface.go @@ -14,13 +14,13 @@ import ( http "net/http" reflect "reflect" - credentials "github.com/calypr/data-client/credentials" - drs "github.com/calypr/data-client/drs" fence "github.com/calypr/data-client/fence" logs "github.com/calypr/data-client/logs" request "github.com/calypr/data-client/request" requestor "github.com/calypr/data-client/requestor" sower "github.com/calypr/data-client/sower" + credentials "github.com/calypr/syfon/client/credentials" + drs "github.com/calypr/syfon/client/drs" gomock "go.uber.org/mock/gomock" ) @@ -48,26 +48,25 @@ func (m *MockGen3Interface) EXPECT() *MockGen3InterfaceMockRecorder { return m.recorder } -// Do mocks base method. -func (m *MockGen3Interface) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { +// Credentials mocks base method. +func (m *MockGen3Interface) Credentials() credentials.Manager { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Do", ctx, req) - ret0, _ := ret[0].(*http.Response) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "Credentials") + ret0, _ := ret[0].(credentials.Manager) + return ret0 } -// Do indicates an expected call of Do. -func (mr *MockGen3InterfaceMockRecorder) Do(ctx, req any) *gomock.Call { +// Credentials indicates an expected call of Credentials. 
+func (mr *MockGen3InterfaceMockRecorder) Credentials() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockGen3Interface)(nil).Do), ctx, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Credentials", reflect.TypeOf((*MockGen3Interface)(nil).Credentials)) } // DRSClient mocks base method. -func (m *MockGen3Interface) DRSClient() drs.ServerClient { +func (m *MockGen3Interface) DRSClient() drs.Client { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DRSClient") - ret0, _ := ret[0].(drs.ServerClient) + ret0, _ := ret[0].(drs.Client) return ret0 } @@ -77,6 +76,21 @@ func (mr *MockGen3InterfaceMockRecorder) DRSClient() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DRSClient", reflect.TypeOf((*MockGen3Interface)(nil).DRSClient)) } +// Do mocks base method. +func (m *MockGen3Interface) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Do", ctx, req) + ret0, _ := ret[0].(*http.Response) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Do indicates an expected call of Do. +func (mr *MockGen3InterfaceMockRecorder) Do(ctx, req any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Do", reflect.TypeOf((*MockGen3Interface)(nil).Do), ctx, req) +} + // FenceClient mocks base method. func (m *MockGen3Interface) FenceClient() fence.FenceInterface { m.ctrl.T.Helper() @@ -91,34 +105,6 @@ func (mr *MockGen3InterfaceMockRecorder) FenceClient() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FenceClient", reflect.TypeOf((*MockGen3Interface)(nil).FenceClient)) } -// Credentials mocks base method. -func (m *MockGen3Interface) Credentials() credentials.Manager { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Credentials") - ret0, _ := ret[0].(credentials.Manager) - return ret0 -} - -// Credentials indicates an expected call of Credentials. 
-func (mr *MockGen3InterfaceMockRecorder) Credentials() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Credentials", reflect.TypeOf((*MockGen3Interface)(nil).Credentials)) -} - -// RequestorClient mocks base method. -func (m *MockGen3Interface) RequestorClient() requestor.RequestorInterface { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RequestorClient") - ret0, _ := ret[0].(requestor.RequestorInterface) - return ret0 -} - -// RequestorClient indicates an expected call of RequestorClient. -func (mr *MockGen3InterfaceMockRecorder) RequestorClient() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestorClient", reflect.TypeOf((*MockGen3Interface)(nil).RequestorClient)) -} - // Logger mocks base method. func (m *MockGen3Interface) Logger() *logs.Gen3Logger { m.ctrl.T.Helper() @@ -147,6 +133,20 @@ func (mr *MockGen3InterfaceMockRecorder) New(method, url any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockGen3Interface)(nil).New), method, url) } +// RequestorClient mocks base method. +func (m *MockGen3Interface) RequestorClient() requestor.RequestorInterface { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RequestorClient") + ret0, _ := ret[0].(requestor.RequestorInterface) + return ret0 +} + +// RequestorClient indicates an expected call of RequestorClient. +func (mr *MockGen3InterfaceMockRecorder) RequestorClient() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestorClient", reflect.TypeOf((*MockGen3Interface)(nil).RequestorClient)) +} + // SowerClient mocks base method. 
func (m *MockGen3Interface) SowerClient() sower.SowerInterface { m.ctrl.T.Helper() diff --git a/mocks/mock_request.go b/mocks/mock_request.go index 8ccd2a0..0d2be55 100644 --- a/mocks/mock_request.go +++ b/mocks/mock_request.go @@ -3,7 +3,7 @@ // // Generated by this command: // -// mockgen -destination=../mocks/mock_request.go -package=mocks github.com/calypr/data-client/request RequestInterface +// mockgen -destination=./mocks/mock_request.go -package=mocks github.com/calypr/data-client/request RequestInterface // // Package mocks is a generated GoMock package. @@ -14,7 +14,7 @@ import ( http "net/http" reflect "reflect" - request "github.com/calypr/data-client/request" + request "github.com/calypr/syfon/client/pkg/request" gomock "go.uber.org/mock/gomock" ) diff --git a/request/builder.go b/request/builder.go index e12e923..59b9192 100644 --- a/request/builder.go +++ b/request/builder.go @@ -58,3 +58,8 @@ func (ar *RequestBuilder) WithSkipAuth(skip bool) *RequestBuilder { ar.SkipAuth = skip return ar } + +func (ar *RequestBuilder) WithPartSize(size int64) *RequestBuilder { + ar.PartSize = size + return ar +} diff --git a/request/request.go b/request/request.go index b5603da..102f3f0 100644 --- a/request/request.go +++ b/request/request.go @@ -1,6 +1,6 @@ package request -//go:generate mockgen -destination=../mocks/mock_request.go -package=mocks github.com/calypr/data-client/request RequestInterface +//go:generate mockgen -destination=../mocks/mock_request.go -package=mocks github.com/calypr/syfon/client/pkg/request RequestInterface import ( "context" diff --git a/runtime/client.go b/runtime/client.go deleted file mode 100644 index 2dc8ab7..0000000 --- a/runtime/client.go +++ /dev/null @@ -1,48 +0,0 @@ -package runtime - -import ( - "fmt" - "strings" - - "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/g3client" - "github.com/calypr/data-client/localclient" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/transfer" -) - 
-// Client composes metadata and transfer concerns for a selected runtime mode. -type Client struct { - g3 g3client.Gen3Interface - drs drs.ServerClient -} - -func New(profile string, mode string, logger *logs.Gen3Logger) (*Client, error) { - mode = strings.ToLower(strings.TrimSpace(mode)) - switch mode { - case "", "gen3": - g3, err := g3client.NewGen3Interface(profile, logger) - if err != nil { - return nil, err - } - return &Client{ - g3: g3, - drs: g3.DRSClient(), - }, nil - case "drs": - lc, err := localclient.NewLocalInterface(profile, logger) - if err != nil { - return nil, err - } - return &Client{ - g3: nil, - drs: lc.DRSClient(), - }, nil - default: - return nil, fmt.Errorf("unsupported backend mode %q", mode) - } -} - -func (c *Client) Gen3() g3client.Gen3Interface { return c.g3 } -func (c *Client) DRS() drs.ServerClient { return c.drs } -func (c *Client) Transfer() transfer.Backend { return c.drs } diff --git a/s3utils/s3_utils.go b/s3utils/s3_utils.go deleted file mode 100644 index 8aefc78..0000000 --- a/s3utils/s3_utils.go +++ /dev/null @@ -1,111 +0,0 @@ -package s3utils - -import ( - "context" - "fmt" - "log/slog" - "strings" - "time" - - "net/url" - - "gocloud.dev/blob" - _ "gocloud.dev/blob/azureblob" - _ "gocloud.dev/blob/fileblob" - _ "gocloud.dev/blob/gcsblob" - _ "gocloud.dev/blob/s3blob" - - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/calypr/data-client/fence" -) - -// ParseBlobURL parses a URL like s3://bucket/key and returns (bucket, key, error). -func ParseBlobURL(blobURL string) (string, string, error) { - u, err := url.Parse(blobURL) - if err != nil { - return "", "", fmt.Errorf("invalid blob URL %s: %w", blobURL, err) - } - if u.Scheme == "" { - return "", "", fmt.Errorf("URL requires a scheme prefix (e.g. 
s3://, gs://): %s", blobURL) - } - - bucket := u.Host - if u.Scheme == "file" { - bucket = "file:///" - } - - key := strings.TrimPrefix(u.Path, "/") - if key == "" { - return "", "", fmt.Errorf("invalid blob URL (missing key/path): %s", blobURL) - } - - return bucket, key, nil -} - -// ValidateInputs checks if the Blob URL and SHA256 hash are valid. -func ValidateInputs(s3URL, sha256 string) error { - if s3URL == "" { - return fmt.Errorf("Blob URL is required") - } - if sha256 == "" { - return fmt.Errorf("SHA256 hash is required") - } - u, err := url.Parse(s3URL) - if err != nil || u.Scheme == "" { - return fmt.Errorf("invalid Blob URL: must have a scheme (like s3://, gs://)") - } - if len(sha256) != 64 { - return fmt.Errorf("invalid SHA256 hash: must be 64 characters") - } - return nil -} - -// FetchS3MetadataWithBucketDetails fetches S3 metadata (size and modified date) for a given S3 URL. -// FetchS3MetadataWithBucketDetails fetches metadata using generic go-cloud capabilities. -// This makes it compatible with multiple cloud providers instead of a bare-bones specific setup. -func FetchS3MetadataWithBucketDetails( - ctx context.Context, - s3URL string, - awsAccessKey string, - awsSecretKey string, - region string, - endpoint string, - bucketDetails *fence.S3Bucket, - s3Client *s3.Client, // kept for backward compatibility signature, though it is no longer strictly required for basic HEAD. - logger *slog.Logger, -) (int64, string, error) { - u, err := url.Parse(s3URL) - if err != nil { - return 0, "", fmt.Errorf("failed to parse url %s: %w", s3URL, err) - } - - bucketURL := fmt.Sprintf("%s://%s", u.Scheme, u.Host) - key := strings.TrimPrefix(u.Path, "/") - - // Optionally pass credentials logic. By default go-cloud checks environment. - // For AWS, you could override credentials, but typically users want standard config loading - // which go-cloud openers handle out of the box (e.g. AWS_PROFILE, AWS_REGION, AWS_ACCESS_KEY_ID). 
- - bucket, err := blob.OpenBucket(ctx, bucketURL) - if err != nil { - return 0, "", fmt.Errorf("failed to open bucket via go-cloud string %s: %w", bucketURL, err) - } - defer bucket.Close() - - attrs, err := bucket.Attributes(ctx, key) - if err != nil { - return 0, "", fmt.Errorf("failed to get attributes for %s: %w", key, err) - } - - lastMod := "" - if !attrs.ModTime.IsZero() { - lastMod = attrs.ModTime.Format(time.RFC3339) - } - - return attrs.Size, lastMod, nil -} - -type S3Meta struct { - Size int64 - LastModified string -} diff --git a/tests/download-multiple_test.go b/tests/download-multiple_test.go index 3de9b5c..d9ea0c4 100644 --- a/tests/download-multiple_test.go +++ b/tests/download-multiple_test.go @@ -5,10 +5,10 @@ import ( "fmt" "testing" - "github.com/calypr/data-client/download" - drs "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/mocks" + "github.com/calypr/syfon/client/drs" + "github.com/calypr/syfon/client/mocks" + sylogs "github.com/calypr/syfon/client/pkg/logs" + "github.com/calypr/syfon/client/xfer/download" "go.uber.org/mock/gomock" ) @@ -31,7 +31,7 @@ func Test_askGen3ForFileInfo_withShepherd(t *testing.T) { GetObject(gomock.Any(), testGUID). Return(&drs.DRSObject{Id: testGUID, Name: testFileName, Size: testFileSize}, nil) - logger := logs.NewGen3Logger(nil, "", "test") + logger := sylogs.NewGen3Logger(nil, "", "test") skipped := []download.RenamedOrSkippedFileInfo{} info, err := download.GetFileInfo(context.Background(), mockIndexd, logger, testGUID, "", "", "original", true, &skipped) @@ -67,7 +67,7 @@ func Test_askGen3ForFileInfo_withShepherd_shepherdError(t *testing.T) { GetObject(gomock.Any(), testGUID). 
Return(nil, fmt.Errorf("Indexd error")) - logger := logs.NewGen3Logger(nil, "", "test") + logger := sylogs.NewGen3Logger(nil, "", "test") skipped := []download.RenamedOrSkippedFileInfo{} info, err := download.GetFileInfo(context.Background(), mockIndexd, logger, testGUID, "", "", "original", true, &skipped) @@ -109,7 +109,7 @@ func Test_askGen3ForFileInfo_noShepherd(t *testing.T) { GetObject(gomock.Any(), testGUID). Return(&drs.DRSObject{Id: testGUID, Name: testFileName, Size: testFileSize}, nil) - logger := logs.NewGen3Logger(nil, "", "test") + logger := sylogs.NewGen3Logger(nil, "", "test") skipped := []download.RenamedOrSkippedFileInfo{} info, err := download.GetFileInfo(context.Background(), mockIndexd, logger, testGUID, "", "", "original", true, &skipped) diff --git a/tests/utils_test.go b/tests/utils_test.go index 7687e9e..289a74a 100644 --- a/tests/utils_test.go +++ b/tests/utils_test.go @@ -2,129 +2,85 @@ package tests import ( "context" - "fmt" "io" "net/http" "strings" "testing" "github.com/calypr/data-client/common" - "github.com/calypr/data-client/conf" - "github.com/calypr/data-client/download" - "github.com/calypr/data-client/fence" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/mocks" - "github.com/calypr/data-client/request" - "github.com/calypr/data-client/transfer" - gen3signer "github.com/calypr/data-client/transfer/signer/gen3" - "github.com/calypr/data-client/upload" - "go.uber.org/mock/gomock" + sylogs "github.com/calypr/syfon/client/pkg/logs" + "github.com/calypr/syfon/client/xfer/download" + "github.com/calypr/syfon/client/xfer/upload" ) -type staticCredentialsManager struct { - cred *conf.Credential +type fakeDownloader struct { + resolveFn func(ctx context.Context, guid, accessID string) (string, error) + downloadFn func(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) } -func (s *staticCredentialsManager) Current() *conf.Credential { return s.cred } -func (s 
*staticCredentialsManager) Export(ctx context.Context, cred *conf.Credential) error { - return nil +func (f *fakeDownloader) Name() string { return "fake-downloader" } +func (f *fakeDownloader) Logger() *sylogs.Gen3Logger { return sylogs.NewGen3Logger(nil, "", "test") } +func (f *fakeDownloader) ResolveDownloadURL(ctx context.Context, guid, accessID string) (string, error) { + return f.resolveFn(ctx, guid, accessID) +} +func (f *fakeDownloader) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { + return f.downloadFn(ctx, fdr) } -func TestGetDownloadResponse_withShepherd(t *testing.T) { - testGUID := "000000-0000000-0000000-000000" - testFilename := "test-file" - mockDownloadURL := "https://example.com/example.pfb" - - mockCtrl := gomock.NewController(t) - defer mockCtrl.Finish() - - mockGen3 := mocks.NewMockGen3Interface(mockCtrl) - mockFence := mocks.NewMockFenceInterface(mockCtrl) - mockDrs := mocks.NewMockDrsClient(mockCtrl) - - // Mock credential - mockGen3.EXPECT().Credentials().Return(&staticCredentialsManager{cred: &conf.Credential{}}).AnyTimes() - mockGen3.EXPECT().FenceClient().Return(mockFence).AnyTimes() - mockGen3.EXPECT().DRSClient().Return(mockDrs).AnyTimes() - mockGen3.EXPECT().Logger().Return(logs.NewGen3Logger(nil, "", "test")).AnyTimes() - - mockFence.EXPECT(). - GetDownloadPresignedUrl(gomock.Any(), testGUID, ""). - Return(mockDownloadURL, nil) - - mockGen3.EXPECT(). - New(http.MethodGet, mockDownloadURL). - Return(&request.RequestBuilder{ - Method: http.MethodGet, - Url: mockDownloadURL, - Headers: make(map[string]string), - }). - AnyTimes() - - // Mock successful response from the presigned URL - mockResp := &http.Response{ - StatusCode: 200, - Body: io.NopCloser(strings.NewReader("content")), - } - mockGen3.EXPECT(). - Do(gomock.Any(), gomock.Any()). 
- Return(mockResp, nil) - - mockFDRObj := common.FileDownloadResponseObject{ - Filename: testFilename, - GUID: testGUID, - Range: 0, - } +type fakeUploader struct { + resolveFn func(ctx context.Context, guid, filename string, metadata common.FileMetadata, bucket string) (string, error) +} - bk := transfer.New(mockGen3, logs.NewGen3Logger(nil, "", "test"), gen3signer.New(mockGen3, &conf.Credential{}, mockDrs, mockFence)) - err := download.GetDownloadResponse(context.Background(), bk, &mockFDRObj, "") - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } +func (f *fakeUploader) Name() string { return "fake-uploader" } +func (f *fakeUploader) Logger() *sylogs.Gen3Logger { return sylogs.NewGen3Logger(nil, "", "test") } - if mockFDRObj.PresignedURL != mockDownloadURL { - t.Errorf("Wanted URL %s, got %s", mockDownloadURL, mockFDRObj.PresignedURL) - } +func (f *fakeUploader) ResolveUploadURL(ctx context.Context, guid, filename string, metadata common.FileMetadata, bucket string) (string, error) { + return f.resolveFn(ctx, guid, filename, metadata, bucket) +} +func (f *fakeUploader) ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) { + return nil, nil +} +func (f *fakeUploader) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { + return nil, nil +} +func (f *fakeUploader) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { + return "", nil +} +func (f *fakeUploader) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { + return nil +} +func (f *fakeUploader) Upload(ctx context.Context, url string, body io.Reader, size int64) error { + return nil +} +func (f *fakeUploader) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { + return 
"", nil +} +func (f *fakeUploader) DeleteFile(ctx context.Context, guid string) (string, error) { + return "", nil } -func TestGetDownloadResponse_noShepherd(t *testing.T) { +func TestGetDownloadResponse(t *testing.T) { testGUID := "000000-0000000-0000000-000000" testFilename := "test-file" mockDownloadURL := "https://example.com/example.pfb" - mockCtrl := gomock.NewController(t) - defer mockCtrl.Finish() - - mockGen3 := mocks.NewMockGen3Interface(mockCtrl) - mockFence := mocks.NewMockFenceInterface(mockCtrl) - mockDrs := mocks.NewMockDrsClient(mockCtrl) - - mockGen3.EXPECT().Credentials().Return(&staticCredentialsManager{cred: &conf.Credential{}}).AnyTimes() - mockGen3.EXPECT().FenceClient().Return(mockFence).AnyTimes() - mockGen3.EXPECT().DRSClient().Return(mockDrs).AnyTimes() - mockGen3.EXPECT().Logger().Return(logs.NewGen3Logger(nil, "", "test")).AnyTimes() - - mockFence.EXPECT(). - GetDownloadPresignedUrl(gomock.Any(), testGUID, ""). - Return(mockDownloadURL, nil) - - mockGen3.EXPECT(). - New(http.MethodGet, mockDownloadURL). - Return(&request.RequestBuilder{ - Method: http.MethodGet, - Url: mockDownloadURL, - Headers: make(map[string]string), - }). - AnyTimes() - - // Mock successful response - mockResp := &http.Response{ - StatusCode: 200, - Body: io.NopCloser(strings.NewReader("content")), + bk := &fakeDownloader{ + resolveFn: func(ctx context.Context, guid, accessID string) (string, error) { + if guid != testGUID { + t.Fatalf("unexpected guid: %s", guid) + } + return mockDownloadURL, nil + }, + downloadFn: func(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { + if fdr.PresignedURL != mockDownloadURL { + t.Fatalf("expected URL %s, got %s", mockDownloadURL, fdr.PresignedURL) + } + return &http.Response{ + StatusCode: 200, + Body: io.NopCloser(strings.NewReader("content")), + }, nil + }, } - mockGen3.EXPECT(). - Do(gomock.Any(), gomock.Any()). 
- Return(mockResp, nil) mockFDRObj := common.FileDownloadResponseObject{ Filename: testFilename, @@ -132,112 +88,40 @@ func TestGetDownloadResponse_noShepherd(t *testing.T) { Range: 0, } - bk := transfer.New(mockGen3, logs.NewGen3Logger(nil, "", "test"), gen3signer.New(mockGen3, &conf.Credential{}, mockDrs, mockFence)) err := download.GetDownloadResponse(context.Background(), bk, &mockFDRObj, "") if err != nil { - t.Fatalf("Unexpected error: %v", err) + t.Fatalf("unexpected error: %v", err) } - if mockFDRObj.PresignedURL != mockDownloadURL { - t.Errorf("Wanted URL %s, got %s", mockDownloadURL, mockFDRObj.PresignedURL) + t.Errorf("wanted URL %s, got %s", mockDownloadURL, mockFDRObj.PresignedURL) } } -func TestGeneratePresignedUploadURL_noShepherd(t *testing.T) { +func TestGeneratePresignedUploadURL(t *testing.T) { testFilename := "test-file" - testBucketname := "test-bucket" - mockPresignedURL := "https://example.com/example.pfb" - mockGUID := "000000-0000000-0000000-000000" - - mockCtrl := gomock.NewController(t) - defer mockCtrl.Finish() - - mockGen3 := mocks.NewMockGen3Interface(mockCtrl) - mockFence := mocks.NewMockFenceInterface(mockCtrl) - mockDrs := mocks.NewMockDrsClient(mockCtrl) - - mockGen3.EXPECT().Credentials().Return(&staticCredentialsManager{cred: &conf.Credential{}}).AnyTimes() - mockGen3.EXPECT().FenceClient().Return(mockFence).AnyTimes() - mockGen3.EXPECT().DRSClient().Return(mockDrs).AnyTimes() - mockGen3.EXPECT().Logger().Return(logs.NewGen3Logger(nil, "", "test")).AnyTimes() - - // No Shepherd - mockFence.EXPECT(). - CheckForShepherdAPI(gomock.Any()). - Return(false, nil) - - mockFence.EXPECT(). - InitUpload(gomock.Any(), testFilename, testBucketname, ""). 
- Return(fence.FenceResponse{ - URL: mockPresignedURL, - GUID: mockGUID, - }, nil) - - bk := transfer.New(mockGen3, logs.NewGen3Logger(nil, "", "test"), gen3signer.New(mockGen3, &conf.Credential{}, mockDrs, mockFence)) - resp, err := upload.GeneratePresignedUploadURL(context.Background(), bk, testFilename, common.FileMetadata{}, testBucketname) + testBucket := "test-bucket" + mockUploadURL := "https://example.com/upload" + + bk := &fakeUploader{ + resolveFn: func(ctx context.Context, guid, filename string, metadata common.FileMetadata, bucket string) (string, error) { + if filename != testFilename { + t.Fatalf("unexpected filename: %s", filename) + } + if bucket != testBucket { + t.Fatalf("unexpected bucket: %s", bucket) + } + return mockUploadURL, nil + }, + } + + resp, err := upload.GeneratePresignedUploadURL(context.Background(), bk, testFilename, common.FileMetadata{}, testBucket) if err != nil { - t.Fatalf("Unexpected error: %v", err) + t.Fatalf("unexpected error: %v", err) } - - if resp.URL != mockPresignedURL { - t.Errorf("Wanted URL %s, got %s", mockPresignedURL, resp.URL) + if resp.URL != mockUploadURL { + t.Errorf("wanted URL %s, got %s", mockUploadURL, resp.URL) } if resp.GUID != "" { - t.Errorf("Wanted empty GUID, got %s", resp.GUID) - } -} - -func TestGeneratePresignedUploadURL_withShepherd(t *testing.T) { - testFilename := "test-file" - testBucketname := "test-bucket" - mockPresignedURL := "https://example.com/example.pfb" - mockGUID := "000000-0000000-0000000-000000" - - testMetadata := common.FileMetadata{ - Aliases: []string{"test-alias-1", "test-alias-2"}, - Authz: []string{"authz-resource-1", "authz-resource-2"}, - Metadata: map[string]any{"arbitrary": "metadata"}, - } - - mockCtrl := gomock.NewController(t) - defer mockCtrl.Finish() - - mockGen3 := mocks.NewMockGen3Interface(mockCtrl) - mockFence := mocks.NewMockFenceInterface(mockCtrl) - mockDrs := mocks.NewMockDrsClient(mockCtrl) - - 
mockGen3.EXPECT().Credentials().Return(&staticCredentialsManager{cred: &conf.Credential{AccessToken: "token"}}).AnyTimes() - mockGen3.EXPECT().FenceClient().Return(mockFence).AnyTimes() - mockGen3.EXPECT().DRSClient().Return(mockDrs).AnyTimes() - mockGen3.EXPECT().Logger().Return(logs.NewGen3Logger(nil, "", "test")).AnyTimes() - - // Shepherd is deployed - mockFence.EXPECT(). - CheckForShepherdAPI(gomock.Any()). - Return(true, nil) - - // Shepherd returns GUID and upload_url - shepherdResp := &http.Response{ - StatusCode: 201, - Body: io.NopCloser(strings.NewReader(fmt.Sprintf( - `{"guid": "%s", "upload_url": "%s"}`, mockGUID, mockPresignedURL, - ))), - } - - mockFence.EXPECT(). - Do(gomock.Any(), gomock.Any()). - Return(shepherdResp, nil) - - bk := transfer.New(mockGen3, logs.NewGen3Logger(nil, "", "test"), gen3signer.New(mockGen3, &conf.Credential{AccessToken: "token", APIEndpoint: "https://example.com"}, mockDrs, mockFence)) - respObj, err := upload.GeneratePresignedUploadURL(context.Background(), bk, testFilename, testMetadata, testBucketname) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - if respObj.URL != mockPresignedURL { - t.Errorf("Wanted URL %s, got %s", mockPresignedURL, respObj.URL) - } - if respObj.GUID != "" { - t.Errorf("Wanted empty GUID, got %s", respObj.GUID) + t.Errorf("wanted empty GUID, got %s", resp.GUID) } } diff --git a/transfer/http.go b/transfer/http.go deleted file mode 100644 index 3653e2a..0000000 --- a/transfer/http.go +++ /dev/null @@ -1,69 +0,0 @@ -package transfer - -import ( - "context" - "fmt" - "io" - "net/http" - "strconv" - "strings" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/request" -) - -// ResolveRange parses range information from FileDownloadResponseObject. 
-func ResolveRange(fdr *common.FileDownloadResponseObject) (start int64, end *int64, ok bool) { - if fdr == nil { - return 0, nil, false - } - if fdr.RangeStart != nil { - return *fdr.RangeStart, fdr.RangeEnd, true - } - if fdr.Range > 0 { - return fdr.Range, nil, true - } - return 0, nil, false -} - -// DoUpload performs a presigned PUT request and returns ETag when available. -func DoUpload(ctx context.Context, req request.RequestInterface, url string, body io.Reader, size int64) (string, error) { - rb := req.New(http.MethodPut, url).WithBody(body).WithSkipAuth(true) - if size > 0 { - rb.PartSize = size - } - - resp, err := req.Do(ctx, rb) - if err != nil { - return "", fmt.Errorf("upload to %s failed: %w", url, err) - } - defer resp.Body.Close() - - if resp.StatusCode >= 400 { - bodyBytes, _ := io.ReadAll(resp.Body) - return "", fmt.Errorf("upload to %s failed with status %d: %s", url, resp.StatusCode, string(bodyBytes)) - } - - return strings.Trim(resp.Header.Get("ETag"), `"`), nil -} - -// GenericDownload performs GET (optionally ranged) against a signed URL. 
-func GenericDownload(ctx context.Context, req request.RequestInterface, fdr *common.FileDownloadResponseObject) (*http.Response, error) { - skipAuth := common.IsCloudPresignedURL(fdr.PresignedURL) - - rb := req.New(http.MethodGet, fdr.PresignedURL) - start, end, hasRange := ResolveRange(fdr) - if hasRange { - rangeHeader := "bytes=" + strconv.FormatInt(start, 10) + "-" - if end != nil { - rangeHeader += strconv.FormatInt(*end, 10) - } - rb.WithHeader("Range", rangeHeader) - } - - if skipAuth { - rb.WithSkipAuth(true) - } - - return req.Do(ctx, rb) -} diff --git a/transfer/interface.go b/transfer/interface.go deleted file mode 100644 index 1a4540e..0000000 --- a/transfer/interface.go +++ /dev/null @@ -1,42 +0,0 @@ -package transfer - -import ( - "context" - "io" - "net/http" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/logs" -) - -// Service captures identity and logging for a transfer implementation. -type Service interface { - Name() string - Logger() *logs.Gen3Logger -} - -// Downloader is the signed URL resolution and byte download surface. -type Downloader interface { - Service - ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) - Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) -} - -// Uploader is the signed URL and multipart upload surface. 
-type Uploader interface { - Service - ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) - ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) - InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) - GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) - CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error - Upload(ctx context.Context, url string, body io.Reader, size int64) error - UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) - DeleteFile(ctx context.Context, guid string) (string, error) -} - -// Backend is the composed transfer surface used by upload/download workflows. -type Backend interface { - Downloader - Uploader -} diff --git a/transfer/service.go b/transfer/service.go deleted file mode 100644 index d061942..0000000 --- a/transfer/service.go +++ /dev/null @@ -1,81 +0,0 @@ -package transfer - -import ( - "context" - "io" - "net/http" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/request" -) - -// Signer defines mode-specific signed URL and multipart orchestration. 
-type Signer interface { - Name() string - ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) - ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) - ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) - InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) - GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) - CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error - DeleteFile(ctx context.Context, guid string) (string, error) -} - -type client struct { - req request.RequestInterface - logger *logs.Gen3Logger - signer Signer -} - -func New(req request.RequestInterface, logger *logs.Gen3Logger, signer Signer) Backend { - return &client{ - req: req, - logger: logger, - signer: signer, - } -} - -func (c *client) Name() string { return c.signer.Name() } -func (c *client) Logger() *logs.Gen3Logger { return c.logger } - -func (c *client) DeleteFile(ctx context.Context, guid string) (string, error) { - return c.signer.DeleteFile(ctx, guid) -} - -func (c *client) Download(ctx context.Context, fdr *common.FileDownloadResponseObject) (*http.Response, error) { - return GenericDownload(ctx, c.req, fdr) -} - -func (c *client) ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { - return c.signer.ResolveDownloadURL(ctx, guid, accessID) -} - -func (c *client) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { - return c.signer.ResolveUploadURL(ctx, guid, filename, metadata, bucket) -} - -func (c *client) ResolveUploadURLs(ctx context.Context, requests 
[]common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) { - return c.signer.ResolveUploadURLs(ctx, requests) -} - -func (c *client) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { - return c.signer.InitMultipartUpload(ctx, guid, filename, bucket) -} - -func (c *client) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { - return c.signer.GetMultipartUploadURL(ctx, key, uploadID, partNumber, bucket) -} - -func (c *client) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { - return c.signer.CompleteMultipartUpload(ctx, key, uploadID, parts, bucket) -} - -func (c *client) Upload(ctx context.Context, url string, body io.Reader, size int64) error { - _, err := DoUpload(ctx, c.req, url, body, size) - return err -} - -func (c *client) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { - return DoUpload(ctx, c.req, url, body, size) -} diff --git a/transfer/signer/gen3/signer.go b/transfer/signer/gen3/signer.go deleted file mode 100644 index 876e6a5..0000000 --- a/transfer/signer/gen3/signer.go +++ /dev/null @@ -1,192 +0,0 @@ -package gen3 - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strings" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/conf" - "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/fence" - "github.com/calypr/data-client/request" - syclient "github.com/calypr/syfon/client" -) - -type Signer struct { - req request.RequestInterface - cred *conf.Credential - drs drs.Client - fence fence.FenceInterface - sy *syclient.Client -} - -func New(req request.RequestInterface, cred *conf.Credential, dc drs.Client, fc fence.FenceInterface) *Signer { - opts := make([]syclient.Option, 0, 1) - baseURL := "" - if cred != nil 
{ - baseURL = cred.APIEndpoint - if token := strings.TrimSpace(cred.AccessToken); token != "" { - opts = append(opts, syclient.WithBearerToken(token)) - } - } - return &Signer{ - req: req, - cred: cred, - drs: dc, - fence: fc, - sy: syclient.New(baseURL, opts...), - } -} - -func (g *Signer) Name() string { - return "Gen3" -} - -func (g *Signer) DeleteFile(ctx context.Context, guid string) (string, error) { - return g.fence.DeleteRecord(ctx, guid) -} - -func (g *Signer) ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { - url, err := g.fence.GetDownloadPresignedUrl(ctx, guid, accessID) - if err == nil && url != "" { - return url, nil - } - resolved, errIdx := drs.ResolveDownloadURL(ctx, g.drs, guid, accessID) - if errIdx == nil { - return resolved, nil - } - if err != nil { - return "", err - } - return "", errIdx -} - -func (g *Signer) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { - hasShepherd, err := g.fence.CheckForShepherdAPI(ctx) - if err != nil || !hasShepherd { - var msg fence.FenceResponse - if guid != "" { - msg, err = g.fence.GetUploadPresignedUrl(ctx, guid, filename, bucket) - } else { - msg, err = g.fence.InitUpload(ctx, filename, bucket, "") - } - if err != nil { - return "", err - } - if msg.URL == "" { - return "", fmt.Errorf("error generating presigned upload URL for %s", filename) - } - return msg.URL, nil - } - - payload := common.ShepherdInitRequestObject{ - Filename: filename, - Authz: common.ShepherdAuthz{ - Version: "0", ResourcePaths: metadata.Authz, - }, - Aliases: metadata.Aliases, - Metadata: metadata.Metadata, - } - reader, err := common.ToJSONReader(payload) - if err != nil { - return "", err - } - - resp, err := g.fence.Do(ctx, &request.RequestBuilder{ - Url: g.cred.APIEndpoint + common.ShepherdEndpoint + "/objects", - Method: http.MethodPost, - Body: reader, - Token: g.cred.AccessToken, - }) - if err != nil { - 
return "", fmt.Errorf("shepherd upload init failed: %w", err) - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK { - return "", fmt.Errorf("shepherd upload init failed with status %d", resp.StatusCode) - } - - var res common.PresignedURLResponse - if err := json.NewDecoder(resp.Body).Decode(&res); err != nil { - return "", err - } - return res.URL, nil -} - -func (g *Signer) ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) { - if len(requests) == 0 { - return []common.UploadURLResolveResponse{}, nil - } - - items := make([]syclient.UploadBulkItem, 0, len(requests)) - for _, req := range requests { - fileID := strings.TrimSpace(req.GUID) - if fileID == "" { - fileID = strings.TrimSpace(req.Filename) - } - item := syclient.UploadBulkItem{FileId: fileID} - if req.Bucket != "" { - item.SetBucket(req.Bucket) - } - if req.Filename != "" { - item.SetFileName(req.Filename) - } - items = append(items, item) - } - - out, err := g.sy.Data().UploadBulk(ctx, syclient.UploadBulkRequest{Requests: items}) - if err != nil { - return nil, err - } - - results := make([]common.UploadURLResolveResponse, len(requests)) - for i := range requests { - results[i] = common.UploadURLResolveResponse{ - GUID: requests[i].GUID, - Filename: requests[i].Filename, - Bucket: requests[i].Bucket, - Status: http.StatusBadGateway, - Error: "missing result for request", - } - } - for i := range out.GetResults() { - if i >= len(results) { - break - } - r := out.GetResults()[i] - results[i].URL = r.GetUrl() - results[i].Status = int(r.GetStatus()) - results[i].Error = r.GetError() - if results[i].Status == 0 { - results[i].Status = http.StatusOK - } - } - return results, nil -} - -func (g *Signer) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { - res, err := g.fence.InitMultipartUpload(ctx, 
filename, bucket, guid) - if err != nil { - return nil, err - } - if strings.TrimSpace(res.UploadID) == "" { - return nil, fmt.Errorf("fence multipart init did not return uploadId") - } - return &common.MultipartUploadInit{GUID: res.GUID, UploadID: res.UploadID}, nil -} - -func (g *Signer) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { - return g.fence.GenerateMultipartPresignedURL(ctx, key, uploadID, int(partNumber), bucket) -} - -func (g *Signer) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { - fParts := make([]fence.MultipartPart, len(parts)) - for i, p := range parts { - fParts[i] = fence.MultipartPart{PartNumber: int(p.PartNumber), ETag: p.ETag} - } - return g.fence.CompleteMultipartUpload(ctx, key, uploadID, fParts, bucket) -} diff --git a/transfer/signer/gen3/signer_test.go b/transfer/signer/gen3/signer_test.go deleted file mode 100644 index 1949f2b..0000000 --- a/transfer/signer/gen3/signer_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package gen3 - -import ( - "context" - "encoding/json" - "net/http" - "net/http/httptest" - "testing" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/conf" - "github.com/calypr/data-client/request" -) - -type testRequestClient struct { - client *http.Client -} - -func (t *testRequestClient) New(method, url string) *request.RequestBuilder { - return &request.RequestBuilder{Method: method, Url: url, Headers: map[string]string{}} -} - -func (t *testRequestClient) Do(ctx context.Context, rb *request.RequestBuilder) (*http.Response, error) { - req, err := http.NewRequestWithContext(ctx, rb.Method, rb.Url, rb.Body) - if err != nil { - return nil, err - } - for k, v := range rb.Headers { - req.Header.Set(k, v) - } - return t.client.Do(req) -} - -func TestResolveUploadURLsUsesSingleBulkRequest(t *testing.T) { - var calls int - srv := 
httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost || r.URL.Path != "/data/upload/bulk" { - http.NotFound(w, r) - return - } - calls++ - var req map[string]any - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - t.Fatalf("decode request body: %v", err) - } - _ = json.NewEncoder(w).Encode(map[string]any{ - "results": []map[string]any{ - {"file_id": "did-1", "file_name": "one.bin", "url": "https://signed/one", "status": 200}, - {"file_id": "did-2", "file_name": "two.bin", "url": "https://signed/two", "status": 200}, - }, - }) - })) - defer srv.Close() - - signer := New( - &testRequestClient{client: srv.Client()}, - &conf.Credential{APIEndpoint: srv.URL}, - nil, - nil, - ) - - out, err := signer.ResolveUploadURLs(context.Background(), []common.UploadURLResolveRequest{ - {GUID: "did-1", Filename: "one.bin", Bucket: "b1"}, - {GUID: "did-2", Filename: "two.bin", Bucket: "b1"}, - }) - if err != nil { - t.Fatalf("ResolveUploadURLs error: %v", err) - } - if calls != 1 { - t.Fatalf("expected exactly one bulk call, got %d", calls) - } - if len(out) != 2 { - t.Fatalf("expected 2 responses, got %d", len(out)) - } - if out[0].URL == "" || out[1].URL == "" { - t.Fatalf("expected signed URLs in both results, got %+v", out) - } -} diff --git a/transfer/signer/local/signer.go b/transfer/signer/local/signer.go deleted file mode 100644 index 21976c4..0000000 --- a/transfer/signer/local/signer.go +++ /dev/null @@ -1,154 +0,0 @@ -package local - -import ( - "context" - "fmt" - "net/http" - "strings" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/conf" - drs "github.com/calypr/data-client/drs" - syclient "github.com/calypr/syfon/client" -) - -type Signer struct { - client *syclient.Client - drsClient drs.Client -} - -func New(baseURL string, cred *conf.Credential, dc drs.Client) *Signer { - opts := make([]syclient.Option, 0, 1) - if cred != nil { - if token := 
strings.TrimSpace(cred.AccessToken); token != "" { - opts = append(opts, syclient.WithBearerToken(token)) - } - } - return &Signer{ - client: syclient.New(baseURL, opts...), - drsClient: dc, - } -} - -func (d *Signer) Name() string { return "DRS" } - -func (d *Signer) DeleteFile(ctx context.Context, guid string) (string, error) { - return "", fmt.Errorf("DeleteFile not implemented for local DRS signer") -} - -func (d *Signer) ResolveDownloadURL(ctx context.Context, guid string, accessID string) (string, error) { - return drs.ResolveDownloadURL(ctx, d.drsClient, guid, accessID) -} - -func (d *Signer) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { - res, err := d.client.Data().UploadURL(ctx, syclient.UploadURLRequest{ - FileID: guid, - Bucket: bucket, - FileName: filename, - }) - if err != nil { - return "", err - } - return res.URL, nil -} - -func (d *Signer) ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) { - if len(requests) == 0 { - return []common.UploadURLResolveResponse{}, nil - } - - items := make([]syclient.UploadBulkItem, 0, len(requests)) - for _, req := range requests { - fileID := strings.TrimSpace(req.GUID) - if fileID == "" { - fileID = strings.TrimSpace(req.Filename) - } - item := syclient.UploadBulkItem{FileId: fileID} - if req.Bucket != "" { - item.SetBucket(req.Bucket) - } - if req.Filename != "" { - item.SetFileName(req.Filename) - } - items = append(items, item) - } - - out, err := d.client.Data().UploadBulk(ctx, syclient.UploadBulkRequest{Requests: items}) - if err != nil { - return nil, err - } - - results := make([]common.UploadURLResolveResponse, len(requests)) - for i := range requests { - results[i] = common.UploadURLResolveResponse{ - GUID: requests[i].GUID, - Filename: requests[i].Filename, - Bucket: requests[i].Bucket, - Status: http.StatusBadGateway, - Error: "missing result for 
request", - } - } - for i := range out.GetResults() { - if i >= len(results) { - break - } - r := out.GetResults()[i] - results[i].URL = r.GetUrl() - results[i].Status = int(r.GetStatus()) - results[i].Error = r.GetError() - if results[i].Status == 0 { - results[i].Status = http.StatusOK - } - } - return results, nil -} - -func (d *Signer) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { - res, err := d.client.Data().MultipartInit(ctx, syclient.MultipartInitRequest{ - GUID: guid, - FileName: filename, - Bucket: bucket, - }) - if err != nil { - return nil, err - } - if res.UploadID == "" { - return nil, fmt.Errorf("server did not return uploadId") - } - return &common.MultipartUploadInit{ - GUID: res.GUID, - UploadID: res.UploadID, - }, nil -} - -func (d *Signer) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { - res, err := d.client.Data().MultipartUpload(ctx, syclient.MultipartUploadRequest{ - Key: key, - Bucket: bucket, - UploadID: uploadID, - PartNumber: partNumber, - }) - if err != nil { - return "", err - } - if res.PresignedURL == "" { - return "", fmt.Errorf("server did not return presigned_url") - } - return res.PresignedURL, nil -} - -func (d *Signer) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { - reqParts := make([]syclient.MultipartPart, len(parts)) - for i, p := range parts { - reqParts[i] = syclient.MultipartPart{ - PartNumber: p.PartNumber, - ETag: p.ETag, - } - } - return d.client.Data().MultipartComplete(ctx, syclient.MultipartCompleteRequest{ - Key: key, - Bucket: bucket, - UploadID: uploadID, - Parts: reqParts, - }) -} diff --git a/transfer/signer/local/signer_test.go b/transfer/signer/local/signer_test.go deleted file mode 100644 index 9e3a594..0000000 --- a/transfer/signer/local/signer_test.go +++ /dev/null @@ 
-1,79 +0,0 @@ -package local - -import ( - "context" - "encoding/json" - "io" - "log/slog" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/request" -) - -type testRequestClient struct { - client *http.Client -} - -func (t *testRequestClient) New(method, url string) *request.RequestBuilder { - return &request.RequestBuilder{Method: method, Url: url, Headers: map[string]string{}} -} - -func (t *testRequestClient) Do(ctx context.Context, rb *request.RequestBuilder) (*http.Response, error) { - req, err := http.NewRequestWithContext(ctx, rb.Method, rb.Url, rb.Body) - if err != nil { - return nil, err - } - for k, v := range rb.Headers { - req.Header.Set(k, v) - } - return t.client.Do(req) -} - -func TestResolveUploadURLsBatch(t *testing.T) { - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost || r.URL.Path != "/data/upload/bulk" { - http.NotFound(w, r) - return - } - var req map[string]any - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - t.Fatalf("decode request: %v", err) - } - results := map[string]any{ - "results": []map[string]any{ - {"file_id": "did-1", "file_name": "one.bin", "url": "https://signed/one", "status": 200}, - {"file_id": "did-2", "file_name": "two.bin", "status": 400, "error": "bucket credential not found"}, - }, - } - w.Header().Set("Content-Type", "application/json") - _ = json.NewEncoder(w).Encode(results) - })) - defer srv.Close() - - logger := slog.New(slog.NewTextHandler(io.Discard, nil)) - req := &testRequestClient{client: srv.Client()} - dc := drs.NewLocalDrsClient(req, srv.URL, logger) - signer := New(srv.URL, nil, dc) - - out, err := signer.ResolveUploadURLs(context.Background(), []common.UploadURLResolveRequest{ - {GUID: "did-1", Filename: "one.bin", Bucket: "b1"}, - {GUID: "did-2", Filename: "two.bin", Bucket: "b1"}, - }) - if 
err != nil { - t.Fatalf("ResolveUploadURLs returned error: %v", err) - } - if len(out) != 2 { - t.Fatalf("expected 2 results, got %d", len(out)) - } - if out[0].Status != http.StatusOK || out[0].URL == "" { - t.Fatalf("expected first result success, got %+v", out[0]) - } - if out[1].Status != http.StatusBadRequest || !strings.Contains(out[1].Error, "bucket credential not found") { - t.Fatalf("expected second result error, got %+v", out[1]) - } -} diff --git a/transfer/storage/gocloud.go b/transfer/storage/gocloud.go deleted file mode 100644 index a0c1dd9..0000000 --- a/transfer/storage/gocloud.go +++ /dev/null @@ -1,89 +0,0 @@ -package storage - -import ( - "bytes" - "context" - "fmt" - "io" - "time" - - "gocloud.dev/blob" - _ "gocloud.dev/blob/azureblob" - _ "gocloud.dev/blob/gcsblob" - _ "gocloud.dev/blob/s3blob" -) - -// Bucket abstracts cross-cloud object operations used by transfer paths. -type Bucket interface { - Upload(ctx context.Context, key string, body io.Reader) error - Download(ctx context.Context, key string) ([]byte, error) - SignedDownloadURL(ctx context.Context, key string, ttl time.Duration) (string, error) - SignedUploadURL(ctx context.Context, key string, ttl time.Duration) (string, error) - Close() error -} - -type GoCloudBucket struct { - b *blob.Bucket -} - -// Open opens a go-cloud bucket URL, e.g.: -// s3://bucket, gs://bucket, azblob://container -func Open(ctx context.Context, bucketURL string) (Bucket, error) { - b, err := blob.OpenBucket(ctx, bucketURL) - if err != nil { - return nil, err - } - return &GoCloudBucket{b: b}, nil -} - -func (g *GoCloudBucket) Upload(ctx context.Context, key string, body io.Reader) error { - w, err := g.b.NewWriter(ctx, key, nil) - if err != nil { - return err - } - if _, err = io.Copy(w, body); err != nil { - _ = w.Close() - return err - } - return w.Close() -} - -func (g *GoCloudBucket) Download(ctx context.Context, key string) ([]byte, error) { - r, err := g.b.NewReader(ctx, key, nil) - if err != nil { - 
return nil, err - } - defer r.Close() - var buf bytes.Buffer - if _, err := io.Copy(&buf, r); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (g *GoCloudBucket) SignedDownloadURL(ctx context.Context, key string, ttl time.Duration) (string, error) { - u, err := g.b.SignedURL(ctx, key, &blob.SignedURLOptions{ - Method: "GET", - Expiry: ttl, - ContentType: "", - }) - if err != nil { - return "", fmt.Errorf("signed download url failed: %w", err) - } - return u, nil -} - -func (g *GoCloudBucket) SignedUploadURL(ctx context.Context, key string, ttl time.Duration) (string, error) { - u, err := g.b.SignedURL(ctx, key, &blob.SignedURLOptions{ - Method: "PUT", - Expiry: ttl, - }) - if err != nil { - return "", fmt.Errorf("signed upload url failed: %w", err) - } - return u, nil -} - -func (g *GoCloudBucket) Close() error { - return g.b.Close() -} diff --git a/upload/batch.go b/upload/batch.go deleted file mode 100644 index bfa8c13..0000000 --- a/upload/batch.go +++ /dev/null @@ -1,142 +0,0 @@ -package upload - -import ( - "context" - "fmt" - "net/http" - "os" - "sync" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/transfer" - "github.com/vbauerster/mpb/v8" - "github.com/vbauerster/mpb/v8/decor" -) - -func InitBatchUploadChannels(numParallel int, inputSliceLen int) (int, chan *http.Response, chan error, []common.FileUploadRequestObject) { - workers := numParallel - if workers < 1 || workers > inputSliceLen { - workers = inputSliceLen - } - if workers < 1 { - workers = 1 - } - - respCh := make(chan *http.Response, inputSliceLen) - errCh := make(chan error, inputSliceLen) - batchSlice := make([]common.FileUploadRequestObject, 0, workers) - - return workers, respCh, errCh, batchSlice -} - -func BatchUpload( - ctx context.Context, - bk transfer.Uploader, - logger *logs.Gen3Logger, - furObjects []common.FileUploadRequestObject, - workers int, - respCh chan *http.Response, - errCh chan error, 
- bucketName string, -) { - if len(furObjects) == 0 { - return - } - - // Ensure bucket is set - for i := range furObjects { - if furObjects[i].Bucket == "" { - furObjects[i].Bucket = bucketName - } - } - - progress := mpb.New(mpb.WithOutput(os.Stdout)) - - workCh := make(chan common.FileUploadRequestObject, len(furObjects)) - - var wg sync.WaitGroup - for i := 0; i < workers; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for fur := range workCh { - // --- Ensure presigned URL --- - if fur.PresignedURL == "" { - resp, err := GeneratePresignedUploadURL(ctx, bk, fur.ObjectKey, fur.FileMetadata, fur.Bucket) - if err != nil { - logger.Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, "", 0, false) - errCh <- err - continue - } - fur.PresignedURL = resp.URL - fur.GUID = resp.GUID - logger.Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, resp.GUID, 0, false) // update log - } - - // --- Open file --- - file, err := os.Open(fur.SourcePath) - if err != nil { - logger.Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, fur.GUID, 0, false) - errCh <- fmt.Errorf("file open error: %w", err) - continue - } - - fi, err := file.Stat() - if err != nil { - file.Close() - logger.Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, fur.GUID, 0, false) - errCh <- fmt.Errorf("file stat error: %w", err) - continue - } - - if fi.Size() > common.FileSizeLimit { - file.Close() - logger.Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, fur.GUID, 0, false) - errCh <- fmt.Errorf("file size exceeds limit: %s", fur.ObjectKey) - continue - } - - // --- Progress bar --- - bar := progress.AddBar(fi.Size(), - mpb.PrependDecorators( - decor.Name(fur.ObjectKey+" "), - decor.CountersKibiByte("% .1f / % .1f"), - ), - mpb.AppendDecorators( - decor.Percentage(), - decor.AverageSpeed(decor.SizeB1024(0), " % .1f"), - ), - ) - - proxyReader := bar.ProxyReader(file) - - // --- Upload --- - err = bk.Upload(ctx, fur.PresignedURL, proxyReader, fi.Size()) - - // Cleanup - 
file.Close() - bar.Abort(false) - - if err != nil { - logger.Failed(fur.SourcePath, fur.ObjectKey, fur.FileMetadata, fur.GUID, 0, false) - errCh <- err - continue - } - - // Success - logger.DeleteFromFailedLog(fur.SourcePath) - logger.Succeeded(fur.SourcePath, fur.GUID) - logger.Scoreboard().IncrementSB(0) - } - }() - } - - for _, obj := range furObjects { - workCh <- obj - } - close(workCh) - - wg.Wait() - progress.Wait() -} diff --git a/upload/multipart.go b/upload/multipart.go deleted file mode 100644 index 4f71a0b..0000000 --- a/upload/multipart.go +++ /dev/null @@ -1,346 +0,0 @@ -package upload - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "os" - "path/filepath" - "sort" - "strings" - "sync" - "sync/atomic" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/fence" - "github.com/calypr/data-client/transfer" - syxfer "github.com/calypr/syfon/xfer" - "github.com/vbauerster/mpb/v8" - "github.com/vbauerster/mpb/v8/decor" -) - -type multipartResumeState struct { - SourcePath string `json:"source_path"` - ObjectKey string `json:"object_key"` - GUID string `json:"guid"` - Bucket string `json:"bucket"` - FileSize int64 `json:"file_size"` - FileModUnixNano int64 `json:"file_mod_unix_nano"` - ChunkSize int64 `json:"chunk_size"` - UploadID string `json:"upload_id"` - FinalGUID string `json:"final_guid"` - Key string `json:"key"` - Completed map[int]string `json:"completed"` -} - -func MultipartUpload(ctx context.Context, bk transfer.Uploader, req common.FileUploadRequestObject, file *os.File, showProgress bool) error { - bk.Logger().DebugContext(ctx, "File Multipart Upload Request", "request", req) - failUploadOnce := strings.TrimSpace(os.Getenv("DATA_CLIENT_TEST_FAIL_UPLOAD_PART_ONCE")) == "1" - var injectedUploadFailure atomic.Bool - - stat, err := file.Stat() - if err != nil { - return fmt.Errorf("cannot stat file: %w", err) - } - - fileSize := stat.Size() - if fileSize == 
0 { - return fmt.Errorf("file is empty: %s", req.ObjectKey) - } - - var p *mpb.Progress - var bar *mpb.Bar - if showProgress { - p = mpb.New(mpb.WithOutput(os.Stdout)) - bar = p.AddBar(fileSize, - mpb.PrependDecorators( - decor.Name(req.ObjectKey+" "), - decor.CountersKibiByte("%.1f / %.1f"), - ), - mpb.AppendDecorators( - decor.Percentage(), - decor.AverageSpeed(decor.SizeB1024(0), " % .1f"), - ), - ) - } - - chunkSize := OptimalChunkSize(fileSize) - checkpointPath, err := multipartCheckpointPath(req) - if err != nil { - return err - } - state, loaded := loadMultipartState(checkpointPath) - if !loaded || !state.matches(req, stat, chunkSize) { - uploadID, finalGUID, initErr := initMultipartUpload(ctx, bk, req, req.Bucket) - if initErr != nil { - return fmt.Errorf("failed to initiate multipart upload: %w", initErr) - } - state = &multipartResumeState{ - SourcePath: req.SourcePath, - ObjectKey: req.ObjectKey, - GUID: req.GUID, - Bucket: req.Bucket, - FileSize: fileSize, - FileModUnixNano: stat.ModTime().UnixNano(), - ChunkSize: chunkSize, - UploadID: uploadID, - FinalGUID: finalGUID, - Key: req.ObjectKey, - Completed: map[int]string{}, - } - if saveErr := saveMultipartState(checkpointPath, state); saveErr != nil { - return saveErr - } - } - uploadID := state.UploadID - key := state.Key - bk.Logger().DebugContext(ctx, "Initialized Upload", "id", uploadID, "guid", state.FinalGUID, "key", key) - - numChunks := int((fileSize + chunkSize - 1) / chunkSize) - - chunks := make(chan int, numChunks) - for partNum := 1; partNum <= numChunks; partNum++ { - if _, ok := state.Completed[partNum]; ok { - continue - } - chunks <- partNum - } - close(chunks) - - var ( - wg sync.WaitGroup - mu sync.Mutex - uploadErrors []error - totalBytes int64 // Atomic counter for monotonically increasing BytesSoFar - ) - for partNum := range state.Completed { - offset := int64(partNum-1) * chunkSize - size := chunkSize - if offset+size > fileSize { - size = fileSize - offset - } - totalBytes += 
size - } - - progressCallback := common.GetProgress(ctx) - oid := common.GetOid(ctx) - if oid == "" { - oid = resolveUploadOID(req) - } - - // 3. Worker logic - worker := func() { - defer wg.Done() - - for partNum := range chunks { - if failUploadOnce && injectedUploadFailure.CompareAndSwap(false, true) { - mu.Lock() - uploadErrors = append(uploadErrors, fmt.Errorf("injected test interruption before multipart part %d", partNum)) - mu.Unlock() - return - } - - offset := int64(partNum-1) * chunkSize - size := chunkSize - if offset+size > fileSize { - size = fileSize - offset - } - - // SectionReader implements io.Reader, io.ReaderAt, and io.Seeker - // It allows each worker to read its own segment without a shared buffer. - section := io.NewSectionReader(file, offset, size) - - url, err := generateMultipartPresignedURL(ctx, bk, key, uploadID, partNum, req.Bucket) - if err != nil { - mu.Lock() - uploadErrors = append(uploadErrors, fmt.Errorf("URL generation failed part %d: %w", partNum, err)) - mu.Unlock() - return - } - - // Perform the upload using the section directly - etag, err := uploadPart(ctx, url, section, size) - if err != nil { - mu.Lock() - uploadErrors = append(uploadErrors, fmt.Errorf("upload failed part %d: %w", partNum, err)) - mu.Unlock() - return - } - - mu.Lock() - state.Completed[partNum] = etag - if err := saveMultipartState(checkpointPath, state); err != nil { - uploadErrors = append(uploadErrors, fmt.Errorf("failed to persist multipart resume checkpoint: %w", err)) - mu.Unlock() - return - } - if bar != nil { - bar.IncrInt64(size) - } - if progressCallback != nil { - currentTotal := atomic.AddInt64(&totalBytes, size) - _ = progressCallback(common.ProgressEvent{ - Event: "progress", - Oid: oid, - BytesSinceLast: size, - BytesSoFar: currentTotal, - }) - } - mu.Unlock() - } - } - - // Launch workers - for range common.MaxConcurrentUploads { - wg.Add(1) - go worker() - } - wg.Wait() - - if p != nil { - p.Wait() - } - - if len(uploadErrors) > 0 { - 
return fmt.Errorf("multipart upload failed with %d errors: %v", len(uploadErrors), uploadErrors) - } - - // 5. Finalize the upload - parts := make([]fence.MultipartPart, 0, len(state.Completed)) - for partNum, etag := range state.Completed { - parts = append(parts, fence.MultipartPart{ - PartNumber: partNum, - ETag: etag, - }) - } - sort.Slice(parts, func(i, j int) bool { - return parts[i].PartNumber < parts[j].PartNumber - }) - - if err := CompleteMultipartUpload(ctx, bk, key, uploadID, parts, req.Bucket); err != nil { - return fmt.Errorf("failed to complete multipart upload: %w", err) - } - - bk.Logger().DebugContext(ctx, "Successfully uploaded", "file", req.ObjectKey, "key", key) - _ = os.Remove(checkpointPath) - return nil -} - -func initMultipartUpload(ctx context.Context, bk transfer.Uploader, furObject common.FileUploadRequestObject, bucketName string) (string, string, error) { - msg, err := bk.InitMultipartUpload(ctx, furObject.GUID, furObject.ObjectKey, bucketName) - - if err != nil { - if strings.Contains(err.Error(), "404") { - return "", "", errors.New(err.Error() + "\nPlease check to ensure FENCE version is at 2.8.0 or beyond") - } - return "", "", errors.New("Error has occurred during multipart upload initialization, detailed error message: " + err.Error()) - } - - if msg.UploadID == "" || msg.GUID == "" { - return "", "", errors.New("unknown error has occurred during multipart upload initialization. 
Please check logs from Gen3 services") - } - return msg.UploadID, msg.GUID, nil -} - -func generateMultipartPresignedURL(ctx context.Context, bk transfer.Uploader, key string, uploadID string, partNumber int, bucketName string) (string, error) { - url, err := bk.GetMultipartUploadURL(ctx, key, uploadID, int32(partNumber), bucketName) - if err != nil { - return "", errors.New("Error has occurred during multipart upload presigned url generation, detailed error message: " + err.Error()) - } - - if url == "" { - return "", errors.New("unknown error has occurred during multipart upload presigned url generation. Please check logs from Gen3 services") - } - return url, nil -} - -func CompleteMultipartUpload(ctx context.Context, bk transfer.Uploader, key string, uploadID string, parts []fence.MultipartPart, bucketName string) error { - cParts := make([]common.MultipartUploadPart, len(parts)) - for i, p := range parts { - cParts[i] = common.MultipartUploadPart{PartNumber: int32(p.PartNumber), ETag: p.ETag} - } - err := bk.CompleteMultipartUpload(ctx, key, uploadID, cParts, bucketName) - if err != nil { - return errors.New("Error has occurred during completing multipart upload, detailed error message: " + err.Error()) - } - return nil -} - -// uploadPart now returns the ETag and error directly. -// It accepts a Context to allow for cancellation (e.g., if another part fails). 
-func uploadPart(ctx context.Context, url string, data io.Reader, partSize int64) (string, error) { - etag, err := syxfer.Upload(ctx, http.DefaultClient, url, data, partSize) - if err != nil { - return "", err - } - if etag == "" { - return "", errors.New("no ETag returned") - } - - return etag, nil -} - -func (s *multipartResumeState) matches(req common.FileUploadRequestObject, info os.FileInfo, chunkSize int64) bool { - if s == nil { - return false - } - return s.SourcePath == req.SourcePath && - s.ObjectKey == req.ObjectKey && - s.GUID == req.GUID && - s.Bucket == req.Bucket && - s.FileSize == info.Size() && - s.FileModUnixNano == info.ModTime().UnixNano() && - s.ChunkSize == chunkSize && - s.UploadID != "" && - s.Key != "" -} - -func multipartCheckpointPath(req common.FileUploadRequestObject) (string, error) { - cacheDir := strings.TrimSpace(os.Getenv("DATA_CLIENT_CACHE_DIR")) - if cacheDir == "" { - var err error - cacheDir, err = os.UserCacheDir() - if err != nil || cacheDir == "" { - cacheDir = os.TempDir() - } - } - base := filepath.Join(cacheDir, "calypr", "data-client", "multipart-resume") - if err := os.MkdirAll(base, 0o755); err != nil { - return "", err - } - sum := sha256.Sum256([]byte(req.SourcePath + "|" + req.ObjectKey + "|" + req.GUID + "|" + req.Bucket)) - name := hex.EncodeToString(sum[:]) + ".json" - return filepath.Join(base, name), nil -} - -func loadMultipartState(path string) (*multipartResumeState, bool) { - data, err := os.ReadFile(path) - if err != nil { - return nil, false - } - var st multipartResumeState - if err := json.Unmarshal(data, &st); err != nil { - return nil, false - } - if st.Completed == nil { - st.Completed = map[int]string{} - } - return &st, true -} - -func saveMultipartState(path string, state *multipartResumeState) error { - data, err := json.Marshal(state) - if err != nil { - return err - } - tmpPath := path + ".tmp" - if err := os.WriteFile(tmpPath, data, 0o644); err != nil { - return err - } - return 
os.Rename(tmpPath, path) -} diff --git a/upload/multipart_test.go b/upload/multipart_test.go deleted file mode 100644 index 810b402..0000000 --- a/upload/multipart_test.go +++ /dev/null @@ -1,329 +0,0 @@ -package upload - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "net/url" - "os" - "path/filepath" - "strings" - "sync" - "sync/atomic" - "testing" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/conf" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/request" -) - -type fakeGen3Upload struct { - cred *conf.Credential - logger *logs.Gen3Logger - doFunc func(context.Context, *request.RequestBuilder) (*http.Response, error) -} - -func (f *fakeGen3Upload) Name() string { return "fake" } -func (f *fakeGen3Upload) Logger() *logs.Gen3Logger { return f.logger } - -func (f *fakeGen3Upload) Do(ctx context.Context, req *request.RequestBuilder) (*http.Response, error) { - return f.doFunc(ctx, req) -} -func (f *fakeGen3Upload) New(method, url string) *request.RequestBuilder { - return &request.RequestBuilder{Method: method, Url: url} -} - -func (f *fakeGen3Upload) ResolveUploadURL(ctx context.Context, guid string, filename string, metadata common.FileMetadata, bucket string) (string, error) { - return "", fmt.Errorf("not implemented") -} -func (f *fakeGen3Upload) ResolveUploadURLs(ctx context.Context, requests []common.UploadURLResolveRequest) ([]common.UploadURLResolveResponse, error) { - return nil, fmt.Errorf("not implemented") -} - -func (f *fakeGen3Upload) InitMultipartUpload(ctx context.Context, guid string, filename string, bucket string) (*common.MultipartUploadInit, error) { - resp, err := f.Do(ctx, &request.RequestBuilder{Url: common.FenceDataMultipartInitEndpoint}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - var msg struct { - UploadID string `json:"uploadId"` - GUID string `json:"guid"` - } - if err := 
json.NewDecoder(resp.Body).Decode(&msg); err != nil { - return nil, err - } - return &common.MultipartUploadInit{GUID: msg.GUID, UploadID: msg.UploadID}, nil -} -func (f *fakeGen3Upload) GetMultipartUploadURL(ctx context.Context, key string, uploadID string, partNumber int32, bucket string) (string, error) { - resp, err := f.Do(ctx, &request.RequestBuilder{Url: common.FenceDataMultipartUploadEndpoint}) - if err != nil { - return "", err - } - defer resp.Body.Close() - var msg struct { - PresignedURL string `json:"presigned_url"` - } - if err := json.NewDecoder(resp.Body).Decode(&msg); err != nil { - return "", err - } - return msg.PresignedURL, nil -} -func (f *fakeGen3Upload) CompleteMultipartUpload(ctx context.Context, key string, uploadID string, parts []common.MultipartUploadPart, bucket string) error { - _, err := f.Do(ctx, &request.RequestBuilder{Url: common.FenceDataMultipartCompleteEndpoint}) - return err -} -func (f *fakeGen3Upload) Upload(ctx context.Context, url string, body io.Reader, size int64) error { - return nil -} -func (f *fakeGen3Upload) UploadPart(ctx context.Context, url string, body io.Reader, size int64) (string, error) { - return "", nil -} -func (f *fakeGen3Upload) DeleteFile(ctx context.Context, guid string) (string, error) { - return "", nil -} - -func TestMultipartUploadProgressIntegration(t *testing.T) { - ctx := context.Background() - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPut { - w.WriteHeader(http.StatusMethodNotAllowed) - return - } - _, _ = io.Copy(io.Discard, r.Body) - _ = r.Body.Close() - w.Header().Set("ETag", "etag-123") - w.WriteHeader(http.StatusOK) - })) - defer server.Close() - - file, err := os.CreateTemp(t.TempDir(), "multipart-*.bin") - if err != nil { - t.Fatalf("create temp file: %v", err) - } - defer file.Close() - - fileSize := int64(101 * common.MB) - if err := file.Truncate(fileSize); err != nil { - t.Fatalf("truncate file: %v", err) 
- } - if _, err := file.Seek(0, io.SeekStart); err != nil { - t.Fatalf("seek file: %v", err) - } - - var ( - events []common.ProgressEvent - mu sync.Mutex - ) - progress := func(event common.ProgressEvent) error { - mu.Lock() - defer mu.Unlock() - events = append(events, event) - return nil - } - - logger := logs.NewGen3Logger(nil, "", "") - fake := &fakeGen3Upload{ - cred: &conf.Credential{ - APIEndpoint: "https://example.com", - AccessToken: "token", - }, - logger: logger, - doFunc: func(_ context.Context, req *request.RequestBuilder) (*http.Response, error) { - switch { - case strings.Contains(req.Url, common.FenceDataMultipartInitEndpoint): - return newJSONResponse(req.Url, `{"uploadId":"upload-123","guid":"guid-123"}`), nil - case strings.Contains(req.Url, common.FenceDataMultipartUploadEndpoint): - return newJSONResponse(req.Url, fmt.Sprintf(`{"presigned_url":"%s"}`, server.URL)), nil - case strings.Contains(req.Url, common.FenceDataMultipartCompleteEndpoint): - return newJSONResponse(req.Url, `{}`), nil - default: - return nil, fmt.Errorf("unexpected request url: %s", req.Url) - } - }, - } - - requestObject := common.FileUploadRequestObject{ - SourcePath: file.Name(), - ObjectKey: "multipart.bin", - GUID: "guid-123", - Bucket: "bucket", - } - - ctx = common.WithProgress(ctx, progress) - ctx = common.WithOid(ctx, "guid-123") - - if err := MultipartUpload(ctx, fake, requestObject, file, false); err != nil { - t.Fatalf("multipart upload failed: %v", err) - } - - mu.Lock() - defer mu.Unlock() - if len(events) == 0 { - t.Fatal("expected progress events") - } - for i := 1; i < len(events); i++ { - if events[i].BytesSoFar < events[i-1].BytesSoFar { - t.Fatalf("bytesSoFar not monotonic: %d then %d", events[i-1].BytesSoFar, events[i].BytesSoFar) - } - } - last := events[len(events)-1] - if last.BytesSoFar != fileSize { - t.Fatalf("expected final bytesSoFar %d, got %d", fileSize, last.BytesSoFar) - } -} - -func TestMultipartUploadResumesWithoutReinit(t *testing.T) { - 
ctx := context.Background() - - var putCount atomic.Int64 - var failFirstPut atomic.Bool - failFirstPut.Store(true) - origTransport := http.DefaultClient.Transport - http.DefaultClient.Transport = roundTripFunc(func(req *http.Request) (*http.Response, error) { - if req.Method != http.MethodPut { - return &http.Response{ - StatusCode: http.StatusMethodNotAllowed, - Body: io.NopCloser(strings.NewReader("method not allowed")), - Header: make(http.Header), - Request: req, - }, nil - } - _, _ = io.Copy(io.Discard, req.Body) - _ = req.Body.Close() - - if failFirstPut.Load() && putCount.Load() == 0 { - putCount.Add(1) - return &http.Response{ - StatusCode: http.StatusInternalServerError, - Body: io.NopCloser(strings.NewReader("simulated failure")), - Header: make(http.Header), - Request: req, - }, nil - } - n := putCount.Add(1) - h := make(http.Header) - h.Set("ETag", fmt.Sprintf("etag-%d", n)) - return &http.Response{ - StatusCode: http.StatusOK, - Body: io.NopCloser(strings.NewReader("")), - Header: h, - Request: req, - }, nil - }) - defer func() { http.DefaultClient.Transport = origTransport }() - - tmp := t.TempDir() - t.Setenv("DATA_CLIENT_CACHE_DIR", tmp) - path := filepath.Join(tmp, "large.bin") - f, err := os.Create(path) - if err != nil { - t.Fatalf("create temp file: %v", err) - } - // Sparse file >100MB triggers multipart with multiple parts. 
- if err := f.Truncate(120 * common.MB); err != nil { - _ = f.Close() - t.Fatalf("truncate temp file: %v", err) - } - if err := f.Close(); err != nil { - t.Fatalf("close temp file: %v", err) - } - - initCalls := 0 - completeCalls := 0 - logger := logs.NewGen3Logger(nil, "", "") - fake := &fakeGen3Upload{ - cred: &conf.Credential{ - APIEndpoint: "https://example.com", - AccessToken: "token", - }, - logger: logger, - doFunc: func(_ context.Context, req *request.RequestBuilder) (*http.Response, error) { - switch { - case strings.Contains(req.Url, common.FenceDataMultipartInitEndpoint): - initCalls++ - return newJSONResponse(req.Url, `{"uploadId":"upload-resume-1","guid":"guid-resume-1"}`), nil - case strings.Contains(req.Url, common.FenceDataMultipartUploadEndpoint): - return newJSONResponse(req.Url, `{"presigned_url":"https://upload.invalid/part"}`), nil - case strings.Contains(req.Url, common.FenceDataMultipartCompleteEndpoint): - completeCalls++ - return newJSONResponse(req.Url, `{}`), nil - default: - return nil, fmt.Errorf("unexpected request url: %s", req.Url) - } - }, - } - - req := common.FileUploadRequestObject{ - SourcePath: path, - ObjectKey: "resume.bin", - GUID: "guid-resume-1", - Bucket: "bucket", - } - checkpointPath, err := multipartCheckpointPath(req) - if err != nil { - t.Fatalf("checkpoint path: %v", err) - } - _ = os.Remove(checkpointPath) - - file1, err := os.Open(path) - if err != nil { - t.Fatalf("open file1: %v", err) - } - err = MultipartUpload(ctx, fake, req, file1, false) - _ = file1.Close() - if err == nil { - t.Fatal("expected first multipart upload to fail") - } - if initCalls != 1 { - t.Fatalf("expected one init after first run, got %d", initCalls) - } - if _, statErr := os.Stat(checkpointPath); statErr != nil { - t.Fatalf("expected checkpoint to exist after failure: %v", statErr) - } - - failFirstPut.Store(false) - file2, err := os.Open(path) - if err != nil { - t.Fatalf("open file2: %v", err) - } - err = MultipartUpload(ctx, fake, req, 
file2, false) - _ = file2.Close() - if err != nil { - t.Fatalf("resume multipart upload failed: %v", err) - } - - if initCalls != 1 { - t.Fatalf("expected resume to reuse existing upload init; init calls = %d", initCalls) - } - if completeCalls != 1 { - t.Fatalf("expected one complete call, got %d", completeCalls) - } - if _, statErr := os.Stat(checkpointPath); !os.IsNotExist(statErr) { - t.Fatalf("expected checkpoint cleanup after success, stat err: %v", statErr) - } -} - -func newJSONResponse(rawURL, body string) *http.Response { - parsedURL, err := url.Parse(rawURL) - if err != nil { - parsedURL = &url.URL{} - } - return &http.Response{ - StatusCode: http.StatusOK, - Body: io.NopCloser(bytes.NewBufferString(body)), - Request: &http.Request{URL: parsedURL}, - Header: make(http.Header), - } -} - -type roundTripFunc func(*http.Request) (*http.Response, error) - -func (f roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { - return f(req) -} diff --git a/upload/orchestrator.go b/upload/orchestrator.go deleted file mode 100644 index 8112ecc..0000000 --- a/upload/orchestrator.go +++ /dev/null @@ -1,122 +0,0 @@ -package upload - -import ( - "context" - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/transfer" -) - -// RegisterFile orchestrates the full registration and upload flow: -// 1. Build a DRS object from the local file (if not provided). -// 2. Register metadata with the DRS server via the provided drs.Client. -// 3. Upload the file content via the provided Backend. -func RegisterFile(ctx context.Context, bk UploadBackend, dc drs.Client, drsObject *drs.DRSObject, filePath string, bucketName string) (*drs.DRSObject, error) { - // 1. Ensure we have a valid OID/metadata. 
- // (Logic ported and generalized from git-drs/client/local/local_client.go) - - if drsObject == nil { - return nil, fmt.Errorf("drsObject must be provided (containing at least checksums/size)") - } - - // 2. Register with DRS server - res, err := dc.RegisterRecord(ctx, drsObject) - if err != nil { - return nil, fmt.Errorf("failed to register record: %w", err) - } - drsObject = res - - // 3. Check if file is already downloadable (optional but good optimization) - // (Skipping for now to prioritize core functionality, but can be added back) - - // 4. Determine upload filename/key - uploadFilename := filepath.Base(filePath) - if len(drsObject.AccessMethods) > 0 { - for _, am := range drsObject.AccessMethods { - if am.Type == "s3" || am.Type == "gs" { - if am.AccessUrl.Url == "" { - continue - } - parts := strings.Split(am.AccessUrl.Url, "/") - if candidate := parts[len(parts)-1]; candidate != "" { - uploadFilename = candidate - break - } - } - } - } - - // 5. Perform Upload - file, err := os.Open(filePath) - if err != nil { - return nil, fmt.Errorf("failed to open file for upload: %w", err) - } - defer file.Close() - - stat, err := file.Stat() - if err != nil { - return nil, fmt.Errorf("failed to stat file: %w", err) - } - - threshold := int64(5 * common.GB) // Default threshold - if stat.Size() < threshold { - uploadURL, err := bk.ResolveUploadURL(ctx, drsObject.Id, uploadFilename, common.FileMetadata{}, bucketName) - if err != nil { - return nil, fmt.Errorf("failed to get upload URL: %w", err) - } - if err := bk.Upload(ctx, uploadURL, file, stat.Size()); err != nil { - return nil, fmt.Errorf("upload failed: %w", err) - } - } else { - if err := multipartUpload(ctx, bk, drsObject.Id, uploadFilename, bucketName, file, stat.Size()); err != nil { - return nil, fmt.Errorf("multipart upload failed: %w", err) - } - } - - return drsObject, nil -} - -func multipartUpload(ctx context.Context, bk UploadBackend, guid, filename, bucket string, file *os.File, size int64) error { 
- initResp, err := bk.InitMultipartUpload(ctx, guid, filename, bucket) - if err != nil { - return err - } - - chunkSize := OptimalChunkSize(size) - numChunks := int((size + chunkSize - 1) / chunkSize) - parts := make([]common.MultipartUploadPart, numChunks) - - for partNum := 1; partNum <= numChunks; partNum++ { - offset := int64(partNum-1) * chunkSize - partSize := chunkSize - if offset+partSize > size { - partSize = size - offset - } - - partURL, err := bk.GetMultipartUploadURL(ctx, "", initResp.UploadID, int32(partNum), bucket) - if err != nil { - return err - } - - section := io.NewSectionReader(file, offset, partSize) - etag, err := bk.UploadPart(ctx, partURL, section, partSize) - if err != nil { - return err - } - - parts[partNum-1] = common.MultipartUploadPart{ - PartNumber: int32(partNum), - ETag: etag, - } - } - - return bk.CompleteMultipartUpload(ctx, "", initResp.UploadID, parts, bucket) -} - -type UploadBackend = transfer.Uploader diff --git a/upload/progress_reader.go b/upload/progress_reader.go deleted file mode 100644 index da12f7d..0000000 --- a/upload/progress_reader.go +++ /dev/null @@ -1,81 +0,0 @@ -package upload - -import ( - "fmt" - "io" - - "github.com/calypr/data-client/common" -) - -type progressReader struct { - reader io.Reader - onProgress common.ProgressCallback - hash string - total int64 - bytesSoFar int64 - bytesSinceReport int64 -} - -func newProgressReader(reader io.Reader, onProgress common.ProgressCallback, hash string, total int64) *progressReader { - return &progressReader{ - reader: reader, - onProgress: onProgress, - hash: hash, - total: total, - } -} - -func resolveUploadOID(req common.FileUploadRequestObject) string { - if req.ObjectKey != "" { - return req.ObjectKey - } - return req.GUID -} - -func (pr *progressReader) Read(p []byte) (int, error) { - n, err := pr.reader.Read(p) - if n > 0 && pr.onProgress != nil { - delta := int64(n) - pr.bytesSoFar += delta - pr.bytesSinceReport += delta - - if pr.bytesSinceReport >= 
common.OnProgressThreshold { - if progressErr := pr.onProgress(common.ProgressEvent{ - Event: "progress", - Oid: pr.hash, - BytesSoFar: pr.bytesSoFar, - BytesSinceLast: pr.bytesSinceReport, - }); progressErr != nil { - return n, progressErr - } - pr.bytesSinceReport = 0 - } - } - return n, err -} - -func (pr *progressReader) Finalize() error { - if pr.onProgress != nil && pr.bytesSinceReport > 0 { - _ = pr.onProgress(common.ProgressEvent{ - Event: "progress", - Oid: pr.hash, - BytesSoFar: pr.bytesSoFar, - BytesSinceLast: pr.bytesSinceReport, - }) - pr.bytesSinceReport = 0 - } - if pr.total > 0 && pr.bytesSoFar < pr.total { - delta := pr.total - pr.bytesSoFar - pr.bytesSoFar = pr.total - if pr.onProgress != nil { - _ = pr.onProgress(common.ProgressEvent{ - Event: "progress", - Oid: pr.hash, - BytesSoFar: pr.bytesSoFar, - BytesSinceLast: delta, - }) - } - return fmt.Errorf("upload incomplete: %d/%d bytes", pr.bytesSoFar-delta, pr.total) - } - return nil -} diff --git a/upload/progress_reader_test.go b/upload/progress_reader_test.go deleted file mode 100644 index 789afa0..0000000 --- a/upload/progress_reader_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package upload - -import ( - "bytes" - "io" - "testing" - - "github.com/calypr/data-client/common" -) - -func TestProgressReaderFinalizes(t *testing.T) { - payload := bytes.Repeat([]byte("a"), 16) - var events []common.ProgressEvent - - reader := newProgressReader(bytes.NewReader(payload), func(event common.ProgressEvent) error { - events = append(events, event) - return nil - }, "oid-123", int64(len(payload))) - - if _, err := io.Copy(io.Discard, reader); err != nil { - t.Fatalf("copy failed: %v", err) - } - if err := reader.Finalize(); err != nil { - t.Fatalf("finalize failed: %v", err) - } - - if len(events) == 0 { - t.Fatal("expected progress events, got none") - } - - var total int64 - for _, event := range events { - if event.Event != "progress" { - t.Fatalf("unexpected event type: %s", event.Event) - } - total += 
event.BytesSinceLast - } - - last := events[len(events)-1] - if last.BytesSoFar != int64(len(payload)) { - t.Fatalf("expected final bytesSoFar %d, got %d", len(payload), last.BytesSoFar) - } - if total != int64(len(payload)) { - t.Fatalf("expected bytesSinceLast sum %d, got %d", len(payload), total) - } -} diff --git a/upload/request.go b/upload/request.go deleted file mode 100644 index e21dff2..0000000 --- a/upload/request.go +++ /dev/null @@ -1,58 +0,0 @@ -package upload - -import ( - "context" - "fmt" - // Added for io.Reader - "os" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/transfer" - "github.com/vbauerster/mpb/v8" - "github.com/vbauerster/mpb/v8/decor" -) - -// GeneratePresignedURL handles both Shepherd and Fence fallback -func GeneratePresignedUploadURL(ctx context.Context, bk transfer.Uploader, filename string, metadata common.FileMetadata, bucket string) (*common.PresignedURLResponse, error) { - url, err := bk.ResolveUploadURL(ctx, "", filename, metadata, bucket) - if err != nil { - return nil, err - } - var res common.PresignedURLResponse - res = common.PresignedURLResponse{URL: url, GUID: ""} - return &res, nil -} - -// GenerateUploadRequest helps preparing the HTTP request for upload and the progress bar for single part upload -func generateUploadRequest(ctx context.Context, bk transfer.Uploader, req common.FileUploadRequestObject, file *os.File, p *mpb.Progress) (common.FileUploadRequestObject, error) { - if req.PresignedURL == "" { - url, err := bk.ResolveUploadURL(ctx, req.GUID, req.ObjectKey, req.FileMetadata, req.Bucket) - if err != nil { - return req, fmt.Errorf("Upload error: %w", err) - } - req.PresignedURL = url - } - - fi, err := file.Stat() - if err != nil { - return req, fmt.Errorf("stat failed: %w", err) - } - - if fi.Size() > common.FileSizeLimit { - return req, fmt.Errorf("file size exceeds limit") - } - - if p != nil { - p.AddBar(fi.Size(), - mpb.PrependDecorators( - decor.Name(req.ObjectKey, decor.WC{W: 
len(req.ObjectKey) + 1, C: decor.DindentRight}), - decor.CountersKibiByte("% .2f / % .2f"), - ), - mpb.AppendDecorators( - decor.OnComplete(decor.Percentage(decor.WC{W: 5}), "done"), - ), - ) - } - - return req, nil -} diff --git a/upload/retry.go b/upload/retry.go deleted file mode 100644 index 15ff73f..0000000 --- a/upload/retry.go +++ /dev/null @@ -1,170 +0,0 @@ -package upload - -import ( - "context" - "os" - "path/filepath" - "time" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/transfer" -) - -// GetWaitTime calculates exponential backoff with cap -func GetWaitTime(retryCount int) time.Duration { - exp := 1 << retryCount // 2^retryCount - seconds := int64(exp) - if seconds > common.MaxWaitTime { - seconds = common.MaxWaitTime - } - return time.Duration(seconds) * time.Second -} - -// RetryFailedUploads re-uploads previously failed files with exponential backoff -func RetryFailedUploads(ctx context.Context, bk transfer.Uploader, logger *logs.Gen3Logger, failedMap map[string]common.RetryObject) { - if len(failedMap) == 0 { - logger.Println("No failed files to retry.") - return - } - - sb := logger.Scoreboard() - - logger.Printf("Starting retry-upload for %d failed Uploads", len(failedMap)) - retryChan := make(chan common.RetryObject, len(failedMap)) - - // Queue only non-already-succeeded files - for _, ro := range failedMap { - retryChan <- ro - } - - if len(retryChan) == 0 { - logger.Println("All previously failed files have since succeeded.") - return - } - - for ro := range retryChan { - ro.RetryCount++ - logger.Printf("#%d retry — %s\n", ro.RetryCount, ro.SourcePath) - wait := GetWaitTime(ro.RetryCount) - logger.Printf("Waiting %.0f seconds before retry...\n", wait.Seconds()) - time.Sleep(wait) - - // Clean up old record if exists - if ro.GUID != "" { - if msg, err := bk.DeleteFile( - ctx, - ro.GUID, - ); err == nil { - logger.Println(msg) - } - } - - file, err := os.Open(ro.SourcePath) - 
if err != nil { - continue - } - - // Ensure filename is set - if ro.ObjectKey == "" { - absPath, _ := common.GetAbsolutePath(ro.SourcePath) - ro.ObjectKey = filepath.Base(absPath) - } - - if ro.Multipart { - // Retry multipart - req := common.FileUploadRequestObject{ - SourcePath: ro.SourcePath, - ObjectKey: ro.ObjectKey, - GUID: ro.GUID, - FileMetadata: ro.FileMetadata, - Bucket: ro.Bucket, - } - err = MultipartUpload(ctx, bk, req, file, true) - if err == nil { - logger.Succeeded(ro.SourcePath, req.GUID) - if sb != nil { - sb.IncrementSB(ro.RetryCount - 1) - } - continue - } - } else { - // Retry single-part - respObj, err := GeneratePresignedUploadURL(ctx, bk, ro.ObjectKey, ro.FileMetadata, ro.Bucket) - if err != nil { - handleRetryFailure(ctx, bk, logger, ro, retryChan, err) - continue - } - - file, err := os.Open(ro.SourcePath) - if err != nil { - handleRetryFailure(ctx, bk, logger, ro, retryChan, err) - continue - } - stat, _ := file.Stat() - file.Close() - - if stat.Size() > common.FileSizeLimit { - ro.Multipart = true - retryChan <- ro - continue - } - - fur := common.FileUploadRequestObject{ - SourcePath: ro.SourcePath, - ObjectKey: ro.ObjectKey, - FileMetadata: ro.FileMetadata, - GUID: respObj.GUID, - PresignedURL: respObj.URL, - } - - fur, err = generateUploadRequest(ctx, bk, fur, nil, nil) - if err != nil { - handleRetryFailure(ctx, bk, logger, ro, retryChan, err) - continue - } - - err = UploadSingle(ctx, bk, logger, fur, true) - if err == nil { - logger.Succeeded(ro.SourcePath, fur.GUID) - if sb != nil { - sb.IncrementSB(ro.RetryCount - 1) - } - continue - } - } - - // On failure, requeue if retries remain - handleRetryFailure(ctx, bk, logger, ro, retryChan, err) - } -} - -// handleRetryFailure logs failure and requeues if retries remain -func handleRetryFailure(ctx context.Context, bk transfer.Uploader, logger *logs.Gen3Logger, ro common.RetryObject, retryChan chan common.RetryObject, err error) { - logger.Failed(ro.SourcePath, ro.ObjectKey, 
ro.FileMetadata, ro.GUID, ro.RetryCount, ro.Multipart) - if err != nil { - logger.Println("Retry error:", err) - } - - if ro.RetryCount < common.MaxRetryCount { - retryChan <- ro - return - } - - // Max retries reached — final cleanup - if ro.GUID != "" { - if msg, err := bk.DeleteFile(ctx, ro.GUID); err == nil { - logger.Println("Cleaned up failed record:", msg) - } else { - logger.Println("Cleanup failed:", err) - } - } - - if sb := logger.Scoreboard(); sb != nil { - sb.IncrementSB(common.MaxRetryCount + 1) - } - - if len(retryChan) == 0 { - close(retryChan) - } -} diff --git a/upload/singleFile.go b/upload/singleFile.go deleted file mode 100644 index 48e7031..0000000 --- a/upload/singleFile.go +++ /dev/null @@ -1,98 +0,0 @@ -package upload - -import ( - "context" - "fmt" - "io" - "os" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/transfer" -) - -func UploadSingle(ctx context.Context, bk transfer.Uploader, logger *logs.Gen3Logger, req common.FileUploadRequestObject, showProgress bool) error { - logger.DebugContext(ctx, "File upload request", - "source_path", req.SourcePath, - "object_key", req.ObjectKey, - "guid", req.GUID, - "bucket", req.Bucket, - ) - - // Helper to handle * in path if it was passed, though optimally caller handles this. - // We will trust the SourcePath in the request object mostly, but for safety we can check existence. - // But commonly parsing happens before creating the object usually. - // Let's assume req.SourcePath is a single valid file path for now as per design. 
- - file, err := os.Open(req.SourcePath) - if err != nil { - if showProgress { - sb := logger.Scoreboard() - if sb != nil { - sb.IncrementSB(len(sb.Counts)) - sb.PrintSB() - } - } - logger.Failed(req.SourcePath, req.ObjectKey, common.FileMetadata{}, "", 0, false) - logger.ErrorContext(ctx, "File open error", "file", req.SourcePath, "error", err) - return fmt.Errorf("[ERROR] when opening file path %s, an error occurred: %s\n", req.SourcePath, err.Error()) - } - defer file.Close() - - fi, err := file.Stat() - if err != nil { - return fmt.Errorf("failed to stat file: %w", err) - } - fileSize := fi.Size() - - furObject, err := generateUploadRequest(ctx, bk, req, file, nil) - if err != nil { - if showProgress { - sb := logger.Scoreboard() - if sb != nil { - sb.IncrementSB(len(sb.Counts)) - sb.PrintSB() - } - } - logger.Failed(req.SourcePath, req.ObjectKey, common.FileMetadata{}, req.GUID, 0, false) - logger.ErrorContext(ctx, "Error occurred during request generation", "file", req.SourcePath, "error", err) - return fmt.Errorf("[ERROR] Error occurred during request generation for file %s: %s\n", req.SourcePath, err.Error()) - } - - progressCallback := common.GetProgress(ctx) - oid := common.GetOid(ctx) - if oid == "" { - oid = resolveUploadOID(furObject) - } - - var reader io.Reader = file - var progressTracker *progressReader - if progressCallback != nil { - progressTracker = newProgressReader(file, progressCallback, oid, fileSize) - reader = progressTracker - } - - err = bk.Upload(ctx, furObject.PresignedURL, reader, fileSize) - if progressTracker != nil { - if finalizeErr := progressTracker.Finalize(); finalizeErr != nil && err == nil { - err = finalizeErr - } - } - - if err != nil { - logger.ErrorContext(ctx, "Upload failed", "error", err) - return err - } - - logger.DebugContext(ctx, "Successfully uploaded", "file", req.ObjectKey) - logger.Succeeded(req.SourcePath, req.GUID) - - if showProgress { - sb := logger.Scoreboard() - if sb != nil { - sb.IncrementSB(0) - 
sb.PrintSB() - } - } - return nil -} diff --git a/upload/types.go b/upload/types.go deleted file mode 100644 index 91697a5..0000000 --- a/upload/types.go +++ /dev/null @@ -1,27 +0,0 @@ -package upload - -import "github.com/calypr/data-client/common" - -type UploadConfig struct { - BucketName string - NumParallel int - ForceMultipart bool - IncludeSubDirName bool - HasMetadata bool - ShowProgress bool -} - -// FileInfo is a helper struct for including subdirname as filename -type FileInfo struct { - FilePath string - Filename string - FileMetadata common.FileMetadata - ObjectId string -} - -// RenamedOrSkippedFileInfo is a helper struct for recording renamed or skipped files -type RenamedOrSkippedFileInfo struct { - GUID string - OldFilename string - NewFilename string -} diff --git a/upload/upload.go b/upload/upload.go deleted file mode 100644 index 1591d74..0000000 --- a/upload/upload.go +++ /dev/null @@ -1,178 +0,0 @@ -package upload - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/calypr/data-client/common" - drs "github.com/calypr/data-client/drs" - "github.com/calypr/data-client/transfer" - "github.com/vbauerster/mpb/v8" -) - -// Upload is a unified catch-all function that automatically chooses between -// single-part and multipart upload based on file size. 
-func Upload(ctx context.Context, bk transfer.Uploader, req common.FileUploadRequestObject, showProgress bool) error { - bk.Logger().DebugContext(ctx, "Processing Upload Request", "source", req.SourcePath) - - file, err := os.Open(req.SourcePath) - if err != nil { - return fmt.Errorf("cannot open file %s: %w", req.SourcePath, err) - } - defer file.Close() - - stat, err := file.Stat() - if err != nil { - return fmt.Errorf("cannot stat file: %w", err) - } - - fileSize := stat.Size() - if fileSize == 0 { - return fmt.Errorf("file is empty: %s", req.ObjectKey) - } - - // Use Single-Part if file is smaller than 5GB (or your defined limit) - if fileSize < 5*common.GB { - bk.Logger().DebugContext(ctx, "performing single-part upload", "size", fileSize) - return UploadSingle(ctx, bk, bk.Logger(), req, true) - } - bk.Logger().DebugContext(ctx, "performing multipart upload", "size", fileSize) - return MultipartUpload(ctx, bk, req, file, showProgress) -} - -// UploadSingleFile handles single-part upload with progress -func UploadSingleFile(ctx context.Context, bk transfer.Uploader, req common.FileUploadRequestObject, showProgress bool) error { - logger := bk.Logger() - file, err := os.Open(req.SourcePath) - if err != nil { - return err - } - defer file.Close() - - fi, _ := file.Stat() - if fi.Size() > common.FileSizeLimit { - return fmt.Errorf("file exceeds 5GB limit") - } - - // Generate request with progress bar - var p *mpb.Progress - if showProgress { - p = mpb.New(mpb.WithOutput(os.Stdout)) - } - - // Populate PresignedURL and GUID if missing - fur, err := generateUploadRequest(ctx, bk, req, file, p) - if err != nil { - return err - } - - if fi.Size() < int64(common.FileSizeLimit) { - return UploadSingle(ctx, bk, logger, fur, true) - } - return MultipartUpload(ctx, bk, fur, file, showProgress) -} - -// RegisterAndUploadFile orchestrates registration with Indexd and uploading via Fence. 
-// It handles checking for existing records, upsert logic, checking if file is already downloadable, and performing the upload. -func RegisterAndUploadFile(ctx context.Context, dc drs.Client, bk transfer.Uploader, drsObject *drs.DRSObject, filePath string, bucketName string, upsert bool) (*drs.DRSObject, error) { - logger := bk.Logger() - res, err := dc.RegisterRecord(ctx, drsObject) - if err != nil { - if strings.Contains(err.Error(), "already exists") { - if !upsert { - logger.DebugContext(ctx, "record already exists", "id", drsObject.Id) - } else { - logger.DebugContext(ctx, "record already exists, recreating", "id", drsObject.Id) - err = dc.DeleteRecord(ctx, drsObject.Id) - if err != nil { - return nil, fmt.Errorf("failed to delete existing record: %w", err) - } - res, err = dc.RegisterRecord(ctx, drsObject) - if err != nil { - return nil, fmt.Errorf("failed to re-register record: %w", err) - } - } - } else { - return nil, fmt.Errorf("error registering record: %w", err) - } - } - - // 2. Check if file is downloadable - downloadable, err := isFileDownloadable(ctx, dc, drsObject.Id) - if err != nil { - return nil, fmt.Errorf("failed to check if file is downloadable: %w", err) - } - - if downloadable { - logger.DebugContext(ctx, "file already downloadable, skipping upload", "id", drsObject.Id) - if res != nil { - return res, nil - } - return dc.GetObject(ctx, drsObject.Id) - } - - // 3. 
Upload File - uploadFilename := filepath.Base(filePath) - if res != nil && len(res.AccessMethods) > 0 { - for _, am := range res.AccessMethods { - if am.Type != "s3" && am.Type != "gs" { - continue - } - if am.AccessUrl.Url == "" { - continue - } - parts := strings.Split(am.AccessUrl.Url, "/") - if len(parts) > 0 { - candidate := parts[len(parts)-1] - if candidate != "" { - uploadFilename = candidate - } - } - break - } - } - - req := common.FileUploadRequestObject{ - SourcePath: filePath, - ObjectKey: uploadFilename, - GUID: drsObject.Id, - Bucket: bucketName, - } - - err = Upload(ctx, bk, req, false) - if err != nil { - return nil, fmt.Errorf("failed to upload file: %w", err) - } - - if res != nil { - return res, nil - } - return dc.GetObject(ctx, drsObject.Id) -} - -func isFileDownloadable(ctx context.Context, dc drs.Client, did string) (bool, error) { - obj, err := dc.GetObject(ctx, did) - if err != nil { - return false, err - } - - if len(obj.AccessMethods) == 0 { - return false, nil - } - - accessType := obj.AccessMethods[0].Type - res, err := dc.GetDownloadURL(ctx, did, accessType) - if err != nil { - return false, nil - } - - if res.Url == "" { - return false, nil - } - - err = common.CanDownloadFile(res.Url) - return err == nil, nil -} diff --git a/upload/utils.go b/upload/utils.go deleted file mode 100644 index ca985f6..0000000 --- a/upload/utils.go +++ /dev/null @@ -1,185 +0,0 @@ -package upload - -import ( - "encoding/json" - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/calypr/data-client/common" - "github.com/calypr/data-client/logs" - "github.com/calypr/data-client/transfer" -) - -func SeparateSingleAndMultipartUploads(bk transfer.Uploader, objects []common.FileUploadRequestObject) ([]common.FileUploadRequestObject, []common.FileUploadRequestObject) { - fileSizeLimit := common.FileSizeLimit - logger := bk.Logger() - - var singlepartObjects []common.FileUploadRequestObject - var multipartObjects []common.FileUploadRequestObject 
- - for _, object := range objects { - fi, err := os.Stat(object.SourcePath) - if err != nil { - if os.IsNotExist(err) { - logger.Error("The file you specified does not exist locally", "path", object.SourcePath) - } else { - logger.Error("File stat error", "path", object.SourcePath, "error", err) - } - continue - } - if fi.IsDir() { - continue - } - if fi.Size() > common.MultipartFileSizeLimit { - logger.Warn("File exceeds max limit", "name", fi.Name(), "size", fi.Size()) - continue - } - if fi.Size() > int64(fileSizeLimit) { - multipartObjects = append(multipartObjects, object) - } else { - singlepartObjects = append(singlepartObjects, object) - } - } - return singlepartObjects, multipartObjects -} - -// ProcessFilename returns an FileInfo object which has the information about the path and name to be used for upload of a file -func ProcessFilename(logger *logs.Gen3Logger, uploadPath string, filePath string, objectId string, includeSubDirName bool, includeMetadata bool) (common.FileUploadRequestObject, error) { - var err error - filePath, err = common.GetAbsolutePath(filePath) - if err != nil { - return common.FileUploadRequestObject{}, err - } - - filename := filepath.Base(filePath) // Default to base filename - - var metadata common.FileMetadata - if includeSubDirName { - absUploadPath, err := common.GetAbsolutePath(uploadPath) - if err != nil { - return common.FileUploadRequestObject{}, err - } - - // Ensure absUploadPath is a directory path for relative calculation - // Trim the optional wildcard if present - uploadDir := strings.TrimSuffix(absUploadPath, common.PathSeparator+"*") - fileInfo, err := os.Stat(uploadDir) - if err != nil { - return common.FileUploadRequestObject{}, err - } - if fileInfo.IsDir() { - // Calculate the path of the file relative to the upload directory - relPath, err := filepath.Rel(uploadDir, filePath) - if err != nil { - return common.FileUploadRequestObject{}, err - } - filename = relPath - } - } - - if includeMetadata { - // The 
metadata path is the file name plus '_metadata.json' - metadataFilePath := strings.TrimSuffix(filePath, filepath.Ext(filePath)) + "_metadata.json" - var metadataFileBytes []byte - if _, err := os.Stat(metadataFilePath); err == nil { - metadataFileBytes, err = os.ReadFile(metadataFilePath) - if err != nil { - return common.FileUploadRequestObject{}, errors.New("Error reading metadata file " + metadataFilePath + ": " + err.Error()) - } - err := json.Unmarshal(metadataFileBytes, &metadata) - if err != nil { - return common.FileUploadRequestObject{}, errors.New("Error parsing metadata file " + metadataFilePath + ": " + err.Error()) - } - } else { - // No metadata file was found for this file -- proceed, but warn the user. - logger.Printf("WARNING: File metadata is enabled, but could not find the metadata file %v for file %v. Execute `data-client upload --help` for more info on file metadata.\n", metadataFilePath, filePath) - } - } - return common.FileUploadRequestObject{SourcePath: filePath, ObjectKey: filename, FileMetadata: metadata, GUID: objectId}, nil -} - -// FormatSize helps to parse a int64 size into string -func FormatSize(size int64) string { - var unitSize int64 - switch { - case size >= common.TB: - unitSize = common.TB - case size >= common.GB: - unitSize = common.GB - case size >= common.MB: - unitSize = common.MB - case size >= common.KB: - unitSize = common.KB - default: - unitSize = common.B - } - - var unitMap = map[int64]string{ - common.B: "B", - common.KB: "KB", - common.MB: "MB", - common.GB: "GB", - common.TB: "TB", - } - - return fmt.Sprintf("%.1f"+unitMap[unitSize], float64(size)/float64(unitSize)) -} - -// OptimalChunkSize returns a recommended chunk size for the given fileSize (in bytes). 
-// - <= 100 MB: return fileSize (use single PUT) -// - >100 MB and <= 1 GB: 10 MB -// - >1 GB and <= 10 GB: scaled between 25 MB and 128 MB -// - >10 GB and <= 100 GB: 256 MB -// - >100 GB: scaled between 512 MB and 1024 MB (1 GB) -// See: -// https://cloud.switch.ch/-/documentation/s3/multipart-uploads/#best-practices -func OptimalChunkSize(fileSize int64) int64 { - if fileSize <= 0 { - return 1 * common.MB - } - - switch { - case fileSize <= 100*common.MB: - // Single PUT: return whole file size - return fileSize - - case fileSize <= 1*common.GB: - return 10 * common.MB - - case fileSize <= 10*common.GB: - return scaleLinear(fileSize, 1*common.GB, 10*common.GB, 25*common.MB, 128*common.MB) - - case fileSize <= 100*common.GB: - return 256 * common.MB - - default: - // Scale for very large files; cap scaling at 1 TB for ratio purposes - return scaleLinear(fileSize, 100*common.GB, 1000*common.GB, 512*common.MB, 1024*common.MB) - } -} - -// scaleLinear scales size in [minSize, maxSize] to chunk in [minChunk, maxChunk] (linear). -// Result is rounded down to nearest MB and clamped to [minChunk, maxChunk]. 
-func scaleLinear(size, minSize, maxSize, minChunk, maxChunk int64) int64 { - if size <= minSize { - return minChunk - } - if size >= maxSize { - return maxChunk - } - ratio := float64(size-minSize) / float64(maxSize-minSize) - chunkF := float64(minChunk) + ratio*(float64(maxChunk-minChunk)) - // round down to nearest MB - mb := int64(common.MB) - chunk := int64(chunkF) / mb * mb - if chunk < minChunk { - return minChunk - } - if chunk > maxChunk { - return maxChunk - } - return chunk -} diff --git a/upload/utils_test.go b/upload/utils_test.go deleted file mode 100644 index 6abe45e..0000000 --- a/upload/utils_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package upload - -import ( - "testing" - - "github.com/calypr/data-client/common" -) - -func TestOptimalChunkSize(t *testing.T) { - tests := []struct { - name string - fileSize int64 - wantChunkSize int64 - wantParts int64 - }{ - { - name: "0 bytes", - fileSize: 0, - wantChunkSize: 1 * common.MB, - wantParts: 0, - }, - { - name: "1MB", - fileSize: 1 * common.MB, - wantChunkSize: 1 * common.MB, - wantParts: 1, - }, - { - name: "100MB", - fileSize: 100 * common.MB, - wantChunkSize: 100 * common.MB, - wantParts: 1, - }, - { - name: "100MB+1B", - fileSize: 100*common.MB + 1, - wantChunkSize: 10 * common.MB, - wantParts: 11, - }, - { - name: "500MB", - fileSize: 500 * common.MB, - wantChunkSize: 10 * common.MB, - wantParts: 50, - }, - { - name: "1GB", - fileSize: 1 * common.GB, - wantChunkSize: 10 * common.MB, - wantParts: 103, - }, - { - name: "1GB+1B", - fileSize: 1*common.GB + 1, - wantChunkSize: 25 * common.MB, - wantParts: 41, - }, - { - name: "5GB", - fileSize: 5 * common.GB, - wantChunkSize: 70 * common.MB, - wantParts: 74, - }, - { - name: "10GB", - fileSize: 10 * common.GB, - wantChunkSize: 128 * common.MB, - wantParts: 80, - }, - { - name: "10GB+1B", - fileSize: 10*common.GB + 1, - wantChunkSize: 256 * common.MB, - wantParts: 41, - }, - { - name: "50GB", - fileSize: 50 * common.GB, - wantChunkSize: 256 * common.MB, 
- wantParts: 200, - }, - { - name: "100GB", - fileSize: 100 * common.GB, - wantChunkSize: 256 * common.MB, - wantParts: 400, - }, - { - name: "100GB+1B", - fileSize: 100*common.GB + 1, - wantChunkSize: 512 * common.MB, - wantParts: 201, - }, - { - name: "500GB", - fileSize: 500 * common.GB, - wantChunkSize: 739 * common.MB, - wantParts: 693, - }, - { - name: "1TB", - fileSize: 1 * common.TB, - wantChunkSize: 1 * common.GB, - wantParts: 1024, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - chunkSize := OptimalChunkSize(tt.fileSize) - if chunkSize != tt.wantChunkSize { - t.Fatalf("chunk size = %d, want %d", chunkSize, tt.wantChunkSize) - } - - parts := int64(0) - if tt.fileSize > 0 && chunkSize > 0 { - parts = (tt.fileSize + chunkSize - 1) / chunkSize - } - if parts != tt.wantParts { - t.Fatalf("parts = %d, want %d", parts, tt.wantParts) - } - }) - } -} From 4008665e72860014f564162fa3e565a81a510059 Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Wed, 8 Apr 2026 09:55:41 -0700 Subject: [PATCH 12/13] fix test --- tests/download-multiple_test.go | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/tests/download-multiple_test.go b/tests/download-multiple_test.go index d9ea0c4..ccfc3e5 100644 --- a/tests/download-multiple_test.go +++ b/tests/download-multiple_test.go @@ -22,11 +22,6 @@ func Test_askGen3ForFileInfo_withShepherd(t *testing.T) { mockIndexd := mocks.NewMockDrsClient(mockCtrl) - // New behavior: tries GetObjectByHash first - mockIndexd.EXPECT(). - GetObjectByHash(gomock.Any(), gomock.Any()). - Return(nil, fmt.Errorf("not a hash")) - mockIndexd.EXPECT(). GetObject(gomock.Any(), testGUID). Return(&drs.DRSObject{Id: testGUID, Name: testFileName, Size: testFileSize}, nil) @@ -58,14 +53,10 @@ func Test_askGen3ForFileInfo_withShepherd_shepherdError(t *testing.T) { mockIndexd := mocks.NewMockDrsClient(mockCtrl) - // New behavior: tries GetObjectByHash first - mockIndexd.EXPECT(). 
- GetObjectByHash(gomock.Any(), gomock.Any()). - Return(nil, fmt.Errorf("not a hash")) - mockIndexd.EXPECT(). GetObject(gomock.Any(), testGUID). - Return(nil, fmt.Errorf("Indexd error")) + Return(nil, fmt.Errorf("Indexd error")). + Times(2) logger := sylogs.NewGen3Logger(nil, "", "test") @@ -100,11 +91,6 @@ func Test_askGen3ForFileInfo_noShepherd(t *testing.T) { mockIndexd := mocks.NewMockDrsClient(mockCtrl) - // New behavior: tries GetObjectByHash first - mockIndexd.EXPECT(). - GetObjectByHash(gomock.Any(), gomock.Any()). - Return(nil, fmt.Errorf("not a hash")) - mockIndexd.EXPECT(). GetObject(gomock.Any(), testGUID). Return(&drs.DRSObject{Id: testGUID, Name: testFileName, Size: testFileSize}, nil) From 3c7e4de91829722cc3b704a4eb4012504f83a6d8 Mon Sep 17 00:00:00 2001 From: matthewpeterkort Date: Thu, 9 Apr 2026 11:51:46 -0700 Subject: [PATCH 13/13] demystify collaborators command --- cmd/collaborator.go | 119 ++++++++++++++++++++++---------------------- 1 file changed, 59 insertions(+), 60 deletions(-) diff --git a/cmd/collaborator.go b/cmd/collaborator.go index c307423..c2b2bf9 100644 --- a/cmd/collaborator.go +++ b/cmd/collaborator.go @@ -14,26 +14,13 @@ import ( "gopkg.in/yaml.v3" ) -var collaboratorCmd = &cobra.Command{ - Use: "collaborator", +var collaboratorsCmd = &cobra.Command{ + Use: "collaborators", Short: "Manage collaborators and access requests", } var emailRegex = regexp.MustCompile(`^[a-z0-9._%+\-]+@[a-z0-9.\-]+\.[a-z]{2,}$`) -func validateProjectAndUser(projectID, username string) error { - if !emailRegex.MatchString(strings.ToLower(username)) { - return fmt.Errorf("invalid username '%s': must be a valid email address", username) - } - - parts := strings.Split(projectID, "-") - if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - return fmt.Errorf("invalid project_id '%s': must be in the form 'program-project'", projectID) - } - - return nil -} - func printRequest(r requestor.Request) { b, err := yaml.Marshal(r) if err != nil { @@ -43,17 
+30,17 @@ func printRequest(r requestor.Request) { fmt.Println(string(b)) } -func getRequestorClient() (requestor.RequestorInterface, func()) { - if profile == "" { - fmt.Println("Error: profile is required. Please specify a profile using the --profile flag.") +func getRequestorClient(localProfile string) (requestor.RequestorInterface, func()) { + if localProfile == "" { + fmt.Println("Error: profile is required.") os.Exit(1) } // Initialize logger - logger, logCloser := logs.New(profile) + logger, logCloser := logs.New(localProfile) // Initialize base Gen3 interface and build requestor client from it. - g3i, err := g3client.NewGen3Interface(profile, logger) + g3i, err := g3client.NewGen3Interface(localProfile, logger) if err != nil { fmt.Printf("Error accessing Gen3: %v\n", err) logCloser() @@ -64,14 +51,16 @@ func getRequestorClient() (requestor.RequestorInterface, func()) { } var collaboratorListCmd = &cobra.Command{ - Use: "ls", + Use: "ls [profile]", Short: "List requests", + Args: cobra.ExactArgs(1), Run: func(cmd *cobra.Command, args []string) { + p := args[0] mine, _ := cmd.Flags().GetBool("mine") active, _ := cmd.Flags().GetBool("active") username, _ := cmd.Flags().GetString("username") - client, closer := getRequestorClient() + client, closer := getRequestorClient(p) defer closer() requests, err := client.ListRequests(cmd.Context(), mine, active, username) @@ -87,10 +76,12 @@ var collaboratorListCmd = &cobra.Command{ } var collaboratorPendingCmd = &cobra.Command{ - Use: "pending", + Use: "pending [profile]", Short: "List pending requests", + Args: cobra.ExactArgs(1), Run: func(cmd *cobra.Command, args []string) { - client, closer := getRequestorClient() + p := args[0] + client, closer := getRequestorClient(p) defer closer() // Fetch all requests @@ -110,22 +101,26 @@ var collaboratorPendingCmd = &cobra.Command{ } var collaboratorAddUserCmd = &cobra.Command{ - Use: "add [project_id] [username]", + Use: "add [profile] [email] [program] [project]", Short: 
"Add a user to a project", - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.ExactArgs(2)(cmd, args); err != nil { - return err - } - return validateProjectAndUser(args[0], args[1]) - }, + Args: cobra.ExactArgs(4), Run: func(cmd *cobra.Command, args []string) { - projectID := args[0] + p := args[0] username := args[1] + program := args[2] + project := args[3] + projectID := fmt.Sprintf("%s-%s", program, project) + + if !emailRegex.MatchString(strings.ToLower(username)) { + fmt.Printf("Error: invalid email address '%s'\n", username) + os.Exit(1) + } + write, _ := cmd.Flags().GetBool("write") guppy, _ := cmd.Flags().GetBool("guppy") approve, _ := cmd.Flags().GetBool("approve") - client, closer := getRequestorClient() + client, closer := getRequestorClient(p) defer closer() reqs, err := client.AddUser(cmd.Context(), projectID, username, write, guppy) @@ -156,20 +151,24 @@ var collaboratorAddUserCmd = &cobra.Command{ } var collaboratorRemoveUserCmd = &cobra.Command{ - Use: "rm [project_id] [username]", + Use: "rm [profile] [email] [program] [project]", Short: "Remove a user from a project", - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.ExactArgs(2)(cmd, args); err != nil { - return err - } - return validateProjectAndUser(args[0], args[1]) - }, + Args: cobra.ExactArgs(4), Run: func(cmd *cobra.Command, args []string) { - projectID := args[0] + p := args[0] username := args[1] + program := args[2] + project := args[3] + projectID := fmt.Sprintf("%s-%s", program, project) + + if !emailRegex.MatchString(strings.ToLower(username)) { + fmt.Printf("Error: invalid email address '%s'\n", username) + os.Exit(1) + } + approve, _ := cmd.Flags().GetBool("approve") - client, closer := getRequestorClient() + client, closer := getRequestorClient(p) defer closer() reqs, err := client.RemoveUser(cmd.Context(), projectID, username) @@ -199,13 +198,14 @@ var collaboratorRemoveUserCmd = &cobra.Command{ } var collaboratorApproveCmd = 
&cobra.Command{ - Use: "approve [request_id]", + Use: "approve [profile] [request_id]", Short: "Approve a request (sign it)", - Args: cobra.ExactArgs(1), + Args: cobra.ExactArgs(2), Run: func(cmd *cobra.Command, args []string) { - requestID := args[0] + p := args[0] + requestID := args[1] - client, closer := getRequestorClient() + client, closer := getRequestorClient(p) defer closer() req, err := client.UpdateRequest(cmd.Context(), requestID, "SIGNED") @@ -220,15 +220,16 @@ var collaboratorApproveCmd = &cobra.Command{ } var collaboratorUpdateCmd = &cobra.Command{ - Use: "update [request_id] [status]", + Use: "update [profile] [request_id] [status]", Short: "Update a request status", Hidden: true, - Args: cobra.ExactArgs(2), + Args: cobra.ExactArgs(3), Run: func(cmd *cobra.Command, args []string) { - requestID := args[0] - status := args[1] + p := args[0] + requestID := args[1] + status := args[2] - client, closer := getRequestorClient() + client, closer := getRequestorClient(p) defer closer() req, err := client.UpdateRequest(cmd.Context(), requestID, status) @@ -242,13 +243,13 @@ var collaboratorUpdateCmd = &cobra.Command{ } func init() { - RootCmd.AddCommand(collaboratorCmd) - collaboratorCmd.AddCommand(collaboratorListCmd) - collaboratorCmd.AddCommand(collaboratorPendingCmd) - collaboratorCmd.AddCommand(collaboratorAddUserCmd) - collaboratorCmd.AddCommand(collaboratorRemoveUserCmd) - collaboratorCmd.AddCommand(collaboratorApproveCmd) - collaboratorCmd.AddCommand(collaboratorUpdateCmd) + RootCmd.AddCommand(collaboratorsCmd) + collaboratorsCmd.AddCommand(collaboratorListCmd) + collaboratorsCmd.AddCommand(collaboratorPendingCmd) + collaboratorsCmd.AddCommand(collaboratorAddUserCmd) + collaboratorsCmd.AddCommand(collaboratorRemoveUserCmd) + collaboratorsCmd.AddCommand(collaboratorApproveCmd) + collaboratorsCmd.AddCommand(collaboratorUpdateCmd) collaboratorListCmd.Flags().Bool("mine", false, "List my requests") collaboratorListCmd.Flags().Bool("active", false, "List 
only active requests") @@ -259,6 +260,4 @@ func init() { collaboratorAddUserCmd.Flags().BoolP("approve", "a", false, "Automatically approve the requests") collaboratorRemoveUserCmd.Flags().BoolP("approve", "a", false, "Automatically approve the revoke requests") - - collaboratorCmd.PersistentFlags().StringVar(&profile, "profile", "", "Specify profile to use") }