Uplink C bindings part 4 (#2260)

This commit is contained in:
Bryan White 2019-06-21 20:44:00 +02:00 committed by GitHub
parent 043d603cbe
commit 9304817927
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 323 additions and 42 deletions

View File

@ -14,7 +14,7 @@ func newBucketInfo(bucket *storj.Bucket) C.BucketInfo {
return C.BucketInfo{
name: C.CString(bucket.Name),
created: C.int64_t(bucket.Created.Unix()),
path_cipher: C.uint8_t(bucket.PathCipher),
path_cipher: cipherToCCipherSuite(bucket.PathCipher),
segment_size: C.uint64_t(bucket.SegmentsSize),
encryption_parameters: convertEncryptionParameters(&bucket.EncryptionParameters),
@ -22,10 +22,24 @@ func newBucketInfo(bucket *storj.Bucket) C.BucketInfo {
}
}
// newObjectInfo returns a C object struct converted from a go object struct.
// All char* fields are allocated with C.CString and ownership passes to the
// caller; release them with free_object_info.
func newObjectInfo(object *storj.Object) C.ObjectInfo {
	return C.ObjectInfo{
		version:      C.uint32_t(object.Version),
		bucket:       newBucketInfo(&object.Bucket),
		path:         C.CString(object.Path),
		is_prefix:    C.bool(object.IsPrefix),
		content_type: C.CString(object.ContentType),
		// Timestamps cross the C boundary as Unix seconds.
		created:  C.int64_t(object.Created.Unix()),
		modified: C.int64_t(object.Modified.Unix()),
		expires:  C.int64_t(object.Expires.Unix()),
	}
}
// convertEncryptionParameters converts Go EncryptionParameters to C.
func convertEncryptionParameters(goParams *storj.EncryptionParameters) C.EncryptionParameters {
return C.EncryptionParameters{
cipher_suite: C.uint8_t(goParams.CipherSuite),
cipher_suite: toCCipherSuite(goParams.CipherSuite),
block_size: C.int32_t(goParams.BlockSize),
}
}
@ -33,7 +47,7 @@ func convertEncryptionParameters(goParams *storj.EncryptionParameters) C.Encrypt
// convertRedundancyScheme converts Go RedundancyScheme to C.
func convertRedundancyScheme(scheme *storj.RedundancyScheme) C.RedundancyScheme {
return C.RedundancyScheme{
algorithm: C.uint8_t(scheme.Algorithm),
algorithm: C.RedundancyAlgorithm(scheme.Algorithm),
share_size: C.int32_t(scheme.ShareSize),
required_shares: C.int16_t(scheme.RequiredShares),
repair_shares: C.int16_t(scheme.RepairShares),
@ -41,3 +55,13 @@ func convertRedundancyScheme(scheme *storj.RedundancyScheme) C.RedundancyScheme
total_shares: C.int16_t(scheme.TotalShares),
}
}
// cipherToCCipherSuite converts a go cipher to its respective C cipher suite.
// The Cipher -> CipherSuite mapping itself is done on the Go side by
// storj.Cipher.ToCipherSuite; this only re-types the result for C.
func cipherToCCipherSuite(cipher storj.Cipher) C.CipherSuite {
	return C.CipherSuite(cipher.ToCipherSuite())
}
// toCCipherSuite converts a go cipher suite to its respective C cipher suite.
// (Comment previously named cipherToCCipherSuite by copy-paste mistake.)
func toCCipherSuite(cipherSuite storj.CipherSuite) C.CipherSuite {
	return C.CipherSuite(cipherSuite)
}

View File

@ -12,8 +12,81 @@ import (
"unsafe"
"storj.io/storj/lib/uplink"
"storj.io/storj/pkg/storj"
)
// Object is a scoped uplink.Object
// The embedded scope carries the ctx/cancel pair used by the exported
// functions below (see open_object / close_object).
type Object struct {
	scope
	*uplink.Object
}
// open_object returns an Object handle, if authorized.
// On failure it writes a C-allocated error string into cerr and returns a
// zero ObjectRef. The returned handle must be released via close_object.
//export open_object
func open_object(bucketHandle C.BucketRef, objectPath *C.char, cerr **C.char) C.ObjectRef {
	bucket, ok := universe.Get(bucketHandle._handle).(*Bucket)
	if !ok {
		*cerr = C.CString("invalid bucket")
		return C.ObjectRef{}
	}

	// Child scope ties the object's context/cancellation to the bucket's.
	scope := bucket.scope.child()

	object, err := bucket.OpenObject(scope.ctx, C.GoString(objectPath))
	if err != nil {
		*cerr = C.CString(err.Error())
		return C.ObjectRef{}
	}

	return C.ObjectRef{universe.Add(&Object{scope, object})}
}
// close_object closes the object.
// The handle is removed from the registry unconditionally, so it becomes
// invalid even when Close reports an error (which is surfaced via cerr).
//export close_object
func close_object(objectHandle C.ObjectRef, cerr **C.char) {
	object, ok := universe.Get(objectHandle._handle).(*Object)
	if !ok {
		*cerr = C.CString("invalid object")
		return
	}
	universe.Del(objectHandle._handle)
	// Cancel the scope's context after Close has run.
	defer object.cancel()

	if err := object.Close(); err != nil {
		*cerr = C.CString(err.Error())
		return
	}
}
// get_object_meta returns the object meta which contains metadata about a specific Object.
// String fields and the checksum buffer are C-allocated; the caller owns them
// and must release them with free_object_meta.
//export get_object_meta
func get_object_meta(cObject C.ObjectRef, cErr **C.char) C.ObjectMeta {
	object, ok := universe.Get(cObject._handle).(*Object)
	if !ok {
		*cErr = C.CString("invalid object")
		return C.ObjectMeta{}
	}

	// Copy the checksum into C-allocated memory so it outlives this call.
	checksumLen := len(object.Meta.Checksum)
	checksumPtr := C.malloc(C.size_t(checksumLen))
	// Reinterpret the raw C buffer as a large Go array so copy can write into
	// it; only the first checksumLen bytes are valid.
	// NOTE(review): C.malloc(0) when the checksum is empty may return a
	// non-dereferenceable pointer — confirm C callers check checksum_length.
	checksum := (*[1 << 30]uint8)(checksumPtr)
	copy((*checksum)[:], object.Meta.Checksum)

	return C.ObjectMeta{
		bucket:          C.CString(object.Meta.Bucket),
		path:            C.CString(object.Meta.Path),
		is_prefix:       C.bool(object.Meta.IsPrefix),
		content_type:    C.CString(object.Meta.ContentType),
		created:         C.int64_t(object.Meta.Created.Unix()),
		modified:        C.int64_t(object.Meta.Modified.Unix()),
		expires:         C.int64_t(object.Meta.Expires.Unix()),
		size:            C.uint64_t(object.Meta.Size),
		checksum_bytes:  (*C.uint8_t)(checksumPtr),
		checksum_length: C.uint64_t(checksumLen),
	}
}
type Upload struct {
scope
wc io.WriteCloser // 🚽
@ -54,21 +127,20 @@ func upload(cBucket C.BucketRef, path *C.char, cOpts *C.UploadOptions, cErr **C.
}
//export upload_write
func upload_write(uploader C.UploaderRef, bytes *C.uint8_t, length C.int, cErr **C.char) (writeLength C.int) {
func upload_write(uploader C.UploaderRef, bytes *C.uint8_t, length C.size_t, cErr **C.char) (writeLength C.size_t) {
upload, ok := universe.Get(uploader._handle).(*Upload)
if !ok {
*cErr = C.CString("invalid uploader")
return C.int(0)
return C.size_t(0)
}
buf := (*[1 << 30]byte)(unsafe.Pointer(bytes))[:length]
n, err := upload.wc.Write(buf)
if err == io.EOF {
return C.EOF
if err != nil {
*cErr = C.CString(err.Error())
}
return C.int(n)
return C.size_t(n)
}
//export upload_commit
@ -89,6 +161,54 @@ func upload_commit(uploader C.UploaderRef, cErr **C.char) {
}
}
// list_objects lists objects a user is authorized to see.
// cListOpts may be NULL, in which case the library's default listing options
// are used. The returned list (strings and items array) is C-allocated and
// must be released with free_list_objects.
//export list_objects
func list_objects(bucketRef C.BucketRef, cListOpts *C.ListOptions, cErr **C.char) (cObjList C.ObjectList) {
	bucket, ok := universe.Get(bucketRef._handle).(*Bucket)
	if !ok {
		*cErr = C.CString("invalid bucket")
		return cObjList
	}

	scope := bucket.scope.child()

	var opts *uplink.ListOptions
	if unsafe.Pointer(cListOpts) != nil {
		opts = &uplink.ListOptions{
			// Fix: Prefix was previously (incorrectly) populated from
			// cListOpts.cursor, ignoring the caller's prefix field.
			Prefix:    C.GoString(cListOpts.prefix),
			Cursor:    C.GoString(cListOpts.cursor),
			Delimiter: rune(cListOpts.delimiter),
			Recursive: bool(cListOpts.recursive),
			Direction: storj.ListDirection(cListOpts.direction),
			Limit:     int(cListOpts.limit),
		}
	}

	objectList, err := bucket.ListObjects(scope.ctx, opts)
	if err != nil {
		*cErr = C.CString(err.Error())
		return cObjList
	}

	// Copy the items into a single C-allocated array of ObjectInfo.
	objListLen := len(objectList.Items)
	objectSize := int(C.sizeof_ObjectInfo)
	ptr := C.malloc(C.size_t(objListLen * objectSize))
	cObjectsPtr := (*[1 << 30]C.ObjectInfo)(ptr)
	for i, object := range objectList.Items {
		// Shadow the loop variable: newObjectInfo takes its address, and the
		// range variable is reused across iterations (pre-Go 1.22 semantics).
		object := object
		cObjectsPtr[i] = newObjectInfo(&object)
	}

	return C.ObjectList{
		bucket: C.CString(objectList.Bucket),
		prefix: C.CString(objectList.Prefix),
		more:   C.bool(objectList.More),
		items:  (*C.ObjectInfo)(unsafe.Pointer(cObjectsPtr)),
		length: C.int32_t(objListLen),
	}
}
type Download struct {
scope
rc interface {
@ -123,21 +243,20 @@ func download(bucketRef C.BucketRef, path *C.char, cErr **C.char) (downloader C.
}
//export download_read
func download_read(downloader C.DownloaderRef, bytes *C.uint8_t, length C.int, cErr **C.char) (readLength C.int) {
func download_read(downloader C.DownloaderRef, bytes *C.uint8_t, length C.size_t, cErr **C.char) C.size_t {
download, ok := universe.Get(downloader._handle).(*Download)
if !ok {
*cErr = C.CString("invalid downloader")
return C.int(0)
return C.size_t(0)
}
buf := (*[1 << 30]byte)(unsafe.Pointer(bytes))[:length]
n, err := download.rc.Read(buf)
if err == io.EOF {
return C.EOF
if err != nil && err != io.EOF {
*cErr = C.CString(err.Error())
}
return C.int(n)
return C.size_t(n)
}
//export download_close
@ -162,3 +281,48 @@ func free_upload_opts(uploadOpts *C.UploadOptions) {
C.free(unsafe.Pointer(uploadOpts.content_type))
uploadOpts.content_type = nil
}
// free_object_meta frees the object meta
// Strings were allocated with C.CString and checksum_bytes with C.malloc
// (see get_object_meta); each pointer is nil-ed after freeing so a repeated
// call is harmless.
//export free_object_meta
func free_object_meta(objectMeta *C.ObjectMeta) {
	C.free(unsafe.Pointer(objectMeta.bucket))
	objectMeta.bucket = nil

	C.free(unsafe.Pointer(objectMeta.path))
	objectMeta.path = nil

	C.free(unsafe.Pointer(objectMeta.content_type))
	objectMeta.content_type = nil

	C.free(unsafe.Pointer(objectMeta.checksum_bytes))
	objectMeta.checksum_bytes = nil
}
// free_object_info frees the object info
// It releases the C strings owned by the ObjectInfo, including those inside
// the embedded BucketInfo, and nils the freed pointers in place.
//export free_object_info
func free_object_info(objectInfo *C.ObjectInfo) {
	// Fix: free the embedded bucket info in place. The previous version freed
	// a local copy, so the nil assignments made by free_bucket_info were lost
	// and *objectInfo kept dangling bucket pointers.
	free_bucket_info(&objectInfo.bucket)

	C.free(unsafe.Pointer(objectInfo.path))
	objectInfo.path = nil

	C.free(unsafe.Pointer(objectInfo.content_type))
	objectInfo.content_type = nil
}
// free_list_objects frees the list of objects
// It releases the list's strings, every item's allocations, and the
// C-allocated items array itself.
//export free_list_objects
func free_list_objects(objectList *C.ObjectList) {
	C.free(unsafe.Pointer(objectList.bucket))
	objectList.bucket = nil

	C.free(unsafe.Pointer(objectList.prefix))
	objectList.prefix = nil

	items := (*[1 << 30]C.ObjectInfo)(unsafe.Pointer(objectList.items))[:objectList.length]
	for i := range items {
		// Fix: free each element in place. The previous version ranged over
		// copies, so free_object_info's nil assignments were discarded.
		free_object_info(&items[i])
	}

	// Fix: the items array allocated in list_objects was never freed (leak).
	C.free(unsafe.Pointer(objectList.items))
	objectList.items = nil
	objectList.length = 0
}

View File

@ -8,12 +8,12 @@
BucketConfig test_bucket_config() {
BucketConfig config = {};
config.path_cipher = 0;
config.path_cipher = STORJ_ENC_AESGCM;
config.encryption_parameters.cipher_suite = 1; // TODO: make a named const
config.encryption_parameters.cipher_suite = STORJ_ENC_AESGCM;
config.encryption_parameters.block_size = 2048;
config.redundancy_scheme.algorithm = 1; // TODO: make a named const
config.redundancy_scheme.algorithm = STORJ_REED_SOLOMON;
config.redundancy_scheme.share_size = 1024;
config.redundancy_scheme.required_shares = 2;
config.redundancy_scheme.repair_shares = 4;

View File

@ -57,51 +57,89 @@ void handle_project(ProjectRef project) {
UploaderRef uploader = upload(bucket, object_paths[i], &opts, err);
require_noerror(*err);
size_t uploaded = 0;
while (uploaded < data_len) {
int to_write_len = (data_len - uploaded > 256) ? 256 : data_len - uploaded;
int write_len = upload_write(uploader, (uint8_t *)data+uploaded, to_write_len, err);
size_t uploaded_total = 0;
while (uploaded_total < data_len) {
size_t size_to_write = (data_len - uploaded_total > 256) ? 256 : data_len - uploaded_total;
size_t write_size = upload_write(uploader, (uint8_t *)data+uploaded_total, size_to_write, err);
require_noerror(*err);
if (write_len == 0) {
if (write_size == 0) {
break;
}
uploaded += write_len;
uploaded_total += write_size;
}
upload_commit(uploader, err);
require_noerror(*err);
}
{ // object meta
ObjectRef object_ref = open_object(bucket, object_paths[i], err);
require_noerror(*err);
ObjectMeta object_meta = get_object_meta(object_ref, err);
require_noerror(*err);
require(strcmp(object_paths[i], object_meta.path) == 0);
require(data_len == object_meta.size);
require(future_expiration_timestamp == object_meta.expires);
require((time(NULL) - object_meta.created) <= 2);
require((time(NULL) - object_meta.modified) <= 2);
require(object_meta.checksum_bytes != NULL);
// TODO: checksum is an empty slice in go; is that expected?
// require(object_meta.checksum_length != 0);
free_object_meta(&object_meta);
close_object(object_ref, err);
}
{ // download
DownloaderRef downloader = download(bucket, object_paths[i], err);
require_noerror(*err);
uint8_t downloadedData[data_len];
memset(downloadedData, '\0', data_len);
size_t downloadedTotal = 0;
uint8_t downloaded_data[data_len];
memset(downloaded_data, '\0', data_len);
size_t downloaded_total = 0;
uint64_t size_to_read = 256 + i;
size_t size_to_read = 256 + i;
while (true) {
uint64_t downloadedSize = download_read(downloader, &downloadedData[downloadedTotal], size_to_read, err);
size_t read_size = download_read(downloader, &downloaded_data[downloaded_total], size_to_read, err);
require_noerror(*err);
if (downloadedSize == EOF) {
if (read_size == 0) {
break;
}
downloadedTotal += downloadedSize;
downloaded_total += read_size;
}
download_close(downloader, err);
require_noerror(*err);
require(memcmp(data, downloadedData, data_len) == 0);
require(memcmp(data, downloaded_data, data_len) == 0);
}
if (data != NULL) {
free(data);
}
require_noerror(*err);
}
{ // List objects
ObjectList objects_list = list_objects(bucket, NULL, err);
require_noerror(*err);
require(strcmp(bucket_name, objects_list.bucket) == 0);
require(strcmp("", objects_list.prefix) == 0);
require(false == objects_list.more);
require(num_of_objects == objects_list.length);
ObjectInfo *object;
for (int i=0; i < objects_list.length; i++) {
object = &objects_list.items[i];
require(true == array_contains(object->path, object_paths, num_of_objects));
}
free_list_objects(&objects_list);
}
close_bucket(bucket, err);

View File

@ -6,10 +6,23 @@
#include <stdio.h>
#include <string.h>
// CipherSuite enumerates the encryption suites; values are passed through
// unchanged to/from the Go storj.CipherSuite type.
typedef enum CipherSuite {
    STORJ_ENC_UNSPECIFIED = 0, // no suite specified
    STORJ_ENC_NULL = 1,        // no encryption
    STORJ_ENC_AESGCM = 2,      // AES-GCM
    STORJ_ENC_SECRET_BOX = 3   // NaCl SecretBox
} CipherSuite;
// RedundancyAlgorithm enumerates erasure-coding algorithms; values mirror the
// Go storj.RedundancyAlgorithm type.
typedef enum RedundancyAlgorithm {
    STORJ_INVALID_REDUNDANCY_ALGORITHM = 0,
    STORJ_REED_SOLOMON = 1
} RedundancyAlgorithm;
// Opaque handle types. _handle indexes a Go-side registry that owns the real
// object; the C side must treat it as an opaque token.
typedef struct APIKey { long _handle; } APIKeyRef;
typedef struct Uplink { long _handle; } UplinkRef;
typedef struct Project { long _handle; } ProjectRef;
typedef struct Bucket { long _handle; } BucketRef;
typedef struct Object { long _handle; } ObjectRef;
typedef struct Downloader { long _handle; } DownloaderRef;
typedef struct Uploader { long _handle; } UploaderRef;
@ -18,6 +31,7 @@ typedef struct UplinkConfig {
struct {
bool SkipPeerCAWhitelist;
} TLS;
// TODO: add support for MaxMemory
} Volatile;
} UplinkConfig;
@ -26,30 +40,30 @@ typedef struct ProjectOptions {
} ProjectOptions;
typedef struct EncryptionParameters {
uint8_t cipher_suite;
int32_t block_size;
CipherSuite cipher_suite;
int32_t block_size;
} EncryptionParameters;
typedef struct RedundancyScheme {
uint8_t algorithm;
int32_t share_size;
int16_t required_shares;
int16_t repair_shares;
int16_t optimal_shares;
int16_t total_shares;
RedundancyAlgorithm algorithm;
int32_t share_size;
int16_t required_shares;
int16_t repair_shares;
int16_t optimal_shares;
int16_t total_shares;
} RedundancyScheme;
typedef struct BucketInfo {
char *name;
int64_t created;
uint8_t path_cipher;
CipherSuite path_cipher;
uint64_t segment_size;
EncryptionParameters encryption_parameters;
RedundancyScheme redundancy_scheme;
} BucketInfo;
typedef struct BucketConfig {
uint8_t path_cipher;
CipherSuite path_cipher;
EncryptionParameters encryption_parameters;
RedundancyScheme redundancy_scheme;
} BucketConfig;
@ -70,7 +84,48 @@ typedef struct EncryptionAccess {
char key[32];
} EncryptionAccess;
// ObjectInfo describes a single object as returned by list_objects.
// char* members are allocated on the Go side; release with free_object_info.
typedef struct ObjectInfo {
    uint32_t version;
    BucketInfo bucket;
    char *path;
    bool is_prefix;
    char *content_type;
    int64_t created;  // unix timestamp, seconds
    int64_t modified; // unix timestamp, seconds
    int64_t expires;  // unix timestamp, seconds
} ObjectInfo;
// ObjectList is a page of listing results from list_objects.
// Release all owned memory with free_list_objects.
typedef struct ObjectList {
    char *bucket;      // bucket the listing was made against
    char *prefix;      // prefix the listing was made with
    bool more;         // true if further results are available
    ObjectInfo *items; // array of length entries
    int32_t length;
} ObjectList;
// UploadOptions configures an upload; release strings with free_upload_opts.
typedef struct UploadOptions {
    char *content_type;
    int64_t expires; // unix timestamp, seconds
} UploadOptions;
// ListOptions configures list_objects; fields are converted one-to-one into
// the Go uplink.ListOptions struct. May be passed as NULL for defaults.
typedef struct ListOptions {
    char *prefix;
    char *cursor;
    char delimiter;
    bool recursive;
    int8_t direction; // converted to storj.ListDirection
    int64_t limit;
} ListOptions;
// ObjectMeta holds metadata for one object, filled by get_object_meta.
// Strings and checksum_bytes are Go-allocated; release with free_object_meta.
typedef struct ObjectMeta {
    char *bucket;
    char *path;
    bool is_prefix;
    char *content_type;
    int64_t created;  // unix timestamp, seconds
    int64_t modified; // unix timestamp, seconds
    int64_t expires;  // unix timestamp, seconds
    uint64_t size;
    uint8_t *checksum_bytes;  // first checksum_length bytes are valid
    uint64_t checksum_length;
} ObjectMeta;