// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package main

// #include "uplink_definitions.h"
import "C"

import (
	"fmt"
	"io"
	"time"
	"unsafe"

	"storj.io/storj/internal/errs2"
	"storj.io/storj/lib/uplink"
	"storj.io/storj/pkg/storj"
)

// Object is a scoped uplink.Object
type Object struct {
	scope
	*uplink.Object
}

//export open_object
// open_object returns an Object handle, if authorized.
func open_object(bucketHandle C.BucketRef, objectPath *C.char, cerr **C.char) C.ObjectRef {
	bucket, ok := universe.Get(bucketHandle._handle).(*Bucket)
	if !ok {
		*cerr = C.CString("invalid bucket")
		return C.ObjectRef{}
	}

	scope := bucket.scope.child()

	object, err := bucket.OpenObject(scope.ctx, C.GoString(objectPath))
	if err != nil {
		*cerr = C.CString(fmt.Sprintf("%+v", err))
		return C.ObjectRef{}
	}

	return C.ObjectRef{universe.Add(&Object{scope, object})}
}

//export close_object
// close_object closes the object.
func close_object(objectHandle C.ObjectRef, cerr **C.char) {
	object, ok := universe.Get(objectHandle._handle).(*Object)
	if !ok {
		*cerr = C.CString("invalid object")
		return
	}

	universe.Del(objectHandle._handle)
	defer object.cancel()

	if err := object.Close(); err != nil {
		*cerr = C.CString(fmt.Sprintf("%+v", err))
		return
	}
}

//export get_object_meta
// get_object_meta returns the object meta which contains metadata about a specific Object.
func get_object_meta(cObject C.ObjectRef, cErr **C.char) C.ObjectMeta {
	object, ok := universe.Get(cObject._handle).(*Object)
	if !ok {
		*cErr = C.CString("invalid object")
		return C.ObjectMeta{}
	}

	checksumLen := len(object.Meta.Checksum)
	checksumPtr := C.malloc(C.size_t(checksumLen))
	checksum := (*[1 << 30]uint8)(checksumPtr)
	copy((*checksum)[:], object.Meta.Checksum)

	return C.ObjectMeta{
		bucket:          C.CString(object.Meta.Bucket),
		path:            C.CString(object.Meta.Path),
		is_prefix:       C.bool(object.Meta.IsPrefix),
		content_type:    C.CString(object.Meta.ContentType),
		created:         C.int64_t(object.Meta.Created.Unix()),
		modified:        C.int64_t(object.Meta.Modified.Unix()),
		expires:         C.int64_t(object.Meta.Expires.Unix()),
		size:            C.uint64_t(object.Meta.Size),
		checksum_bytes:  (*C.uint8_t)(checksumPtr),
		checksum_length: C.uint64_t(checksumLen),
	}
}
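
// Note: every pointer field in the returned ObjectMeta (bucket, path,
// content_type, checksum_bytes) lives on the C heap; the caller is expected
// to release it with free_object_meta below once the metadata is no longer
// needed.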

// Upload stores writecloser and context scope for uploading
type Upload struct {
	scope
	wc io.WriteCloser // underlying object writer; closing it commits the upload
}

//export upload
// upload uploads a new object, if authorized.
func upload(cBucket C.BucketRef, path *C.char, cOpts *C.UploadOptions, cErr **C.char) C.UploaderRef {
	bucket, ok := universe.Get(cBucket._handle).(*Bucket)
	if !ok {
		*cErr = C.CString("invalid bucket")
		return C.UploaderRef{}
	}

	scope := bucket.scope.child()

	var opts *uplink.UploadOptions
	if cOpts != nil {
		// Note: custom metadata from cOpts is not converted yet, so Metadata
		// is always nil here.
		var metadata map[string]string

		opts = &uplink.UploadOptions{
			ContentType: C.GoString(cOpts.content_type),
			Metadata:    metadata,
			Expires:     time.Unix(int64(cOpts.expires), 0),
		}
	}

	writeCloser, err := bucket.NewWriter(scope.ctx, C.GoString(path), opts)
	if err != nil {
		*cErr = C.CString(fmt.Sprintf("%+v", err))
		return C.UploaderRef{}
	}

	return C.UploaderRef{universe.Add(&Upload{
		scope: scope,
		wc:    writeCloser,
	})}
}

//export upload_write
// upload_write writes up to length bytes from the provided buffer to the
// upload stream and returns the number of bytes written.
func upload_write(uploader C.UploaderRef, bytes *C.uint8_t, length C.size_t, cErr **C.char) C.size_t {
	upload, ok := universe.Get(uploader._handle).(*Upload)
	if !ok {
		*cErr = C.CString("invalid uploader")
		return C.size_t(0)
	}

	if err := upload.ctx.Err(); err != nil {
		if !errs2.IsCanceled(err) {
			*cErr = C.CString(fmt.Sprintf("%+v", err))
		}
		return C.size_t(0)
	}

	buf := (*[1 << 30]byte)(unsafe.Pointer(bytes))[:length]

	n, err := upload.wc.Write(buf)
	if err != nil {
		if !errs2.IsCanceled(err) {
			*cErr = C.CString(fmt.Sprintf("%+v", err))
		}
	}
	return C.size_t(n)
}

//export upload_commit
// upload_commit commits the upload by closing the underlying writer.
func upload_commit(uploader C.UploaderRef, cErr **C.char) {
	upload, ok := universe.Get(uploader._handle).(*Upload)
	if !ok {
		*cErr = C.CString("invalid uploader")
		return
	}

	if err := upload.ctx.Err(); err != nil {
		if !errs2.IsCanceled(err) {
			*cErr = C.CString(fmt.Sprintf("%+v", err))
		}
		return
	}

	defer upload.cancel()

	err := upload.wc.Close()
	if err != nil {
		if !errs2.IsCanceled(err) {
			*cErr = C.CString(fmt.Sprintf("%+v", err))
		}
		return
	}
}

//export upload_cancel
// upload_cancel aborts the upload by canceling its context scope.
func upload_cancel(uploader C.UploaderRef, cErr **C.char) {
	upload, ok := universe.Get(uploader._handle).(*Upload)
	if !ok {
		*cErr = C.CString("invalid uploader")
		return
	}

	if err := upload.ctx.Err(); err != nil {
		if !errs2.IsCanceled(err) {
			*cErr = C.CString(fmt.Sprintf("%+v", err))
		}
		return
	}

	upload.cancel()
}
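
// An illustrative C-side call sequence for the upload API exported above.
// This is a sketch only: bucket, buf, buf_len and more_data are placeholders
// supplied by the caller, and any error string returned through err must be
// handled and freed by the caller.
//
//	char *err = NULL;
//	UploaderRef up = upload(bucket, "path/to/object", NULL, &err);
//	while (more_data && err == NULL) {
//		upload_write(up, buf, buf_len, &err);
//	}
//	if (err == NULL) {
//		upload_commit(up, &err);
//	} else {
//		upload_cancel(up, &err);
//	}
//	free_uploader(up);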

//export list_objects
// list_objects lists objects a user is authorized to see.
func list_objects(bucketRef C.BucketRef, cListOpts *C.ListOptions, cErr **C.char) (cObjList C.ObjectList) {
	bucket, ok := universe.Get(bucketRef._handle).(*Bucket)
	if !ok {
		*cErr = C.CString("invalid bucket")
		return cObjList
	}

	scope := bucket.scope.child()

	var opts *uplink.ListOptions
	if cListOpts != nil {
		opts = &uplink.ListOptions{
			Prefix:    C.GoString(cListOpts.prefix),
			Cursor:    C.GoString(cListOpts.cursor),
			Delimiter: rune(cListOpts.delimiter),
			Recursive: bool(cListOpts.recursive),
			Direction: storj.ListDirection(cListOpts.direction),
			Limit:     int(cListOpts.limit),
		}
	}

	objectList, err := bucket.ListObjects(scope.ctx, opts)
	if err != nil {
		*cErr = C.CString(fmt.Sprintf("%+v", err))
		return cObjList
	}
	objListLen := len(objectList.Items)

	objectSize := int(C.sizeof_ObjectInfo)
	ptr := C.malloc(C.size_t(objectSize * objListLen))
	cObjectsPtr := (*[1 << 30 / unsafe.Sizeof(C.ObjectInfo{})]C.ObjectInfo)(ptr)

	for i, object := range objectList.Items {
		object := object
		cObjectsPtr[i] = newObjectInfo(&object)
	}

	return C.ObjectList{
		bucket: C.CString(objectList.Bucket),
		prefix: C.CString(objectList.Prefix),
		more:   C.bool(objectList.More),
		items:  (*C.ObjectInfo)(unsafe.Pointer(cObjectsPtr)),
		length: C.int32_t(objListLen),
	}
}
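
// The returned ObjectList owns C-allocated memory: the bucket and prefix
// strings, the malloc'd items array, and the strings inside each ObjectInfo.
// Callers should hand the whole structure back to free_list_objects when done,
// roughly like this (a sketch; bucket and err are supplied by the caller):
//
//	ObjectList list = list_objects(bucket, NULL, &err);
//	for (int32_t i = 0; i < list.length; i++) {
//		/* inspect list.items[i].path, list.items[i].content_type, ... */
//	}
//	free_list_objects(&list);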

// Download stores readcloser and context scope for downloading
type Download struct {
	scope
	rc io.ReadCloser
}

//export download
// download returns a downloader for the object at the given path, if authorized.
func download(bucketRef C.BucketRef, path *C.char, cErr **C.char) C.DownloaderRef {
	bucket, ok := universe.Get(bucketRef._handle).(*Bucket)
	if !ok {
		*cErr = C.CString("invalid bucket")
		return C.DownloaderRef{}
	}

	scope := bucket.scope.child()

	rc, err := bucket.Download(scope.ctx, C.GoString(path))
	if err != nil {
		if !errs2.IsCanceled(err) {
			*cErr = C.CString(fmt.Sprintf("%+v", err))
		}
		return C.DownloaderRef{}
	}

	return C.DownloaderRef{universe.Add(&Download{
		scope: scope,
		rc:    rc,
	})}
}

//export download_read
// download_read reads up to length bytes of the object's data into the
// provided buffer and returns the number of bytes read; reaching the end of
// the data is not reported as an error.
func download_read(downloader C.DownloaderRef, bytes *C.uint8_t, length C.size_t, cErr **C.char) C.size_t {
	download, ok := universe.Get(downloader._handle).(*Download)
	if !ok {
		*cErr = C.CString("invalid downloader")
		return C.size_t(0)
	}

	if err := download.ctx.Err(); err != nil {
		if !errs2.IsCanceled(err) {
			*cErr = C.CString(fmt.Sprintf("%+v", err))
		}
		return C.size_t(0)
	}

	buf := (*[1 << 30]byte)(unsafe.Pointer(bytes))[:length]

	n, err := download.rc.Read(buf)
	if err != nil && err != io.EOF && !errs2.IsCanceled(err) {
		*cErr = C.CString(fmt.Sprintf("%+v", err))
	}
	return C.size_t(n)
}

//export download_close
// download_close closes the downloader and its underlying reader.
func download_close(downloader C.DownloaderRef, cErr **C.char) {
	download, ok := universe.Get(downloader._handle).(*Download)
	if !ok {
		*cErr = C.CString("invalid downloader")
		return
	}

	if err := download.ctx.Err(); err != nil {
		if !errs2.IsCanceled(err) {
			*cErr = C.CString(fmt.Sprintf("%+v", err))
		}
		return
	}

	defer download.cancel()

	err := download.rc.Close()
	if err != nil {
		if !errs2.IsCanceled(err) {
			*cErr = C.CString(fmt.Sprintf("%+v", err))
		}
		return
	}
}

//export download_cancel
// download_cancel aborts the download by canceling its context scope.
func download_cancel(downloader C.DownloaderRef, cErr **C.char) {
	download, ok := universe.Get(downloader._handle).(*Download)
	if !ok {
		*cErr = C.CString("invalid downloader")
		return
	}

	if err := download.ctx.Err(); err != nil {
		if !errs2.IsCanceled(err) {
			*cErr = C.CString(fmt.Sprintf("%+v", err))
		}
		return
	}

	download.cancel()
}
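
// An illustrative C-side call sequence for the download API exported above
// (a sketch only; bucket, buf and err are placeholders supplied by the caller):
//
//	DownloaderRef down = download(bucket, "path/to/object", &err);
//	for (;;) {
//		size_t n = download_read(down, buf, sizeof(buf), &err);
//		if (n == 0 || err != NULL) break; /* end of data or error */
//		/* consume n bytes from buf */
//	}
//	download_close(down, &err);
//	free_downloader(down);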

//export delete_object
// delete_object deletes the object at the given path, if authorized.
func delete_object(bucketRef C.BucketRef, path *C.char, cerr **C.char) {
	bucket, ok := universe.Get(bucketRef._handle).(*Bucket)
	if !ok {
		*cerr = C.CString("invalid bucket")
		return
	}

	if err := bucket.DeleteObject(bucket.ctx, C.GoString(path)); err != nil {
		*cerr = C.CString(fmt.Sprintf("%+v", err))
		return
	}
}

//export free_uploader
// free_uploader deletes the uploader reference from the universe
func free_uploader(uploader C.UploaderRef) {
	universe.Del(uploader._handle)
}

//export free_downloader
// free_downloader deletes the downloader reference from the universe
func free_downloader(downloader C.DownloaderRef) {
	universe.Del(downloader._handle)
}

//export free_upload_opts
// free_upload_opts frees the upload options
func free_upload_opts(uploadOpts *C.UploadOptions) {
	C.free(unsafe.Pointer(uploadOpts.content_type))
	uploadOpts.content_type = nil
}

//export free_object_meta
// free_object_meta frees the object meta
func free_object_meta(objectMeta *C.ObjectMeta) {
	C.free(unsafe.Pointer(objectMeta.bucket))
	objectMeta.bucket = nil

	C.free(unsafe.Pointer(objectMeta.path))
	objectMeta.path = nil

	C.free(unsafe.Pointer(objectMeta.content_type))
	objectMeta.content_type = nil

	C.free(unsafe.Pointer(objectMeta.checksum_bytes))
	objectMeta.checksum_bytes = nil
}

//export free_object_info
// free_object_info frees the object info
func free_object_info(objectInfo *C.ObjectInfo) {
	bucketInfo := objectInfo.bucket
	free_bucket_info(&bucketInfo)

	C.free(unsafe.Pointer(objectInfo.path))
	objectInfo.path = nil

	C.free(unsafe.Pointer(objectInfo.content_type))
	objectInfo.content_type = nil
}

//export free_list_objects
// free_list_objects frees the list of objects
func free_list_objects(objectList *C.ObjectList) {
	C.free(unsafe.Pointer(objectList.bucket))
	objectList.bucket = nil

	C.free(unsafe.Pointer(objectList.prefix))
	objectList.prefix = nil

	items := (*[1 << 30 / unsafe.Sizeof(C.ObjectInfo{})]C.ObjectInfo)(unsafe.Pointer(objectList.items))[:objectList.length]
	for i := range items {
		// free each entry in place rather than a copy, so the nil-ed pointers
		// are reflected in the array itself
		free_object_info(&items[i])
	}

	// release the items array that list_objects allocated with C.malloc
	C.free(unsafe.Pointer(objectList.items))
	objectList.items = nil
}