"containers": {"@type": "as.dto.dataset.fetchoptions.DataSetFetchOptions"},
"type": {"@type": "as.dto.dataset.fetchoptions.DataSetTypeFetchOptions"},
}
for option in ['tags', 'properties', 'dataStore', 'physicalData', 'linkedData',
'experiment', 'sample']:
fetchopts[option] = fetch_option[option]
request = {
"method": "getDataSets",
"params": [
self.token,
criteria,
fetchopts,
],
}
resp = self._post_request(self.as_v3, request)
if resp is None or len(resp) == 0:
raise ValueError('no such dataset found: ' + permid)
parse_jackson(resp)
for permid in resp:
if only_data:
return resp[permid]
else:
return DataSet(
self,
type=self.get_dataset_type(resp[permid]["type"]["code"]),
data=resp[permid]
)
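# Usage sketch (not part of the original module): fetching a data set by permId,
# assuming `o` is an authenticated Openbis instance; the permId below is hypothetical.
#
#     ds  = o.get_dataset('20170101123456789-123')                  # DataSet object
#     raw = o.get_dataset('20170101123456789-123', only_data=True)  # raw response dict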
def get_sample(self, sample_ident, only_data=False, withAttachments=False):
"""Retrieve metadata for the sample.
Get metadata for the sample and any directly connected parents of the sample to allow access
to the same information visible in the ELN UI. The metadata will be on the file system.
:param sample_identifiers: A list of sample identifiers to retrieve.
"""
search_request = search_request_for_identifier(sample_ident, 'sample')
fetchopts = {"type": {"@type": "as.dto.sample.fetchoptions.SampleTypeFetchOptions"}}
for option in ['tags', 'properties', 'attachments', 'space', 'experiment', 'registrator', 'dataSets']:
fetchopts[option] = fetch_option[option]
if withAttachments:
fetchopts['attachments'] = fetch_option['attachmentsWithContent']
for key in ['parents','children','container','components']:
fetchopts[key] = {"@type": "as.dto.sample.fetchoptions.SampleFetchOptions"}
sample_request = {
"method": "getSamples",
"params": [
self.token,
[search_request],
fetchopts
],
}
resp = self._post_request(self.as_v3, sample_request)
parse_jackson(resp)
if resp is None or len(resp) == 0:
raise ValueError('no such sample found: ' + sample_ident)
for sample_ident in resp:
if only_data:
return resp[sample_ident]
else:
return Sample(self, self.get_sample_type(resp[sample_ident]["type"]["code"]), resp[sample_ident])
get_object = get_sample # Alias
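# Usage sketch (identifiers are hypothetical): a sample can be fetched either by
# identifier or by permId; get_object is just an alias for get_sample.
#
#     s = o.get_sample('/MY_SPACE/MY_SAMPLE')
#     s = o.get_sample('20170101123456789-42', withAttachments=True)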
def get_external_data_management_system(self, permId, only_data=False):
"""Retrieve metadata for the external data management system.
:param permId: A permId for an external DMS.
:param only_data: Return the result data as a hash-map, not an object.
"""
request = {
"method": "getExternalDataManagementSystems",
"params": [
self.token,
[{
"@type": "as.dto.externaldms.id.ExternalDmsPermId",
"permId": permId
}],
{},
],
}
resp = self._post_request(self.as_v3, request)
parse_jackson(resp)
if resp is None or len(resp) == 0:
raise ValueError('no such external DMS found: ' + permId)
else:
for ident in resp:
if only_data:
return resp[ident]
else:
return ExternalDMS(self, resp[ident])
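# Usage sketch (permId is hypothetical): resolving an external data management
# system registered in openBIS, either as an ExternalDMS object or as a plain dict.
#
#     dms     = o.get_external_data_management_system('MY_EXTERNAL_DMS')
#     dms_raw = o.get_external_data_management_system('MY_EXTERNAL_DMS', only_data=True)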
def new_space(self, **kwargs):
return Space(self, None, **kwargs)
def new_analysis(self, name, description=None, sample=None, dss_code=None, result_files=None,
notebook_files=None, parents=None):
""" An analysis contains the Jupyter notebook file(s) and some result files.
Technically this method involves uploading files to the session workspace
and activating the dropbox aka dataset ingestion service "jupyter-uploader-api"
"""
if dss_code is None:
dss_code = self.get_datastores()['code'][0]
# if a sample identifier was given, use it as a string.
# if a sample object was given, take its identifier
sampleId = self.sample_to_sample_id(sample)
datastore_url = self._get_dss_url(dss_code)
parentIds = []
if parents is not None:
if not isinstance(parents, list):
parents = [parents]
for parent in parents:
parentIds.append(parent.permId)
folder = time.strftime('%Y-%m-%d_%H-%M-%S')
data_sets = []
if notebook_files is not None:
notebooks_folder = os.path.join(folder, 'notebook_files')
self.upload_files(
datastore_url=datastore_url,
files=notebook_files,
folder=notebooks_folder,
wait_until_finished=True
)
data_sets.append({
"dataSetType": "JUPYTER_NOTEBOOk",
"sessionWorkspaceFolder": notebooks_folder,
"fileNames": notebook_files,
"properties": {}
})
if result_files is not None:
results_folder = os.path.join(folder, 'result_files')
self.upload_files(
datastore_url=datastore_url,
files=result_files,
folder=results_folder,
wait_until_finished=True
)
data_sets.append({
"dataSetType": "JUPYTER_RESULT",
"sessionWorkspaceFolder": results_folder,
"fileNames": result_files,
"properties": {}
})
"method": "createReportFromAggregationService",
"params": [
self.token,
dss_code,
{
"sampleId": sampleId,
"parentIds": parentIds,
"containers": [{
"dataSetType": "JUPYTER_CONTAINER",
"properties": {
"NAME": name,
"DESCRIPTION": description
}
}],
"dataSets": data_sets,
}
],
}
resp = self._post_request(self.reg_v1, request)
try:
if resp['rows'][0][0]['value'] == 'OK':
return resp['rows'][0][1]['value']
except:
return resp
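# Usage sketch (file names and sample are hypothetical): registering a Jupyter
# analysis creates a JUPYTER_CONTAINER data set that groups the notebook and
# result files previously uploaded to the session workspace.
#
#     o.new_analysis(
#         name='my analysis',
#         description='parameter scan',
#         sample='/MY_SPACE/MY_SAMPLE',
#         notebook_files=['analysis.ipynb'],
#         result_files=['results.csv'],
#     )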
def new_git_data_set(self, data_set_type, path, commit_id, repository_id, dms, sample=None, experiment=None, properties={},
dss_code=None, parents=None, data_set_code=None, contents=[]):
""" Create a link data set.
:param data_set_type: The type of the data set
:param path: The path to the git repository
:param commit_id: The git commit id
:param repository_id: The git repository id - same for copies
:param dms: An external data management system object or external_dms_id
:param sample: A sample object or sample id.
:param dss_code: Code for the DSS -- defaults to the first dss if none is supplied.
:param properties: Properties for the data set.
:param parents: Parents for the data set.
:param data_set_code: A data set code -- used if provided, otherwise generated on the server
:param contents: A list of dicts that describe the contents:
{'file_length': [file length],
'crc32': [crc32 checksum],
'directory': [is path a directory?]
'path': [the relative path string]}
:return: A DataSet object
"""
return pbds.GitDataSetCreation(self, data_set_type, path, commit_id, repository_id, dms, sample, experiment,
properties, dss_code, parents, data_set_code, contents).new_git_data_set()
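# Usage sketch (all values hypothetical): linking a git commit as a data set.
# `dms` may be an external DMS object or its id; `contents` follows the dict
# layout documented above.
#
#     o.new_git_data_set(
#         data_set_type='GIT_REPO',
#         path='/home/user/repo',
#         commit_id='abc123',
#         repository_id='repo-1',
#         dms='MY_EXTERNAL_DMS',
#         sample='/MY_SPACE/MY_SAMPLE',
#     )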
def new_content_copy(self, path, commit_id, repository_id, edms_id, data_set_id):
"""
Create a content copy in an existing link data set.
:param path: path of the new content copy
"param commit_id: commit id of the new content copy
"param repository_id: repository id of the content copy
"param edms_id: Id of the external data managment system of the content copy
"param data_set_id: Id of the data set to which the new content copy belongs
"""
return pbds.GitDataSetUpdate(self, path, commit_id, repository_id, edms_id, data_set_id).new_content_copy()
@staticmethod
def sample_to_sample_id(sample):
"""Take sample which may be a string or object and return an identifier for it."""
return Openbis._object_to_object_id(sample, "as.dto.sample.id.SampleIdentifier", "as.dto.sample.id.SamplePermId")
@staticmethod
def experiment_to_experiment_id(experiment):
"""Take experiment which may be a string or object and return an identifier for it."""
return Openbis._object_to_object_id(experiment, "as.dto.experiment.id.ExperimentIdentifier", "as.dto.experiment.id.ExperimentPermId")
@staticmethod
def _object_to_object_id(obj, identifierType, permIdType):
object_id = None
if isinstance(obj, str):
if (is_identifier(obj)):
object_id = {
"identifier": obj,
"@type": identifierType
}
else:
object_id = {
"permId": obj,
"@type": permIdType
}
else:
object_id = {
"identifier": obj.identifier,
"@type": identifierType
return object_id
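# Behaviour sketch: strings are classified by is_identifier(), so the two calls
# below (hypothetical values) produce differently typed id dicts.
#
#     Openbis.sample_to_sample_id('/MY_SPACE/MY_SAMPLE')
#       -> {'identifier': '/MY_SPACE/MY_SAMPLE', '@type': 'as.dto.sample.id.SampleIdentifier'}
#     Openbis.sample_to_sample_id('20170101123456789-42')
#       -> {'permId': '20170101123456789-42', '@type': 'as.dto.sample.id.SamplePermId'}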
@staticmethod
def data_set_to_data_set_id(data_set):
if isinstance(data_set, str):
code = data_set
else:
code = data_set.permId
return {
"permId": code,
"@type": "as.dto.dataset.id.DataSetPermId"
}
def external_data_managment_system_to_dms_id(self, dms):
if isinstance(dms, str):
dms_id = {
"permId": dms,
"@type": "as.dto.externaldms.id.ExternalDmsPermId"
}
else:
dms_id = {
"identifier": dms.code,
"@type": "as.dto.sample.id.SampleIdentifier"
}
return dms_id
def new_sample(self, type, props=None, **kwargs):
""" Creates a new sample of a given sample type.
"""
return Sample(self, self.get_sample_type(type), None, props, **kwargs)
new_object = new_sample # Alias
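# Usage sketch (type code and property names are hypothetical): a new sample is
# only registered on the server once its save() method is called.
#
#     s = o.new_sample(type='EXPERIMENTAL_STEP', space='MY_SPACE',
#                      props={'name': 'my first step'})
#     s.save()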
def new_dataset(self, type=None, files=None, props=None, folder=None, **kwargs):
""" Creates a new dataset of a given sample type.
"""
if files is None:
raise ValueError('please provide at least one file')
elif isinstance(files, str):
files = [files]
type_obj = self.get_dataset_type(type.upper())
return DataSet(self, type=type_obj, files=files, folder=folder, props=props, **kwargs)
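# Usage sketch (type code, file and sample are hypothetical): new_dataset only
# builds the local object; uploading and registration happen in DataSet.save().
#
#     ds = o.new_dataset(type='RAW_DATA', files=['measurement.txt'],
#                        sample='/MY_SPACE/MY_SAMPLE', props={'name': 'run 1'})
#     ds.save()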
def new_semantic_annotation(self, entityType=None, propertyType=None, **kwargs):
return SemanticAnnotation(
openbis_obj=self, isNew=True,
entityType=entityType, propertyType=propertyType, **kwargs
)
def _get_dss_url(self, dss_code=None):
""" internal method to get the downloadURL of a datastore.
"""
dss = self.get_datastores()
if dss_code is None:
return dss['downloadUrl'][0]
else:
return dss[dss['code'] == dss_code]['downloadUrl'][0]
def upload_files(self, datastore_url=None, files=None, folder=None, wait_until_finished=False):
if datastore_url is None:
datastore_url = self._get_dss_url()
if files is None:
raise ValueError("Please provide a filename.")
if folder is None:
# create a unique foldername
folder = time.strftime('%Y-%m-%d_%H-%M-%S')
if isinstance(files, str):
files = [files]
self.files = files
self.startByte = 0
self.endByte = 0
# define a queue to handle the upload threads
queue = DataSetUploadQueue()
real_files = []
for filename in files:
if os.path.isdir(filename):
real_files.extend(
[os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(filename)) for f in fn])
else:
real_files.append(os.path.join(filename))
self.files_in_wsp = []
# compose the upload-URL and put URL and filename in the upload queue
for filename in real_files:
file_in_wsp = os.path.join(folder, filename)
self.files_in_wsp.append(file_in_wsp)
upload_url = (
datastore_url + '/session_workspace_file_upload'
+ '?filename=' + os.path.join(folder, quote(filename))
+ '&id=1'
+ '&startByte=0&endByte=0'
+ '&sessionID=' + self.token
)
queue.put([upload_url, filename, self.verify_certificates])
# wait until all files have uploaded
if wait_until_finished:
queue.join()
# return files with full path in session workspace
return self.files_in_wsp
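# Usage sketch (file names hypothetical): upload_files() pushes files into the DSS
# session workspace and returns their workspace paths; it is normally called
# indirectly by new_analysis() and DataSet.save().
#
#     wsp_files = o.upload_files(files=['notebook.ipynb', 'results/'],
#                                wait_until_finished=True)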
class DataSetUploadQueue():
def __init__(self, workers=20):
# maximum files to be uploaded at once
self.upload_queue = Queue()
# define number of threads and start them
for t in range(workers):
t = Thread(target=self.upload_file)
t.daemon = True
t.start()
def put(self, things):
""" expects a list [url, filename] which is put into the upload queue
"""
self.upload_queue.put(things)
def join(self):
""" needs to be called if you want to wait for all uploads to be finished
"""
self.upload_queue.join()
def upload_file(self):
while True:
# get the next item in the queue
upload_url, filename, verify_certificates = self.upload_queue.get()
filesize = os.path.getsize(filename)
# upload the file to our DSS session workspace
with open(filename, 'rb') as f:
resp = requests.post(upload_url, data=f, verify=verify_certificates)
resp.raise_for_status()
data = resp.json()
assert filesize == int(data['size'])
# Tell the queue that we are done
self.upload_queue.task_done()
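# Behaviour sketch: DataSetUploadQueue is a classic producer/consumer pattern.
# put() enqueues [upload_url, filename, verify_certificates] items, the daemon
# worker threads POST each file to the session workspace, and join() blocks
# until every queued upload has finished. Values below are placeholders.
#
#     q = DataSetUploadQueue(workers=5)
#     q.put([upload_url, 'data.txt', True])
#     q.join()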
class DataSetDownloadQueue():
def __init__(self, workers=20):
# maximum files to be downloaded at once
self.download_queue = Queue()
# define number of threads
for t in range(workers):
t = Thread(target=self.download_file)
t.daemon = True
t.start()
def put(self, things):
""" expects a list [url, filename] which is put into the download queue
"""
self.download_queue.put(things)
def join(self):
""" needs to be called if you want to wait for all downloads to be finished
"""
self.download_queue.join()
def download_file(self):
while True:
url, filename, file_size, verify_certificates = self.download_queue.get()
# create the necessary directory structure if they don't exist yet
os.makedirs(os.path.dirname(filename), exist_ok=True)
# request the file in streaming mode
r = requests.get(url, stream=True, verify=verify_certificates)
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
assert os.path.getsize(filename) == int(file_size)
self.download_queue.task_done()
class OpenBisObject():
def __init__(self, openbis_obj, type, data=None, props=None, **kwargs):
self.__dict__['openbis'] = openbis_obj
self.__dict__['type'] = type
self.__dict__['p'] = PropertyHolder(openbis_obj, type)
self.__dict__['a'] = AttrHolder(openbis_obj, 'DataSet', type)
# existing OpenBIS object
if data is not None:
self._set_data(data)
Swen Vermeul
committed
if props is not None:
for key in props:
setattr(self.p, key, props[key])
if kwargs is not None:
for key in kwargs:
setattr(self, key, kwargs[key])
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return str(self) != str(other)
def _set_data(self, data):
# assign the attribute data to self.a by calling it
# (invoking the AttrHolder.__call__ function)
self.a(data)
self.__dict__['data'] = data
# put the properties in the self.p namespace (without checking them)
if 'properties' in data:
for key, value in data['properties'].items():
self.p.__dict__[key.lower()] = value
@property
def attrs(self):
return self.__dict__['a']
@property
def project(self):
try:
return self.openbis.get_project(self._project['identifier'])
except Exception:
pass
@property
def experiment(self):
try:
return self.openbis.get_experiment(self._experiment['identifier'])
except Exception:
pass
@property
def sample(self):
try:
return self.openbis.get_sample(self._sample['identifier'])
except Exception:
pass
object = sample # Alias
def __getattr__(self, name):
return getattr(self.__dict__['a'], name)
def __setattr__(self, name, value):
if name in ['set_properties', 'set_tags', 'add_tags']:
raise ValueError("These are methods which should not be overwritten")
setattr(self.__dict__['a'], name, value)
def _repr_html_(self):
"""Print all the assigned attributes (identifier, tags, etc.) in a nicely formatted table. See
AttributeHolder class.
"""
return self.a._repr_html_()
def __repr__(self):
"""same thing as _repr_html_() but for IPython
"""
return self.a.__repr__()
class LinkedData():
def __init__(self, data=None):
self.data = data if data is not None else []
self.attrs = ['externalCode', 'contentCopies']
def __dir__(self):
return self.attrs
def __getattr__(self, name):
if name in self.attrs:
if name in self.data:
return self.data[name]
else:
return ''
class PhysicalData():
def __init__(self, data=None):
if data is None:
data = []
self.data = data
self.attrs = ['speedHint', 'complete', 'shareId', 'size',
'fileFormatType', 'storageFormat', 'location', 'presentInArchive',
'storageConfirmation', 'locatorType', 'status']
def __dir__(self):
return self.attrs
def __getattr__(self, name):
if name in self.attrs:
if name in self.data:
return self.data[name]
else:
return ''
def _repr_html_(self):
html = """
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>attribute</th>
<th>value</th>
</tr>
</thead>
<tbody>
"""
for attr in self.attrs:
html += "<tr> <td>{}</td> <td>{}</td> </tr>".format(
attr, getattr(self, attr, '')
)
html += """
</tbody>
</table>
"""
return html
def __repr__(self):
headers = ['attribute', 'value']
lines = []
for attr in self.attrs:
lines.append([
attr,
getattr(self, attr, '')
])
return tabulate(lines, headers=headers)
class DataSet(OpenBisObject):
""" DataSet are openBIS objects that contain the actual files.
"""
def __init__(self, openbis_obj, type=type, data=None, files=None, folder=None, props=None, **kwargs):
super(DataSet, self).__init__(openbis_obj, type, data, props, **kwargs)
# existing DataSet
if data is not None:
if data['physicalData'] is None:
self.__dict__['shareId'] = None
self.__dict__['location'] = None
else:
self.__dict__['shareId'] = data['physicalData']['shareId']
self.__dict__['location'] = data['physicalData']['location']
# new DataSet
if files is not None:
self.__dict__['files'] = files
self.__dict__['folder'] = folder
def __str__(self):
return self.data['code']
def __dir__(self):
return [
'props', 'get_parents()', 'get_children()',
'add_parents()', 'add_children()', 'del_parents()', 'del_children()',
'sample', 'experiment', 'physicalData', 'linkedData',
'tags', 'set_tags()', 'add_tags()', 'del_tags()',
'add_attachment()', 'get_attachments()', 'download_attachments()',
"get_files(start_folder='/')", 'file_list',
'download(files=None, destination=None, wait_until_finished=True)',
'status', 'archive()', 'unarchive()', 'data'
]
def __setattr__(self, name, value):
if name in ['folder']:
self.__dict__[name] = value
else:
super(DataSet, self).__setattr__(name, value)
@property
def props(self):
return self.__dict__['p']
@property
def type(self):
return self.__dict__['type']
@type.setter
def type(self, type_name):
dataset_type = self.openbis.get_dataset_type(type_name.upper())
self.p.__dict__['_type'] = dataset_type
self.a.__dict__['_type'] = dataset_type
@property
def physicalData(self):
if 'physicalData' in self.data:
return PhysicalData(self.data['physicalData'])
@property
def linkedData(self):
if 'linkedData' in self.data:
return LinkedData(self.data['linkedData'])
@property
def status(self):
ds = self.openbis.get_dataset(self.permId)
self.data['physicalData'] = ds.data['physicalData']
try:
return self.data['physicalData']['status']
except Exception:
return None
def archive(self, remove_from_data_store=True):
fetchopts = {
"removeFromDataStore": remove_from_data_store,
"@type": "as.dto.dataset.archive.DataSetArchiveOptions"
}
self.archive_unarchive('archiveDataSets', fetchopts)
print("DataSet {} archived".format(self.permId))
def unarchive(self):
fetchopts = {
"@type": "as.dto.dataset.unarchive.DataSetUnarchiveOptions"
}
self.archive_unarchive('unarchiveDataSets', fetchopts)
print("DataSet {} unarchived".format(self.permId))
def archive_unarchive(self, method, fetchopts):
dss = self.get_datastore
payload = {}
request = {
"method": method,
"params": [
self.openbis.token,
[{
"permId": self.permId,
"@type": "as.dto.dataset.id.DataSetPermId"
}],
dict(fetchopts)
],
}
resp = self.openbis._post_request(self.openbis.as_v3, request)
return
def set_properties(self, properties):
self.openbis.update_dataset(self.permId, properties=properties)
def download(self, files=None, destination=None, wait_until_finished=True, workers=10):
""" download the actual files and put them by default in the following folder:
__current_dir__/destination/dataset_permId/
If no files are specified, all files of a given dataset are downloaded.
If no destination is specified, the hostname is chosen instead.
Files are usually downloaded in parallel, using 10 workers by default. If you want to wait until
all the files are downloaded, set the wait_until_finished option to True.
"""
if files is None:
files = self.file_list
elif isinstance(files, str):
files = [files]
if destination is None:
destination = self.openbis.hostname
base_url = self.data['dataStore']['downloadUrl'] + '/datastore_server/' + self.permId + '/'
queue = DataSetDownloadQueue(workers=workers)
# get file list and start download
for filename in files:
file_info = self.get_file_list(start_folder=filename)
file_size = file_info[0]['fileSize']
download_url = base_url + filename + '?sessionID=' + self.openbis.token
filename_dest = os.path.join(destination, self.permId, filename)
queue.put([download_url, filename_dest, file_size, self.openbis.verify_certificates])
# wait until all files have downloaded
if wait_until_finished:
queue.join()
print("Files downloaded to: %s" % os.path.join(destination, self.permId))
@property
def folder(self):
return self.__dict__['folder']
@property
def file_list(self):
"""returns the list of files, including their directories, as an array of strings.
Directories themselves are not listed.
"""
files = []
for file in self.get_file_list(recursive=True):
if file['isDirectory']:
pass
else:
files.append(file['pathInDataSet'])
return files
def get_files(self, start_folder='/'):
"""Returns a DataFrame of all files in this dataset
"""
def createRelativePath(pathInDataSet):
if self.shareId is None:
return ''
else:
return os.path.join(self.shareId, self.location, pathInDataSet)
def signed_to_unsigned(sig_int):
"""openBIS delivers crc32 checksums as signed integers.
If the number is negative, we just have to add 2**32
We display the hex number to match with the classic UI
"""
if sig_int < 0:
sig_int += 2 ** 32
return "%x" % (sig_int & 0xFFFFFFFF)
files = self.get_file_list(start_folder=start_folder)
df = DataFrame(files)
df['relativePath'] = df['pathInDataSet'].map(createRelativePath)
df['crc32Checksum'] = df['crc32Checksum'].fillna(0.0).astype(int).map(signed_to_unsigned)
return df[['isDirectory', 'pathInDataSet', 'fileSize', 'crc32Checksum']]
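# Usage sketch: get_files() returns a pandas DataFrame, so the usual filtering
# applies, e.g. listing only real files (not directories):
#
#     df = ds.get_files(start_folder='/')
#     df[df['isDirectory'] == False][['pathInDataSet', 'fileSize', 'crc32Checksum']]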
def get_file_list(self, recursive=True, start_folder="/"):
"""Lists all files of a given dataset. You can specifiy a start_folder other than "/".
By default, all directories and their containing files are listed recursively. You can
turn off this option by setting recursive=False.
"""
"method": "listFilesForDataSet",
"params": [
self.permId,
"id": "1"
self.data["dataStore"]["downloadUrl"] + '/datastore_server/rmi-dss-api-v1.json',
json.dumps(request),
verify=self.openbis.verify_certificates
)
data = resp.json()
if 'error' in data:
raise ValueError('Error from openBIS: ' + data['error']['message'])
elif 'result' in data:
return data['result']
Swen Vermeul
committed
raise ValueError('request to openBIS did not return either result nor error')
Swen Vermeul
committed
raise ValueError('internal error while performing post request')
def _generate_plugin_request(self, dss):
"""generates a request to activate the dataset-uploader ingestion plugin to
register our files as a new dataset
"""
sample_identifier = None
if self.sample is not None:
sample_identifier = self.sample.identifier
experiment_identifier = None
if self.experiment is not None:
experiment_identifier = self.experiment.identifier
parentIds = self.parents
dataset_type = self.type.code
properties = self.props.all_nonempty()
"method": "createReportFromAggregationService",
"params": [
self.openbis.token,
dss,
PYBIS_PLUGIN,
{
"method" : "insertDataSet",
"sampleIdentifier" : sample_identifier,
"experimentIdentifier" : experiment_identifier,
"dataSetType" : dataset_type,
"folderName" : self.folder,
"fileNames" : self.files,
"isZipDirectoryUpload" : False,
"properties" : properties,
"parentIdentifiers": parentIds
def save(self):
if self.is_new:
if self.files is None or len(self.files) == 0:
raise ValueError('Cannot register a dataset without a file. Please provide at least one file')
if self.sample is None and self.experiment is None:
raise ValueError('A DataSet must be either connected to a Sample or an Experiment')
# upload the data to the user session workspace
datastores = self.openbis.get_datastores()
self.openbis.upload_files(
datastore_url= datastores['downloadUrl'][0],
files=self.files,
folder='',
wait_until_finished=True
)
# activate the ingestion plugin, as soon as the data is uploaded
request = self._generate_plugin_request(dss=datastores['code'][0])
resp = self.openbis._post_request(self.openbis.reg_v1, request)
if resp['rows'][0][0]['value'] == 'OK':
permId = resp['rows'][0][2]['value']
if permId is None or permId == '':
self.__dict__['is_new'] = False
print("DataSet successfully created. Because you connected to an openBIS version older than 16.05.04, you cannot update the object.")
else:
new_dataset_data = self.openbis.get_dataset(permId, only_data=True)
self._set_data(new_dataset_data)
print("DataSet successfully created.")
else:
raise ValueError('Error while creating the DataSet: ' + resp['rows'][0][1]['value'])
self.__dict__['_is_new'] = False
else:
request = self._up_attrs()
props = self.p._all_props()
request["params"][1][0]["properties"] = props
request["params"][1][0].pop('parentIds')
request["params"][1][0].pop('childIds')
self.openbis._post_request(self.openbis.as_v3, request)
print("DataSet successfully updated.")
class AttrHolder():
""" General class for both samples and experiments that hold all common attributes, such as:
- space
- experiment (sample)
- samples (experiment)
- parents (sample, dataset)
- children (sample, dataset)
- tags
"""
def __init__(self, openbis_obj, entity, type=None):
self.__dict__['_openbis'] = openbis_obj
self.__dict__['_entity'] = entity
if type is not None:
self.__dict__['_type'] = type.data
self.__dict__['_allowed_attrs'] = _definitions(entity)['attrs']
self.__dict__['_identifier'] = None
self.__dict__['_is_new'] = True
def __call__(self, data):
"""This internal method is invoked when an existing object is loaded.
Instead of invoking a special method we «call» the object with the data
self(data)
which automatically invokes this method.
Since the data comes from openBIS, we do not have to check it (hence the
self.__dict__ statements to prevent invoking the __setattr__ method)
Internally data is stored with an underscore, e.g.
sample._space --> { '@id': 4,
'@type': 'as.dto.space.id.SpacePermId',
'permId': 'MATERIALS' }
but when fetching the attribute without the underscore, we only return
the relevant data for the user:
sample.space --> 'MATERIALS'
"""
self.__dict__['_is_new'] = False
for attr in self._allowed_attrs:
if attr in ["code", "permId", "identifier",
"type", "container", "components"]:
self.__dict__['_' + attr] = data.get(attr, None)
elif attr in ["space"]:
d = data.get(attr, None)
if d is not None:
d = d['permId']
self.__dict__['_' + attr] = d
elif attr in ["sample", "experiment", "project"]:
d = data.get(attr, None)
if d is not None:
d = d['identifier']
self.__dict__['_' + attr] = d
elif attr in ["parents", "children", "samples"]:
self.__dict__['_' + attr] = []
for item in data[attr]:
if 'identifier' in item:
self.__dict__['_' + attr].append(item['identifier'])
elif 'permId' in item:
self.__dict__['_' + attr].append(item['permId'])
elif attr in ["tags"]:
tags = []
for item in data[attr]:
tags.append({
"code": item['code'],
"@type": "as.dto.tag.id.TagCode"
})
self.__dict__['_tags'] = tags
self.__dict__['_prev_tags'] = copy.deepcopy(tags)
else:
self.__dict__['_' + attr] = data.get(attr, None)
def _new_attrs(self, method_name=None):
"""Returns the Python-equivalent JSON request when a new object is created.