"registrator": { "@type": "as.dto.person.fetchoptions.PersonFetchOptions" },
"modifier": { "@type": "as.dto.person.fetchoptions.PersonFetchOptions" },
"experiments": { "@type": "as.dto.experiment.fetchoptions.ExperimentFetchOptions", },
"space": { "@type": "as.dto.space.fetchoptions.SpaceFetchOptions" },
"@type": "as.dto.project.fetchoptions.ProjectFetchOptions"
}
request = {
"method": "searchProjects",
"params": [ self.token,
criteria,
options,
],
}
resp = self._post_request(self.as_v3, request)
if resp is not None:
objects = resp['objects']
parse_jackson(objects)
projects = DataFrame(objects)
            if len(projects) == 0:
                raise ValueError("No projects found!")
            projects['registrationDate'] = projects['registrationDate'].map(format_timestamp)
            projects['modificationDate'] = projects['modificationDate'].map(format_timestamp)
projects['registrator'] = projects['registrator'].map(extract_person)
projects['modifier'] = projects['modifier'].map(extract_person)
projects['permid'] = projects['permId'].map(extract_permid)
projects['identifier'] = projects['identifier'].map(extract_identifier)
projects['space'] = projects['space'].map(extract_code)
            pros = projects[['code', 'space', 'registrator', 'registrationDate',
                             'modifier', 'modificationDate', 'permid', 'identifier']]
return Things(self, 'project', pros, 'identifier')
else:
raise ValueError("No projects found!")
def _create_get_request(self, method_name, entity_type, permids, options):
if not isinstance(permids, list):
permids = [permids]
type = "as.dto.{}.id.{}".format(entity_type.lower(), entity_type.capitalize())
search_params = []
for permid in permids:
# decide if we got a permId or an identifier
match = re.match('/', permid)
if match:
search_params.append(
{ "identifier" : permid, "@type" : type + 'Identifier' }
)
else:
search_params.append(
{ "permId" : permid, "@type": type + 'PermId' }
)
fo = {}
for option in options:
fo[option] = fetch_option[option]
request = {
"method": method_name,
"params": [
self.token,
search_params,
fo
],
}
return request
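
    # Sketch of the discrimination rule above (hypothetical values): anything
    # starting with '/' is treated as an identifier, everything else as a permId:
    #
    #   self._create_get_request('getProjects', 'project', '/MY_SPACE/MY_PROJECT', [])
    #   # -> search param: { "identifier": "/MY_SPACE/MY_PROJECT",
    #   #                    "@type": "as.dto.project.id.ProjectIdentifier" }
    #
    #   self._create_get_request('getProjects', 'project', '20160101000000000-123', [])
    #   # -> search param: { "permId": "20160101000000000-123",
    #   #                    "@type": "as.dto.project.id.ProjectPermId" }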
def get_project(self, projectId):
request = self._create_get_request('getProjects', 'project', projectId, ['attachments'])
resp = self._post_request(self.as_v3, request)
return resp
def new_project(self, space_code, code, description, leaderId):
request = {
"method": "createProjects",
"params": [
self.token,
[
{
"code": code,
"spaceId": {
"permId": space_code,
"@type": "as.dto.space.id.SpacePermId"
},
"@type": "as.dto.project.create.ProjectCreation",
"description": description,
"leaderId": leaderId,
"attachments": None
}
]
],
}
resp = self._post_request(self.as_v3, request)
return resp
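
    # Usage sketch (hypothetical space and project codes):
    #
    #   resp = o.new_project(
    #       space_code='MY_SPACE',
    #       code='MY_PROJECT',
    #       description='an example project',
    #       leaderId=None,
    #   )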
def get_sample_types(self):
""" Returns a list of all available sample types
return self._get_types_of("searchSampleTypes", "Sample", ["generatedCodePrefix"])
def get_experiment_types(self):
""" Returns a list of all available experiment types
"""
return self._get_types_of("searchExperimentTypes", "Experiment")
def get_material_types(self):
""" Returns a list of all available material types
"""
return self._get_types_of("searchMaterialTypes", "Material")
""" Returns a list (DataFrame object) of all currently available dataset types
"""
return self._get_types_of("searchDataSetTypes", "DataSet")
def get_vocabulary(self):
""" Returns a DataFrame of all vocabulary terms available
fetch_options = {
"vocabulary" : { "@type" : "as.dto.vocabulary.fetchoptions.VocabularyFetchOptions" },
"@type": "as.dto.vocabulary.fetchoptions.VocabularyTermFetchOptions"
}
request = {
"method": "searchVocabularyTerms",
"params": [ self.token, {}, fetch_options ]
}
resp = self._post_request(self.as_v3, request)
objects = DataFrame(resp['objects'])
objects['registrationDate'] = objects['registrationDate'].map(format_timestamp)
objects['modificationDate'] = objects['modificationDate'].map(format_timestamp)
return objects[['code', 'description', 'registrationDate', 'modificationDate']]
def get_tags(self):
""" Returns a DataFrame of all
"""
request = {
"method": "searchTags",
"params": [ self.token, {}, {} ]
}
resp = self._post_request(self.as_v3, request)
objects = DataFrame(resp['objects'])
objects['registrationDate'] = objects['registrationDate'].map(format_timestamp)
return objects[['code', 'registrationDate']]
def _get_types_of(self, method_name, entity_type, additional_attributes=[]):
""" Returns a list of all available experiment types
"""
attributes = ['code', 'description', *additional_attributes]
fetch_options = {}
if entity_type is not None:
fetch_options = {
"propertyAssignments" : {
"@type" : "as.dto.property.fetchoptions.PropertyAssignmentFetchOptions"
},
"@type": "as.dto.{}.fetchoptions.{}TypeFetchOptions".format(entity_type.lower(), entity_type)
}
attributes.append('propertyAssignments')
"params": [ self.token, {}, fetch_options ],
}
resp = self._post_request(self.as_v3, request)
parse_jackson(resp)
if len(resp['objects']) >= 1:
types = DataFrame(resp['objects'])
if 'propertyAssignments' in fetch_options:
                types['propertyAssignments'] = types['propertyAssignments'].map(extract_property_assignments)
            return types[attributes]
def is_session_active(self):
""" checks whether a session is still active. Returns true or false.
"""
return self.is_token_valid(self.token)
def is_token_valid(self, token=None):
"""Check if the connection to openBIS is valid.
This method is useful to check if a token is still valid or if it has timed out,
requiring the user to login again.
:return: Return True if the token is valid, False if it is not valid.
"""
if token is None:
token = self.token
if token is None:
return False
        request = {
            "method": "isSessionActive",
            "params": [ token ],
        }
        resp = self._post_request(self.as_v1, request)
        return resp
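
    # Usage sketch: an expired or foreign token simply yields False rather
    # than raising, so this can guard a re-login (hypothetical credentials):
    #
    #   if not o.is_token_valid():
    #       o.login('username', 'password')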
"""fetch a dataset and some metadata attached to it:
- properties
- sample
- parents
- children
- containers
- dataStore
- physicalData
- linkedData
:return: a DataSet object
"""
criteria = [{
"permId": permid,
"@type": "as.dto.dataset.id.DataSetPermId"
}]
fetchopts = {
"parents": { "@type": "as.dto.dataset.fetchoptions.DataSetFetchOptions" },
"children": { "@type": "as.dto.dataset.fetchoptions.DataSetFetchOptions" },
"containers": { "@type": "as.dto.dataset.fetchoptions.DataSetFetchOptions" },
"@type": "as.dto.dataset.fetchoptions.DataSetFetchOptions",
}
for option in ['tags', 'properties', 'dataStore', 'physicalData', 'linkedData',
'experiment', 'sample']:
fetchopts[option] = fetch_option[option]
        request = {
            "method": "getDataSets",
            "params": [ self.token,
                criteria,
                fetchopts,
            ],
        }
        resp = self._post_request(self.as_v3, request)
if resp is not None:
for permid in resp:
return DataSet(self, resp[permid])
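
    # Usage sketch (hypothetical permId):
    #
    #   ds = o.get_dataset('20160719143426517-259')
    #   ds.file_list()    # all file paths inside the dataset
    #   ds.download()     # fetch them to ./<hostname>/<permId>/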
def get_sample(self, sample_ident):
"""Retrieve metadata for the sample.
Get metadata for the sample and any directly connected parents of the sample to allow access
to the same information visible in the ELN UI. The metadata will be on the file system.
        :param sample_ident: a sample identifier or permId of the sample to retrieve.
"""
        search_request = search_request_for_identifier(sample_ident, 'sample')

        fetch_options = {
            "type": {
"@type": "as.dto.sample.fetchoptions.SampleTypeFetchOptions"
},
"parents": {
"@type": "as.dto.sample.fetchoptions.SampleFetchOptions",
"properties": {
"@type": "as.dto.property.fetchoptions.PropertyFetchOptions"
},
"children": {
"@type": "as.dto.sample.fetchoptions.SampleFetchOptions",
"properties": {
"@type": "as.dto.property.fetchoptions.PropertyFetchOptions"
}
},
"dataSets": {
"@type": "as.dto.dataset.fetchoptions.DataSetFetchOptions",
"properties": {
"@type": "as.dto.property.fetchoptions.PropertyFetchOptions"
},
"type": {
"@type": "as.dto.dataset.fetchoptions.DataSetTypeFetchOptions"
},
},
"properties": fetch_option['properties'],
"registrator": fetch_option['registrator'],
"tags": fetch_option['tags'],
}
sample_request = {
"method": "getSamples",
"params": [
self.token,
                [ search_request ],
                fetch_options
            ],
        }
        resp = self._post_request(self.as_v3, sample_request)
parse_jackson(resp)
if resp is None or len(resp) == 0:
raise ValueError('no such sample found: '+sample_ident)
else:
for sample_ident in resp:
return Sample(self, resp[sample_ident])
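
    # Usage sketch: both forms of reference should work (hypothetical values):
    #
    #   s = o.get_sample('/MY_SPACE/MY_SAMPLE')     # by identifier
    #   s = o.get_sample('20160706001644827-208')   # by permId
    #   s.properties, s.tags                        # parsed metadata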
def new_space(self, name, description=None):
""" Creates a new space in the openBIS instance. Returns a list of all spaces
"""
request = {
"method": "createSpaces",
"params": [
self.token,
[ {
"@id": 0,
"code": name,
"description": description,
"@type": "as.dto.space.create.SpaceCreation"
} ]
],
}
resp = self._post_request(self.as_v3, request)
return self.get_spaces(refresh=True)
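
    # Usage sketch (hypothetical space name):
    #
    #   spaces = o.new_space('MY_NEW_SPACE', description='created from Python')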
def new_analysis(self, name, description=None, sample=None, dss_code=None, result_files=None,
notebook_files=None, parents=[]):
""" An analysis contains the Jupyter notebook file(s) and some result files.
Technically this method involves uploading files to the session workspace
and activating the dropbox aka dataset ingestion service "jupyter-uploader-api"
"""
if dss_code is None:
dss_code = self.get_datastores()['code'][0]
# if a sample identifier was given, use it as a string.
# if a sample object was given, take its identifier
# TODO: handle permId's
sample_identifier = None
if isinstance(sample, str):
sample_identifier = sample
else:
sample_identifier = sample.ident
folder = time.strftime('%Y-%m-%d_%H-%M-%S')
        datastore_url = self._get_dss_url(dss_code)
data_sets = []
if notebook_files is not None:
notebooks_folder = os.path.join(folder, 'notebook_files')
self.upload_files(
datastore_url = datastore_url,
files=notebook_files,
folder= notebooks_folder,
wait_until_finished=True
)
data_sets.append({
"dataSetType" : "JUPYTER_NOTEBOOk",
"sessionWorkspaceFolder": notebooks_folder,
"fileNames" : notebook_files,
"properties" : {}
})
if result_files is not None:
results_folder = os.path.join(folder, 'result_files')
self.upload_files(
datastore_url = datastore_url,
files=result_files,
folder=results_folder,
wait_until_finished=True
)
data_sets.append({
"dataSetType" : "JUPYTER_RESULT",
"sessionWorkspaceFolder" : results_folder,
"fileNames" : result_files,
"properties" : {}
})
request = {
"method": "createReportFromAggregationService",
"params": [
self.token,
dss_code,
                DROPBOX_PLUGIN,
                {
                    "sample" : { "identifier" : sample_identifier },
                    "containers" : [
                        {
                            "dataSetType" : "JUPYTER_CONTAINER",
                            "properties" : {
                                "NAME" : name,
                                "DESCRIPTION" : description
                            }
                        }
                    ],
                    "dataSets" : data_sets,
                    "parents" : parents,
                }
            ],
        }
resp = self._post_request(self.reg_v1, request)
try:
if resp['rows'][0][0]['value'] == 'OK':
return resp['rows'][0][1]['value']
except:
return resp
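
    # Usage sketch (hypothetical file names): the files end up in the session
    # workspace and are then ingested as a JUPYTER_CONTAINER dataset:
    #
    #   permid = o.new_analysis(
    #       name='my analysis',
    #       description='trial run',
    #       sample='/MY_SPACE/MY_SAMPLE',
    #       notebook_files=['analysis.ipynb'],
    #       result_files=['results.csv'],
    #   )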
def new_sample(self, sample_name, space_name, sample_type, tags=[], **kwargs):
""" Creates a new sample of a given sample type. sample_name, sample_type and space are
        mandatory arguments.
        """
if isinstance(tags, str):
tags = [tags]
tag_ids = []
for tag in tags:
tag_dict = {
"code":tag,
"@type":"as.dto.tag.id.TagCode"
}
tag_ids.append(tag_dict)
sample_create_request = {
"method":"createSamples",
"params":[
                self.token,
                [ {
                    "properties":{},
"typeId":{
"permId": sample_type,
"@type":"as.dto.entitytype.id.EntityTypePermId"
},
"code": sample_name,
"spaceId":{
"permId": space_name,
"@type":"as.dto.space.id.SpacePermId"
},
"tagIds":tag_ids,
"@type":"as.dto.sample.create.SampleCreation",
"experimentId":None,
"containerId":None,
"componentIds":None,
"parentIds":None,
"childIds":None,
"attachments":None,
"creationId":None,
"autoGeneratedCode":None
],
}
resp = self._post_request(self.as_v3, sample_create_request)
if 'permId' in resp[0]:
return self.get_sample(resp[0]['permId'])
else:
raise ValueError("error while trying to fetch sample from server: " + str(resp))
def _get_dss_url(self, dss_code=None):
""" internal method to get the downloadURL of a datastore.
"""
dss = self.get_datastores()
if dss_code is None:
return dss['downloadUrl'][0]
else:
return dss[dss['code'] == dss_code]['downloadUrl'][0]
def upload_files(self, datastore_url=None, files=None, folder=None, wait_until_finished=False):
        if datastore_url is None:
            datastore_url = self._get_dss_url()

        if files is None:
            raise ValueError("Please provide a filename.")
if folder is None:
# create a unique foldername
folder = time.strftime('%Y-%m-%d_%H-%M-%S')
if isinstance(files, str):
files = [files]
self.files = files
self.startByte = 0
self.endByte = 0
# define a queue to handle the upload threads
queue = DataSetUploadQueue()
real_files = []
for filename in files:
if os.path.isdir(filename):
real_files.extend([os.path.join(dp, f) for dp, dn, fn in os.walk(os.path.expanduser(filename)) for f in fn])
else:
real_files.append(os.path.join(filename))
        # compose the upload-URL and put URL and filename in the upload queue
        self.files_in_wsp = []
        for filename in real_files:
            file_in_wsp = os.path.join(folder, filename)
            self.files_in_wsp.append(file_in_wsp)
upload_url = (
datastore_url + '/session_workspace_file_upload'
+ '?filename=' + os.path.join(folder,filename)
+ '&id=1'
+ '&startByte=0&endByte=0'
+ '&sessionID=' + self.token
)
queue.put([upload_url, filename, self.verify_certificates])
# wait until all files have uploaded
if wait_until_finished:
queue.join()
# return files with full path in session workspace
return self.files_in_wsp
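
    # Usage sketch (hypothetical paths): directories are walked recursively,
    # plain files are uploaded as-is; the returned list holds the paths inside
    # the session workspace:
    #
    #   o.upload_files(files=['notebook.ipynb', 'data/'], wait_until_finished=True)
    #   # -> e.g. ['2016-10-01_12-00-00/notebook.ipynb', ...]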
class DataSetUploadQueue:
def __init__(self, workers=20):
# maximum files to be uploaded at once
self.upload_queue = Queue()
# define number of threads and start them
        for _ in range(workers):
t = Thread(target=self.upload_file)
t.daemon = True
t.start()
def put(self, things):
""" expects a list [url, filename] which is put into the upload queue
"""
self.upload_queue.put(things)
def join(self):
""" needs to be called if you want to wait for all uploads to be finished
"""
self.upload_queue.join()
def upload_file(self):
while True:
# get the next item in the queue
upload_url, filename, verify_certificates = self.upload_queue.get()
filesize = os.path.getsize(filename)
# upload the file to our DSS session workspace
with open(filename, 'rb') as f:
resp = requests.post(upload_url, data=f, verify=verify_certificates)
resp.raise_for_status()
data = resp.json()
assert filesize == int(data['size'])
# Tell the queue that we are done
self.upload_queue.task_done()
class DataSetDownloadQueue:
def __init__(self, workers=20):
# maximum files to be downloaded at once
self.download_queue = Queue()
# define number of threads
        for _ in range(workers):
t = Thread(target=self.download_file)
t.daemon = True
t.start()
def put(self, things):
""" expects a list [url, filename] which is put into the download queue
"""
self.download_queue.put(things)
def join(self):
""" needs to be called if you want to wait for all downloads to be finished
"""
self.download_queue.join()
def download_file(self):
while True:
url, filename, file_size, verify_certificates = self.download_queue.get()
# create the necessary directory structure if they don't exist yet
os.makedirs(os.path.dirname(filename), exist_ok=True)
# request the file in streaming mode
r = requests.get(url, stream=True, verify=verify_certificates)
with open(filename, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
assert os.path.getsize(filename) == int(file_size)
self.download_queue.task_done()
class DataSet():
""" DataSet are openBIS objects that contain the actual files.
"""
    def __init__(self, openbis_obj, data):
        self.openbis = openbis_obj
        self.data = data
self.permid = data["code"]
self.permId = data["code"]
if data['physicalData'] is None:
self.shareId = None
self.location = None
else:
self.shareId = data['physicalData']['shareId']
self.location = data['physicalData']['location']
def _repr_html_(self):
html = """
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>attribute</th>
<th>value</th>
</tr>
</thead>
<tbody>
<tr> <th>permId</th> <td>{}</td> </tr>
<tr> <th>properties</th> <td>{}</td> </tr>
<tr> <th>tags</th> <td>{}</td> </tr>
</tbody>
</table>
"""
return html.format(self.permid, self.data['properties'], self.data['tags'])
def download(self, files=None, wait_until_finished=True, workers=10):
""" download the actual files and put them by default in the following folder:
__current_dir__/hostname/dataset_permid/
If no files are specified, all files of a given dataset are downloaded.
Files are usually downloaded in parallel, using 10 workers by default. If you want to wait until
all the files are downloaded, set the wait_until_finished option to True.
"""
        if files is None:
files = self.file_list()
elif isinstance(files, str):
files = [files]
base_url = self.data['dataStore']['downloadUrl'] + '/datastore_server/' + self.permid + '/'
queue = DataSetDownloadQueue(workers=workers)
# get file list and start download
for filename in files:
file_info = self.get_file_list(start_folder=filename)
file_size = file_info[0]['fileSize']
download_url = base_url + filename + '?sessionID=' + self.openbis.token
filename = os.path.join(self.openbis.hostname, self.permid, filename)
queue.put([download_url, filename, file_size, self.openbis.verify_certificates])
# wait until all files have downloaded
if wait_until_finished:
queue.join()
print("Files downloaded to: %s" % os.path.join(self.openbis.hostname, self.permid))
def get_parents(self):
return self.openbis.get_datasets(withChildren=self.permid)
def get_children(self):
return self.openbis.get_datasets(withParents=self.permid)
def file_list(self):
files = []
for file in self.get_file_list(recursive=True):
if file['isDirectory']:
pass
else:
files.append(file['pathInDataSet'])
return files
def get_files(self, start_folder='/'):
""" Returns a DataFrame of all files in this dataset
"""
def createRelativePath(pathInDataSet):
if self.shareId is None:
return ''
else:
return os.path.join(self.shareId, self.location, pathInDataSet)
files = self.get_file_list(start_folder=start_folder)
df = DataFrame(files)
df['relativePath'] = df['pathInDataSet'].map(createRelativePath)
df['crc32Checksum'] = df['crc32Checksum'].fillna(0.0).astype(int)
return df[['isDirectory', 'pathInDataSet', 'fileSize', 'crc32Checksum']]
def get_file_list(self, recursive=True, start_folder="/"):
""" Lists all files of a given dataset. You can specifiy a start_folder other than "/".
By default, all directories and their containing files are listed recursively. You can
turn off this option by setting recursive=False.
"""
request = {
"method" : "listFilesForDataSet",
"params" : [
self.openbis.token,
                self.permid,
                start_folder,
                recursive,
],
"id":"1"
}
self.data["dataStore"]["downloadUrl"] + '/datastore_server/rmi-dss-api-v1.json',
json.dumps(request),
verify=self.openbis.verify_certificates
)
        if resp.ok:
            data = resp.json()
            if 'error' in data:
                raise ValueError('Error from openBIS: ' + data['error'])
            elif 'result' in data:
                return data['result']
            else:
                raise ValueError('request to openBIS returned neither result nor error')
        else:
            raise ValueError('internal error while performing post request')
class Sample():
""" A Sample is one of the most commonly used objects in openBIS.
"""
def __init__(self, openbis_obj, data):
self.openbis = openbis_obj
self.data = data
self.permid = data['permId']['permId']
self.permId = data['permId']['permId']
self.identifier = data['identifier']['identifier']
self.ident = data['identifier']['identifier']
self.properties = data['properties']
self.tags = extract_tags(data['tags'])
    def delete(self, reason):
        self.openbis.delete_entity('sample', self.permId, reason)
    def get_datasets(self):
        objects = self.data['dataSets']
        parse_jackson(objects)
datasets = DataFrame(objects)
datasets['registrationDate'] = datasets['registrationDate'].map(format_timestamp)
datasets['properties'] = datasets['properties'].map(extract_properties)
datasets['type'] = datasets['type'].map(extract_code)
return datasets
#return datasets[['code','properties', 'type', 'registrationDate']]
def get_parents(self):
return self.openbis.get_samples(withChildren=self.permId)
def get_children(self):
return self.openbis.get_samples(withParents=self.permId)
class Space(dict):
""" managing openBIS spaces
"""
def __init__(self, openbis_obj, *args, **kwargs):
super(Space, self).__init__(*args, **kwargs)
self.__dict__ = self
self.openbis = openbis_obj
""" Lists all samples in a given space. A pandas DataFrame object is returned.
"""
return self.openbis.get_samples(space=self.code, *args, **kwargs)
@property
def projects(self):
return self.openbis.get_projects(space=self.code)
def new_project(self, **kwargs):
return self.openbis.new_project(space=self.code, **kwargs)
@property
def experiments(self):
return self.openbis.get_experiments(space=self.code)
class Things():
"""An object that contains a DataFrame object about an entity available in openBIS.
"""
def __init__(self, openbis_obj, what, df, identifier_name='code'):
self.openbis = openbis_obj
self.what = what
self.df = df
self.identifier_name = identifier_name
def _repr_html_(self):
return self.df._repr_html_()
def __getitem__(self, key):
if self.df is not None and len(self.df) > 0:
row = None
if isinstance(key, int):
# get thing by rowid
row = self.df.loc[[key]]
elif isinstance(key, list):
# treat it as a normal dataframe
return self.df[key]
else:
# get thing by code
row = self.df[self.df[self.identifier_name]==key.upper()]
if row is not None:
# invoke the openbis.get_what() method
return getattr(self.openbis, 'get_'+self.what)(row[self.identifier_name].values[0])
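
    # A Things object behaves like a small hybrid of list and DataFrame
    # (hypothetical codes):
    #
    #   things[0]             # by row id   -> fetches the full object
    #   things['MY_CODE']     # by code     -> fetches the full object
    #   things[['code']]      # column list -> plain DataFrame slice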
class Experiment():
""" managing openBIS experiments
"""
def __init__(self, openbis_obj, data):
        self.openbis = openbis_obj
self.permid = data['permId']['permId']
self.permId = data['permId']['permId']
self.identifier = data['identifier']['identifier']
self.properties = data['properties']
self.tags = extract_tags(data['tags'])
self.attachments = extract_attachments(data['attachments'])
self.project = data['project']['code']
self.data = data
def __setitem__(self, key, value):
self.openbis.update_experiment(self.permid, key, value)
def set_properties(self, properties):
self.openbis.update_experiment(self.permid, properties=properties)
def set_tags(self, tags):
tagIds = _tagIds_for_tags(tags, 'Set')
self.openbis.update_experiment(self.permid, tagIds=tagIds)
def add_tags(self, tags):
tagIds = _tagIds_for_tags(tags, 'Add')
self.openbis.update_experiment(self.permid, tagIds=tagIds)
def del_tags(self, tags):
tagIds = _tagIds_for_tags(tags, 'Remove')
self.openbis.update_experiment(self.permid, tagIds=tagIds)
def delete(self, reason):
self.openbis.delete_entity('experiment', self.permid, reason)
def _repr_html_(self):
html = """
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th>attribute</th>
<th>value</th>
</tr>
</thead>
<tbody>
<tr> <th>permId</th> <td>{}</td> </tr>
<tr> <th>identifier</th> <td>{}</td> </tr>
<tr> <th>project</th> <td>{}</td> </tr>
<tr> <th>properties</th> <td>{}</td> </tr>
<tr> <th>tags</th> <td>{}</td> </tr>
<tr> <th>attachments</th> <td>{}</td> </tr>
</tbody>
</table>
"""
return html.format(self.permid, self.identifier, self.project, self.properties, self.tags, self.attachments)
data["identifier"] = self.identifier
data["permid"] = self.permid
data["properties"] = self.properties
data["tags"] = self.tags