diff --git a/api-data-store-server-java/src/main/java/ch/ethz/sis/afsapi/api/OperationsAPI.java b/api-data-store-server-java/src/main/java/ch/ethz/sis/afsapi/api/OperationsAPI.java index 36e78ca56be4d74e189e02ed08e97847762d3851..2afacb9b37e89a9906359c19572c2136bf55d45c 100644 --- a/api-data-store-server-java/src/main/java/ch/ethz/sis/afsapi/api/OperationsAPI.java +++ b/api-data-store-server-java/src/main/java/ch/ethz/sis/afsapi/api/OperationsAPI.java @@ -34,7 +34,7 @@ public interface OperationsAPI @NonNull Integer limit) throws Exception; @NonNull - Boolean write(@NonNull String owner, @NonNull String destination, @NonNull Long offset, + Boolean write(@NonNull String owner, @NonNull String source, @NonNull Long offset, @NonNull byte[] data, @NonNull byte[] md5Hash) throws Exception; @NonNull diff --git a/api-openbis-python3-pybis/src/python/CHANGELOG.md b/api-openbis-python3-pybis/src/python/CHANGELOG.md index 18e4e5fe0deb218daad4945a68e33c0cc24581b4..a9f158e7d26ac87d28b3ea5b93a41f223894b4a5 100644 --- a/api-openbis-python3-pybis/src/python/CHANGELOG.md +++ b/api-openbis-python3-pybis/src/python/CHANGELOG.md @@ -1,10 +1,14 @@ +## Changes with pybis-1.35.7 + +- Improvements to fast download scheme + ## Changes with pybis-1.35.6 - Added metaData attribute handling for sample, sampleType, experiment, experimentType, dataset, datasetType - Fixed property assignment to a newly created sample type. - Updated docs. - Fixed sample.del_children() method. -- Fixed metaData attributed assignment case. +- Fixed metaData attribute assignment case. ## Changes with pybis-1.35.5 diff --git a/api-openbis-python3-pybis/src/python/pybis/__init__.py b/api-openbis-python3-pybis/src/python/pybis/__init__.py index bcabda6fdf47bc3d23702531a5d2e1dbe3fc45a2..541a460f3a332849550b70a18d33ce5152c30a04 100644 --- a/api-openbis-python3-pybis/src/python/pybis/__init__.py +++ b/api-openbis-python3-pybis/src/python/pybis/__init__.py @@ -15,7 +15,7 @@ name = "pybis" __author__ = "ID SIS • ETH Zürich" __email__ = "openbis-support@id.ethz.ch" -__version__ = "1.35.6rc4" +__version__ = "1.35.7rc3" from . 
import pybis from .pybis import DataSet diff --git a/api-openbis-python3-pybis/src/python/pybis/fast_download.py b/api-openbis-python3-pybis/src/python/pybis/fast_download.py index b907bb91a3d3e5315dd5cb83850c51af1af037e4..074c23d72a54d6ebfb963ac53be025fd68f5639c 100644 --- a/api-openbis-python3-pybis/src/python/pybis/fast_download.py +++ b/api-openbis-python3-pybis/src/python/pybis/fast_download.py @@ -29,6 +29,7 @@ import binascii import functools import json import os +import time from pathlib import Path from threading import Lock, Thread from urllib.parse import urljoin @@ -115,6 +116,11 @@ def deserialize_chunk(byte_array): 'invalid_reason': "" } + if len(byte_array) == 0: + result['invalid'] = True + result['invalid_reason'] = "HEADER" + return result + start, end = 0, sequence_number_bytes result['sequence_number'] = int.from_bytes(byte_array[start:end], "big") start, end = end, end + download_item_id_length_bytes @@ -173,6 +179,10 @@ class AtomicChecker: with self._lock: self._max += 1 + def break_count(self): + with self._lock: + self._max = 0 + def remove_value(self, value): with self._lock: if value in self._set: @@ -182,6 +192,13 @@ class AtomicChecker: return self._set +def _get_json(response): + try: + return True, response.json() + except: + return False, response + + class DownloadThread(Thread): """Helper class defining single stream download""" @@ -206,6 +223,7 @@ class DownloadThread(Thread): downloadSessionId=self.download_session_id, numberOfChunks=self.number_of_chunks, downloadStreamId=self.stream_id) + retry_counter = 0 while self.counter.should_continue(): try: download_response = self.session.post(self.download_url, @@ -214,20 +232,36 @@ class DownloadThread(Thread): if download_response.ok is True: data = deserialize_chunk(download_response.content) if data['invalid'] is True: - print(f"Invalid checksum received. Retrying package") - if data['invalid_reason'] == "PAYLOAD": - sequence_number = data['sequence_number'] - if repeated_chunks.get(sequence_number, 0) >= DOWNLOAD_RETRIES_COUNT: - raise ValueError( - "Received incorrect payload multiple times. Aborting.") - repeated_chunks[sequence_number] = repeated_chunks.get(sequence_number, - 0) + 1 - queue_chunks(self.session, self.download_url, - self.download_session_id, - [f"{sequence_number}:{sequence_number}"], - self.verify_certificates) - self.counter.repeat_call() # queue additional download chunk run + is_json, response = _get_json(download_response) + if is_json: + if 'retriable' in response and response['retriable'] is False: + self.counter.break_count() + raise ValueError(response["error"]) + else: + if data['invalid_reason'] == "PAYLOAD": + sequence_number = data['sequence_number'] + if repeated_chunks.get(sequence_number, 0) >= DOWNLOAD_RETRIES_COUNT: + self.counter.break_count() + raise ValueError( + "Received incorrect payload multiple times. 
Aborting.") + repeated_chunks[sequence_number] = repeated_chunks.get(sequence_number, + 0) + 1 + queue_chunks(self.session, self.download_url, + self.download_session_id, + [f"{sequence_number}:{sequence_number}"], + self.verify_certificates) + self.counter.repeat_call() # queue additional download chunk run + + if retry_counter >= REQUEST_RETRIES_COUNT: + self.counter.break_count() + raise ValueError("Consecutive download calls to the server failed.") + + # Exponential backoff for the consecutive failures + time.sleep(2 ** retry_counter) + retry_counter += 1 + else: + retry_counter = 0 sequence_number = data['sequence_number'] self.save_to_file(data) self.counter.remove_value(sequence_number) @@ -322,35 +356,26 @@ class FastDownload: start_session_params) download_session_id = start_download_session['downloadSessionId'] - try: - # Step 3 - Put files into fileserver download queue - ranges = start_download_session['ranges'] - self._queue_all_files(download_url, download_session_id, ranges) + # Step 3 - Put files into fileserver download queue - # Step 4 - Download files in chunks + ranges = start_download_session['ranges'] + self._queue_all_files(download_url, download_session_id, ranges) - session_stream_ids = list(start_download_session['streamIds']) + # Step 4 & 5 - Download files in chunks and close connection - exception_list = [] - thread = Thread(target=self._download_step, - args=(download_url, download_session_id, session_stream_ids, ranges, - exception_list)) - thread.start() + session_stream_ids = list(start_download_session['streamIds']) - if self.wait_until_finished is True: - thread.join() - if exception_list: - raise exception_list[0] - finally: - # Step 5 - Close the session - finish_download_session_params = make_fileserver_body_params( - method='finishDownloadSession', - downloadSessionId=download_session_id) + exception_list = [] + thread = Thread(target=self._download_step, + args=(download_url, download_session_id, session_stream_ids, ranges, + exception_list)) + thread.start() - self.session.post(download_url, - data=json.dumps(finish_download_session_params), - verify=self.verify_certificates) + if self.wait_until_finished is True: + thread.join() + if exception_list: + raise exception_list[0] return self.destination @@ -408,28 +433,43 @@ class FastDownload: chunks_to_download = set(range(min_chunk, max_chunk + 1)) counter = 1 - while True: # each iteration will create threads for streams - checker = AtomicChecker(chunks_to_download) - streams = [ - DownloadThread(self.session, download_url, download_session_id, stream_id, checker, - self.verify_certificates, self.create_default_folders, - self.destination) for stream_id in session_stream_ids] - - for thread in streams: - thread.start() - for thread in streams: - thread.join() - - if chunks_to_download == set(): # if there are no more chunks to download - break - else: - if counter >= DOWNLOAD_RETRIES_COUNT: - print(f"Reached maximum retry count:{counter}. Aborting.") - exception_list += [ - ValueError(f"Reached maximum retry count:{counter}. 
Aborting.")] + try: + while True: # each iteration will create threads for streams + checker = AtomicChecker(chunks_to_download) + streams = [ + DownloadThread(self.session, download_url, download_session_id, stream_id, checker, + self.verify_certificates, self.create_default_folders, + self.destination) for stream_id in session_stream_ids] + + for thread in streams: + thread.start() + for thread in streams: + thread.join() + + if chunks_to_download == set(): # if there are no more chunks to download break - counter += 1 - # queue chunks that we - queue_chunks(self.session, download_url, download_session_id, - [f"{x}:{x}" for x in chunks_to_download], - self.verify_certificates) + else: + if counter >= DOWNLOAD_RETRIES_COUNT: + print(f"Reached maximum retry count:{counter}. Aborting.") + exception_list += [ + ValueError(f"Reached maximum retry count:{counter}. Aborting.")] + break + exceptions = [stream.exc for stream in streams if stream.exc is not None] + if exceptions: + print(f"Download failed with message: {exceptions[0]}") + exception_list += exceptions + break + counter += 1 + # queue chunks that failed to download in the previous pass + queue_chunks(self.session, download_url, download_session_id, + [f"{x}:{x}" for x in chunks_to_download], + self.verify_certificates) + finally: + # Step 5 - Close the session + finish_download_session_params = make_fileserver_body_params( + method='finishDownloadSession', + downloadSessionId=download_session_id) + + self.session.post(download_url, + data=json.dumps(finish_download_session_params), + verify=self.verify_certificates) diff --git a/api-openbis-python3-pybis/src/python/setup.cfg b/api-openbis-python3-pybis/src/python/setup.cfg index 6f15cd8d36ca9ea955c408a4a5c717ed7211748e..6feadb21ce8091dac2e2c46abc221ca37211f407 100644 --- a/api-openbis-python3-pybis/src/python/setup.cfg +++ b/api-openbis-python3-pybis/src/python/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = PyBIS -version = 1.35.6rc4 +version = 1.35.7rc3 author = ID SIS • ETH Zürich author_email = openbis-support@id.ethz.ch license = Apache Software License Version 2.0 diff --git a/api-openbis-python3-pybis/src/python/setup.py b/api-openbis-python3-pybis/src/python/setup.py index 2679e6371c8230e1a2b2bb6e4e577d0a0a10aa97..7c625170c0351777a72bcbbaef0fee8b94ada21d 100644 --- a/api-openbis-python3-pybis/src/python/setup.py +++ b/api-openbis-python3-pybis/src/python/setup.py @@ -26,7 +26,7 @@ with open("README.md", "r", encoding="utf-8") as fh: setup( name="PyBIS", - version="1.35.6rc4", + version="1.35.7rc3", author="ID SIS • ETH Zürich", author_email="openbis-support@id.ethz.ch", description="openBIS connection and interaction, optimized for using with Jupyter", diff --git a/api-openbis-python3-pybis/src/python/tests/test_fastdownload.py b/api-openbis-python3-pybis/src/python/tests/test_fastdownload.py index 9ad2bbb63cf09a984664f519474cd1a7cb49fe21..0ada560b2e7fcc6fffa927dee87b0645bc94cf01 100644 --- a/api-openbis-python3-pybis/src/python/tests/test_fastdownload.py +++ b/api-openbis-python3-pybis/src/python/tests/test_fastdownload.py @@ -1,15 +1,17 @@ +import binascii import json import os +import time from http.server import BaseHTTPRequestHandler, HTTPServer from threading import Thread import pytest + from pybis.fast_download import FastDownload def get_download_response(sequence_number, perm_id, file, is_directory, offset, payload): - # binascii.crc32(byte_array[:end]) - import binascii + result = b'' result += sequence_number.to_bytes(4, "big") download_item_id = perm_id + "/" + file 
@@ -52,7 +54,7 @@ class MyServer(BaseHTTPRequestHandler): self.wfile.write(response) -def createFastDownloadSession(permId, files, download_url, wished_number_of_streams): +def create_fast_download_session(permId, files, download_url, wished_number_of_streams): return '''{ "jsonrpc": "2.0", "id": "2", "result": { "@type": "dss.dto.datasetfile.fastdownload.FastDownloadSession", "@id": 1, "downloadUrl": "''' + download_url + '''", @@ -66,7 +68,7 @@ def createFastDownloadSession(permId, files, download_url, wished_number_of_stre "wishedNumberOfStreams": ''' + wished_number_of_streams + ''' } } }''' -def startDownloadSession(ranges, wished_number_of_streams): +def start_download_session(ranges, wished_number_of_streams): return """{ "downloadSessionId": "72863f8d-1ed1-4795-a531-4d93a5081562", "ranges": { @@ -113,18 +115,18 @@ def run_around_tests(base_data): 'finishDownloadSession': "", 'counter': 0, 'parts': 10, - 'createFastDownloadSession': createFastDownloadSession(perm_id, - file, - download_url, - streams), - 'startDownloadSession': startDownloadSession(ranges, streams) + 'createFastDownloadSession': create_fast_download_session(perm_id, + file, + download_url, + streams), + 'startDownloadSession': start_download_session(ranges, streams) } MyServer.response_code = 200 yield temp_folder, download_url, streams, perm_id, file cleanup(temp_folder) -def test_download_fails_after_retry(run_around_tests): +def test_download_fails_after_retries(run_around_tests): temp_folder, download_url, streams, perm_id, file = run_around_tests def generate_download_response(): @@ -139,7 +141,7 @@ def test_download_fails_after_retry(run_around_tests): fast_download.download() assert False except ValueError as error: - assert str(error) == 'Reached maximum retry count:3. Aborting.' + assert str(error) == 'Consecutive download calls to the server failed.' 
def test_download_file(run_around_tests): @@ -179,6 +181,58 @@ def test_download_file(run_around_tests): assert expected_outcome == data +def test_download_file_wait_flag_disabled(run_around_tests): + temp_folder, download_url, streams, perm_id, file = run_around_tests + + def generate_download_response(): + parts = MyServer.next_response['parts'] + counter = MyServer.next_response['counter'] + payload_length = 10 + while counter < parts: + response = get_download_response(counter, perm_id, file, False, + counter * payload_length, + bytearray([counter] * payload_length)) + # Slow down responses to simulate download of a big file + time.sleep(0.1) + counter += 1 + MyServer.next_response['counter'] = counter % parts + yield response + + MyServer.next_response['download'] = generate_download_response() + + fast_download = FastDownload("", download_url, perm_id, file, str(temp_folder), + True, False, False, streams) + fast_download.download() + + # Verify that file has not been downloaded yet + downloaded_files = [ + os.path.join(dp, f) + for dp, dn, fn in os.walk(temp_folder) + for f in fn + ] + assert len(downloaded_files) == 0 + + # Wait for 2 seconds to finish download + time.sleep(2) + + # find file + downloaded_files = [ + os.path.join(dp, f) + for dp, dn, fn in os.walk(temp_folder) + for f in fn + ] + assert len(downloaded_files) == 1 + + assert downloaded_files[0].endswith(file) + import functools + expected_outcome = functools.reduce(lambda a, b: a + b, + [bytearray([x] * 10) for x in range(10)]) + with open(downloaded_files[0], 'rb') as fn: + data = fn.read() + assert len(data) == 100 + assert expected_outcome == data + + def test_download_file_starts_with_fail(run_around_tests): temp_folder, download_url, streams, perm_id, file = run_around_tests @@ -217,4 +271,120 @@ def test_download_file_starts_with_fail(run_around_tests): with open(downloaded_files[0], 'rb') as fn: data = fn.read() assert len(data) == 100 - assert expected_outcome == data \ No newline at end of file + assert expected_outcome == data + + +def test_download_fails_after_getting_java_exception(run_around_tests): + """ + Test that verifies that if non-retryable exception is thrown, + the whole download session is aborted. + """ + temp_folder, download_url, streams, perm_id, file = run_around_tests + + def generate_download_response(): + # First download fails with non-retryable exception + yield b'{"error":"Some server error message.","retriable":false}' + # Further responses are alright + MyServer.response_code = 200 + parts = MyServer.next_response['parts'] + counter = MyServer.next_response['counter'] + payload_length = 10 + while counter < parts: + response = get_download_response(counter, perm_id, file, False, + counter * payload_length, + bytearray([counter] * payload_length)) + counter += 1 + MyServer.next_response['counter'] = counter % parts + yield response + + MyServer.next_response['download'] = generate_download_response() + + fast_download = FastDownload("", download_url, perm_id, file, str(temp_folder), + True, True, False, streams) + try: + fast_download.download() + assert False + except ValueError as error: + assert str(error) == 'Some server error message.' + + +def test_download_passes_after_getting_java_exception(run_around_tests): + """ + Test that verifies that if retryable server exception is thrown, + the whole download retries and downloads the file. 
+ """ + temp_folder, download_url, streams, perm_id, file = run_around_tests + + def generate_download_response(): + # First download fails with non-retryable exception + yield b'{"error":"Some server error message.","retriable":true}' + # Further responses are alright + MyServer.response_code = 200 + parts = MyServer.next_response['parts'] + counter = MyServer.next_response['counter'] + payload_length = 10 + while counter < parts: + response = get_download_response(counter, perm_id, file, False, + counter * payload_length, + bytearray([counter] * payload_length)) + counter += 1 + MyServer.next_response['counter'] = counter % parts + yield response + + MyServer.next_response['download'] = generate_download_response() + + fast_download = FastDownload("", download_url, perm_id, file, str(temp_folder), + True, True, False, streams) + fast_download.download() + + downloaded_files = [ + os.path.join(dp, f) + for dp, dn, fn in os.walk(temp_folder) + for f in fn + ] + assert len(downloaded_files) == 1 + assert downloaded_files[0].endswith(file) + import functools + expected_outcome = functools.reduce(lambda a, b: a + b, + [bytearray([x] * 10) for x in range(10)]) + with open(downloaded_files[0], 'rb') as fn: + data = fn.read() + assert len(data) == 100 + assert expected_outcome == data + + +def test_download_file_payload_failure(run_around_tests): + temp_folder, download_url, streams, perm_id, file = run_around_tests + + def generate_download_response(): + parts = MyServer.next_response['parts'] + counter = MyServer.next_response['counter'] + payload_length = 10 + + fail_response = None + while counter < parts: + response = get_download_response(counter, perm_id, file, False, + counter * payload_length, + bytearray([counter] * payload_length)) + if counter == 0: + array = bytearray(response) + array[-8:] = bytearray([0]*8) + response = bytes(array) + fail_response = response + + counter += 1 + MyServer.next_response['counter'] = counter % parts + yield response + + while True: + yield fail_response + + MyServer.next_response['download'] = generate_download_response() + + fast_download = FastDownload("", download_url, perm_id, file, str(temp_folder), + True, True, False, streams) + try: + fast_download.download() + assert False + except ValueError as error: + assert str(error) == 'Received incorrect payload multiple times. Aborting.' 
diff --git a/docs/.readthedocs.yaml b/docs/.readthedocs.yaml index 424aa93a45285915c0f9e6a90181a51e71ff8690..ccfd63265fb5ad16c7b9f9699a3778e87736a846 100644 --- a/docs/.readthedocs.yaml +++ b/docs/.readthedocs.yaml @@ -10,16 +10,11 @@ build: os: ubuntu-22.04 tools: python: "3.11" - # You can also specify other tool versions: - urllib3: "1.26.15" - # rust: "1.64" - # golang: "1.19" # Build documentation in the docs/ directory with Sphinx sphinx: - configuration: docs/conf.py + configuration: docs/conf.py + python: - version: "3.11" - install: - - requirements: - docs/requirements.txt + install: + - requirements: docs/requirements.txt diff --git a/docs/.readthedocs.yaml.old b/docs/.readthedocs.yaml.old new file mode 100644 index 0000000000000000000000000000000000000000..200f5cf8ce8f5d4305d2951626d918129c33bcd4 --- /dev/null +++ b/docs/.readthedocs.yaml.old @@ -0,0 +1,25 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.11" + # You can also specify other tool versions: + #urllib3: "1.26.15" + # rust: "1.64" + # golang: "1.19" + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: docs/conf.py +python: + version: "3.11" + install: + - requirements: + docs/requirements.txt diff --git a/docs/index.rst b/docs/index.rst index 51fb97348c132827fb8883ed7141053dbe9b2169..7ac2bf3530f86c02a2396d3c2cc1b8ac8256ab11 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -11,24 +11,46 @@ The complete solution for managing your research data. General Users </user-documentation/general-users/index> General Admin Users </user-documentation/general-admin-users/index> + Advance Features </user-documentation/advance-features/index> + Legacy Advance Features </user-documentation/legacy-advance-features/index> + +.. toctree:: + :maxdepth: 4 + :hidden: + :caption: Software Developer Documentation + + Development Environment </software-developer-documentation/development-environment/index> + APIS </software-developer-documentation/apis/index> + Server-Side Extensions </software-developer-documentation/server-side-extensions/index> + Client-Side Extensions </software-developer-documentation/client-side-extensions/index> + Legacy Server-Side Extensions </software-developer-documentation/legacy-server-side-extensions/index> + +.. 
toctree:: + :maxdepth: 4 + :hidden: + :caption: System Admin Documentation + + Installation </system-admin-documentation/installation/index> + Docker Installation </system-admin-documentation/docker-installation/index> + Advanced Features </system-admin-documentation/advanced-features/index> User Documentation ^^^^^^^^^^^^^^^^^^ - :doc:`General Users </user-documentation/general-users/index>` - :doc:`General Admin Users </user-documentation/general-admin-users/index>` - - Advanced Features - - Legacy Advance Features + - :doc:`Advance Features </user-documentation/advance-features/index>` + - :doc:`Legacy Advance Features </user-documentation/legacy-advance-features/index>` Software Developer Documentation ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - - Development Environment - - APIS - - Server-side Extensions - - Client-side Extensions - - Legacy Server-side Extensions + - :doc:`Development Environment </software-developer-documentation/development-environment/index>` + - :doc:`APIS </software-developer-documentation/apis/index>` + - :doc:`Server-Side Extensions </software-developer-documentation/server-side-extensions/index>` + - :doc:`Client-Side Extensions </software-developer-documentation/client-side-extensions/index>` + - :doc:`Legacy Server-Side Extensions </software-developer-documentation/legacy-server-side-extensions/index>` System Admin Documentation ^^^^^^^^^^^^^^^^^^^^^^^^^^ - - Installation - - Docker Installation - - Advanced Features + - :doc:`Installation </system-admin-documentation/installation/index>` + - :doc:`Docker Installation </system-admin-documentation/docker-installation/index>` + - :doc:`Advanced Features </system-admin-documentation/advanced-features/index>` diff --git a/docs/requirements.txt b/docs/requirements.txt index ea132ee9ccba3793faea29a6831c06c94ff46ebe..3806bc5b8aea485710cdba7bfd581766be7ee17a 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1 +1,2 @@ -myst_parser==0.15.2 +docutils==0.20.1 +myst_parser==2.0.0 diff --git a/docs/software-developer-documentation/apis/img/139.png b/docs/software-developer-documentation/apis/img/139.png new file mode 100644 index 0000000000000000000000000000000000000000..6433c6d84a009db13702bdc723f9c8a039a8db43 Binary files /dev/null and b/docs/software-developer-documentation/apis/img/139.png differ diff --git a/docs/software-developer-documentation/apis/img/Python_install_windows.png b/docs/software-developer-documentation/apis/img/Python_install_windows.png new file mode 100644 index 0000000000000000000000000000000000000000..42656cbed26790563456aa2a0df72d12eb67164c Binary files /dev/null and b/docs/software-developer-documentation/apis/img/Python_install_windows.png differ diff --git a/docs/software-developer-documentation/apis/index.rst b/docs/software-developer-documentation/apis/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..68618580d1776b255f02bf798a1185929cb52620 --- /dev/null +++ b/docs/software-developer-documentation/apis/index.rst @@ -0,0 +1,10 @@ +APIS +==== + +.. 
toctree::
+   :maxdepth: 4
+
+   java-javascript-v3-api
+   python-v3-api
+   matlab-v3-api
+   personal-access-tokens
\ No newline at end of file
diff --git a/docs/software-developer-documentation/apis/java-javascript-v3-api.md b/docs/software-developer-documentation/apis/java-javascript-v3-api.md
new file mode 100644
index 0000000000000000000000000000000000000000..d871d05ca2f92c87b8ef38ecfd751b25b58ea0e5
--- /dev/null
+++ b/docs/software-developer-documentation/apis/java-javascript-v3-api.md
@@ -0,0 +1,4149 @@
+openBIS V3 API
+==============
+
+### I. Architecture
+
+openBIS consists of two main components: an Application Server and one
+or more Data Store Servers. The Application Server manages the system's
+metadata, while the Data Store Server(s) manage the file store(s). Each
+Data Store Server manages its own file store. Here we will refer to the
+Application Server as the "AS" and the Data Store Server as the "DSS."
+
+#### One AS, one or more DSS
+
+Why is there only one Application Server but multiple Data Store
+Servers? It is possible to have only one Data Store Server, but in a
+complex project there might be many labs using the same openBIS instance
+and therefore sharing the same metadata. Each lab might have its own
+Data Store Server to make file management easier and more efficient. The
+Data Store Servers run on separate Java virtual machines, which enables
+the files to be processed faster. It is also more efficient when the
+physical location of the Data Store Server is closer to the lab that is
+using it. Another reason is that the metadata tends to be relatively
+small in size, whereas the files occupy a large amount of space in the
+system.
+
+#### The Java API
+
+The Java V3 API consists of two interfaces:
+
+- ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi
+- ch.ethz.sis.openbis.generic.dssapi.v3.IDataStoreServerApi
+
+Please check our JavaDoc for more
+details: <https://openbis.ch/javadoc/20.10.x/javadoc-api-v3/index.html>
+
+All V3 API jars are packed in openBIS-API-V3-<VERSION>.zip, which
+is part of openBIS-clients-and-APIs-<VERSION>.zip (the latest
+version can be downloaded at [Sprint Releases](#) > Clients and APIs).
+
+#### The Javascript API
+
+The Javascript V3 API consists of a module hosted at
+<OPENBIS\_URL>/resources/api/v3/openbis.js, for instance
+http://localhost/openbis/resources/api/v3/openbis.js. Please check
+the openbis.js file itself for more details.
+
+### II. API Features
+
+#### Current Features - AS
+
+The current implementation of the V3 openBIS API contains the following
+features:
+
+- Creation: Create spaces, projects, experiments and experiment
+  types, samples and sample types, materials and material types,
+  vocabulary terms, tags
+- Associations: Associate spaces, projects, experiments, samples,
+  datasets and materials with each other
+- Tags: Add/Remove/Set tags for experiments, samples, datasets and
+  materials
+- Properties: Set properties for experiments, samples, datasets and
+  materials
+- Search: Search & get spaces, projects, experiments, samples,
+  datasets, materials, vocabulary terms, tags
+- Update: Update spaces, projects, experiments, samples, datasets,
+  materials, vocabulary terms, tags
+- Deletion: Delete spaces, projects, experiments, samples, datasets,
+  materials, vocabulary terms, tags
+- Authentication: Login as a user, login as another user, login as an
+  anonymous user
+- Transactional features: Performing multiple operations in one
+  transaction (with the executeOperations method)
+- Queries: Create/update/get/search/delete/execute queries
+- Generating codes/permids
+
+#### Current Features - DSS
+
+- Search data set files
+- Download data set files
+
+#### Missing/Planned Features
+
+The current implementation of the V3 openBIS API does not yet include
+the following features:
+
+- Management features: Managing data stores
+- Search features: Searching experiments having samples/datasets,
+  searching datasets (oldest, deleted, for archiving etc.)
+- Update features: Updating dataset share id, size, status, storage
+  confirmation, post-registration status
+
+### III. Accessing the API
+
+In order to use the V3 API you have to know the URL of the openBIS instance
+you want to connect to. Moreover, before calling any of the API methods
+you have to log in to the system to receive a sessionToken. All the login
+methods are part of the AS API. Once you have successfully authenticated in
+openBIS you can invoke the other methods of the API (at both AS and DSS). In
+each call you have to provide your sessionToken. When you have finished
+working with the API you should call the logout method to release all the
+resources related to your session.
+
+Note: If the openBIS instance you are connecting to uses SSL and does
+not have a real certificate (it is using the self-signed certificate
+that comes with openBIS), you need to tell the Java client to use the
+trust store that comes with openBIS. This can be done by setting the
+property javax.net.ssl.trustStore. Example:
+
+**Using openBIS trust store in Java clients**
+
+    java -Djavax.net.ssl.trustStore=/home/openbis/openbis/servers/openBIS-server/jetty/etc/openBIS.keystore -jar the-client.jar
+
+#### Connecting in Java
+
+**V3ConnectionExample.java**
+
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.Space;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.fetchoptions.SpaceFetchOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.search.SpaceSearchCriteria;
+    import ch.systemsx.cisd.common.spring.HttpInvokerUtils;
+
+    public class V3ConnectionExample
+    {
+        private static final String URL = "http://localhost:8888/openbis/openbis" + IApplicationServerApi.SERVICE_URL;
+
+        private static final int TIMEOUT = 10000;
+
+        public static void main(String[] args)
+        {
+            // get a reference to the AS API
+            IApplicationServerApi v3 = HttpInvokerUtils.createServiceStub(IApplicationServerApi.class, URL, TIMEOUT);
+
+            // login to obtain a session token
+            String sessionToken = v3.login("admin", "password");
+
+            // invoke other API methods using the session token, for instance search for spaces
+            SearchResult<Space> spaces = v3.searchSpaces(sessionToken, new SpaceSearchCriteria(), new SpaceFetchOptions());
+            System.out.println("Number of spaces: " + spaces.getObjects().size());
+
+            // logout to release the resources related to the session
+            v3.logout(sessionToken);
+        }
+    }
+
+#### Connecting in Javascript
+
+We have put a lot of effort into making the use of the API in Javascript and
+Java almost identical. The DTOs, which are a big part of the API, are
+exactly the same in both languages. The methods you can invoke via the
+Javascript and Java APIs are also exactly the same. This makes switching
+from Javascript to Java, or the other way round, very easy. Because of some
+major differences between Javascript and Java development, some things still
+had to be done a bit differently. But even then we tried to be conceptually
+consistent.
+
+**V3ConnectionExample.html**
+
+    <!DOCTYPE html>
+    <html>
+    <head>
+        <meta charset="utf-8">
+        <title>V3ConnectionExample</title>
+        <!--
+            These two js files, i.e. config.js and require.js, are the RequireJS configuration and the RequireJS library itself.
+            Please check http://requirejs.org/ for more details on how RequireJS makes loading dependencies in Javascript easier.
+        -->
+        <script type="text/javascript" src="http://localhost:8888/openbis/resources/api/v3/config.js"></script>
+        <script type="text/javascript" src="http://localhost:8888/openbis/resources/api/v3/require.js"></script>
+    </head>
+    <body>
+        <script>
+
+            // With the "require" call we asynchronously load the "openbis", "SpaceSearchCriteria" and "SpaceFetchOptions" classes that we will need for our example.
+            // The function passed as the second parameter of the require call is a callback that gets executed once the requested classes are loaded.
+            // In Javascript we work with exactly the same classes as in Java. For instance, the "ch.ethz.sis.openbis.generic.asapi.v3.dto.space.search.SpaceSearchCriteria"
+            // Java class and the "as/dto/space/search/SpaceSearchCriteria" Javascript class have exactly the same methods. In order to find a Javascript class name please
+            // check our Javadoc (https://openbis.ch/javadoc/20.10.x/javadoc-api-v3/index.html). The Javascript class name is defined in the @JsonObject annotation of each V3 API Java DTO.
+
+            require([ "openbis", "as/dto/space/search/SpaceSearchCriteria", "as/dto/space/fetchoptions/SpaceFetchOptions" ],
+                function(openbis, SpaceSearchCriteria, SpaceFetchOptions) {
+
+                    // get a reference to the AS API
+                    var v3 = new openbis();
+
+                    // login to obtain a session token (the token is automatically stored in the openbis object and will be used for all subsequent API calls)
+                    v3.login("admin", "password").done(function() {
+
+                        // invoke other API methods, for instance search for spaces
+                        v3.searchSpaces(new SpaceSearchCriteria(), new SpaceFetchOptions()).done(function(result) {
+
+                            alert("Number of spaces: " + result.getObjects().length);
+
+                            // logout to release the resources related to the session
+                            v3.logout();
+                        });
+                    });
+                });
+        </script>
+    </body>
+    </html>
+
+### IV. AS Methods
+
+The sections below describe how to use the different methods of the V3 API.
+Each section covers a group of similar methods. For instance, we have
+one section that describes the creation of entities. Even though the API
+provides methods for creating spaces, projects, experiments, samples,
+materials, vocabulary terms and tags, we concentrate here on the creation
+of samples. Samples are the most complex entity kind. Once you understand
+how the creation of samples works you will also know how to create the
+other kinds of entities, as all creation methods follow the same patterns.
+The same applies to other methods like updating, searching or getting
+entities. We will introduce them using the sample example.
+
+Each section is split into Java and Javascript subsections. We want
+to keep the Java and Javascript code examples close to each other so that
+you can easily see the similarities and differences in API usage between
+these two languages.
+
+NOTE: The following code examples assume that we have already got a
+reference to the V3 API and have already authenticated to get a
+session token. Moreover, in the Javascript examples we do not include the
+HTML page template, to keep them shorter and more readable. Please check
+the "Accessing the API" section for examples on how to get a reference
+to the V3 API, authenticate or build a simple HTML page.
+
+#### Login
+
+openBIS provides the following login methods:
+
+- login(user, password) - log in as a given user
+- loginAs(user, password, asUser) - log in on behalf of a different
+  user (e.g. I am an admin but I would like to see only the things user
+  "x" would normally see)
+- loginAsAnonymousUser() - log in as the anonymous user configured in the
+  AS service.properties
+
+All login methods return a session token if the provided parameters were
+correct. In case a given user does not exist or the provided password
+was incorrect, the login methods return null.
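+For comparison, the same login flow through pyBIS (the Python client
+updated in this change set) looks as follows. A minimal sketch, assuming
+an openBIS server at https://localhost:8443:
+
+    from pybis import Openbis
+
+    o = Openbis("https://localhost:8443", verify_certificates=False)
+    # the session token is returned and also stored on the Openbis
+    # object, so subsequent calls reuse it automatically
+    session_token = o.login("admin", "password")
+    print(session_token)
+    o.logout()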
+ +##### Example + +**V3LoginExample.java** + + public class V3LoginExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created (please check "Accessing the API" section for more details) +  + // login as a specific user + String sessionToken = v3.login("admin", "password"); + System.out.println(sessionToken); + + // login on behalf of a different user (I am an admin but I would like to see only things that some other user would normally see) + sessionToken = v3.loginAs("admin", "password", "someotheruser"); + System.out.println(sessionToken); + + // login as an anonymous user (anonymous user has to be configured in service.properties first) + sessionToken = v3.loginAsAnonymousUser(); + System.out.println(sessionToken); + } + } + +**V3LoginExample.html** + + <script> +  + // we assume here that v3 object has been already created (please check "Accessing the API" section for more details) +  + // login as a specific user + v3.login("admin", "password").done(function(sessionToken) { + alert(sessionToken); + + // login on behalf of a different user (I am an admin but I would like to see only things that some other user would normally see) + v3.loginAs("admin", "password", "someotheruser").done(function(sessionToken) { + alert(sessionToken); + + // login as an anonymous user (anonymous user has to be configured in service.properties first) + v3.loginAsAnonymousUser().done(function(sessionToken) { + alert(sessionToken); + }); + }); + }); + </script> + +#### Personal Access Tokens + +A personal access token (in short: PAT) can be thought of as a longer +lived session token which can be used for integrating openBIS with +external systems. If you would like to learn more about the idea behind +PATs please read: [Personal Access +Tokens](/pages/viewpage.action?pageId=122140993). 
+
+Example of how to create and use a PAT:
+
+    import java.util.Arrays;
+    import java.util.Date;
+    import java.util.List;
+    import java.util.Map;
+
+    import org.apache.commons.lang.time.DateUtils;
+
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.pat.PersonalAccessToken;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.pat.create.PersonalAccessTokenCreation;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.pat.fetchoptions.PersonalAccessTokenFetchOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.pat.id.IPersonalAccessTokenId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.pat.id.PersonalAccessTokenPermId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.fetchoptions.SpaceFetchOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.search.SpaceSearchCriteria;
+
+    public class V3PersonalAccessTokenExample
+    {
+        public static void main(String[] args)
+        {
+            // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+            PersonalAccessTokenCreation creation = new PersonalAccessTokenCreation();
+            creation.setSessionName("test session");
+            creation.setValidFromDate(new Date(System.currentTimeMillis() - DateUtils.MILLIS_PER_DAY));
+            creation.setValidToDate(new Date(System.currentTimeMillis() + DateUtils.MILLIS_PER_DAY));
+
+            // create and get the new PAT
+            List<PersonalAccessTokenPermId> ids = v3.createPersonalAccessTokens(sessionToken, Arrays.asList(creation));
+            Map<IPersonalAccessTokenId, PersonalAccessToken> map = v3.getPersonalAccessTokens(sessionToken, ids, new PersonalAccessTokenFetchOptions());
+            PersonalAccessToken pat = map.get(ids.get(0));
+
+            // use the new PAT to list spaces
+            v3.searchSpaces(pat.getHash(), new SpaceSearchCriteria(), new SpaceFetchOptions());
+        }
+    }
+
+#### Session Information
+
+openBIS provides a method to obtain the session information for an
+already logged-in user:
+
+##### Example
+
+**V3SessionInformationExample.java**
+
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.session.SessionInformation;
+
+    public class V3SessionInformationExample
+    {
+        public static void main(String[] args)
+        {
+            // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+            SessionInformation sessionInformation = v3.getSessionInformation(sessionToken);
+            System.out.println("User Name: " + sessionInformation.getUserName());
+            System.out.println("Home Group: " + sessionInformation.getHomeGroupCode());
+            System.out.println("Person: " + sessionInformation.getPerson());
+            System.out.println("Creator Person: " + sessionInformation.getCreatorPerson());
+        }
+    }
+
+#### Creating entities
+
+The methods for creating entities in the V3 API are called createSpaces,
+createProjects, createExperiments, createSamples, createMaterials,
+createVocabularyTerms and createTags. They all allow creating one or more
+entities at once by passing one or more entity creation objects (i.e.
+SpaceCreation, ProjectCreation, ExperimentCreation, SampleCreation,
+MaterialCreation, VocabularyTermCreation, TagCreation). All these
+methods return a list of the perm ids of the newly created entities.
+
+NOTE: Creating data sets via the V3 API is not available yet. The new V3
+dropboxes are planned but not implemented yet. Please use V2 dropboxes
+until the V3 version is out.
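+
+For comparison, the same kind of creation through pyBIS. A minimal
+sketch, assuming a logged-in `Openbis` instance `o` and that the type,
+space and experiment below already exist:
+
+    # identifiers are placeholders taken from the examples below
+    sample = o.new_sample(
+        type="MY_SAMPLE_TYPE_CODE",
+        code="MY_SAMPLE_CODE",
+        space="MY_SPACE_CODE",
+        experiment="/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE",
+    )
+    sample.save()  # the perm id is assigned by the server on save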
+ +##### Example + +**V3CreationExample.java** + + import java.util.List; + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.id.EntityTypePermId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentIdentifier; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.SampleCreation; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id.SamplePermId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.id.SpacePermId; + public class V3CreationExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) +  + SampleCreation sample = new SampleCreation(); + sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + sample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE")); + sample.setCode("MY_SAMPLE_CODE"); + + // you can also pass more than one creation object to create multiple entities at once + List<SamplePermId> permIds = v3.createSamples(sessionToken, Arrays.asList(sample)); + System.out.println("Perm ids: " + permIds);  + } + } + +**V3CreationExample.html** + + <script> + require([ "as/dto/sample/create/SampleCreation", "as/dto/entitytype/id/EntityTypePermId", "as/dto/space/id/SpacePermId", "as/dto/experiment/id/ExperimentIdentifier" ], + function(SampleCreation, EntityTypePermId, SpacePermId, ExperimentIdentifier) { +  + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + var sample = new SampleCreation(); + sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + sample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE")); + sample.setCode("MY_SAMPLE_CODE"); + + // you can also pass more than one creation object to create multiple entities at once + v3.createSamples([ sample ]).done(function(permIds) { + alert("Perm ids: " + JSON.stringify(permIds)); + }); + }); + </script> + +##### Properties example + +**V3CreationWithPropertiesExample.java** + + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.id.EntityTypePermId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentIdentifier; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.SampleCreation; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.id.SpacePermId; + + public class V3CreationWithPropertiesExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + SampleCreation sample = new SampleCreation(); + sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + sample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE")); + sample.setCode("MY_SAMPLE_CODE"); + + // examples of value formats that should be used for different types of properties + sample.setProperty("MY_VARCHAR", "this is a description"); + sample.setProperty("MY_INTEGER", "123"); + sample.setProperty("MY_REAL", "123.45"); + sample.setProperty("MY_BOOLEAN", "true"); + sample.setProperty("MY_MATERIAL", 
"MY_MATERIAL_CODE (MY_MATERIAL_TYPE_CODE)"); + sample.setProperty("MY_VOCABULARY", "MY_TERM_CODE"); + + v3.createSamples(sessionToken, Arrays.asList(sample)); + } + } + +**V3CreationWithPropertiesExample.html** + + <script> + require([ "as/dto/sample/create/SampleCreation", "as/dto/entitytype/id/EntityTypePermId", "as/dto/space/id/SpacePermId", "as/dto/experiment/id/ExperimentIdentifier" ], + function(SampleCreation, EntityTypePermId, SpacePermId, ExperimentIdentifier) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + var sample = new SampleCreation(); + sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + sample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE")); + sample.setCode("MY_SAMPLE_CODE"); + + // examples of value formats that should be used for different types of properties + sample.setProperty("MY_VARCHAR", "this is a description"); + sample.setProperty("MY_INTEGER", "123"); + sample.setProperty("MY_REAL", "123.45"); + sample.setProperty("MY_BOOLEAN", "true"); + sample.setProperty("MY_MATERIAL", "MY_MATERIAL_CODE (MY_MATERIAL_TYPE_CODE)"); + sample.setProperty("MY_VOCABULARY", "MY_TERM_CODE"); + + v3.createSamples([ sample ]).done(function(permIds) { + alert("Perm ids: " + JSON.stringify(permIds)); + }); + }); + }); + </script> + +##### Different ids example + +**V3CreationWithDifferentIdsExample.java** + + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.id.EntityTypePermId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentIdentifier; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentPermId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.SampleCreation; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.id.SpacePermId; + + public class V3CreationWithDifferentIdsExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + SampleCreation sample = new SampleCreation(); + sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + sample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + sample.setCode("MY_SAMPLE_CODE"); + + // as an experiment id we can use any class that implements IExperimentId interface. 
+            sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE"));
+            // or an experiment perm id:
+            sample.setExperimentId(new ExperimentPermId("20160115170718361-98668"));
+
+            v3.createSamples(sessionToken, Arrays.asList(sample));
+        }
+    }
+
+**V3CreationWithDifferentIdsExample.html**
+
+    <script>
+        require([ "as/dto/sample/create/SampleCreation", "as/dto/entitytype/id/EntityTypePermId", "as/dto/space/id/SpacePermId", "as/dto/experiment/id/ExperimentIdentifier", "as/dto/experiment/id/ExperimentPermId" ],
+            function(SampleCreation, EntityTypePermId, SpacePermId, ExperimentIdentifier, ExperimentPermId) {
+
+                // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+                var sample = new SampleCreation();
+                sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE"));
+                sample.setSpaceId(new SpacePermId("MY_SPACE_CODE"));
+                sample.setCode("MY_SAMPLE_CODE");
+
+                // as an experiment id we can use any class that implements the IExperimentId interface. For instance, an experiment identifier:
+                sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE"));
+                // or an experiment perm id:
+                sample.setExperimentId(new ExperimentPermId("20160115170718361-98668"));
+
+                v3.createSamples([ sample ]).done(function(permIds) {
+                    alert("Perm ids: " + JSON.stringify(permIds));
+                });
+            });
+    </script>
+
+##### Parent child example
+
+The following example creates parent and child samples for a sample type
+which allows automatic code generation:
+
+**V3CreationParentAndChildExample.java**
+
+    import java.util.Arrays;
+    import java.util.List;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.id.CreationId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.id.EntityTypePermId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.SampleCreation;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id.SamplePermId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.id.SpacePermId;
+
+    public class V3CreationParentAndChildExample
+    {
+        public static void main(String[] args)
+        {
+            // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+            SampleCreation parentSample = new SampleCreation();
+            parentSample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE"));
+            parentSample.setSpaceId(new SpacePermId("MY_SPACE_CODE"));
+            parentSample.setCreationId(new CreationId("parent"));
+
+            SampleCreation childSample = new SampleCreation();
+            childSample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE"));
+            childSample.setSpaceId(new SpacePermId("MY_SPACE_CODE"));
+            childSample.setParentIds(Arrays.asList(parentSample.getCreationId()));
+
+            List<SamplePermId> permIds = v3.createSamples(sessionToken, Arrays.asList(parentSample, childSample));
+            System.out.println("Perm ids: " + permIds);
+        }
+    }
+
+**V3CreationParentAndChildExample.html**
+
+    <script>
+        require([ "openbis", "as/dto/sample/create/SampleCreation", "as/dto/entitytype/id/EntityTypePermId", "as/dto/space/id/SpacePermId", "as/dto/common/id/CreationId" ],
+            function(openbis, SampleCreation, EntityTypePermId, SpacePermId, CreationId) {
+
+                // we assume here that v3 object has been already created and we have already called login (please check
"Accessing the API" section for more details) + + var parentSample = new SampleCreation(); + parentSample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + parentSample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + parentSample.setCreationId(new CreationId("parent")); + var childSample = new SampleCreation(); + childSample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + childSample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + childSample.setParentIds([parentSample.getCreationId()]); + v3.createSamples([ parentSample, childSample ]).done(function(permIds) { + alert("Perm ids: " + JSON.stringify(permIds)); + }); + }); + </script> + +#### Updating entities + +The methods for updating entities in V3 API are called: updateSpaces, +updateProjects, updateExperiments, updateSamples, updateDataSets, +updateMaterials, updateVocabularyTerms, updateTags. They all allow to +update one or more entities at once by passing one or more entity update +objects (i.e. SpaceUpdate, ProjectUpdate, ExperimentUpdate, +SampleUpdate, MaterialUpdate, VocabularyTermUpdate, TagUpdate). With +update objects you can update entities without fetching their state +first, i.e. the update objects contain only changes - not the full state +of entities. All update objects require an id of an entity that will be +updated. Please note that some of the entity fields cannot be changed +once an entity is created, for instance sample code becomes immutable +after creation. + +##### Example + +**V3UpdateExample.java** + + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentIdentifier; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id.SampleIdentifier; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.update.SampleUpdate; + + public class V3UpdateExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) +  + // here we update a sample and attach it to a different experiment + SampleUpdate sample = new SampleUpdate(); + sample.setSampleId(new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_OTHER_EXPERIMENT_CODE")); + + // you can also pass more than one update object to update multiple entities at once + v3.updateSamples(sessionToken, Arrays.asList(sample)); + System.out.println("Updated"); + } + } + +**V3UpdateExample.html** + + <script> + require([ "as/dto/sample/update/SampleUpdate", "as/dto/sample/id/SampleIdentifier", "as/dto/experiment/id/ExperimentIdentifier" ], + function(SampleUpdate, SampleIdentifier, ExperimentIdentifier) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) +  + // here we update a sample and attach it to a different experiment + var sample = new SampleUpdate(); + sample.setSampleId(new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_OTHER_EXPERIMENT_CODE")); + + // you can also pass more than one update object to update multiple entities at once + v3.updateSamples([ sample ]).done(function() { + alert("Updated"); + }); + }); + </script> + +##### Properties example + +**V3UpdateWithPropertiesExample.java** + + import java.util.Arrays; + import 
ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id.SampleIdentifier; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.update.SampleUpdate; + + public class V3UpdateWithPropertiesExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) +  + SampleUpdate sample = new SampleUpdate(); + sample.setSampleId(new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE")); + + // examples of value formats that should be used for different types of properties + sample.setProperty("MY_VARCHAR", "this is a description"); + sample.setProperty("MY_INTEGER", "123"); + sample.setProperty("MY_REAL", "123.45"); + sample.setProperty("MY_BOOLEAN", "true"); + sample.setProperty("MY_MATERIAL", "MY_MATERIAL_CODE (MY_MATERIAL_TYPE_CODE)"); + sample.setProperty("MY_VOCABULARY", "MY_TERM_CODE"); + + v3.updateSamples(sessionToken, Arrays.asList(sample)); + + System.out.println("Updated"); + } + } + +**V3UpdateWithPropertiesExample.html** + + <script> + require([ "as/dto/sample/update/SampleUpdate", "as/dto/sample/id/SampleIdentifier" ], function(SampleUpdate, SampleIdentifier) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) +  + var sample = new SampleUpdate(); + sample.setSampleId(new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE")); + + // examples of value formats that should be used for different types of properties + sample.setProperty("MY_VARCHAR", "this is a description"); + sample.setProperty("MY_INTEGER", "123"); + sample.setProperty("MY_REAL", "123.45"); + sample.setProperty("MY_BOOLEAN", "true"); + sample.setProperty("MY_MATERIAL", "MY_MATERIAL_CODE (MY_MATERIAL_TYPE_CODE)"); + sample.setProperty("MY_VOCABULARY", "MY_TERM_CODE"); + + v3.updateSamples([ sample ]).done(function() { + alert("Updated"); + }); + }); + </script> + +##### Parents example + +**V3UpdateWithParentsExample.java** + + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id.SampleIdentifier; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.update.SampleUpdate; + + public class V3UpdateWithParentsExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) +  + // Let's assume the sample we are about to update has the following parents: + // - MY_PARENT_CODE_1 + // - MY_PARENT_CODE_2 + + SampleUpdate sample = new SampleUpdate(); + sample.setSampleId(new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE")); + + // We can add and remove parents from the existing list. For instance, here we are adding: MY_PARENT_CODE_3 and removing: MY_PARENT_CODE_1. + // The list of parents after such change would be: [MY_PARENT_CODE_2, MY_PARENT_CODE_3]. Please note that we don't have to fetch the existing + // list of parents, we are just defining what changes should be made to this list on the server side. Updating lists of children or contained + // samples works exactly the same. + + sample.getParentIds().add(new SampleIdentifier("/MY_SPACE_CODE/MY_PARENT_CODE_3")); + sample.getParentIds().remove(new SampleIdentifier("/MY_SPACE_CODE/MY_PARENT_CODE_1")); + + // Instead of adding and removing parents we can also set the list of parents to a completely new value. 
+ sample.getParentIds().set(new SampleIdentifier("/MY_SPACE_CODE/MY_PARENT_CODE_2"), new SampleIdentifier("/MY_SPACE_CODE/MY_PARENT_CODE_3")); + + v3.updateSamples(sessionToken, Arrays.asList(sample)); + + System.out.println("Updated"); + } + } + +**V3UpdateWithParentsExample.html** + + <script> + require([ "as/dto/sample/update/SampleUpdate", "as/dto/sample/id/SampleIdentifier" ], + function(SampleUpdate, SampleIdentifier) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + // Let's assume the sample we are about to update has the following parents: + // - MY_PARENT_CODE_1 + // - MY_PARENT_CODE_2 + + var sample = new SampleUpdate(); + sample.setSampleId(new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE")); + + // We can add and remove parents from the existing list. For instance, here we are adding: MY_PARENT_CODE_3 and removing: MY_PARENT_CODE_1. + // The list of parents after such change would be: [MY_PARENT_CODE_2, MY_PARENT_CODE_3]. Please note that we don't have to fetch the existing + // list of parents, we are just defining what changes should be made to this list on the server side. Updating lists of children or contained + // samples works exactly the same. + + sample.getParentIds().add(new SampleIdentifier("/MY_SPACE_CODE/MY_PARENT_CODE_3")); + sample.getParentIds().remove(new SampleIdentifier("/MY_SPACE_CODE/MY_PARENT_CODE_1")); + + // Instead of adding and removing parents we can also set the list of parents to a completely new value. + sample.getParentIds().set(new SampleIdentifier("/MY_SPACE_CODE/MY_PARENT_CODE_2"), new SampleIdentifier("/MY_SPACE_CODE/MY_PARENT_CODE_3")); + + v3.updateSamples([ sample ]).done(function() { + alert("Updated"); + }); + }); + </script> + +#### Getting authorization rights for entities + +If the user isn't allowed to create or update an entity, an exception is thrown. But often a client application wants to know in advance whether such operations are allowed or not. With the API method `getRights()` authorization rights for specified entities can be requested. Currently only creation and update rights for projects, experiments and samples, and update rights for data sets, are returned. + +In order to check whether an entity can be created, a dummy identifier has to be provided when calling `getRights()`. This identifier should be a well-formed identifier which specifies the entity to which such a new entity would belong. For example, calling `getRights()` with `new ExperimentIdentifier("/MY-SPACE/PROJECT1/DUMMY")` would return rights containing `CREATE` if the user is allowed to create an experiment in the project `/MY-SPACE/PROJECT1`. + +#### Freezing entities + +An entity (Space, Project, Experiment, Sample, Data Set) can be frozen. There are two types of freezing: *core* and *surface*. A frozen core means that certain attributes of the entity cannot be changed, but connections between entities can still be added or removed. A frozen surface implies a frozen core and frozen connections of particular types. To freeze an entity, it has to be updated by invoking at least one freeze method on the update object. Example: + + SampleUpdate sample = new SampleUpdate(); + sample.setSampleId(new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE")); + sample.freezeForChildren(); + v3.updateSamples(sessionToken, Arrays.asList(sample)); + +Freezing cannot be reverted.
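+ +Since freezing cannot be undone, a client may want to verify beforehand that it is allowed to update the entity at all, using the `getRights()` method described in the previous section. The following is a minimal sketch, not an official example: the `Rights`, `Right` and `RightsFetchOptions` DTO names follow the usual v3 package conventions, but treat the exact accessors as assumptions to verify against your openBIS version. + + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.rights.Right; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.rights.Rights; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.rights.fetchoptions.RightsFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id.ISampleId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id.SampleIdentifier; + + public class V3GetRightsSketch + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login + + ISampleId id = new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE"); + Rights rights = v3.getRights(sessionToken, Arrays.asList(id), new RightsFetchOptions()).get(id); + + // UPDATE is present only if the user is allowed to update (and therefore freeze) the sample + if (rights.getRights().contains(Right.UPDATE)) + { + // safe to apply the irreversible freeze update here + } + } + }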
+ +The timestamp of freezing, the types of freezing, the user and the identifier of the frozen entity will be stored in the database as a freezing event. + +The following tables show all freezing possibilities and what is actually frozen. + +##### Space + +|Freezing method|Description| +|--- |--- | +|freeze|The specified space cannot be deleted. The description cannot be set or changed.| +|freezeForProjects|Same as freeze() plus no projects can be added to or removed from the specified space.| +|freezeForSamples|Same as freeze() plus no samples can be added to or removed from the specified space.| + +##### Project + +|Freezing method|Description| +|--- |--- | +|freeze|The specified project cannot be deleted. The description cannot be set or changed. No attachments can be added or removed.| +|freezeForExperiments|Same as freeze() plus no experiments can be added to or removed from the specified project.| +|freezeForSamples|Same as freeze() plus no samples can be added to or removed from the specified project.| + +##### Experiment + +|Freezing method|Description| +|--- |--- | +|freeze|The specified experiment cannot be deleted. No properties can be added, removed or modified. No attachments can be added or removed.| +|freezeForSamples|Same as freeze() plus no samples can be added to or removed from the specified experiment.| +|freezeForDataSets|Same as freeze() plus no data sets can be added to or removed from the specified experiment.| + +##### Sample + +|Freezing method|Description| +|--- |--- | +|freeze|The specified sample cannot be deleted. No properties can be added, removed or modified. No attachments can be added or removed.| +|freezeForComponents|Same as freeze() plus no component samples can be added to or removed from the specified sample.| +|freezeForChildren|Same as freeze() plus no child samples can be added to or removed from the specified sample.| +|freezeForParents|Same as freeze() plus no parent samples can be added to or removed from the specified sample.| +|freezeForDataSets|Same as freeze() plus no data sets can be added to or removed from the specified sample.| + +##### Data Set + +|Freezing method|Description| +|--- |--- | +|freeze|The specified data set cannot be deleted. No properties can be added, removed or modified. Content copies can still be added or removed for frozen link data sets.| +|freezeForChildren|Same as freeze() plus no child data sets can be added to or removed from the specified data set.| +|freezeForParents|Same as freeze() plus no parent data sets can be added to or removed from the specified data set.| +|freezeForComponents|Same as freeze() plus no component data sets can be added to or removed from the specified data set.| +|freezeForContainers|Same as freeze() plus no container data sets can be added to or removed from the specified data set.| + +#### Searching entities + +The methods for searching entities in the V3 API are called: `searchSpaces`, `searchProjects`, `searchExperiments`, `searchSamples`, `searchDataSets`, `searchMaterials`, `searchVocabularyTerms`, `searchTags`, `searchGlobally`. + +They all take criteria and fetch options objects as input. The criteria object allows you to specify what entities you are looking for: for instance, only entities from a given space, entities of a given type, entities with a property X that equals Y, and much more. + +The fetch options object allows you to tell the API which parts of the entities found should be fetched and returned as a result of the method call.
For instance, you can tell the API to return the results with properties only, because that is all you will need for your processing. This gives you very fine-grained control over how much data you actually fetch from the server. The less you ask for via fetch options, the less data the API has to load from the database and the less data it will have to transfer over the network. Therefore, by default the fetch options object is empty, i.e. it tells the API to fetch only the basic information about a given entity: its id, attributes and creation and registration dates. If you want to fetch anything more, you have to let the API know via fetch options which parts you are also interested in. + +Another functionality that the fetch options object provides is pagination (see the FetchOptions.from(Integer) and FetchOptions.count(Integer) methods). With pagination a user can control whether a search method returns all found results or just a given subrange. This is especially useful for handling very large numbers of results, e.g. when we want to build a UI to present them. In such a situation, we can perform a search that returns only the first batch of results (e.g. the first 100) to keep the UI responsive, and ask for another batch only when a user requests it (e.g. by clicking a next page button in the UI). Pagination is available in all the search methods including the global search (i.e. the searchGlobally method). A code example on how to use the pagination methods is presented below. + +Apart from pagination, the fetch options object also provides the means to sort the results (see the FetchOptions.sortBy() method). What fields can be used for sorting depends on the search method and the returned objects. Results can be sorted ascending or descending. Sorting by multiple fields is also possible (e.g. first sort by type and then by identifier). A code example on how to use sorting is presented below.
+ +##### Example + +**V3SearchExample.java** + + import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.Sample; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.fetchoptions.SampleFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.search.SampleSearchCriteria; + + public class V3SearchExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) +  + // search for samples that are in space with code MY_SPACE_CODE and are of sample type with code MY_SAMPLE_TYPE_CODE + SampleSearchCriteria criteria = new SampleSearchCriteria(); + criteria.withSpace().withCode().thatEquals("MY_SPACE_CODE"); + criteria.withType().withCode().thatEquals("MY_SAMPLE_TYPE_CODE"); + + // tell the API to fetch properties for each returned sample + SampleFetchOptions fetchOptions = new SampleFetchOptions(); + fetchOptions.withProperties(); + + SearchResult<Sample> result = v3.searchSamples(sessionToken, criteria, fetchOptions); + + for (Sample sample : result.getObjects()) + { + // because we asked for properties via fetch options we can access them here, otherwise NotFetchedException would be thrown by getProperties method + System.out.println("Sample " + sample.getIdentifier() + " has properties: " + sample.getProperties()); + } + } + } + +**V3SearchExample.html** + + <script> + require([ "as/dto/sample/search/SampleSearchCriteria", "as/dto/sample/fetchoptions/SampleFetchOptions" ], + function(SampleSearchCriteria, SampleFetchOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + // search for samples that are in space with code MY_SPACE_CODE and are of sample type with code MY_SAMPLE_TYPE_CODE + var criteria = new SampleSearchCriteria(); + criteria.withSpace().withCode().thatEquals("MY_SPACE_CODE"); + criteria.withType().withCode().thatEquals("MY_SAMPLE_TYPE_CODE"); + + // tell the API to fetch properties for each returned sample + var fetchOptions = new SampleFetchOptions(); + fetchOptions.withProperties(); + + v3.searchSamples(criteria, fetchOptions).done(function(result) { + result.getObjects().forEach(function(sample) { + // because we asked for properties via fetch options we can access them here, otherwise NotFetchedException would be thrown by getProperties method + alert("Sample " + sample.getIdentifier() + " has properties: " + JSON.stringify(sample.getProperties())); + }); + }); + }); + </script> + +##### Example with pagination and sorting + +**V3SearchWithPaginationAndSortingExample.java** + + import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.Sample; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.fetchoptions.SampleFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.search.SampleSearchCriteria; + + public class V3SearchWithPaginationAndSortingExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + SampleSearchCriteria criteria = new SampleSearchCriteria(); + SampleFetchOptions fetchOptions = new SampleFetchOptions(); + + // get the first 100 results + fetchOptions.from(0); + 
fetchOptions.count(100); + + // sort the results first by a type (ascending) and then by an identifier (descending) + fetchOptions.sortBy().type().asc(); + fetchOptions.sortBy().identifier().desc(); + + SearchResult<Sample> result = v3.searchSamples(sessionToken, criteria, fetchOptions); + + // because of the pagination the list contains only the first 100 objects (or even less if there are fewer results found) + System.out.println(result.getObjects()); + + // returns the number of all found results (i.e. potentially more than 100) + System.out.println(result.getTotalCount()); + } + } + +**V3SearchWithPaginationAndSortingExample.html** + + <script> + require([ "as/dto/sample/search/SampleSearchCriteria", "as/dto/sample/fetchoptions/SampleFetchOptions" ], + function(SampleSearchCriteria, SampleFetchOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + var criteria = new SampleSearchCriteria(); + var fetchOptions = new SampleFetchOptions(); + + // get the first 100 results + fetchOptions.from(0); + fetchOptions.count(100); + + // sort the results first by a type (ascending) and then by an identifier (descending) + fetchOptions.sortBy().type().asc(); + fetchOptions.sortBy().identifier().desc(); + + v3.searchSamples(criteria, fetchOptions).done(function(result) { + // because of pagination the list contains only the first 100 objects (or even less if there are fewer results found) + console.log(result.getObjects()); + + // returns the number of all found results (i.e. potentially more than 100) + console.log(result.getTotalCount()); + }); + }); + </script> + +##### Example with OR operator + +By default all specified search criteria have to be fulfilled. 
If only one criterion needs to be fulfilled, use `criteria.withOrOperator()` as in the following example: + +**V3SearchWithOrOperatorExample.java** + + import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.Sample; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.fetchoptions.SampleFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.search.SampleSearchCriteria; + + public class V3SearchWithOrOperatorExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + // search for samples that are either in space with code MY_SPACE_CODE or of sample type with code MY_SAMPLE_TYPE_CODE + SampleSearchCriteria criteria = new SampleSearchCriteria(); + criteria.withOrOperator(); + criteria.withSpace().withCode().thatEquals("MY_SPACE_CODE"); + criteria.withType().withCode().thatEquals("MY_SAMPLE_TYPE_CODE"); + + // tell the API to fetch the type for each returned sample + SampleFetchOptions fetchOptions = new SampleFetchOptions(); + fetchOptions.withType(); + + SearchResult<Sample> result = v3.searchSamples(sessionToken, criteria, fetchOptions); + + for (Sample sample : result.getObjects()) + { + System.out.println("Sample " + sample.getIdentifier() + " [" + sample.getType().getCode() + "]"); + } + } + } + +**V3SearchWithOrOperatorExample.html** + + <script> + require([ "as/dto/sample/search/SampleSearchCriteria", "as/dto/sample/fetchoptions/SampleFetchOptions" ], + function(SampleSearchCriteria, SampleFetchOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + // search for samples that are either in space with code MY_SPACE_CODE or of sample type with code MY_SAMPLE_TYPE_CODE + var criteria = new SampleSearchCriteria(); + criteria.withOrOperator(); + criteria.withSpace().withCode().thatEquals("MY_SPACE_CODE"); + criteria.withType().withCode().thatEquals("MY_SAMPLE_TYPE_CODE"); + + // tell the API to fetch the type for each returned sample + var fetchOptions = new SampleFetchOptions(); + fetchOptions.withType(); + + v3.searchSamples(criteria, fetchOptions).done(function(result) { + result.getObjects().forEach(function(sample) { + alert("Sample " + sample.getIdentifier() + " [" + sample.getType().getCode() + "]"); + }); + }); + }); + </script> + +##### Example with nested logical operators + +The following code finds samples with perm ID that ends with "6" AND (with code that contains "-" OR that starts with "C") AND (with experiment OR of type whose code starts with "MASTER").
+ +**V3SearchWithNestedLogicalOperatorsExample.java** + + import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.Sample; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.fetchoptions.SampleFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.search.SampleSearchCriteria; + + public class V3SearchWithNestedLogicalOperatorsExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + SampleSearchCriteria criteria = new SampleSearchCriteria().withAndOperator(); + criteria.withPermId().thatEndsWith("6"); + + SampleSearchCriteria subcriteria1 = criteria.withSubcriteria().withOrOperator(); + subcriteria1.withCode().thatContains("-"); + subcriteria1.withCode().thatStartsWith("C"); + + SampleSearchCriteria subcriteria2 = criteria.withSubcriteria().withOrOperator(); + subcriteria2.withExperiment(); + subcriteria2.withType().withCode().thatStartsWith("MASTER"); + + // tell the API to fetch the type for each returned sample + SampleFetchOptions fetchOptions = new SampleFetchOptions(); + fetchOptions.withType(); + SearchResult<Sample> result = v3.searchSamples(sessionToken, criteria, fetchOptions); + + for (Sample sample : result.getObjects()) + { + System.out.println("Sample " + sample.getIdentifier() + " [" + sample.getType().getCode() + "]"); + } + } + } + +**V3SearchWithNestedLogicalOperatorsExample.html** + + <script> + require([ "as/dto/sample/search/SampleSearchCriteria", "as/dto/sample/fetchoptions/SampleFetchOptions" ], + function(SampleSearchCriteria, SampleFetchOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + var criteria = new SampleSearchCriteria().withAndOperator(); + criteria.withPermId().thatEndsWith("6"); + + var subcriteria1 = criteria.withSubcriteria().withOrOperator(); + subcriteria1.withCode().thatContains("-"); + subcriteria1.withCode().thatStartsWith("C"); + + var subcriteria2 = criteria.withSubcriteria().withOrOperator(); + subcriteria2.withExperiment(); + subcriteria2.withType().withCode().thatStartsWith("MASTER"); + + // tell the API to fetch the type for each returned sample + var fetchOptions = new SampleFetchOptions(); + fetchOptions.withType(); + + v3.searchSamples(criteria, fetchOptions).done(function(result) { + result.getObjects().forEach(function(sample) { + alert("Sample " + sample.getIdentifier() + " [" + sample.getType().getCode() + "]"); + }); + }); + }); + </script> + +##### Example with recursive fetch options + +In order to get all descendants/ascendants of a sample, fetch options can be used recursively by using `fetchOptions.withChildrenUsing(fetchOptions)` as in the following example: + +**V3SearchWithRecursiveFetchOptionsExample.java** + + import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.Sample; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.fetchoptions.SampleFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.search.SampleSearchCriteria; + + public class V3SearchWithRecursiveFetchOptionsExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + SampleSearchCriteria criteria =
new SampleSearchCriteria(); + criteria.withType().withCode().thatEquals("MY_SAMPLE_TYPE_CODE"); + + // tell the API to fetch all descendants for each returned sample + SampleFetchOptions fetchOptions = new SampleFetchOptions(); + fetchOptions.withChildrenUsing(fetchOptions); + + SearchResult<Sample> result = v3.searchSamples(sessionToken, criteria, fetchOptions); + + for (Sample sample : result.getObjects()) + { + System.out.println("Sample " + renderWithDescendants(sample)); + } + } + + private static String renderWithDescendants(Sample sample) + { + StringBuilder builder = new StringBuilder(); + for (Sample child : sample.getChildren()) + { + if (builder.length() > 0) + { + builder.append(", "); + } + builder.append(renderWithDescendants(child)); + } + if (builder.length() == 0) + { + return sample.getCode(); + } + return sample.getCode() + " -> (" + builder.toString() + ")"; + } + } + +**V3SearchWithRecursiveFetchOptionsExample.html** + + <script> + require([ "as/dto/sample/search/SampleSearchCriteria", "as/dto/sample/fetchoptions/SampleFetchOptions" ], + function(SampleSearchCriteria, SampleFetchOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + var criteria = new SampleSearchCriteria(); + criteria.withType().withCode().thatEquals("MY_SAMPLE_TYPE_CODE"); + + // tell the API to fetch all descendants for each returned sample + var fetchOptions = new SampleFetchOptions(); + fetchOptions.withChildrenUsing(fetchOptions); + + v3.searchSamples(criteria, fetchOptions).done(function(result) { + result.getObjects().forEach(function(sample) { + alert("Sample " + renderWithDescendants(sample)); + }); + }); + + function renderWithDescendants(sample) { + var children = sample.getChildren(); + var list = ""; + for (var i = 0; i < children.length; i++) { + if (list.length > 0) { + list += ", "; + } + list += renderWithDescendants(children[i]); + } + if (children.length == 0) { + return sample.getCode(); + } + return sample.getCode() + " -> (" + list + ")"; + } + }); + </script> + +##### Global search + +There are two kinds of global search: + +- Using the thatContains() and thatContainsExactly() methods of GlobalSearchTextCriteria. This type of search performs a substring search in any field of any entity. +- Using the thatMatches() method of GlobalSearchTextCriteria. This type of search performs a lexical match using an English dictionary. If the string to match is not a word, it is matched as a whole (e.g. a code will match only if the whole code string is provided). + +Global search searches for experiments, samples, data sets and materials by specifying a text snippet (or complete words) to be found in any type of metadata (entity attributes or properties).
Example: + +**V3GlobalSearchExample.java** + + import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.global.GlobalSearchObject; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.global.fetchoptions.GlobalSearchObjectFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.global.search.GlobalSearchCriteria; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.global.search.GlobalSearchObjectKind; + + public class V3GlobalSearchExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + // search for any text matching 'default' but only among samples + GlobalSearchCriteria criteria = new GlobalSearchCriteria(); + criteria.withObjectKind().thatIn(GlobalSearchObjectKind.SAMPLE); + criteria.withText().thatMatches("default"); + + // Fetch also the sample type + GlobalSearchObjectFetchOptions fetchOptions = new GlobalSearchObjectFetchOptions(); + fetchOptions.withSample().withType(); + + SearchResult<GlobalSearchObject> result = v3.searchGlobally(sessionToken, criteria, fetchOptions); + + for (GlobalSearchObject object : result.getObjects()) + { + System.out.println(object.getObjectKind() + ": " + object.getObjectIdentifier() + " [" + + object.getSample().getType().getCode() + + "], score:" + object.getScore() + ", match:" + object.getMatch()); + } + } + } + + +**V3GlobalSearchExample.html** + + <script> + require([ "as/dto/global/search/GlobalSearchCriteria", "as/dto/global/search/GlobalSearchObjectKind", "as/dto/global/fetchoptions/GlobalSearchObjectFetchOptions" ], + function(GlobalSearchCriteria, GlobalSearchObjectKind, GlobalSearchObjectFetchOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + // search for any text matching 'default' but only among samples + var criteria = new GlobalSearchCriteria(); + criteria.withObjectKind().thatIn([GlobalSearchObjectKind.SAMPLE]); + criteria.withText().thatMatches("default"); + + // Fetch also the sample type + var fetchOptions = new GlobalSearchObjectFetchOptions(); + fetchOptions.withSample().withType(); + + v3.searchGlobally(criteria, fetchOptions).done(function(result) { + result.getObjects().forEach(function(object) { + alert(object.getObjectKind() + ": " + object.getObjectIdentifier() + " [" + + object.getSample().getType().getCode() + + "], score:" + object.getScore() + ", match:" + object.getMatch()); + }); + }); + }); + </script> + + +#### Getting entities + +The methods for getting entities in the V3 API are called: getSpaces, getProjects, getExperiments, getSamples, getDataSets, getMaterials, getVocabularyTerms, getTags. They all take a list of entity ids and fetch options as input (please check the "Searching entities" section for more details on the fetch options). They return a map where the passed entity ids become the keys and the values are the entities found for those ids. If no entity was found for a given id, or the entity exists but you don't have access to it, then there is no entry for that id in the returned map.
+ +##### Example + +**V3GetExample.java** + + import java.util.Arrays; + import java.util.Map; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.Sample; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.fetchoptions.SampleFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id.ISampleId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id.SampleIdentifier; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id.SamplePermId; + + public class V3GetExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + ISampleId id1 = new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE"); + ISampleId id2 = new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE_2"); + ISampleId id3 = new SamplePermId("20160115170726679-98669"); // perm id of sample /MY_SPACE_CODE/MY_SAMPLE_CODE + ISampleId id4 = new SamplePermId("20160118115737079-98672"); // perm id of sample /MY_SPACE_CODE/MY_SAMPLE_CODE_3 + ISampleId id5 = new SamplePermId("I_DONT_EXIST"); + + SampleFetchOptions fetchOptions = new SampleFetchOptions(); + fetchOptions.withProperties(); + + Map<ISampleId, Sample> map = v3.getSamples(sessionToken, Arrays.asList(id1, id2, id3, id4, id5), fetchOptions); + + map.get(id1); // returns sample /MY_SPACE_CODE/MY_SAMPLE_CODE + map.get(id2); // returns sample /MY_SPACE_CODE/MY_SAMPLE_CODE_2 + map.get(id3); // returns sample /MY_SPACE_CODE/MY_SAMPLE_CODE + map.get(id4); // returns sample /MY_SPACE_CODE/MY_SAMPLE_CODE_3 + map.get(id5); // returns null + } + } + +**V3GetExample.html** + + <script> + require([ "as/dto/sample/id/SampleIdentifier", "as/dto/sample/id/SamplePermId", "as/dto/sample/fetchoptions/SampleFetchOptions" ], + function(SampleIdentifier, SamplePermId, SampleFetchOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + var id1 = new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE"); + var id2 = new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE_2"); + var id3 = new SamplePermId("20160115170726679-98669"); // perm id of sample /MY_SPACE_CODE/MY_SAMPLE_CODE + var id4 = new SamplePermId("20160118115737079-98672"); // perm id of sample /MY_SPACE_CODE/MY_SAMPLE_CODE_3 + var id5 = new SamplePermId("I_DONT_EXIST"); + + var fetchOptions = new SampleFetchOptions(); + fetchOptions.withProperties(); + + v3.getSamples([ id1, id2, id3, id4, id5 ], fetchOptions).done(function(map) { + map[id1]; // returns sample /MY_SPACE_CODE/MY_SAMPLE_CODE + map[id2]; // returns sample /MY_SPACE_CODE/MY_SAMPLE_CODE_2 + map[id3]; // returns sample /MY_SPACE_CODE/MY_SAMPLE_CODE + map[id4]; // returns sample /MY_SPACE_CODE/MY_SAMPLE_CODE_3 + map[id5]; // returns null + }); + }); + </script> + +#### Deleting entities + +The methods for deleting entities in the V3 API are called: deleteSpaces, deleteProjects, deleteExperiments, deleteSamples, deleteDataSets, deleteMaterials, deleteVocabularyTerms, deleteTags. The delete methods for spaces, projects, materials, vocabulary terms and tags perform a permanent deletion (there is no trash can for these entities - deletion cannot be reverted). The delete methods for experiments, samples and data sets perform a logical deletion (move entities to the trash can) and return a deletion id.
This deletion id can be used for either confirming the logical deletion to remove the entities permanently, or reverting the logical deletion to take the entities out of the trash can. + +##### Example + +**V3DeleteExample.java** + + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.deletion.id.IDeletionId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.delete.SampleDeletionOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id.ISampleId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id.SampleIdentifier; + + public class V3DeleteExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + ISampleId id1 = new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE"); + ISampleId id2 = new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE_2"); + + SampleDeletionOptions deletionOptions = new SampleDeletionOptions(); + deletionOptions.setReason("Testing logical deletion"); + + // logical deletion (move objects to the trash can) + IDeletionId deletionId = v3.deleteSamples(sessionToken, Arrays.asList(id1, id2), deletionOptions); + + // you can use the deletion id to confirm the deletion (permanently delete objects) + v3.confirmDeletions(sessionToken, Arrays.asList(deletionId)); + + // you can use the deletion id to revert the deletion (get the objects out of the trash can) + v3.revertDeletions(sessionToken, Arrays.asList(deletionId)); + } + } + +**V3DeleteExample.html** + + <script> + require([ "as/dto/sample/id/SampleIdentifier", "as/dto/sample/delete/SampleDeletionOptions" ], + function(SampleIdentifier, SampleDeletionOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + var id1 = new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE"); + var id2 = new SampleIdentifier("/MY_SPACE_CODE/MY_SAMPLE_CODE_2"); + + var deletionOptions = new SampleDeletionOptions(); + deletionOptions.setReason("Testing logical deletion"); + + // logical deletion (move objects to the trash can) + v3.deleteSamples([ id1, id2 ], deletionOptions).done(function(deletionId) { + + // you can use the deletion id to confirm the deletion (permanently delete objects) + v3.confirmDeletions([ deletionId ]); + + // you can use the deletion id to revert the deletion (get the objects out of the trash can) + v3.revertDeletions([ deletionId ]); + }); + }); + </script> + +#### Searching entity types + +The following search methods allow searching for entity types, including all assigned property types: `searchDataSetTypes`, `searchExperimentTypes`, `searchMaterialTypes` and `searchSampleTypes`.
Here is an example which searches for all sample types and their assigned property types: + +**V3SearchTypesExample.java** + + import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.property.PropertyAssignment; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.SampleType; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.fetchoptions.SampleTypeFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.search.SampleTypeSearchCriteria; + + public class V3SearchTypesExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + SampleTypeSearchCriteria searchCriteria = new SampleTypeSearchCriteria(); + SampleTypeFetchOptions fetchOptions = new SampleTypeFetchOptions(); + fetchOptions.withPropertyAssignments().withPropertyType(); + + SearchResult<SampleType> result = v3.searchSampleTypes(sessionToken, searchCriteria, fetchOptions); + + for (SampleType sampleType : result.getObjects()) + { + System.out.println(sampleType.getCode()); + for (PropertyAssignment assignment : sampleType.getPropertyAssignments()) + { + System.out.println(" " + assignment.getPropertyType().getCode() + (assignment.isMandatory() ? "*" : "")); + } + } + } + } + +**V3SearchTypesExample.html** + + <script> + require([ "as/dto/sample/search/SampleTypeSearchCriteria", "as/dto/sample/fetchoptions/SampleTypeFetchOptions" ], + function(SampleTypeSearchCriteria, SampleTypeFetchOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + var criteria = new SampleTypeSearchCriteria(); + var fetchOptions = new SampleTypeFetchOptions(); + fetchOptions.withPropertyAssignments().withPropertyType(); + + v3.searchSampleTypes(criteria, fetchOptions).done(function(result) { + result.getObjects().forEach(function(sampleType) { + var msg = sampleType.getCode(); + var assignments = sampleType.getPropertyAssignments(); + for (var i = 0; i < assignments.length; i++) { + msg += "\n " + assignments[i].getPropertyType().getCode(); + } + alert(msg); + }); + }); + }); + </script> + +#### Modifications + +The API allows asking for the latest modification (UPDATE or CREATE\_OR\_DELETE) for groups of objects of various kinds (see the class `ch.ethz.sis.openbis.generic.asapi.v3.dto.objectkindmodification.ObjectKind` for a complete list). This feature of the openBIS API helps GUI clients update views automatically.
Here is an example which asks for the latest project and sample update: + +**V3SearchObjectKindModificationsExample.java** + + import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.objectkindmodification.ObjectKind; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.objectkindmodification.ObjectKindModification; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.objectkindmodification.OperationKind; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.objectkindmodification.fetchoptions.ObjectKindModificationFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.objectkindmodification.search.ObjectKindModificationSearchCriteria; + + public class V3SearchObjectKindModificationsExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + // here we are interested only in the last updates of samples and projects + ObjectKindModificationSearchCriteria criteria = new ObjectKindModificationSearchCriteria(); + criteria.withObjectKind().thatIn(ObjectKind.PROJECT, ObjectKind.SAMPLE); + criteria.withOperationKind().thatIn(OperationKind.UPDATE); + + ObjectKindModificationFetchOptions fetchOptions = new ObjectKindModificationFetchOptions(); + SearchResult<ObjectKindModification> result = v3.searchObjectKindModifications(sessionToken, criteria, fetchOptions); + + for (ObjectKindModification modification : result.getObjects()) + { + System.out.println("The last " + modification.getOperationKind() + " of an entity of kind " + + modification.getObjectKind() + " occurred at " + modification.getLastModificationTimeStamp()); + } + } + } + +**V3SearchObjectKindModificationsExample.html** + + <script> + require([ "as/dto/objectkindmodification/search/ObjectKindModificationSearchCriteria", + "as/dto/objectkindmodification/ObjectKind", "as/dto/objectkindmodification/OperationKind", + "as/dto/objectkindmodification/fetchoptions/ObjectKindModificationFetchOptions" ], + function(ObjectKindModificationSearchCriteria, ObjectKind, OperationKind, ObjectKindModificationFetchOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + // here we are interested only in the last updates of samples and projects + var criteria = new ObjectKindModificationSearchCriteria(); + criteria.withObjectKind().thatIn([ObjectKind.PROJECT, ObjectKind.SAMPLE]); + criteria.withOperationKind().thatIn([OperationKind.UPDATE]); + + var fetchOptions = new ObjectKindModificationFetchOptions(); + + v3.searchObjectKindModifications(criteria, fetchOptions).done(function(result) { + result.getObjects().forEach(function(modification) { + alert("The last " + modification.getOperationKind() + " of an entity of kind " + + modification.getObjectKind() + " occurred at " + modification.getLastModificationTimeStamp()); + }); + }); + }); + </script> + +#### Custom AS Services + +In order to extend the openBIS API, new custom services can be established by core plugins of type `services` (see [Custom Application Server Services](/pages/viewpage.action?pageId=80699473)). The API offers a method to search for services and a method to execute a service.
+ +##### Search for custom services + +As with any other search method, `searchCustomASServices()` needs a search criteria object (`CustomASServiceSearchCriteria`) and fetch options (`CustomASServiceFetchOptions`). The following example returns all available custom AS services. + +###### Example + +**V3SearchCustomASServicesExample.java** + + import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.service.CustomASService; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.service.fetchoptions.CustomASServiceFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.service.search.CustomASServiceSearchCriteria; + + public class V3SearchCustomASServicesExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + CustomASServiceSearchCriteria criteria = new CustomASServiceSearchCriteria(); + CustomASServiceFetchOptions fetchOptions = new CustomASServiceFetchOptions(); + SearchResult<CustomASService> result = v3.searchCustomASServices(sessionToken, criteria, fetchOptions); + for (CustomASService service : result.getObjects()) + { + System.out.println(service.getCode() + ": " + service.getLabel() + " (" + service.getDescription() + ")"); + } + } + } + +**V3SearchCustomASServicesExample.html** + + <script> + require([ "as/dto/service/search/CustomASServiceSearchCriteria", "as/dto/service/fetchoptions/CustomASServiceFetchOptions" ], + function(CustomASServiceSearchCriteria, CustomASServiceFetchOptions) { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + var criteria = new CustomASServiceSearchCriteria(); + var fetchOptions = new CustomASServiceFetchOptions(); + v3.searchCustomASServices(criteria, fetchOptions).done(function(result) { + result.getObjects().forEach(function(service) { + alert(service.getCode() + ": " + service.getLabel() + " (" + service.getDescription() + ")"); + }); + }); + }); + </script> + +##### Execute a custom service + +In order to execute a custom AS service, its code is needed. In addition, a set of key-value pairs can be provided. The key has to be a string whereas the value can be any object. Note that in the case of Java the object has to be an instance of a class which is Java serializable. The key-value pairs are added to the `CustomASServiceExecutionOptions` object by invoking `withParameter()` for each pair. + +The result can be any object (again, it has to be Java serializable in the Java case). In a Java client the result will usually be cast for further processing.
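+ +As a minimal sketch of such a cast (the service code "list-codes-service" and its return type are purely hypothetical assumptions, not part of openBIS): + + import java.util.List; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.service.CustomASServiceExecutionOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.service.id.CustomASServiceCode; + + public class V3CastServiceResultSketch + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login + + // "list-codes-service" is a hypothetical service assumed to return a List<String> + CustomASServiceCode id = new CustomASServiceCode("list-codes-service"); + Object result = v3.executeCustomASService(sessionToken, id, new CustomASServiceExecutionOptions()); + + // the client knows the service's concrete return type and casts accordingly (unchecked cast) + List<String> codes = (List<String>) result; + System.out.println("Codes: " + codes); + } + }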
+ +###### Example + +**V3ExecuteCustomASServiceExample.java** + + import ch.ethz.sis.openbis.generic.asapi.v3.dto.service.CustomASServiceExecutionOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.service.id.CustomASServiceCode; + + public class V3ExecuteCustomASServiceExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + CustomASServiceCode id = new CustomASServiceCode("example-service"); + CustomASServiceExecutionOptions options = new CustomASServiceExecutionOptions().withParameter("space-code", "TEST"); + Object result = v3.executeCustomASService(sessionToken, id, options); + + System.out.println("Result: " + result); + } + } + +**V3ExecuteCustomASServiceExample.html** + + <script> + require([ "as/dto/service/id/CustomASServiceCode", "as/dto/service/CustomASServiceExecutionOptions" ], + function(CustomASServiceCode, CustomASServiceExecutionOptions) { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + var id = new CustomASServiceCode("example-service"); + var options = new CustomASServiceExecutionOptions().withParameter("space-code", "TEST"); + v3.executeCustomASService(id, options).done(function(result) { + alert(result); + }); + }); + </script> + +#### Archiving / unarchiving data sets + +The API provides the following methods for handling data set archiving: archiveDataSets and unarchiveDataSets. Both methods schedule the operation to be executed asynchronously, i.e. once the archiveDataSets/unarchiveDataSets call finishes, the requested data sets are only scheduled for archiving/unarchiving but are not yet in the archive/store. + +##### Archiving data sets + +###### Example + +**V3ArchiveDataSetsExample.java** + + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.archive.DataSetArchiveOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.id.DataSetPermId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.id.IDataSetId; + import ch.systemsx.cisd.common.spring.HttpInvokerUtils; + + public class V3ArchiveDataSetsExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + IDataSetId id1 = new DataSetPermId("20160524154020607-2266"); + IDataSetId id2 = new DataSetPermId("20160524154020607-2267"); + + DataSetArchiveOptions options = new DataSetArchiveOptions(); + + // With removeFromDataStore flag set to true data sets are moved to the archive. + // With removeFromDataStore flag set to false data sets are copied to the archive. + // Default value is true (move to the archive). + options.setRemoveFromDataStore(false); + + // Schedules archiving of the specified data sets. Archiving itself is executed asynchronously.
+ v3.archiveDataSets(sessionToken, Arrays.asList(id1, id2), options); + + System.out.println("Archiving scheduled"); + } + } + +**V3ArchiveDataSetsExample.html** + + <script> + require([ "openbis", "as/dto/dataset/id/DataSetPermId", "as/dto/dataset/archive/DataSetArchiveOptions" ], + function(openbis, DataSetPermId, DataSetArchiveOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + var id1 = new DataSetPermId("20160524154020607-2266"); + var id2 = new DataSetPermId("20160524154020607-2267"); + + var options = new DataSetArchiveOptions(); + + // With removeFromDataStore flag set to true data sets are moved to the archive. + // With removeFromDataStore flag set to false data sets are copied to the archive. + // Default value is true (move to the archive). + options.setRemoveFromDataStore(false); + + // Schedules archiving of the specified data sets. Archiving itself is executed asynchronously. + v3.archiveDataSets([ id1, id2 ], options).done(function() { + alert("Archiving scheduled"); + }); + }); + </script> + +##### Unarchiving data sets + +###### Example + +**V3UnarchiveDataSetsExample.java** + + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.id.DataSetPermId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.id.IDataSetId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.unarchive.DataSetUnarchiveOptions; + import ch.systemsx.cisd.common.spring.HttpInvokerUtils; + + public class V3UnarchiveDataSetsExample + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + IDataSetId id1 = new DataSetPermId("20160524154020607-2266"); + IDataSetId id2 = new DataSetPermId("20160524154020607-2267"); + + DataSetUnarchiveOptions options = new DataSetUnarchiveOptions(); + + // Schedules unarchiving of the specified data sets. Unarchiving itself is executed asynchronously. + v3.unarchiveDataSets(sessionToken, Arrays.asList(id1, id2), options); + + System.out.println("Unarchiving scheduled"); + } + } + +**V3UnarchiveDataSetsExample.html** + + <script> + require([ "openbis", "as/dto/dataset/id/DataSetPermId", "as/dto/dataset/unarchive/DataSetUnarchiveOptions" ], + function(openbis, DataSetPermId, DataSetUnarchiveOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + var id1 = new DataSetPermId("20160524154020607-2266"); + var id2 = new DataSetPermId("20160524154020607-2267"); + + var options = new DataSetUnarchiveOptions(); + + // Schedules unarchiving of the specified data sets. Unarchiving itself is executed asynchronously. + v3.unarchiveDataSets([ id1, id2 ], options).done(function() { + alert("Unarchiving scheduled"); + }); + }); + </script> + +#### Executing Operations + +The V3 API provides you with methods that allow you to create, update, get, search and delete entities, archive and unarchive data sets, execute custom services and much more. With these methods you can programmatically access most of the openBIS features to build your own webapps, dropboxes or services.
Even though these methods are quite different, there are some things that they all have in common: + +- each method is executed in its own separate transaction +- each method is executed synchronously + +Let's think about what that really means. Separate transactions make two (even subsequent) method calls completely unrelated. For instance, when you make a call to create experiments and then another call to create samples, then even if the sample creation fails, the experiments that have already been created remain in the system. Most of the time this is exactly what we want, but not always. There are times when we would like to create either both experiments and samples, or nothing at all if something is wrong. A good example would be an import of some file that contains both experiments and samples. We would like to be able to import the file, fail if it is wrong, correct the file and import it again. With separate transactions we would end up with some things already created after the first failed import, and we wouldn't be able to reimport the corrected file as some things would already be in the system. + +Synchronous method execution is also something we expect most of the time. You call a method and it returns once all the work is done. For instance, when we call a method to create samples, we know that once the method finishes all the samples have been created in the system. This makes perfect sense when we need to execute operations that depend on each other, e.g. we can create data sets and attach them to samples only after the samples have been created. Just as with the separate transactions, there are cases when synchronous method execution is limiting. Let's use the file import example again. What would happen if a file we wanted to import contained hundreds of thousands of entities? The import would probably take a very long time. Our synchronous method call would not return until all the entities have been created, which means we would also block the script/program that makes this method call for a very long time. We could of course create a separate thread in our script/program to overcome this problem, but that would add more complexity. It would also be nice to notify a user once such an operation finishes or fails, e.g. by sending an email. Unfortunately that would mean we have to keep our script/program running until the operation finishes or fails in order to send such an email. What about progress information for running executions, or a history of previous operations and their results? That would be nice, but it would increase the complexity of our script/program even more. + +Therefore, if you want to: + +- execute multiple operations in a single transaction +- execute operations asynchronously +- monitor progress of operations +- receive notifications about finished/failed operations +- keep a history of operations and their results + +you should use: + +- the executeOperations method to execute your operations +- the getOperationExecutions and searchOperationExecutions methods to retrieve information about operation executions (e.g. progress, results or errors) +- the updateOperationExecutions and deleteOperationExecutions methods to control what information should still be kept for a given operation execution and what information can already be removed + +More details on each of these methods are presented in the sections below. Please note that all of the described methods are available in both JavaScript and Java.
+ +##### Method executeOperations + +This method can be used to execute one or many operations either synchronously or asynchronously. Operations are always executed in a single transaction (a failure of a single operation triggers a rollback of all the operations). The executeOperations method can be used to execute any of the IApplicationServerApi methods (except for login/logout and executeOperations itself), i.e. for each IApplicationServerApi method there is a corresponding operation class (a class that implements the IOperation interface). For instance, the IApplicationServerApi.createSpaces method is represented by the CreateSpacesOperation class, the IApplicationServerApi.updateSpaces method by the UpdateSpacesOperation class, etc. + +###### **Asynchronous operation execution** + +An asynchronous executeOperations invocation only schedules operations for execution and then returns immediately. Results of the scheduled operations can be retrieved later with the getOperationExecutions or searchOperationExecutions methods. + +Because the operations are scheduled to be executed later (in a separate thread), a regular try/catch block around the executeOperations method will only catch exceptions related to scheduling the operations for execution, but NOT the exceptions thrown by the operations during the execution. To check for errors that occurred during the execution, please use the getOperationExecutions and searchOperationExecutions methods once the execution finishes. + +In order to execute operations asynchronously, executeOperations has to be used with AsynchronousOperationExecutionOptions. With such options, the method returns an AsynchronousOperationExecutionResults object. This object contains an automatically generated executionId that can be used for retrieving additional information about the execution and fetching its results or errors.
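+ +As a minimal sketch of checking the outcome of an asynchronous execution later (the operation execution DTO names below follow the usual v3 package conventions, but treat the exact fetch-option accessors as assumptions to verify against your openBIS version): + + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.OperationExecution; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.fetchoptions.OperationExecutionFetchOptions; + + public class V3CheckAsyncExecutionSketch + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created, login has been called, + // and executionId was obtained from AsynchronousOperationExecutionResults.getExecutionId() + + OperationExecutionFetchOptions fetchOptions = new OperationExecutionFetchOptions(); + fetchOptions.withSummary().withError(); + + OperationExecution execution = v3.getOperationExecutions(sessionToken, + Arrays.asList(executionId), fetchOptions).get(executionId); + + // the state is FINISHED or FAILED once the execution is done; the error summary is null on success + System.out.println("State: " + execution.getState()); + System.out.println("Error: " + execution.getSummary().getError()); + } + }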
+ +During its life an asynchronous execution goes through the following states: + +- NEW - the execution has just been created with the executeOperations method +- SCHEDULED - the execution has been added to a thread pool queue and is waiting for a free thread +- RUNNING - the execution has been picked from the thread pool queue by a free thread and is currently running +- FINISHED/FAILED - if the execution finishes successfully its state changes to FINISHED; if anything goes wrong it changes to FAILED + + +**V3ExecuteOperationsAsynchronous.java** + + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.id.EntityTypePermId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentIdentifier; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.AsynchronousOperationExecutionOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.AsynchronousOperationExecutionResults; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.CreateSamplesOperation; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.SampleCreation; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.id.SpacePermId; + + public class V3ExecuteOperationsAsynchronous + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + SampleCreation sample = new SampleCreation(); + sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + sample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE")); + sample.setCode("MY_SAMPLE_CODE"); + + CreateSamplesOperation operation = new CreateSamplesOperation(sample); + + AsynchronousOperationExecutionResults results = (AsynchronousOperationExecutionResults) v3.executeOperations(sessionToken, + Arrays.asList(operation), new AsynchronousOperationExecutionOptions()); + + System.out.println("Execution id: " + results.getExecutionId()); + } + } + +**V3ExecuteOperationsAsynchronous.html** + + <script> + require([ "openbis", "as/dto/sample/create/SampleCreation", "as/dto/entitytype/id/EntityTypePermId", "as/dto/space/id/SpacePermId", "as/dto/experiment/id/ExperimentIdentifier", "as/dto/sample/create/CreateSamplesOperation", "as/dto/operation/AsynchronousOperationExecutionOptions" ], + function(openbis, SampleCreation, EntityTypePermId, SpacePermId, ExperimentIdentifier, CreateSamplesOperation, AsynchronousOperationExecutionOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + var sample = new SampleCreation(); + sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + sample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE")); + sample.setCode("MY_SAMPLE_CODE"); + + var operation = new CreateSamplesOperation([ sample ]); + + v3.executeOperations([ operation ], new AsynchronousOperationExecutionOptions()).done(function(results) { + + console.log("Execution id: " + results.getExecutionId()); + }); + }); + </script> + +###### **Synchronous operation execution** + +A synchronous executeOperations invocation immediately executes all the operations.
Any exceptions thrown by the executed operations can be
+caught with a regular try/catch block around the executeOperations
+method.
+
+In order to execute operations synchronously, executeOperations has to
+be used with SynchronousOperationExecutionOptions. With such options,
+the method returns a SynchronousOperationExecutionResults object, which
+contains the results of all the executed operations.
+
+In contrast to the asynchronous version, the synchronous call requires
+the executionId to be explicitly set in
+SynchronousOperationExecutionOptions for additional information about
+the execution to be gathered.
+
+During its life a synchronous execution goes through the following
+states:
+
+- NEW - the execution has just been created with the executeOperations
+  method
+- RUNNING - the execution is being executed by the same thread that
+  called the executeOperations method
+- FINISHED/FAILED - if the execution finishes successfully, its state
+  changes to FINISHED; if anything goes wrong, it changes to FAILED
+
+
+
+**V3ExecuteOperationsSynchronous.java**
+
+    import java.util.Arrays;
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.id.EntityTypePermId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentIdentifier;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.SynchronousOperationExecutionOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.SynchronousOperationExecutionResults;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.CreateSamplesOperation;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.CreateSamplesOperationResult;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.SampleCreation;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.id.SpacePermId;
+
+    public class V3ExecuteOperationsSynchronous
+    {
+        public static void main(String[] args)
+        {
+            // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+            SampleCreation sample = new SampleCreation();
+            sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE"));
+            sample.setSpaceId(new SpacePermId("MY_SPACE_CODE"));
+            sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE"));
+            sample.setCode("MY_SAMPLE_CODE");
+
+            CreateSamplesOperation operation = new CreateSamplesOperation(sample);
+
+            SynchronousOperationExecutionResults results = (SynchronousOperationExecutionResults) v3.executeOperations(sessionToken,
+                    Arrays.asList(operation), new SynchronousOperationExecutionOptions());
+
+            CreateSamplesOperationResult result = (CreateSamplesOperationResult) results.getResults().get(0);
+
+            System.out.println("Sample id: " + result.getObjectIds());
+        }
+    }
+
+**V3ExecuteOperationsSynchronous.html**
+
+    <script>
+    require([ "openbis", "as/dto/sample/create/SampleCreation", "as/dto/entitytype/id/EntityTypePermId", "as/dto/space/id/SpacePermId", "as/dto/experiment/id/ExperimentIdentifier", "as/dto/sample/create/CreateSamplesOperation", "as/dto/operation/SynchronousOperationExecutionOptions" ],
+    function(openbis, SampleCreation, EntityTypePermId, SpacePermId, ExperimentIdentifier, CreateSamplesOperation, SynchronousOperationExecutionOptions) {
+
+        // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+        var 
sample = new SampleCreation(); + sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + sample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE")); + sample.setCode("MY_SAMPLE_CODE"); + + var operation = new CreateSamplesOperation([ sample ]); + + v3.executeOperations([ operation ], new SynchronousOperationExecutionOptions()).done(function(results) { + + var result = results.getResults()[0]; + console.log("Sample id: " + result.getObjectIds()); + }); + }); + </script> + +###### **Notifications** + +The executeOperations method can notify about finished or failed +operation executions. At the moment the only supported notification +method is email (OperationExecutionEmailNotification). + +For successfully finished executions an email contains: + +- execution id +- execution description +- list of operation summaries and operation results + +For failed executions an email contains: + +- execution id +- execution description +- list of operation summaries +- error + + + +**V3ExecuteOperationsEmailNotification.java** + + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.id.EntityTypePermId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentIdentifier; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.AsynchronousOperationExecutionOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.AsynchronousOperationExecutionResults; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.OperationExecutionEmailNotification; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.CreateSamplesOperation; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.SampleCreation; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.id.SpacePermId; + + public class V3ExecuteOperationsEmailNotification + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + SampleCreation sample = new SampleCreation(); + sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + sample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE")); + sample.setCode("MY_SAMPLE_CODE"); + + CreateSamplesOperation operation = new CreateSamplesOperation(sample); + + AsynchronousOperationExecutionOptions options = new AsynchronousOperationExecutionOptions(); + options.setNotification(new OperationExecutionEmailNotification("my@email1.com", "my@email2.com")); + + AsynchronousOperationExecutionResults results = (AsynchronousOperationExecutionResults) v3.executeOperations(sessionToken, + Arrays.asList(operation), options); + + System.out.println("Execution id: " + results.getExecutionId()); + } + } + +**V3ExecuteOperationsEmailNotification.html** + + <script> + require([ "openbis", "as/dto/sample/create/SampleCreation", "as/dto/entitytype/id/EntityTypePermId", "as/dto/space/id/SpacePermId", "as/dto/experiment/id/ExperimentIdentifier", "as/dto/sample/create/CreateSamplesOperation", "as/dto/operation/AsynchronousOperationExecutionOptions", "as/dto/operation/OperationExecutionEmailNotification" ], + function(openbis, SampleCreation, EntityTypePermId, SpacePermId, ExperimentIdentifier, CreateSamplesOperation, 
AsynchronousOperationExecutionOptions, OperationExecutionEmailNotification) {
+
+        // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+        var sample = new SampleCreation();
+        sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE"));
+        sample.setSpaceId(new SpacePermId("MY_SPACE_CODE"));
+        sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE"));
+        sample.setCode("MY_SAMPLE_CODE");
+
+        var operation = new CreateSamplesOperation([ sample ]);
+
+        var options = new AsynchronousOperationExecutionOptions();
+        options.setNotification(new OperationExecutionEmailNotification([ "my@email1.com", "my@email2.com" ]));
+
+        v3.executeOperations([ operation ], options).done(function(results) {
+
+            console.log("Execution id: " + results.getExecutionId());
+        });
+    });
+    </script>
+
+##### Method getOperationExecutions / searchOperationExecutions
+
+Operation execution information can be fetched by the owner of an
+execution (i.e. the person that called the executeOperations method) or
+by an admin. Both the getOperationExecutions and
+searchOperationExecutions methods work similarly to the other get/search
+methods in the V3 API.
+
+The operation execution information that both methods return can be
+divided into three categories:
+
+- basic information (code, state, owner, description, creationDate,
+  startDate, finishDate etc.)
+- summary information (summary of operations, progress, error,
+  results)
+- detailed information (details of operations, progress, error,
+  results)
+
+Each category can have a different availability time (i.e. the time for
+which the given information is stored in the system). The availability
+times can be set via the executeOperations method options (both
+SynchronousOperationExecutionOptions and
+AsynchronousOperationExecutionOptions):
+
+- basic information (setAvailabilityTime)
+- summary information (setSummaryAvailabilityTime)
+- detailed information (setDetailsAvailabilityTime)
+
+If the times are not explicitly set, then the following defaults are
+used:
+
+- basic information (1 year)
+- summary information (1 month)
+- detailed information (1 day)
+
+The current availability of each category can be checked with the
+getAvailability, getSummaryAvailability and getDetailsAvailability
+methods of the OperationExecution class. The availability can have one
+of the following values:
+
+- AVAILABLE - the information is available and can be fetched
+- DELETE\_PENDING - an explicit request to delete the information has
+  been made with the updateOperationExecutions or
+  deleteOperationExecutions method
+- DELETED - an explicit request to delete the information has been
+  processed and the information has been deleted
+- TIME\_OUT\_PENDING - the availability time has expired and the
+  information has been scheduled for removal
+- TIMED\_OUT - the availability time has expired and the information
+  has been removed
+
+Updating availability values and deleting operation execution related
+information are done by two separate V3 maintenance tasks (please check
+service.properties for their configuration).
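+
+The examples below retrieve execution information with
+getOperationExecutions and a known executionId. When no executionId is
+at hand, searchOperationExecutions can be used instead. The following
+is only a minimal sketch, assuming the v3 object and sessionToken from
+the "Accessing the API" section; as with the other V3 search methods,
+an empty criteria object matches all executions visible to the user.
+
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.OperationExecution;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.fetchoptions.OperationExecutionFetchOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.search.OperationExecutionSearchCriteria;
+
+    // ...
+    OperationExecutionSearchCriteria criteria = new OperationExecutionSearchCriteria();
+
+    SearchResult<OperationExecution> result =
+            v3.searchOperationExecutions(sessionToken, criteria, new OperationExecutionFetchOptions());
+
+    for (OperationExecution execution : result.getObjects())
+    {
+        // print the code and current state of each execution (both are part of the basic information)
+        System.out.println(execution.getCode() + " : " + execution.getState());
+    }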
+
+**V3GetOperationExecutionsAsynchronous.java**
+
+    import java.util.Arrays;
+    import java.util.Map;
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.id.EntityTypePermId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentIdentifier;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.AsynchronousOperationExecutionOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.AsynchronousOperationExecutionResults;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.OperationExecution;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.fetchoptions.OperationExecutionFetchOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.id.IOperationExecutionId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.CreateSamplesOperation;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.SampleCreation;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.id.SpacePermId;
+
+    public class V3GetOperationExecutionsAsynchronous
+    {
+        public static void main(String[] args)
+        {
+            // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+            SampleCreation sample = new SampleCreation();
+            sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE"));
+            sample.setSpaceId(new SpacePermId("MY_SPACE_CODE"));
+            sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE"));
+            sample.setCode("MY_SAMPLE_CODE");
+
+            CreateSamplesOperation operation = new CreateSamplesOperation(sample);
+
+            // Asynchronous execution: information about an asynchronous operation execution is always gathered, the executionId
+            // is also always automatically generated and returned with AsynchronousOperationExecutionResults.
+
+            AsynchronousOperationExecutionOptions options = new AsynchronousOperationExecutionOptions();
+
+            // Both synchronous and asynchronous executions: default availability times can be overwritten using the options object.
+            // Availability times should be specified in seconds.
+
+            options.setAvailabilityTime(30 * 24 * 60 * 60); // one month
+            options.setSummaryAvailabilityTime(24 * 60 * 60); // one day
+            options.setDetailsAvailabilityTime(60 * 60); // one hour
+
+            // Execute operation
+
+            AsynchronousOperationExecutionResults results =
+                    (AsynchronousOperationExecutionResults) v3.executeOperations(sessionToken, Arrays.asList(operation), options);
+
+            // It is an asynchronous execution. It might still be waiting for a free thread,
+            // it may already be executing, or it may have already finished. It does not matter:
+            // we can already fetch the information about it.
+
+            // Specify what information to fetch about the execution
+
+            OperationExecutionFetchOptions fo = new OperationExecutionFetchOptions();
+            fo.withSummary();
+            fo.withSummary().withOperations();
+            fo.withSummary().withProgress();
+            fo.withSummary().withResults();
+            fo.withSummary().withError();
+            fo.withDetails();
+            fo.withDetails().withOperations();
+            fo.withDetails().withProgress();
+            fo.withDetails().withResults();
+            fo.withDetails().withError();
+
+            // Get information about the execution
+
+            Map<IOperationExecutionId, OperationExecution> executions =
+                    v3.getOperationExecutions(sessionToken, Arrays.asList(results.getExecutionId()), fo);
+
+            OperationExecution execution = executions.get(results.getExecutionId());
+
+            // Summary contains String representation of operations, progress, results and error
+
+            String summaryOperation = execution.getSummary().getOperations().get(0);
+            System.out.println("Summary.operation: " + summaryOperation);
+            System.out.println("Summary.progress: " + execution.getSummary().getProgress());
+            System.out.println("Summary.results: " + execution.getSummary().getResults());
+            System.out.println("Summary.error: " + execution.getSummary().getError());
+
+            // Details contain object representation of operations, progress, results and error
+
+            CreateSamplesOperation detailsOperation = (CreateSamplesOperation) execution.getDetails().getOperations().get(0);
+            System.out.println("Details.operation: " + detailsOperation);
+            System.out.println("Details.progress: " + execution.getDetails().getProgress());
+            System.out.println("Details.results: " + execution.getDetails().getResults());
+            System.out.println("Details.error: " + execution.getDetails().getError());
+        }
+    }
+
+**V3GetOperationExecutionsAsynchronous.html**
+
+    <script>
+    require([ "openbis", "as/dto/sample/create/SampleCreation", "as/dto/entitytype/id/EntityTypePermId", "as/dto/space/id/SpacePermId", "as/dto/experiment/id/ExperimentIdentifier", "as/dto/sample/create/CreateSamplesOperation", "as/dto/operation/AsynchronousOperationExecutionOptions", "as/dto/operation/fetchoptions/OperationExecutionFetchOptions", "as/dto/operation/id/OperationExecutionPermId" ],
+    function(openbis, SampleCreation, EntityTypePermId, SpacePermId, ExperimentIdentifier, CreateSamplesOperation, AsynchronousOperationExecutionOptions, OperationExecutionFetchOptions, OperationExecutionPermId) {
+
+        // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+        var sample = new SampleCreation();
+        sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE"));
+        sample.setSpaceId(new SpacePermId("MY_SPACE_CODE"));
+        sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE"));
+        sample.setCode("MY_SAMPLE_CODE");
+
+        var operation = new CreateSamplesOperation([ sample ]);
+
+        // Asynchronous execution: information about an asynchronous operation execution is always gathered, the executionId
+        // is also always automatically generated and returned with AsynchronousOperationExecutionResults.
+
+        var options = new AsynchronousOperationExecutionOptions();
+
+        // Both synchronous and asynchronous executions: default availability times can be overwritten using the options object.
+        // Availability times should be specified in seconds.
+
+        options.setAvailabilityTime(30 * 24 * 60 * 60); // one month
+        options.setSummaryAvailabilityTime(24 * 60 * 60); // one day
+        options.setDetailsAvailabilityTime(60 * 60); // one hour
+
+        // Execute operation
+
+        v3.executeOperations([ operation ], options).done(function(results) {
+
+            // It is an asynchronous execution. It might still be waiting for a free thread,
+            // it may already be executing, or it may have already finished. It does not matter:
+            // we can already fetch the information about it.
+
+            // Specify what information to fetch about the execution
+
+            var fo = new OperationExecutionFetchOptions();
+            fo.withSummary();
+            fo.withSummary().withOperations();
+            fo.withSummary().withProgress();
+            fo.withSummary().withResults();
+            fo.withSummary().withError();
+
+            fo.withDetails();
+            fo.withDetails().withOperations();
+            fo.withDetails().withProgress();
+            fo.withDetails().withResults();
+            fo.withDetails().withError();
+
+            // Get information about the execution
+
+            v3.getOperationExecutions([ results.getExecutionId() ], fo).done(function(executions) {
+
+                var execution = executions[results.getExecutionId()];
+
+                // Summary contains String representation of operations, progress, results and error
+
+                var summaryOperation = execution.getSummary().getOperations()[0];
+                console.log("Summary.operation: " + summaryOperation);
+                console.log("Summary.progress: " + execution.getSummary().getProgress());
+                console.log("Summary.results: " + execution.getSummary().getResults());
+                console.log("Summary.error: " + execution.getSummary().getError());
+
+                // Details contain object representation of operations, progress, results and error
+
+                var detailsOperation = execution.getDetails().getOperations()[0];
+                console.log("Details.operation: " + detailsOperation);
+                console.log("Details.progress: " + execution.getDetails().getProgress());
+                console.log("Details.results: " + execution.getDetails().getResults());
+                console.log("Details.error: " + execution.getDetails().getError());
+            });
+        });
+    });
+    </script>
+
+
+
+
+
+**V3GetOperationExecutionsSynchronous.java**
+
+    import java.util.Arrays;
+    import java.util.Map;
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.id.EntityTypePermId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentIdentifier;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.OperationExecution;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.SynchronousOperationExecutionOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.fetchoptions.OperationExecutionFetchOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.id.IOperationExecutionId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.id.OperationExecutionPermId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.CreateSamplesOperation;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.SampleCreation;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.id.SpacePermId;
+
+    public class V3GetOperationExecutionsSynchronous
+    {
+        public static void main(String[] args)
+        {
+            // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+            SampleCreation sample = new SampleCreation();
+            sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE"));
+            sample.setSpaceId(new SpacePermId("MY_SPACE_CODE"));
+            sample.setExperimentId(new 
ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE"));
+            sample.setCode("MY_SAMPLE_CODE_7");
+
+            CreateSamplesOperation operation = new CreateSamplesOperation(sample);
+
+            // Synchronous execution: to gather information about a synchronous operation execution, the executionId has to
+            // be explicitly set in the options object. An OperationExecutionPermId created with the no-argument constructor
+            // automatically generates a random permId value.
+
+            SynchronousOperationExecutionOptions options = new SynchronousOperationExecutionOptions();
+            options.setExecutionId(new OperationExecutionPermId());
+
+            // Both synchronous and asynchronous executions: default availability times can be overwritten using the options object.
+            // Availability times should be specified in seconds.
+
+            options.setAvailabilityTime(30 * 24 * 60 * 60); // one month
+            options.setSummaryAvailabilityTime(24 * 60 * 60); // one day
+            options.setDetailsAvailabilityTime(60 * 60); // one hour
+
+            // Execute operation
+
+            v3.executeOperations(sessionToken, Arrays.asList(operation), options);
+
+            // Specify what information to fetch about the execution
+
+            OperationExecutionFetchOptions fo = new OperationExecutionFetchOptions();
+            fo.withSummary();
+            fo.withSummary().withOperations();
+            fo.withSummary().withProgress();
+            fo.withSummary().withResults();
+            fo.withSummary().withError();
+            fo.withDetails();
+            fo.withDetails().withOperations();
+            fo.withDetails().withProgress();
+            fo.withDetails().withResults();
+            fo.withDetails().withError();
+
+            // Get information about the execution
+
+            Map<IOperationExecutionId, OperationExecution> executions =
+                    v3.getOperationExecutions(sessionToken, Arrays.asList(options.getExecutionId()), fo);
+
+            OperationExecution execution = executions.get(options.getExecutionId());
+
+            // Summary contains String representation of operations, progress, results and error
+
+            String summaryOperation = execution.getSummary().getOperations().get(0);
+            System.out.println("Summary.operation: " + summaryOperation);
+            System.out.println("Summary.progress: " + execution.getSummary().getProgress());
+            System.out.println("Summary.results: " + execution.getSummary().getResults());
+            System.out.println("Summary.error: " + execution.getSummary().getError());
+
+            // Details contain object representation of operations, progress, results and error
+
+            CreateSamplesOperation detailsOperation = (CreateSamplesOperation) execution.getDetails().getOperations().get(0);
+            System.out.println("Details.operation: " + detailsOperation);
+            System.out.println("Details.progress: " + execution.getDetails().getProgress());
+            System.out.println("Details.results: " + execution.getDetails().getResults());
+            System.out.println("Details.error: " + execution.getDetails().getError());
+        }
+    }
+
+**V3GetOperationExecutionsSynchronous.html**
+
+    <script>
+    require([ "openbis", "as/dto/sample/create/SampleCreation", "as/dto/entitytype/id/EntityTypePermId", "as/dto/space/id/SpacePermId", "as/dto/experiment/id/ExperimentIdentifier", "as/dto/sample/create/CreateSamplesOperation", "as/dto/operation/SynchronousOperationExecutionOptions", "as/dto/operation/fetchoptions/OperationExecutionFetchOptions", "as/dto/operation/id/OperationExecutionPermId" ],
+    function(openbis, SampleCreation, EntityTypePermId, SpacePermId, ExperimentIdentifier, CreateSamplesOperation, SynchronousOperationExecutionOptions, OperationExecutionFetchOptions, OperationExecutionPermId) {
+
+        // we assume here that v3 object has been already created and we have already 
called login (please check "Accessing the API" section for more details)
+
+        var sample = new SampleCreation();
+        sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE"));
+        sample.setSpaceId(new SpacePermId("MY_SPACE_CODE"));
+        sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE"));
+        sample.setCode("MY_SAMPLE_CODE");
+
+        var operation = new CreateSamplesOperation([ sample ]);
+
+        // Synchronous execution: to gather information about a synchronous operation execution, the executionId has to
+        // be explicitly set in the options object. An OperationExecutionPermId created with the no-argument constructor
+        // automatically generates a random permId value.
+
+        var options = new SynchronousOperationExecutionOptions();
+        options.setExecutionId(new OperationExecutionPermId());
+
+        // Both synchronous and asynchronous executions: default availability times can be overwritten using the options object.
+        // Availability times should be specified in seconds.
+
+        options.setAvailabilityTime(30 * 24 * 60 * 60); // one month
+        options.setSummaryAvailabilityTime(24 * 60 * 60); // one day
+        options.setDetailsAvailabilityTime(60 * 60); // one hour
+
+        // Execute operation
+
+        v3.executeOperations([ operation ], options).done(function() {
+
+            // Specify what information to fetch about the execution
+
+            var fo = new OperationExecutionFetchOptions();
+            fo.withSummary();
+            fo.withSummary().withOperations();
+            fo.withSummary().withProgress();
+            fo.withSummary().withResults();
+            fo.withSummary().withError();
+
+            fo.withDetails();
+            fo.withDetails().withOperations();
+            fo.withDetails().withProgress();
+            fo.withDetails().withResults();
+            fo.withDetails().withError();
+
+            // Get information about the execution
+
+            v3.getOperationExecutions([ options.getExecutionId() ], fo).done(function(executions) {
+
+                var execution = executions[options.getExecutionId()];
+
+                // Summary contains String representation of operations, progress, results and error
+
+                var summaryOperation = execution.getSummary().getOperations()[0];
+                console.log("Summary.operation: " + summaryOperation);
+                console.log("Summary.progress: " + execution.getSummary().getProgress());
+                console.log("Summary.results: " + execution.getSummary().getResults());
+                console.log("Summary.error: " + execution.getSummary().getError());
+
+                // Details contain object representation of operations, progress, results and error
+
+                var detailsOperation = execution.getDetails().getOperations()[0];
+                console.log("Details.operation: " + detailsOperation);
+                console.log("Details.progress: " + execution.getDetails().getProgress());
+                console.log("Details.results: " + execution.getDetails().getResults());
+                console.log("Details.error: " + execution.getDetails().getError());
+            });
+        });
+    });
+    </script>
+
+##### Method updateOperationExecutions / deleteOperationExecutions
+
+The updateOperationExecutions and deleteOperationExecutions methods can
+be used to explicitly delete part of the information, or all of the
+information, about a given operation execution before the corresponding
+availability time expires.
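+
+For instance, here is a minimal sketch of an update that drops only the
+summary information; it assumes the v3 object and sessionToken from the
+"Accessing the API" section, a previously obtained executionId, and
+that deleteSummary() is the counterpart of the deleteDetails() call
+shown in the full example below:
+
+    OperationExecutionUpdate update = new OperationExecutionUpdate();
+    update.setExecutionId(executionId); // a hypothetical, previously obtained execution id
+    update.deleteSummary(); // request a deletion of the summary information only
+
+    v3.updateOperationExecutions(sessionToken, Arrays.asList(update));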
+ +**V3UpdateOperationExecutions.java** + + import java.util.Arrays; + import java.util.Map; + import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.id.EntityTypePermId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentIdentifier; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.AsynchronousOperationExecutionOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.AsynchronousOperationExecutionResults; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.OperationExecution; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.fetchoptions.OperationExecutionFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.id.IOperationExecutionId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.update.OperationExecutionUpdate; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.CreateSamplesOperation; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.SampleCreation; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.id.SpacePermId; + + public class V3UpdateOperationExecutions + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + SampleCreation sample = new SampleCreation(); + sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + sample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE")); + sample.setCode("MY_SAMPLE_CODE"); + + CreateSamplesOperation operation = new CreateSamplesOperation(sample); + AsynchronousOperationExecutionOptions options = new AsynchronousOperationExecutionOptions(); + + // Execute operation + + AsynchronousOperationExecutionResults results = + (AsynchronousOperationExecutionResults) v3.executeOperations(sessionToken, Arrays.asList(operation), options); + + // You can explicitly request a deletion of summary or details. Here we want to delete details. + + OperationExecutionUpdate update = new OperationExecutionUpdate(); + update.setExecutionId(results.getExecutionId()); + update.deleteDetails(); + + v3.updateOperationExecutions(sessionToken, Arrays.asList(update)); + + // Let's check the execution information + + OperationExecutionFetchOptions fo = new OperationExecutionFetchOptions(); + fo.withSummary(); + fo.withDetails(); + + Map<IOperationExecutionId, OperationExecution> executions = + v3.getOperationExecutions(sessionToken, Arrays.asList(results.getExecutionId()), fo); + + OperationExecution execution = executions.get(results.getExecutionId()); + + // Summary availability is AVAILABLE. Details availability is either DELETE_PENDING or DELETED + // depending on whether a maintenance task has already processed the deletion request. 
+ + System.out.println("Summary: " + execution.getSummary()); + System.out.println("Summary.availability: " + execution.getSummaryAvailability()); + System.out.println("Details: " + execution.getDetails()); + System.out.println("Details.availability: " + execution.getDetailsAvailability()); + } + } + +**V3UpdateOperationExecutions.html** + + <script> + require([ "openbis", "as/dto/sample/create/SampleCreation", "as/dto/entitytype/id/EntityTypePermId", "as/dto/space/id/SpacePermId", "as/dto/experiment/id/ExperimentIdentifier", "as/dto/sample/create/CreateSamplesOperation", "as/dto/operation/AsynchronousOperationExecutionOptions", "as/dto/operation/update/OperationExecutionUpdate", "as/dto/operation/fetchoptions/OperationExecutionFetchOptions" ], + function(openbis, SampleCreation, EntityTypePermId, SpacePermId, ExperimentIdentifier, CreateSamplesOperation, AsynchronousOperationExecutionOptions, OperationExecutionUpdate, OperationExecutionFetchOptions) { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) +  + var sample = new SampleCreation(); + sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + sample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE")); + sample.setCode("MY_SAMPLE_CODE"); + + var operation = new CreateSamplesOperation([ sample ]); + var options = new AsynchronousOperationExecutionOptions(); + + // Execute operation + + v3.executeOperations([ operation ], options).done(function(results) { + + // You can explicitly request a deletion of summary or details. Here we want to delete details. + + var update = new OperationExecutionUpdate(); + update.setExecutionId(results.getExecutionId()); + update.deleteDetails(); + + v3.updateOperationExecutions([ update ]).done(function() { + + // Let's check the execution information + + var fo = new OperationExecutionFetchOptions(); + fo.withSummary(); + fo.withDetails(); + + v3.getOperationExecutions([ results.getExecutionId() ], fo).done(function(executions) { + + var execution = executions[results.getExecutionId()]; + + // Summary availability is AVAILABLE. Details availability is either DELETE_PENDING or DELETED + // depending on whether a maintenance task has already processed the deletion request. 
+ + console.log("Summary: " + execution.getSummary()); + console.log("Summary.availability: " + execution.getSummaryAvailability()); + console.log("Details: " + execution.getDetails()); + console.log("Details.availability: " + execution.getDetailsAvailability()); + }); + }); + }); + }); + </script> + + + + +**V3DeleteOperationExecutions.java** + + import java.util.Arrays; + import java.util.Map; + import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.id.EntityTypePermId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentIdentifier; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.AsynchronousOperationExecutionOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.AsynchronousOperationExecutionResults; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.OperationExecution; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.delete.OperationExecutionDeletionOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.fetchoptions.OperationExecutionFetchOptions; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.operation.id.IOperationExecutionId; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.CreateSamplesOperation; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.create.SampleCreation; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.space.id.SpacePermId; + + public class V3DeleteOperationExecutions + { + public static void main(String[] args) + { + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + SampleCreation sample = new SampleCreation(); + sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE")); + sample.setSpaceId(new SpacePermId("MY_SPACE_CODE")); + sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE")); + sample.setCode("MY_SAMPLE_CODE"); + + CreateSamplesOperation operation = new CreateSamplesOperation(sample); + AsynchronousOperationExecutionOptions options = new AsynchronousOperationExecutionOptions(); + + // Execute operation + + AsynchronousOperationExecutionResults results = + (AsynchronousOperationExecutionResults) v3.executeOperations(sessionToken, Arrays.asList(operation), options); + + // Explicitly request a deletion of all the information about the execution + + OperationExecutionDeletionOptions deletionOptions = new OperationExecutionDeletionOptions(); + deletionOptions.setReason("test reason"); + + v3.deleteOperationExecutions(sessionToken, Arrays.asList(results.getExecutionId()), deletionOptions); + + // Let's check whether the execution information is still available + + + Map<IOperationExecutionId, OperationExecution> executions = + v3.getOperationExecutions(sessionToken, Arrays.asList(results.getExecutionId()), new OperationExecutionFetchOptions()); + + OperationExecution execution = executions.get(results.getExecutionId()); + + // Depending on whether a maintenance task has already processed the deletion request + // the execution will be either null or the returned execution availability will be DELETE_PENDING. + + System.out.println("Availability: " + (execution != null ? 
execution.getAvailability() : null));
+        }
+    }
+
+**V3DeleteOperationExecutions.html**
+
+    <script>
+    require([ "openbis", "as/dto/sample/create/SampleCreation", "as/dto/entitytype/id/EntityTypePermId", "as/dto/space/id/SpacePermId", "as/dto/experiment/id/ExperimentIdentifier", "as/dto/sample/create/CreateSamplesOperation", "as/dto/operation/AsynchronousOperationExecutionOptions", "as/dto/operation/delete/OperationExecutionDeletionOptions", "as/dto/operation/fetchoptions/OperationExecutionFetchOptions" ],
+    function(openbis, SampleCreation, EntityTypePermId, SpacePermId, ExperimentIdentifier, CreateSamplesOperation, AsynchronousOperationExecutionOptions, OperationExecutionDeletionOptions, OperationExecutionFetchOptions) {
+
+        // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+        var sample = new SampleCreation();
+        sample.setTypeId(new EntityTypePermId("MY_SAMPLE_TYPE_CODE"));
+        sample.setSpaceId(new SpacePermId("MY_SPACE_CODE"));
+        sample.setExperimentId(new ExperimentIdentifier("/MY_SPACE_CODE/MY_PROJECT_CODE/MY_EXPERIMENT_CODE"));
+        sample.setCode("MY_SAMPLE_CODE");
+
+        var operation = new CreateSamplesOperation([ sample ]);
+        var options = new AsynchronousOperationExecutionOptions();
+
+        // Execute operation
+
+        v3.executeOperations([ operation ], options).done(function(results) {
+
+            // Explicitly request a deletion of all the information about the execution
+
+            var deletionOptions = new OperationExecutionDeletionOptions();
+            deletionOptions.setReason("test reason");
+
+            v3.deleteOperationExecutions([ results.getExecutionId() ], deletionOptions).done(function() {
+
+                // Let's check whether the execution information is still available
+
+                v3.getOperationExecutions([ results.getExecutionId() ], new OperationExecutionFetchOptions()).done(function(executions) {
+
+                    var execution = executions[results.getExecutionId()];
+
+                    // Depending on whether a maintenance task has already processed the deletion request
+                    // the execution will be either null or the returned execution availability will be DELETE_PENDING.
+
+                    console.log("Availability: " + (execution != null ? execution.getAvailability() : null));
+
+                });
+            });
+        });
+    });
+    </script>
+
+##### Configuration
+
+Many aspects of the operation execution behavior can be configured via
+the service.properties file. More details on what exactly can be
+configured can be found in the file itself.
+
+#### Semantic Annotations
+
+If terms like semantic web, RDF and OWL are new to you, then it is
+highly recommended to read the following tutorial first:
+<http://www.linkeddatatools.com/semantic-web-basics>.
+
+In short: semantic annotations allow you to define a meaning for openBIS
+sample types, property types and sample property assignments by means of
+ontology terms. This, together with standards like "Dublin Core"
+(<http://dublincore.org/>), can help you integrate openBIS with other
+systems and easily exchange data between them with a well-defined
+meaning.
+
+To describe the meaning of a single sample type, property type or sample
+property assignment, a collection of semantic annotations can be used.
+For instance, you can use one annotation to describe the general meaning
+of a property and another one to describe the unit used for its values.
+
+In order to make the openBIS configuration easier to maintain, sample
+property assignments inherit semantic annotations from the corresponding
+property type. 
This inheritance works only for sample property
+assignments without any semantic annotations, i.e. if there is at least
+one semantic annotation defined at the sample property assignment level,
+then nothing is inherited from the property type level anymore. The
+inheritance makes it possible to define the meaning of a property once,
+at the property type level, and override it, only if needed, at the
+sample property assignment level.
+
+The V3 API provides the following methods to manipulate semantic
+annotations:
+
+- createSemanticAnnotations
+- updateSemanticAnnotations
+- deleteSemanticAnnotations
+- getSemanticAnnotations
+- searchSemanticAnnotations
+
+These methods work similarly to the other create/update/delete/get/search
+methods in the V3 API.
+
+Moreover, once semantic annotations are defined, it is possible to
+search for samples and sample types that have a given semantic
+annotation. To do so, one has to use the searchSamples and
+searchSampleTypes methods and specify an appropriate
+withType().withSemanticAnnotations() condition in SampleSearchCriteria
+or withSemanticAnnotations() condition in SampleTypeSearchCriteria.
+
+#### Web App Settings
+
+The web app settings functionality is a user-specific key-value map
+where a user-specific configuration can be stored. The settings are
+persistent, i.e. they can live longer than the user session that created
+them. Web app settings of a given user can be read/updated only by that
+user or by an instance admin.
+
+
+
+**WebAppSettingsExample.java**
+
+    import java.util.Arrays;
+    import java.util.Map;
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.person.Person;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.person.fetchoptions.PersonFetchOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.person.id.IPersonId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.person.id.Me;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.person.update.PersonUpdate;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.webapp.WebAppSetting;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.webapp.create.WebAppSettingCreation;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.webapp.fetchoptions.WebAppSettingsFetchOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.webapp.update.WebAppSettingsUpdateValue;
+
+    public class WebAppSettingsExample
+    {
+        public static void main(String[] args)
+        {
+            // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+            PersonUpdate update = new PersonUpdate();
+            // update the currently logged in user
+            update.setUserId(new Me());
+
+            // add "setting1a" and "setting1b" to "app1" (other settings for "app1" will remain unchanged)
+            WebAppSettingsUpdateValue app1 = update.getWebAppSettings("app1");
+            app1.add(new WebAppSettingCreation("setting1a", "value1a"));
+            app1.add(new WebAppSettingCreation("setting1b", "value1b"));
+
+            // set "setting2a", "setting2b" and "setting2c" for "app2" (other settings for "app2" will be removed)
+            WebAppSettingsUpdateValue app2 = update.getWebAppSettings("app2");
+            app2.set(new WebAppSettingCreation("setting2a", "value2a"), new WebAppSettingCreation("setting2b", "value2b"),
+                    new WebAppSettingCreation("setting2c", "value2c"));
+
+            // remove "setting3a" from "app3" (other settings for "app3" will remain unchanged)
+            WebAppSettingsUpdateValue app3 = update.getWebAppSettings("app3");
+            app3.remove("setting3a");
+
+            
v3.updatePersons(sessionToken, Arrays.asList(update)); + + // option 1 : fetch a person with all settings of all web apps + PersonFetchOptions personFo1 = new PersonFetchOptions(); + personFo1.withAllWebAppSettings(); + + // option 2 : fetch a person with either all or chosen settings of chosen web apps + PersonFetchOptions personFo2 = new PersonFetchOptions(); + + // option 2a : fetch "app1" with all settings + WebAppSettingsFetchOptions app1Fo = personFo2.withWebAppSettings("app1"); + app1Fo.withAllSettings(); + + // option 2b : fetch "app2" with chosen settings + WebAppSettingsFetchOptions app2Fo = personFo2.withWebAppSettings("app2"); + app2Fo.withSetting("setting2a"); + app2Fo.withSetting("setting2b"); + + Map<IPersonId, Person> persons = v3.getPersons(sessionToken, Arrays.asList(new Me()), personFo2); + Person person = persons.values().iterator().next(); + + // get "setting1a" for "app1" + WebAppSetting setting1a = person.getWebAppSettings("app1").getSetting("setting1a"); + System.out.println(setting1a.getValue()); + + // get all fetched settings for "app2" + Map<String, WebAppSetting> settings2 = person.getWebAppSettings("app2").getSettings(); + System.out.println(settings2); + } + } + +**WebAppSettingsExample.html** + + <script> + require([ "jquery", "openbis", "as/dto/person/update/PersonUpdate", "as/dto/person/id/Me", "as/dto/webapp/create/WebAppSettingCreation", "as/dto/person/fetchoptions/PersonFetchOptions" ], + function($, openbis, PersonUpdate, Me, WebAppSettingCreation, PersonFetchOptions) { + $(document).ready(function() { + + // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details) + + var update = new PersonUpdate(); + // update the currently logged in user + update.setUserId(new Me()); + + // add "setting1a" and "setting1b" to "app1" (other settings for "app1" will remain unchanged) + var app1 = update.getWebAppSettings("app1"); + app1.add(new WebAppSettingCreation("setting1a", "value1a")); + app1.add(new WebAppSettingCreation("setting1b", "value1b")); + + // set "setting2a", "setting2b" and "setting2c" for "app2" (other settings for "app2" will be removed) + var app2 = update.getWebAppSettings("app2"); + app2.set([ new WebAppSettingCreation("setting2a", "value2a"), new WebAppSettingCreation("setting2b", "value2b"), new WebAppSettingCreation("setting2c", "value2c") ]); + + // remove "setting3a" from "app3" (other settings for "app3" will remain unchanged) + var app3 = update.getWebAppSettings("app3"); + app3.remove("setting3a"); + + v3.updatePersons([ update ]).done(function() { + + // option 1 : fetch a person with all settings of all web apps + var personFo1 = new PersonFetchOptions(); + personFo1.withAllWebAppSettings(); + + // option 2 : fetch a person with either all or chosen settings of chosen web apps + var personFo2 = new PersonFetchOptions(); + + // option 2a : fetch "app1" with all settings + var app1Fo = personFo2.withWebAppSettings("app1"); + app1Fo.withAllSettings(); + + // option 2b : fetch "app2" with chosen settings + var app2Fo = personFo2.withWebAppSettings("app2"); + app2Fo.withSetting("setting2a"); + app2Fo.withSetting("setting2b"); + + v3.getPersons([ new Me() ], personFo2).done(function(persons) { + + var person = persons[new Me()]; + + // get "setting1a" for "app1" + var setting1a = person.getWebAppSettings("app1").getSetting("setting1a"); + console.log(setting1a.getValue()); + + // get all fetched settings for "app2" + var settings2 = 
person.getWebAppSettings("app2").getSettings();
+                    console.log(settings2);
+                });
+            });
+        });
+    });
+    </script>
+
+#### Imports
+
+The imports that are normally accessible via the "Import" menu in the
+generic openBIS UI can also be used programmatically from within a V3
+custom AS service. Such an import process consists of two steps:
+
+- uploading a file to the /openbis/upload servlet to be temporarily
+  stored under a specific user session key (more information on the
+  upload servlet can be found [here](/pages/viewpage.action?pageId=80699317))
+- importing the uploaded file using one of
+  the ch.ethz.sis.openbis.generic.asapi.v3.plugin.service.IImportService
+  methods accessible from within a V3 custom AS service
+
+Currently available import methods:
+
+- String createExperiments(String sessionToken, String uploadKey,
+  String experimentTypeCode, boolean async, String userEmail)
+
+- String updateExperiments(String sessionToken, String uploadKey,
+  String experimentTypeCode, boolean async, String userEmail)
+
+- String createSamples(String sessionToken, String uploadKey, String
+  sampleTypeCode, String defaultSpaceIdentifier, String
+  spaceIdentifierOverride, String experimentIdentifierOverride,
+  boolean updateExisting, boolean async, String userEmail)
+
+- String updateSamples(String sessionToken, String uploadKey, String
+  sampleTypeCode, String defaultSpaceIdentifier, String
+  spaceIdentifierOverride, String experimentIdentifierOverride,
+  boolean async, String userEmail)
+
+- String updateDataSets(String sessionToken, String uploadKey, String
+  dataSetTypeCode, boolean async, String userEmail)
+
+- String createMaterials(String sessionToken, String uploadKey, String
+  materialTypeCode, boolean updateExisting, boolean async, String
+  userEmail)
+
+- String updateMaterials(String sessionToken, String uploadKey, String
+  materialTypeCode, boolean ignoreUnregistered, boolean async, String
+  userEmail)
+
+- String generalImport(String sessionToken, String uploadKey, String
+  defaultSpaceIdentifier, boolean updateExisting,
+  boolean async, String userEmail) - import of samples and materials
+  from an Excel file
+
+- String customImport(String sessionToken, String uploadKey, String
+  customImportCode, boolean async, String userEmail) - import
+  delegated to a dropbox
+
+Parameters:
+
+|Parameter|Type|Methods|Description|
+|--- |--- |--- |--- |
+|sessionToken|String|ALL|openBIS session token; to get the session token of the currently logged in user inside a custom AS service, the context.getSessionToken() method shall be used.|
+|uploadKey|String|ALL|A key the file to be imported has been uploaded to (see the 1st step of the import process described above).|
+|async|boolean|ALL|A flag that controls whether the import should be performed synchronously (i.e. in the current thread) or asynchronously (i.e. in a separate thread). 
For asynchronous imports an email with either an execution result or error is sent to the specified email address (see userEmail parameter).|
+|userEmail|String|ALL|An email address where an execution result or error should be sent (only for asynchronous imports - see async parameter).|
+|experimentTypeCode|String|createExperiments, updateExperiments|A type of experiments to be created/updated.|
+|sampleTypeCode|String|createSamples, updateSamples|A type of samples to be created/updated.|
+|dataSetTypeCode|String|updateDataSets|A type of data sets to be updated.|
+|materialTypeCode|String|createMaterials, updateMaterials|A type of materials to be created/updated.|
+|customImportCode|String|customImport|A code of a custom import the import process should be delegated to. A custom import sends the uploaded file to a dropbox. Inside the dropbox the uploaded file can be accessed via the transaction.getIncoming() method.|
+|defaultSpaceIdentifier|String|createSamples, updateSamples, generalImport|A default space identifier. If null, then identifiers of samples to be created/updated are expected to be specified in the uploaded file. If not null, then: codes of samples to be created are automatically generated and the samples are created in the requested default space; identifiers of samples to be updated can omit the space part (the requested default space will be automatically added).|
+|spaceIdentifierOverride|String|createSamples, updateSamples|A space identifier to be used instead of the ones defined in the uploaded file.|
+|experimentIdentifierOverride|String|createSamples, updateSamples|An experiment identifier to be used instead of the ones defined in the uploaded file.|
+|updateExisting|boolean|createSamples, createMaterials, generalImport|A flag that controls whether an attempt to create an already existing entity should perform an update instead or should fail.|
+|ignoreUnregistered|boolean|updateMaterials|A flag that controls whether an attempt to update a nonexistent entity should be silently ignored or should fail.|
+
+File formats:
+
+The TSV examples below assume that the experiment/sample/dataset/material
+type used contains exactly one property called "DESCRIPTION".
+
+|Method|Template|
+|--- |--- |
+|createExperiments|create-experiments-import-template.tsv|
+|updateExperiments|update-experiments-import-template.tsv|
+|createSamples|create-samples-import-template.tsv|
+|updateSamples|update-samples-import-template.tsv|
+|updateDataSets|update-data-sets-import-template.tsv|
+|createMaterials|create-materials-import-template.tsv|
+|updateMaterials|update-materials-import-template.tsv|
+|generalImport||
+|customImport|any kind of file|
+
+Return values:
+
+All methods return a message with a short summary of the performed
+operation, e.g. a synchronous createSamples method call could return a
+message like "Registration of 1 sample(s) is complete." while the
+asynchronous version could return a message like "When the import is
+complete the confirmation or failure report will be sent by email.".
+
+
+
+An example webapp to upload a file with samples and a custom AS service
+to import that file is presented below.
+
+
+
+**ImportSamplesWebAppExample.html**
+
+    <!DOCTYPE html>
+    <html>
+    <head>
+    <meta charset="utf-8">
+    <title>Samples import</title>
+
+    <script type="text/javascript" src="/openbis-test/resources/api/v3/config.js"></script>
+    <script type="text/javascript" src="/openbis-test/resources/api/v3/require.js"></script>
+
+    </head>
+    <body>
+    <script>
+    require([ "jquery", "openbis", "as/dto/service/id/CustomASServiceCode", "as/dto/service/CustomASServiceExecutionOptions" ], function($, openbis, CustomASServiceCode, CustomASServiceExecutionOptions) {
+        $(document).ready(function() {
+
+            // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+            var uploadFrame = $("#uploadFrame");
+            uploadFrame.load(function() {
+                alert("Upload finished");
+            });
+
+            var uploadForm = $("#uploadForm");
+            uploadForm.find("input[name=sessionID]").val(sessionToken);
+
+            var importForm = $("#importForm");
+            importForm.submit(function(e) {
+                e.preventDefault();
+
+                var sampleType = importForm.find("input[name=sampleType]").val();
+                var serviceId = new CustomASServiceCode("import-service");
+                var serviceOptions = new CustomASServiceExecutionOptions();
+                serviceOptions.withParameter("sampleType", sampleType);
+
+                v3.executeCustomASService(serviceId, serviceOptions).done(function(result) {
+                    alert("Import successful: " + result);
+                }).fail(function(error) {
+                    alert("Import failed: " + error.message);
+                });
+
+                return false;
+            });
+        });
+    });
+    </script>
+
+    <iframe id="uploadFrame" name="uploadFrame" style="display: none"></iframe>
+
+    <h1>Step 1 : upload samples file</h1>
+    <form id="uploadForm" method="post" action="/openbis/upload" enctype="multipart/form-data" target="uploadFrame">
+        <input type="file" name="importWebappUploadKey" multiple="multiple">
+        <input type="hidden" name="sessionID">
+        <input type="hidden" name="sessionKeysNumber" value="1">
+        <input type="hidden" name="sessionKey_0" value="importWebappUploadKey">
+        <input type="submit">
+    </form>
+
+    <h1>Step 2 : import samples file</h1>
+    <form id="importForm">
+        <label>Sample Type</label>
+        <input type="text" name="sampleType">
+        <input type="submit">
+    </form>
+
+    </body>
+    </html>
+
+**ImportSamplesServiceExample.py**
+
+    def process(context, parameters):
+        sampleType = parameters.get("sampleType")
+        return context.getImportService().createSamples(context.getSessionToken(), "importWebappUploadKey", sampleType, None, None, None, False, False, None)
+
+#### Generate identifiers
+
+The V3 API provides two methods for generating unique identifiers:
+
+- createPermIdStrings - generates globally unique identifiers that
+  consist of a timestamp and a sequence-generated number (e.g.
+  "20180531170854641-944"); this method uses one global sequence.
+- createCodes - generates identifiers that are unique for a given
+  entity kind and consist of a prefix and a sequence-generated number
+  (e.g. "MY-PREFIX-147"); this method uses a dedicated sequence for
+  each entity kind.
+
+
+
+**GenerateIdentifiersExample.java**
+
+    import java.util.List;
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.EntityKind;
+
+    public class GenerateIdentifiersExample
+    {
+        public static void main(String[] args)
+        {
+            // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+            List<String> permIds = v3.createPermIdStrings(sessionToken, 2);
+            List<String> codes = v3.createCodes(sessionToken, "MY-PREFIX-", EntityKind.SAMPLE, 3);
+
+            System.out.println(permIds); // example output: [20180531170854641-944, 20180531170854641-945]
+            System.out.println(codes); // example output: [MY-PREFIX-782, MY-PREFIX-783, MY-PREFIX-784]
+        }
+    }
+
+**GenerateIdentifiersExample.html**
+
+    <script>
+    require([ "jquery", "openbis", "as/dto/entitytype/EntityKind" ], function($, openbis, EntityKind) {
+        $(document).ready(function() {
+
+            // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+            v3.createPermIdStrings(2).then(function(permIds) {
+                console.log(permIds); // example output: [20180531170854641-944, 20180531170854641-945]
+            });
+
+            v3.createCodes("MY-PREFIX-", EntityKind.SAMPLE, 3).then(function(codes) {
+                console.log(codes); // example output: [MY-PREFIX-782, MY-PREFIX-783, MY-PREFIX-784]
+            });
+        });
+    });
+    </script>
+
+### V. DSS Methods
+
+#### Search files
+
+The searchFiles method can be used to search for data set files at a
+single data store (Java version) or at multiple data stores at the same
+time (JavaScript version).
+
+Similarly to the other V3 search methods, it takes a sessionToken,
+search criteria and fetch options as parameters, and returns a search
+result object.
+
+When searching across multiple data stores, the results from each data
+store are combined and returned as a single regular search result
+object, as if it had been returned by only one data store.
+
+##### Example
+
+**V3SearchDataSetFilesExample.java**
+
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.search.DataSetSearchCriteria;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.DataSetFile;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.fetchoptions.DataSetFileFetchOptions;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.search.DataSetFileSearchCriteria;
+
+    public class V3SearchDataSetFilesExample
+    {
+        public static void main(String[] args)
+        {
+            // we assume here that v3 objects for both AS and DSS have been already created and we have already called login on AS to get the sessionToken (please check "Accessing the API" section for more details)
+
+            DataSetFileSearchCriteria criteria = new DataSetFileSearchCriteria();
+
+            DataSetSearchCriteria dataSetCriteria = criteria.withDataSet().withOrOperator();
+            dataSetCriteria.withCode().thatEquals("MY_DATA_SET_CODE_1");
+            dataSetCriteria.withCode().thatEquals("MY_DATA_SET_CODE_2");
+
+            // Searches for files at a single data store
+            SearchResult<DataSetFile> result = dssV3.searchFiles(sessionToken, criteria, new DataSetFileFetchOptions());
+
+            for (DataSetFile file : result.getObjects())
+            {
+                System.out.println("DataSet: " + file.getDataSetPermId() + " has file: " + file.getPath());
+            }
+        }
+    }
+
+**V3SearchDataSetFilesAtAllDataStoresExample.html**
+
+    <script>
+    require([ "openbis", "dss/dto/datasetfile/search/DataSetFileSearchCriteria", "dss/dto/datasetfile/fetchoptions/DataSetFileFetchOptions" ],
+        function(openbis, DataSetFileSearchCriteria, DataSetFileFetchOptions) {
+
+            // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+            var criteria = new DataSetFileSearchCriteria();
+
+            var dataSetCriteria = criteria.withDataSet().withOrOperator();
+            dataSetCriteria.withCode().thatEquals("MY_DATA_SET_CODE_1");
+            dataSetCriteria.withCode().thatEquals("MY_DATA_SET_CODE_2");
+
+            var fetchOptions = new DataSetFileFetchOptions();
+
+            // getDataStoreFacade() call (without any parameters) returns a facade object that uses all available data stores,
+            // e.g. calling searchFiles on such a facade searches for files at all available data stores
+            v3.getDataStoreFacade().searchFiles(criteria, fetchOptions).done(function(result) {
+                result.getObjects().forEach(function(file) {
+                    console.log("DataSet: " + file.getDataSetPermId() + " has file: " + file.getPath());
+                });
+            });
+    });
+    </script>
+
+**V3SearchDataSetFilesAtChosenDataStoresExample.html**
+
+    <script>
+    require([ "openbis", "dss/dto/datasetfile/search/DataSetFileSearchCriteria", "dss/dto/datasetfile/fetchoptions/DataSetFileFetchOptions" ],
+        function(openbis, DataSetFileSearchCriteria, DataSetFileFetchOptions) {
+
+            // we assume here that v3 object has been already created and we have already called login (please check "Accessing the API" section for more details)
+
+            var criteria = new DataSetFileSearchCriteria();
+
+            var dataSetCriteria = criteria.withDataSet().withOrOperator();
+            dataSetCriteria.withCode().thatEquals("MY_DATA_SET_CODE_1");
+            dataSetCriteria.withCode().thatEquals("MY_DATA_SET_CODE_2");
+
+            var fetchOptions = new DataSetFileFetchOptions();
+
+            // getDataStoreFacade("DSS1","DSS2") returns a facade object that uses only the "DSS1" and "DSS2" data stores,
+            // e.g. calling searchFiles on such a facade searches for files only at these two data stores,
+            // even if there are more data stores available
+            v3.getDataStoreFacade("DSS1", "DSS2").searchFiles(criteria, fetchOptions).done(function(result) {
+                result.getObjects().forEach(function(file) {
+                    console.log("DataSet: " + file.getDataSetPermId() + " has file: " + file.getPath());
+                });
+            });
+    });
+    </script>
+
+#### Downloading files, folders, and datasets
+
+Datasets that are created in openBIS can be accessed by the V3 API in a
+number of different ways. It's possible to download individual files,
+folders, and entire datasets as illustrated in the following examples.
+To get started, it is necessary to reference both the AS API
+(IApplicationServerApi) and the DSS API (IDataStoreServerApi), and to
+login and get a session token.
+
+The API provides two methods for downloading:
+
+- Simple downloading: A single InputStream is returned which contains
+  all files and file meta data.
+- Fast downloading: A FastDownloadSession object is returned which is
+  used by a helper class to download files in parallel streams in
+  chunks. It is based on the [SIS File Transfer Protocol](#).
+
+#### Simple Downloading
+
+By setting the DataSetFileDownloadOptions it's possible to change how
+data is downloaded - data can be downloaded file by file, by folder, or
+by an entire dataset in a recursive manner. It is also possible to
+search for datasets by defining the appropriate search criteria
+(DataSetFileSearchCriteria).
+
+In order to download content via the V3 DSS API, the dataset needs to
+already be inside openBIS. It is necessary to know the dataset code at
+the very minimum. It is helpful to also know the path of the file to
+be downloaded.
+
+##### Download a single file located inside a dataset
+
+Here is how to download a single file and print out the contents, when
+the dataset code and the file path are known. A search is not
+necessary here, since the file path and dataset code are known.
+
+###### A note about recursion
+
+Note that when only downloading one file, it is better to set the
+recursive flag to false in DataSetFileDownloadOptions, although it makes
+no difference in the results returned. The recursive flag really only
+matters when downloading entire datasets or directories - if it is true,
+then the entire tree of contents will be downloaded; if false, then the
+single path requested will be downloaded. If that path is just a
+directory then the returned result will consist of just meta data about
+the directory.
+
+**Download a single file**
+
+    import java.io.InputStream;
+    import java.util.Arrays;
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.id.DataSetPermId;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.IDataStoreServerApi;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownload;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownloadOptions;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownloadReader;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.id.DataSetFilePermId;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.id.IDataSetFileId;
+    import ch.systemsx.cisd.common.spring.HttpInvokerUtils;
+
+    public class V3DSSExample1
+    {
+        // DATASET EXAMPLE STRUCTURE
+        // The dataset consists of a root folder with 2 files and a subfolder with 1 file
+        // root:
+        //   - file1.txt
+        //   - file2.txt
+        //   - subfolder:
+        //     - file3.txt
+
+        public static void main(String[] args) throws Exception
+        {
+            String AS_URL = "https://localhost:8443/openbis/openbis";
+            String DSS_URL = "https://localhost:8444/datastore_server";
+
+            // Reference the DSS
+            IDataStoreServerApi dss =
+                    HttpInvokerUtils.createStreamSupportingServiceStub(IDataStoreServerApi.class,
+                            DSS_URL + IDataStoreServerApi.SERVICE_URL, 10000);
+
+            // Reference the AS and login & get a session token
+            IApplicationServerApi as = HttpInvokerUtils
+                    .createServiceStub(IApplicationServerApi.class, AS_URL
+                            + IApplicationServerApi.SERVICE_URL, 10000);
+
+            String sessionToken = as.login("admin", "password");
+
+            // Download a single file with a path and a dataset code
+            DataSetFileDownloadOptions options = new DataSetFileDownloadOptions();
+            options.setRecursive(false);
+            IDataSetFileId fileToDownload = new DataSetFilePermId(
+                    new DataSetPermId("20161205154857065-25"),
+                    "root/subfolder/file3.txt");
+
+            // Download the files into a stream and read them with the file reader
+            // Here there is only one file, but we need to put it in an array anyway
+            InputStream stream = dss.downloadFiles(sessionToken,
+                    Arrays.asList(fileToDownload),
+                    options);
+
+            DataSetFileDownloadReader reader = new DataSetFileDownloadReader(stream);
+            DataSetFileDownload file = null;
+
+            // Print out the contents
+            while ((file = reader.read()) != null)
+            {
+                System.out.println("Downloaded " + file.getDataSetFile().getPath() + " " + file.getDataSetFile().getFileLength());
+                System.out.println("-----FILE CONTENTS-----");
+                // read the actual file contents from the stream (Java 9+)
+                System.out.println(new String(file.getInputStream().readAllBytes()));
+            }
+        }
+    }
+
+##### Download a folder located inside a dataset
+
+The example below demonstrates how to download a folder and all its
+contents, when the dataset code and the folder path are known. The goal
+here is to download the directory called "subfolder" and the file
+"file3.txt", which will return two objects: one representing the meta
+data of the directory, and the other representing both the meta data of
+file3.txt and the file contents. Note that setting the recursive flag to
+true will return both the subfolder directory object AND file3.txt,
+while setting the recursive flag to false will return just the meta data
+of the directory object.
+
+**Download a folder**
+
+    import java.io.InputStream;
+    import java.util.Arrays;
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.id.DataSetPermId;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.IDataStoreServerApi;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownload;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownloadOptions;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownloadReader;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.id.DataSetFilePermId;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.id.IDataSetFileId;
+    import ch.systemsx.cisd.common.spring.HttpInvokerUtils;
+
+    public class V3DSSExample2
+    {
+        // DATASET EXAMPLE STRUCTURE
+        // The dataset consists of a root folder with 2 files and a subfolder with 1 file
+        // root:
+        //   - file1.txt
+        //   - file2.txt
+        //   - subfolder:
+        //     - file3.txt
+
+        public static void main(String[] args) throws Exception
+        {
+            String AS_URL = "https://localhost:8443/openbis/openbis";
+            String DSS_URL = "https://localhost:8444/datastore_server";
+
+            // Reference the DSS
+            IDataStoreServerApi dss =
+                    HttpInvokerUtils.createStreamSupportingServiceStub(IDataStoreServerApi.class,
+                            DSS_URL + IDataStoreServerApi.SERVICE_URL, 10000);
+
+            // Reference the AS and login & get a session token
+            IApplicationServerApi as = HttpInvokerUtils
+                    .createServiceStub(IApplicationServerApi.class, AS_URL
+                            + IApplicationServerApi.SERVICE_URL, 10000);
+
+            String sessionToken = as.login("admin", "password");
+
+            // Download a single folder (containing a file inside) with a path and a data set code
+            DataSetFileDownloadOptions options = new DataSetFileDownloadOptions();
+            IDataSetFileId fileToDownload = new DataSetFilePermId(new DataSetPermId("20161205154857065-25"),
+                    "root/subfolder");
+
+            // Setting the recursive flag to true will return both the subfolder directory object AND file3.txt
+            options.setRecursive(true);
+
+            // Setting the recursive flag to false would return just the meta data of the directory object
+            //options.setRecursive(false);
+
+            // Read the contents and print them out
+            InputStream stream = dss.downloadFiles(sessionToken, Arrays.asList(fileToDownload), options);
+            DataSetFileDownloadReader reader = new DataSetFileDownloadReader(stream);
+            DataSetFileDownload file = null;
+            while ((file = reader.read()) != null)
+            {
+                System.out.println("Downloaded " + file.getDataSetFile().getPath() + " " + file.getDataSetFile().getFileLength());
+                if (!file.getDataSetFile().isDirectory())
+                {
+                    System.out.println("-----FILE CONTENTS-----");
+                    // read the actual file contents from the stream (Java 9+)
+                    System.out.println(new String(file.getInputStream().readAllBytes()));
+                }
+            }
+        }
+    }
+
+##### Search for a dataset and download all its contents, file by file
+
+Here is an example that demonstrates how to search for datasets and
+download the contents file by file. Recursion is not used here - see
+example 4 for a recursive example. To search for datasets, it is
+necessary to assign the appropriate criteria in the
+DataSetFileSearchCriteria object. It is also possible to search for
+datasets that contain certain files, as demonstrated below. Searching
+for files via the searchFiles method returns a list of DataSetFile
+objects that contain meta data about the files (but not the file
+contents). The meta data includes the file perm ids, the dataset perm ids
+(the perm ids are objects, not simple codes!), the file path, the file
+length, and whether or not the file is a directory.
With this list of +files, it is possible to iterate and access the contents as shown in +this example. + + + +**Search & download a whole dataset, file by file** + + import java.io.InputStream; + import java.util.LinkedList; + import java.util.List; + import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult; + import ch.ethz.sis.openbis.generic.dssapi.v3.IDataStoreServerApi; + import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.DataSetFile; + import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownload; + import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownloadOptions; + import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownloadReader; + import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.fetchoptions.DataSetFileFetchOptions; + import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.id.IDataSetFileId; + import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.search.DataSetFileSearchCriteria; + import ch.systemsx.cisd.common.spring.HttpInvokerUtils; + + + public class V3DSSExample3 + { + // DATASET EXAMPLE STRUCTURE + // The dataset consists of a root folder with 2 files and a subfolder with 1 file + // root: + // - file1.txt + // - file2.txt + // - subfolder: + // - file3.txt + + public static void main(String[] args) + { + String AS_URL = "https://localhost:8443/openbis/openbis"; + String DSS_URL = "https://localhost:8444/datastore_server"; + + // Reference the DSS + IDataStoreServerApi dss = + HttpInvokerUtils.createStreamSupportingServiceStub(IDataStoreServerApi.class, + DSS_URL + IDataStoreServerApi.SERVICE_URL, 10000); + + // Reference the AS and login & get a session token + IApplicationServerApi as = HttpInvokerUtils + .createServiceStub(IApplicationServerApi.class, AS_URL + + IApplicationServerApi.SERVICE_URL, 10000); + + String sessionToken = as.login("admin", "password"); + + // Create search criteria + DataSetFileSearchCriteria criteria = new DataSetFileSearchCriteria(); + criteria.withDataSet().withCode().thatEquals("20161205154857065-25"); + // Search for a dataset with a certain file inside like this: + //criteria.withDataSet().withChildren().withPermId(mypermid); + // Search for the files & put the file perm ids in a list for easy access + // (file perm ids are objects containing meta data describing the file) + SearchResult<DataSetFile> result = dss.searchFiles(sessionToken, criteria, new DataSetFileFetchOptions()); + List<DataSetFile> files = result.getObjects(); + + // This returns the following list of objects: + // DataSetFile("root", isDirectory = true) + // DataSetFile("root/file1.txt", isDirectory = false) + // DataSetFile("root/file2.txt", isDirectory = false) + // DataSetFile("root/subfolder", isDirectory = true) + // DataSetFile("root/subfolder/file3.txt", isDirectory = false) + + List<IDataSetFileId> fileIds = new LinkedList<IDataSetFileId>(); + for (DataSetFile file : files) + { + System.out.println(file.getPath() + " " + file.getFileLength()); + fileIds.add(file.getPermId()); + } + + // Download the files & print the contents + DataSetFileDownloadOptions options = new DataSetFileDownloadOptions(); + options.setRecursive(false); + InputStream stream = dss.downloadFiles(sessionToken, fileIds, options); + DataSetFileDownloadReader reader = new DataSetFileDownloadReader(stream); + DataSetFileDownload file = null; + while ((file = reader.read()) != null) + { + 
System.out.println("Downloaded " + file.getDataSetFile().getPath() + " " + file.getDataSetFile().getFileLength()); + System.out.println(file.getInputStream()); + } + } + } + +##### Download a whole dataset recursively + +Here is a simplified way to download a dataset. Instead of downloading +files one by one, it is possible to download the entire dataset +recursively by simply setting the recursive file to true in the +DataSetFileDownloadOptions object. + +**Download a whole dataset recursively** + + import java.io.InputStream; + import java.util.Arrays; + import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi; + import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.id.DataSetPermId; + import ch.ethz.sis.openbis.generic.dssapi.v3.IDataStoreServerApi; + import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownload; + import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownloadOptions; + import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownloadReader; + import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.id.DataSetFilePermId; + import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.id.IDataSetFileId; + import ch.systemsx.cisd.common.spring.HttpInvokerUtils; + + public class V3DSSExample4 + { + // DATASET EXAMPLE STRUCTURE + // The dataset consists of a root folder with 2 files and a subfolder with 1 file + // root: + // - file1.txt + // - file2.txt + // - subfolder: + // - file3.txt + + public static void main(String[] args) + { + String AS_URL = "https://localhost:8443/openbis/openbis"; + String DSS_URL = "https://localhost:8444/datastore_server"; + + // Reference the DSS + IDataStoreServerApi dss = + HttpInvokerUtils.createStreamSupportingServiceStub(IDataStoreServerApi.class, + DSS_URL + IDataStoreServerApi.SERVICE_URL, 10000); + + // Reference the AS and login & get a session token + IApplicationServerApi as = HttpInvokerUtils + .createServiceStub(IApplicationServerApi.class, AS_URL + + IApplicationServerApi.SERVICE_URL, 10000); + + String sessionToken = as.login("admin", "password"); + + // Download the files and print the contents + DataSetFileDownloadOptions options = new DataSetFileDownloadOptions(); + IDataSetFileId fileId = new DataSetFilePermId(new DataSetPermId("20161205154857065-25")); + options.setRecursive(true); + InputStream stream = dss.downloadFiles(sessionToken, Arrays.asList(fileId), options); + DataSetFileDownloadReader reader = new DataSetFileDownloadReader(stream); + DataSetFileDownload file = null; + + while ((file = reader.read()) != null) + { + file.getInputStream(); + System.out.println("Downloaded " + file.getDataSetFile().getPath() + " " + file.getDataSetFile().getFileLength()); + } + } + } + +##### Search and list all the files inside a data store + +Here is an example that demonstrates how to list all the files in a data +store. By simply leaving the following line as is: + + DataSetFileSearchCriteria criteria = new DataSetFileSearchCriteria(); + +it will automatically return every object in the data store. This is +useful when it is desired to list an entire directory or iterate over +the whole data store. 
+
+**Search and list all files inside a data store**
+
+    import java.io.InputStream;
+    import java.util.LinkedList;
+    import java.util.List;
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.IDataStoreServerApi;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.DataSetFile;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownload;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownloadOptions;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.download.DataSetFileDownloadReader;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.fetchoptions.DataSetFileFetchOptions;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.id.IDataSetFileId;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.search.DataSetFileSearchCriteria;
+    import ch.systemsx.cisd.common.spring.HttpInvokerUtils;
+
+    public class V3DSSExample5
+    {
+        // DATASET EXAMPLE STRUCTURE
+        // The dataset consists of a root folder with 2 files and a subfolder with 1 file
+        // root:
+        //   - file1.txt
+        //   - file2.txt
+        //   - subfolder:
+        //     - file3.txt
+
+        public static void main(String[] args) throws Exception
+        {
+            String AS_URL = "https://localhost:8443/openbis/openbis";
+            String DSS_URL = "https://localhost:8444/datastore_server";
+
+            // Reference the DSS
+            IDataStoreServerApi dss =
+                    HttpInvokerUtils.createStreamSupportingServiceStub(IDataStoreServerApi.class,
+                            DSS_URL + IDataStoreServerApi.SERVICE_URL, 10000);
+
+            // Reference the AS and login & get a session token
+            IApplicationServerApi as = HttpInvokerUtils
+                    .createServiceStub(IApplicationServerApi.class, AS_URL
+                            + IApplicationServerApi.SERVICE_URL, 10000);
+
+            String sessionToken = as.login("admin", "password");
+
+            // Create search criteria - empty criteria return every object in the data store
+            DataSetFileSearchCriteria criteria = new DataSetFileSearchCriteria();
+            criteria.withDataSet();
+
+            // the line below is commented out on purpose; uncomment it to restrict the search to a single data set
+            //criteria.withDataSet().withCode().thatEquals("20151201115639682-98322");
+
+            // Search for the files & put the file perm ids (objects containing meta data) in a list for easy access
+            SearchResult<DataSetFile> result = dss.searchFiles(sessionToken, criteria, new DataSetFileFetchOptions());
+            List<DataSetFile> files = result.getObjects();
+            List<IDataSetFileId> fileIds = new LinkedList<IDataSetFileId>();
+            for (DataSetFile file : files)
+            {
+                System.out.println(file.getPath() + " " + file.getFileLength());
+                fileIds.add(file.getPermId());
+            }
+
+            // Download the files and print the contents
+            DataSetFileDownloadOptions options = new DataSetFileDownloadOptions();
+            options.setRecursive(false);
+            InputStream stream = dss.downloadFiles(sessionToken, fileIds, options);
+            DataSetFileDownloadReader reader = new DataSetFileDownloadReader(stream);
+            DataSetFileDownload file = null;
+
+            while ((file = reader.read()) != null)
+            {
+                System.out.println("Downloaded " + file.getDataSetFile().getPath() + " " + file.getDataSetFile().getFileLength());
+                if (!file.getDataSetFile().isDirectory())
+                {
+                    System.out.println("-----FILE CONTENTS-----");
+                    // read the actual file contents from the stream (Java 9+)
+                    System.out.println(new String(file.getInputStream().readAllBytes()));
+                }
+            }
+        }
+    }
+
+#### Fast Downloading
+
+Fast downloading is based on the [SIS File Transfer Protocol](#) and
+library. Downloading is done in two steps:
+
+1. Create a fast download session with the
+   method `createFastDownloadSession()` on the V3 DSS API.
+   One parameter is a list of data set file ids. Such an id contains
+   the data set code and the path to the file inside the data set. If a
+   file id points to a folder, the whole folder will be downloaded. The
+   last parameter specifies download preferences. Currently only the
+   wished number of parallel download streams can be specified. The API
+   call returns a `FastDownloadSession` object.
+
+2. Download the files with the helper class `FastDownloader`. The
+   simplest usage is just:
+
+        new FastDownloader(downloadSession).downloadTo(destinationFolder);
+
+   The files are stored in the destination folder under `<data set
+   code>/<relative file path as in the data store on openBIS>`.
+
+Here is a complete example:
+
+**V3FastDownloadExample.java**
+
+    import java.io.File;
+    import java.nio.file.Path;
+    import java.util.ArrayList;
+    import java.util.Collection;
+    import java.util.List;
+    import java.util.Map;
+    import java.util.Map.Entry;
+
+    import org.apache.commons.lang3.time.StopWatch;
+
+    import ch.ethz.sis.filetransfer.DownloadListenerAdapter;
+    import ch.ethz.sis.filetransfer.IDownloadItemId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.DataSet;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.fetchoptions.DataSetFetchOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.id.DataSetPermId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.search.DataSetSearchCriteria;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.IDataStoreServerApi;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.fastdownload.FastDownloadSession;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.fastdownload.FastDownloadSessionOptions;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.id.DataSetFilePermId;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.datasetfile.id.IDataSetFileId;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.fastdownload.FastDownloadResult;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.fastdownload.FastDownloader;
+    import ch.systemsx.cisd.common.spring.HttpInvokerUtils;
+    import ch.systemsx.cisd.openbis.common.api.client.ServiceFinder;
+
+    public class V3FastDownloadExample
+    {
+        public static void main(String[] args)
+        {
+            IApplicationServerApi v3 =
+                    HttpInvokerUtils.createServiceStub(IApplicationServerApi.class, "http://localhost:8888/openbis/openbis"
+                            + IApplicationServerApi.SERVICE_URL, 10000);
+            String sessionToken = v3.login("test", "password");
+
+            // Search for some data sets
+            DataSetSearchCriteria searchCriteria = new DataSetSearchCriteria();
+            searchCriteria.withCode().thatStartsWith("201902");
+            DataSetFetchOptions fetchOptions = new DataSetFetchOptions();
+            fetchOptions.withDataStore();
+            fetchOptions.withPhysicalData();
+            List<DataSet> dataSets = v3.searchDataSets(sessionToken, searchCriteria, fetchOptions).getObjects();
+
+            // Get the DSS URL from the first data set, assuming that all data sets are from the same data store
+            String dssUrl = dataSets.get(0).getDataStore().getDownloadUrl();
+            System.out.println("url:" + dssUrl);
+
+            // Create the DSS server stub
+            IDataStoreServerApi dssServer = new ServiceFinder("datastore_server", IDataStoreServerApi.SERVICE_URL)
+                    .createService(IDataStoreServerApi.class, dssUrl);
+
+            // We download all files of all the found data sets.
+            List<DataSetFilePermId> fileIds = new ArrayList<>();
+            for (DataSet dataSet : dataSets)
+            {
+                fileIds.add(new DataSetFilePermId(new DataSetPermId(dataSet.getCode())));
+            }
+
+            // Create the download session for 2 streams in parallel (if possible)
+            FastDownloadSession downloadSession = dssServer.createFastDownloadSession(sessionToken,
+                    fileIds, new FastDownloadSessionOptions().withWishedNumberOfStreams(2));
+
+            // Do the actual download into 'targets/fast-download' and print the time needed by using a download listener
+            FastDownloadResult result = new FastDownloader(downloadSession).withListener(
+                    new DownloadListenerAdapter()
+                    {
+                        private StopWatch stopWatch = new StopWatch();
+
+                        @Override
+                        public void onDownloadStarted()
+                        {
+                            stopWatch.start();
+                        }
+
+                        @Override
+                        public void onDownloadFinished(Map<IDownloadItemId, Path> itemPaths)
+                        {
+                            System.out.println("Successfully finished after " + stopWatch);
+                        }
+
+                        @Override
+                        public void onDownloadFailed(Collection<Exception> e)
+                        {
+                            System.out.println("Downloading failed after " + stopWatch);
+                        }
+                    })
+                    .downloadTo(new File("targets/fast-download"));
+
+            // Print the mapping of data set file id to the actual path
+            for (Entry<IDataSetFileId, Path> entry : result.getPathsById().entrySet())
+            {
+                System.out.println(entry);
+            }
+
+            v3.logout(sessionToken);
+        }
+    }
+
+##### What happens under the hood?
+
+The files to be downloaded are split into chunks of at most 1 MB.
+On the DSS a special web service (`FileTransferServerServlet`) provides
+these chunks. On the client side these chunks are requested and stored
+in the file system. This is done in parallel if possible and requested
+(withWishedNumberOfStreams). The server tells the client the actual
+number of streams available for parallel downloading without slowing
+down the DSS. The actual number of streams depends on
+
+- the wished number of streams
+- the number of streams currently used by other download sessions
+- the maximum number of allowed streams as specified by the
+  property `api.v3.fast-download.maximum-number-of-allowed-streams` in
+  the DSS `service.properties`. The default value is 10.
+
+The actual number of streams is half of the number of free streams, or
+the wished number of streams if that is less. The number of free streams
+is given by the difference between the maximum number of allowed streams
+and the total number of used streams (see the sketch at the end of this
+section).
+
+It is possible that the actual number of streams is zero if the server
+is currently too busy with downloading (that is, there is no free
+download stream available). The FastDownloader will retry later.
+
+##### Customizing Fast Downloading
+
+There are three ways to customize the FastDownloader:
+
+- withListener(): Adds a listener which will be notified when
+  - the download session has been started/finished/failed,
+  - the download of a file/folder has been started/finished and
+  - a chunk has been downloaded.
+  There can be several listeners. By default there are no
+  listeners. Note that listeners are notified in a separate
+  thread associated with the download session.
+- withLogger(): Sets a logger. By default nothing is logged.
+- withRetryProviderFactory(): Sets the factory which creates a retry
+  provider. A retry provider knows when and how often a failed action
+  (e.g. a server request) should be retried. By default it is retried
+  three times. The first retry happens one second later. For each
+  following retry the waiting time increases by a factor of two.
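+
+For illustration only, here is a minimal Python sketch of the
+stream-allocation rule and the default retry back-off described above.
+It is not part of the openBIS libraries - the function names are ours
+and the code just restates the arithmetic:
+
+    # sketch of the stream-allocation rule described above
+    def actual_number_of_streams(wished, max_allowed=10, currently_used=0):
+        # free streams = maximum allowed minus streams used by other sessions
+        free = max_allowed - currently_used
+        # the server grants half of the free streams, capped by the wished
+        # number; this can be 0 when the server is busy, in which case the
+        # FastDownloader retries later
+        return min(wished, free // 2)
+
+    # default retry provider: three retries, the first one second later,
+    # the waiting time doubling for each following retry
+    def retry_wait_times(retries=3, first_wait=1.0):
+        return [first_wait * 2 ** i for i in range(retries)]
+
+    print(actual_number_of_streams(wished=2, max_allowed=10, currently_used=4))  # -> 2
+    print(retry_wait_times())  # -> [1.0, 2.0, 4.0]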
+
+#### Register Data Sets
+
+To register datasets using the Java or JavaScript API, use one of the
+following examples as a template.
+
+**Example (Java)**
+
+**Register Data Set**
+
+    import java.nio.file.Path;
+
+    import ch.ethz.sis.openbis.generic.OpenBIS;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.dataset.id.DataSetPermId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.EntityKind;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.entitytype.id.EntityTypePermId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id.ExperimentIdentifier;
+    import ch.ethz.sis.openbis.generic.dssapi.v3.dto.dataset.create.UploadedDataSetCreation;
+
+    public class RegisterDataSet
+    {
+        public static void main(String[] args) throws Exception
+        {
+            final String AS_URL = "http://localhost:8888/openbis/openbis";
+            final String DSS_URL = "http://localhost:8889/datastore_server";
+
+            final OpenBIS openbisV3 = new OpenBIS(AS_URL, DSS_URL);
+
+            openbisV3.login("admin", "password");
+
+            final Path path = Path.of("/uploadPath");
+            final String uploadId = openbisV3.uploadFileWorkspaceDSS(path);
+
+            final UploadedDataSetCreation creation = new UploadedDataSetCreation();
+            creation.setUploadId(uploadId);
+            creation.setExperimentId(new ExperimentIdentifier("/DEFAULT/DEFAULT/DEFAULT"));
+            creation.setTypeId(new EntityTypePermId("ATTACHMENT", EntityKind.DATA_SET));
+
+            try
+            {
+                final DataSetPermId dataSetPermId = openbisV3.createUploadedDataSet(creation);
+                // A data set assigned to the experiment "/DEFAULT/DEFAULT/DEFAULT" with the folder "uploadPath" is created
+                System.out.println("dataSetPermId=" + dataSetPermId);
+            } catch (final Exception e)
+            {
+                e.printStackTrace();
+            }
+
+            openbisV3.logout();
+        }
+    }
+
+**Example (Javascript)**
+
+**Register Data Set**
+
+    <!DOCTYPE html>
+    <html>
+    <head>
+    <meta charset="utf-8">
+    <title>Dataset upload</title>
+
+    <script type="text/javascript" src="/openbis-test/resources/api/v3/config.js"></script>
+    <script type="text/javascript" src="/openbis-test/resources/api/v3/require.js"></script>
+
+    </head>
+    <body>
+    <label for="myFile">Select a file:</label>
+    <input type="file" id="myFile"/>
+    <script>
+    require(["openbis", "dss/dto/dataset/create/UploadedDataSetCreation", "as/dto/experiment/id/ExperimentIdentifier",
+             "as/dto/entitytype/id/EntityTypePermId", "as/dto/entitytype/EntityKind"],
+        function(openbis, UploadedDataSetCreation, ExperimentIdentifier, EntityTypePermId, EntityKind) {
+            var testProtocol = window.location.protocol;
+            var testHost = window.location.hostname;
+            var testPort = window.location.port;
+
+            var testUrl = testProtocol + "//" + testHost + ":" + testPort;
+            var testApiUrl = testUrl + "/openbis/openbis/rmi-application-server-v3.json";
+
+            var openbisV3 = new openbis(testApiUrl);
+
+            var fileInput = document.getElementById("myFile");
+            fileInput.onchange = (e) => {
+                var files = e.target.files;
+
+                openbisV3.login("admin","password").done(sessionToken => {
+                    var dataStoreFacade = openbisV3.getDataStoreFacade();
+                    dataStoreFacade.uploadFilesWorkspaceDSS(files).done(uploadId => {
+                        var creation = new UploadedDataSetCreation();
+                        creation.setUploadId(uploadId);
+                        creation.setExperimentId(new ExperimentIdentifier("/DEFAULT/DEFAULT/DEFAULT"));
+                        creation.setTypeId(new EntityTypePermId("ATTACHMENT", EntityKind.DATA_SET));
+
+                        dataStoreFacade.createUploadedDataSet(creation).done(dataSetPermId => {
+                            // A data set assigned to the experiment "/DEFAULT/DEFAULT/DEFAULT" with the folder "uploadPath" is created
+                            console.log("dataSetPermId=" + dataSetPermId);
+                            openbisV3.logout();
+                        }).fail(error => {
+                            console.error(error);
+                            openbisV3.logout();
+                        });
+                    });
+                });
+            }
+    });
+    </script>
+    </body>
+    </html>
+
+VI. Web application context
+---------------------------
+
+When making web applications and embedding them into an openBIS tab in
+the core UI, it is often required to have information about the context
+in which those applications are being loaded, for two particular
+purposes:
+
+- Making the application context sensitive and showing
+  information/functionality related to the current context. The
+  context object provided by **getWebAppContext()** contains all
+  information required for this purpose.
+- Logging into the facade without presenting the user with another
+  login screen, since they have already logged into openBIS. For
+  that **loginFromContext()** can be used.
+
+These methods only exist on the Javascript facade and are meant to be
+used in embedded web applications; calling them from an external
+web application will do nothing.
+
+**WebAppContextExample.html**
+
+    <script>
+    require(['openbis'], function(openbis) {
+        var openbisV3 = new openbis();
+        var webappcontext = openbisV3.getWebAppContext();
+
+        console.log(webappcontext.getWebappCode());
+        console.log(webappcontext.getSessionId());
+        console.log(webappcontext.getEntityKind());
+        console.log(webappcontext.getEntityType());
+        console.log(webappcontext.getEntityIdentifier());
+        console.log(webappcontext.getEntityPermId());
+
+        openbisV3.loginFromContext();
+        openbisV3.getSessionInformation().done(function(sessionInfo) {
+            console.log(sessionInfo.getUserName());
+        });
+    });
+    </script>
\ No newline at end of file
diff --git a/docs/software-developer-documentation/apis/matlab-v3-api.md b/docs/software-developer-documentation/apis/matlab-v3-api.md
new file mode 100644
index 0000000000000000000000000000000000000000..b9760c0cbbb5b1947fec6cf83bc6fab9291801a9
--- /dev/null
+++ b/docs/software-developer-documentation/apis/matlab-v3-api.md
@@ -0,0 +1,31 @@
+# How to access openBIS from MATLAB
+
+## Preamble
+[openBIS](https://wiki-bsse.ethz.ch/display/bis/Home) is a research data management system developed by [ETH SIS](https://sis.id.ethz.ch/). Data stored in openBIS can be accessed directly via the web UI or programmatically using APIs. For example, [pyBIS](https://sissource.ethz.ch/sispub/openbis/tree/master/pybis) is a project that provides a Python 3 module for interacting with openBIS.
+[MATLAB](https://ch.mathworks.com/products/matlab.html) is a high-level numerical computing environment that is popular in many areas of science. This repository provides a toolbox to access data in openBIS directly from MATLAB.
+
+## Setup
+The toolbox interacts with openBIS by calling pyBIS functions directly from MATLAB. Therefore, both Python and MATLAB have to be installed and configured properly. Please consult the [MATLAB - Python compatibility table](https://www.mathworks.com/content/dam/mathworks/mathworks-dot-com/support/sysreq/files/python-compatibility.pdf) to choose the correct versions.
Also note that Python 2.7 is no longer supported!
+
+#### macOS
+On macOS, the setup has been tested with a Miniconda Python distribution.
+1. Download and install [Miniconda3](https://conda.io/miniconda.html) (use a Python version according to the [MATLAB - Python compatibility table](https://www.mathworks.com/content/dam/mathworks/mathworks-dot-com/support/sysreq/files/python-compatibility.pdf))
+2. Open the terminal and install pyBIS with pip: `pip install pybis`
+3. Find the path to your Python executable: `which python`
+4. Open MATLAB and set the Python executable. On MATLAB R2019b or later, use the command: `pyenv('Version', 'Path/to/python')`. Replace with the path found in the previous step. On earlier versions of MATLAB, the `pyenv` command is called `pyversion`.
+
+#### Windows 10
+On Windows, using the Anaconda or Miniconda approach did not work (for some reason, MATLAB could not find the Python modules). On the other hand, using the standard Python installation seems to work.
+1. Download and install Python [here](https://www.python.org/downloads/windows/) (use a Python version according to the [MATLAB - Python compatibility table](https://www.mathworks.com/content/dam/mathworks/mathworks-dot-com/support/sysreq/files/python-compatibility.pdf)). Make sure to choose the **64-bit version**.
+2. During the installation, make sure Python is added to the Path and registered as the default Python interpreter. To do this, select the little tick box `Add Python 3.x to PATH` in the installation window.
+3. Open Windows PowerShell and install pyBIS with pip: `pip install pybis`
+4. Find the path to your Python executable by typing: `Get-Command python`. The path is listed in the Source column, e.g. `C:\Users\user\AppData\Local\Programs\Python\Python38\python.exe`. Copy the path by selecting it and pressing `Ctrl-C`.
+5. Open MATLAB and set the Python executable. On MATLAB R2019b or later, use the command: `pyenv('Version', 'C:\Path\to\Programs\python.exe')`. Replace with the path found in step 4. On earlier versions of MATLAB, the `pyenv` command is called `pyversion`.
+
+## Usage
+Download [this repository](https://sissource.ethz.ch/sispub/openbis/-/tree/master/api-openbis-matlab) and add it to your MATLAB path. If you are running the toolbox for the first time, make sure to carry out the steps described under **Setup** above. An [example script](https://sissource.ethz.ch/hluetcke/matlab-openbis/blob/master/openbis_example.mlx) demonstrating some common usage patterns is provided in the repository. The script can be run interactively in the MATLAB Live Editor. Type `doc OpenBis` in the MATLAB Command Window to access the built-in documentation.
+
+## Notes
+I have not tested these instructions and the toolbox with all combinations of Python & MATLAB versions on different operating systems. In general, a combination of recent Python and MATLAB versions should work on macOS and Windows. If you run into any issues, please feel free to contact the [SIS Helpdesk](mailto:sis.helpdesk@ethz.ch).
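+
+A quick first check when troubleshooting is to verify, outside of MATLAB, that the Python executable you point `pyenv` at can actually import pyBIS (a minimal sketch; the `__version__` attribute is provided by the pybis package):
+
+```python
+# run this with the same python executable that MATLAB's pyenv points to
+import pybis
+print(pybis.__version__)  # prints the installed pyBIS version
+```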
diff --git a/docs/software-developer-documentation/apis/personal-access-tokens.md b/docs/software-developer-documentation/apis/personal-access-tokens.md
new file mode 100644
index 0000000000000000000000000000000000000000..8bed1565929836a23b4d3b52708eec6bd8bda82f
--- /dev/null
+++ b/docs/software-developer-documentation/apis/personal-access-tokens.md
@@ -0,0 +1,282 @@
+Personal Access Tokens
+======================
+
+#### Background
+
+"Personal access token" (in short: PAT) is an openBIS feature that was
+introduced to simplify the integration of openBIS with other systems.
+Such integrations are usually done using the openBIS V3 API and therefore
+require an external application to authenticate in openBIS to fetch or
+create some data. Without "Personal access tokens" the only way of
+authenticating in the openBIS V3 API was the V3 API login method. Given a
+user name and a password, the login method would return an openBIS
+session token, which could later be used in other V3 API calls as a
+secret and a proof of who we are.
+
+Unfortunately, even though this approach worked well, it had some
+limitations. These were mainly caused by the nature of session tokens in
+openBIS:
+
+- session tokens are short lived
+- session tokens do not survive openBIS restarts
+- obtaining a new session token requires a user name and a password
+
+Because of these limitations, external applications had to be prepared
+for a situation where an openBIS session token stops working. They had
+to know how to recover. When one session token expired or was
+invalidated, they had to obtain a new one by calling the login method
+again and providing a user name and a password. But even then the whole
+state of the previous session (e.g. files stored in the session
+workspace) would be gone and not available in the new session.
+
+Depending on the use case and the type of integration, that could cause
+smaller or bigger headaches for the developers of the external system.
+Fortunately, "Personal access tokens" come to the rescue.
+
+#### What are "Personal access tokens"?
+
+A personal access token (in short: PAT) is very similar to a session
+token, but there are also some important differences.
+
+Similarities:
+
+- a PAT is bound to a specific user and represents that user's
+  session. Two users can't share a session using a PAT. Internally,
+  the PAT session identifier is the combination of the userId and the
+  session name.
+- a PAT is a secret that must not be publicly shared (having a user's
+  PAT, one can perform any actions in openBIS that this user could
+  normally perform, except for user and PAT management)
+- a user can have multiple PATs active at the same time
+- a PAT can be used in places where a regular session token could
+  normally be used, e.g. to call V3 API methods (a full list of endpoints
+  that support PATs is presented below)
+
+Differences:
+
+- a PAT is created using the dedicated "createPersonalAccessTokens" V3
+  API method (not using the "login" method like a regular session token)
+- a PAT can be long lived (its validFrom and validTo dates are defined
+  at the moment of creation); still, it should be replaced periodically
+  for security reasons
+- a PAT session survives openBIS restarts, i.e. the same PAT can be
+  used before and after a restart (the session workspace folder state is
+  also kept)
+- multiple PATs may represent a single PAT session (all such PATs must
+  have the same "session name") - this becomes useful for handling the
+  transition period from a soon-to-expire PAT to a new PAT that
+  replaces it without losing the session's state
+
+#### Who can create a "Personal access token"?
+
+Any openBIS user can manage their own PATs. Instance admin users can
+manage all PATs in the system.
+
+#### Where can I use "Personal access tokens"?
+
+Endpoints that support PATs:
+
+AS:
+
+- V3 API
+- File Upload Servlet (class: UploadServiceServlet, path: /upload)
+- File Download Servlet (class: DownloadServiceServlet, path:
+  /download)
+- Session Workspace Provider
+
+DSS:
+
+- V3 API
+- File Upload Servlet (class: StoreShareFileUploadServlet, path:
+  /store\_share\_file\_upload)
+- File Download Servlet (class: DatasetDownloadServlet, path: /\*)
+- Session Workspace Upload Servlet (class:
+  SessionWorkspaceFileUploadServlet, path:
+  /session\_workspace\_file\_upload)
+- Session Workspace Download Servlet (class:
+  SessionWorkspaceFileDownloadServlet, path:
+  /session\_workspace\_file\_download)
+- Session Workspace Provider
+- SFTP
+
+#### Where are "Personal access tokens" stored?
+
+PATs are stored in the "personal-access-tokens.json" JSON file. By
+default, the file is located in the main openBIS folder, where it
+survives openBIS restarts and upgrades.
+
+The location can be changed using the "personal-access-tokens-file-path"
+property in AS service.properties. The JSON file is read at openBIS
+start-up.
+
+#### How long should my "Personal Access Tokens" be valid?
+
+For security reasons, PATs should not be valid indefinitely.
+Instead, each PAT should have a well-defined validity period, after which
+it should be replaced with a new PAT with a different hash. To make this
+transition as smooth as possible, please use the following guide:
+
+- create PAT\_1 with sessionName = <MY\_SESSION> and use it in
+  your integration
+- when PAT\_1 is about to expire, create PAT\_2 with the same
+  sessionName = <MY\_SESSION> (both PAT\_1 and PAT\_2 will work
+  at this point and will refer to the same openBIS session)
+- replace PAT\_1 with PAT\_2 in your integration
+
+PATs created by the same user and with the same "session name" refer,
+under the hood, to the same openBIS session. Therefore, even if one of
+these PATs expires, the session is kept active and its state is
+maintained.
+
+#### Configuration
+
+"Personal access tokens" functionality is enabled by default. To
+configure it, please use AS service.properties:
+
+    # personal access tokens feature
+    personal-access-tokens-enabled = true
+
+    # change the default location of the JSON file that stores personal access tokens (default: personal-access-tokens.json file in the main openBIS folder)
+    personal-access-tokens-file-path = MY_FOLDER/personal-access-tokens.json
+
+    # set maximum allowed validity period (in seconds) - a personal access token with a longer validity period cannot be created (default: 30 days)
+    personal-access-tokens-max-validity-period = 2592000
+
+    # set validity warning period (in seconds) - owners of personal access tokens that are going to expire within this warning period are going to receive email notifications (default: 5 days)
+    personal-access-tokens-validity-warning-period = 259200
+
+#### Typical Application Workflow
+
+The most typical use case for Personal Access Tokens is to run code on a
+third-party service against openBIS.
+
+On such services we want:
+
+1. A long-lasting session with openBIS, for several days, that survives
+   restarts.
+2. No permanently stored user name or password.
+
+For such services we recommend creating a PAT on login and storing the
+PAT instead. We provide an example Gradle project with the Java class
+PersonalAccessTokensApplicationWorkflows ([source downloadable
+here](/download/attachments/132286225/src.zip?version=1&modificationDate=1663662966452&api=v2))
+as the recommended way to obtain the most up-to-date personal access
+token for an application and user, including creation and renewal
+management.
+
+    private static final String URL = "https://openbis-sis-ci-sprint.ethz.ch/openbis/openbis" + IApplicationServerApi.SERVICE_URL;
+    private static final int TIMEOUT = 10000;
+
+    private static final String USER = "admin";
+    private static final String PASSWORD = "changeit";
+
+    public static void main(String[] args) {
+        IApplicationServerApi v3 = HttpInvokerUtils.createServiceStub(IApplicationServerApi.class, URL, TIMEOUT);
+        String sessionToken = v3.login(USER, PASSWORD);
+        System.out.println("sessionToken: " + sessionToken);
+        PersonalAccessTokenPermId pat = PersonalAccessTokensApplicationWorkflows.getApplicationPersonalAccessTokenOnLogin(v3, sessionToken, "MY_APPLICATION");
+        System.out.println("pat: " + pat);
+        v3.logout(sessionToken);
+    }
+
+    package ch.ethz.sis.pat;
+
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.search.SearchResult;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.pat.PersonalAccessToken;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.pat.create.PersonalAccessTokenCreation;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.pat.fetchoptions.PersonalAccessTokenFetchOptions;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.pat.id.PersonalAccessTokenPermId;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.pat.search.PersonalAccessTokenSearchCriteria;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.session.SessionInformation;
+    import org.apache.commons.lang3.time.DateUtils;
+
+    import java.util.Calendar;
+    import java.util.Date;
+    import java.util.List;
+    import java.util.Map;
+
+    public class PersonalAccessTokensApplicationWorkflows {
+
+        private static final int DAY_IN_SECONDS = 24 * 60 * 60;
+
+        private static final String PERSONAL_ACCESS_TOKENS_MAX_VALIDITY_PERIOD = "personal-access-tokens-max-validity-period";
+
+        private static final String PERSONAL_ACCESS_TOKENS_VALIDITY_WARNING_PERIOD =
"personal-access-tokens-validity-warning-period"; + + private PersonalAccessTokensApplicationWorkflows() { + + } + + /* + * This utility method returns the current application token, creates one if no one is found and renews it if is close to expiration. + * Requires are real session token hence requires a form where the user can input its user and password on an application. + */ + public static PersonalAccessTokenPermId getApplicationPersonalAccessTokenOnLogin(IApplicationServerApi v3, String sessionToken, String applicationName) { + // Obtain servers renewal information + Map<String, String> information = v3.getServerInformation(sessionToken); + int personalAccessTokensRenewalPeriodInSeconds = Integer.parseInt(information.get(PersonalAccessTokensApplicationWorkflows.PERSONAL_ACCESS_TOKENS_VALIDITY_WARNING_PERIOD)); + int personalAccessTokensRenewalPeriodInDays = personalAccessTokensRenewalPeriodInSeconds / DAY_IN_SECONDS; + int personalAccessTokensMaxValidityPeriodInSeconds = Integer.parseInt(information.get(PersonalAccessTokensApplicationWorkflows.PERSONAL_ACCESS_TOKENS_MAX_VALIDITY_PERIOD)); + int personalAccessTokensMaxValidityPeriodInDays = personalAccessTokensMaxValidityPeriodInSeconds / DAY_IN_SECONDS; + + // Obtain user id + SessionInformation sessionInformation = v3.getSessionInformation(sessionToken); + + // Search for PAT for this user and application + // NOTE: Standard users only get their PAT but admins get all, filtering with the user solves this corner case + PersonalAccessTokenSearchCriteria personalAccessTokenSearchCriteria = new PersonalAccessTokenSearchCriteria(); + personalAccessTokenSearchCriteria.withSessionName().thatEquals(applicationName); + personalAccessTokenSearchCriteria.withOwner().withUserId().thatEquals(sessionInformation.getPerson().getUserId()); + + SearchResult<PersonalAccessToken> personalAccessTokenSearchResult = v3.searchPersonalAccessTokens(sessionToken, personalAccessTokenSearchCriteria, new PersonalAccessTokenFetchOptions()); + PersonalAccessToken bestTokenFound = null; + PersonalAccessTokenPermId bestTokenFoundPermId = null; + + // Obtain longer lasting application token + for (PersonalAccessToken personalAccessToken:personalAccessTokenSearchResult.getObjects()) { + if (personalAccessToken.getValidToDate().after(new Date())) { + if (bestTokenFound == null) { + bestTokenFound = personalAccessToken; + } else if (personalAccessToken.getValidToDate().after(bestTokenFound.getValidToDate())) { + bestTokenFound = personalAccessToken; + } + } + } + + // If best token doesn't exist, create + if (bestTokenFound == null) { + bestTokenFoundPermId = createApplicationPersonalAccessToken(v3, sessionToken, applicationName, personalAccessTokensMaxValidityPeriodInDays); + } + + // If best token is going to expire in less than the warning period, renew + Calendar renewalDate = Calendar.getInstance(); + renewalDate.add(Calendar.DAY_OF_MONTH, personalAccessTokensRenewalPeriodInDays); + if (bestTokenFound != null && bestTokenFound.getValidToDate().before(renewalDate.getTime())) { + bestTokenFoundPermId = createApplicationPersonalAccessToken(v3, sessionToken, applicationName, personalAccessTokensMaxValidityPeriodInDays); + } + + // If we have not created or renewed, return current + if (bestTokenFoundPermId == null) { + bestTokenFoundPermId = bestTokenFound.getPermId(); + } + + return bestTokenFoundPermId; + } + + private static PersonalAccessTokenPermId createApplicationPersonalAccessToken(IApplicationServerApi v3, String sessionToken, String applicationName, int 
+                personalAccessTokensMaxValidityPeriodInDays) {
+            PersonalAccessTokenCreation creation = new PersonalAccessTokenCreation();
+            creation.setSessionName(applicationName);
+            creation.setValidFromDate(new Date(System.currentTimeMillis() - DateUtils.MILLIS_PER_DAY));
+            creation.setValidToDate(new Date(System.currentTimeMillis() + DateUtils.MILLIS_PER_DAY * personalAccessTokensMaxValidityPeriodInDays));
+            List<PersonalAccessTokenPermId> personalAccessTokens = v3.createPersonalAccessTokens(sessionToken, List.of(creation));
+            return personalAccessTokens.get(0);
+        }
+
+    }
+
+#### V3 API
+
+Code examples for personal access tokens can be found in the main V3 API
+documentation: [openBIS V3
+API\#PersonalAccessTokens](/pages/viewpage.action?pageId=80699415)
\ No newline at end of file
diff --git a/docs/software-developer-documentation/apis/python-v3-api.md b/docs/software-developer-documentation/apis/python-v3-api.md
new file mode 100644
index 0000000000000000000000000000000000000000..20ba1f6a2f70b28e8dfc956bf85df9267cf4b84f
--- /dev/null
+++ b/docs/software-developer-documentation/apis/python-v3-api.md
@@ -0,0 +1,1543 @@
+# Welcome to pyBIS!
+
+pyBIS is a Python module for interacting with openBIS. pyBIS is designed to be most useful in a [Jupyter Notebook](https://jupyter.org) or IPython environment, especially if you are developing Python scripts for automation. Jupyter Notebooks offer some sort of IDE for openBIS, supporting TAB completion and immediate data checks, hopefully making the life of a researcher easier.
+
+## Dependencies and Requirements
+
+- pyBIS relies on the openBIS API v3
+- openBIS version 16.05.2 or newer is required
+- openBIS version 19.06.5 or later is recommended
+- pyBIS uses Python 3.6 or newer and the Pandas module
+
+## Installation
+
+```
+pip install --upgrade pybis
+```
+
+That command will download and install pyBIS and all its dependencies. If pyBIS is already installed, it will be upgraded to the latest version.
+
+If you haven't done so yet, install Jupyter and/or Jupyter Lab (the next generation of Jupyter):
+
+```
+pip install jupyter
+pip install jupyterlab
+```
+
+# General Usage
+
+### TAB completion and other hints in Jupyter / IPython
+
+- in a Jupyter Notebook or IPython environment, pyBIS helps you to enter the commands
+- After every dot `.` you might hit the `TAB` key in order to look at the available commands.
+- if you are unsure what parameters to add to a method, add a question mark right after the method and hit `SHIFT+ENTER`
+- Jupyter will then look up the signature of the method and show some helpful docstring
+
+### Checking input
+
+- When working with properties of entities, they might use a **controlled vocabulary** or be of a specific **property type**.
+- Add an underscore `_` character right after the property and hit `SHIFT+ENTER` to show the valid values
+- When a property only accepts a controlled vocabulary, you will be shown the valid terms in a nicely formatted table
+- if you try to assign an **invalid value** to a property, you'll receive an error immediately
+
+### Glossary
+
+- **spaces:** used for authorisation, e.g. to separate two working groups. If you have permissions in a space, you can see everything which is in that space, but not necessarily in another space (unless you have the permission).
+- **projects:** a space consists of many projects.
+- **experiments / collections:** a project contains many experiments. Experiments can have _properties_
+- **samples / objects:** an experiment contains many samples. Samples can
+  have _properties_
+- **dataSet:** a dataSet contains the actual _data files_, either physical (stored in the openBIS dataStore) or linked
+- **attributes:** every entity above contains a number of attributes. They are the same across all instances of openBIS and independent of their type.
+- **properties:** additional specific key-value pairs, available for these entities:
+
+  - experiments
+  - samples
+  - dataSets
+
+  every single instance of an entity must be of a specific **entity type** (see below). The type defines the set of properties.
+
+- **experiment type / collection type:** a type for experiments which specifies its properties
+- **sample type / object type:** a type for samples / objects which specifies its properties
+- **dataSet type:** a type for dataSets which specifies its properties
+- **property type:** a single property, as defined in the entity types above. It can be of a classic data type (e.g. INTEGER, VARCHAR, BOOLEAN) or its values can be controlled (CONTROLLEDVOCABULARY).
+- **plugin:** a script written in [Jython](https://www.jython.org) which allows checking property values in an even more detailed fashion
+
+# connect to openBIS
+
+## login
+
+In an **interactive session**, e.g. inside a Jupyter notebook, you can use `getpass` to enter your password safely:
+
+```python
+from pybis import Openbis
+o = Openbis('https://example.com')
+o = Openbis('example.com')  # https:// is assumed
+
+import getpass
+password = getpass.getpass()
+
+o.login('username', password, save_token=True)  # save the session token in ~/.pybis/example.com.token
+```
+
+In a **script** you would rather use two **environment variables** to provide username and password:
+
+```python
+import os
+from pybis import Openbis
+o = Openbis(os.environ['OPENBIS_HOST'])
+
+o.login(os.environ['OPENBIS_USERNAME'], os.environ['OPENBIS_PASSWORD'])
+```
+
+As an even better alternative, you should use personal access tokens (PAT) to avoid username/password altogether. See below.
+
+### Verify certificate
+
+By default, your SSL certificate is verified. If you have a test instance with a self-signed certificate, you'll need to turn off this verification explicitly:
+
+```python
+from pybis import Openbis
+o = Openbis('https://test-openbis-instance.com', verify_certificates=False)
+```
+
+### Check session token, logout()
+
+Check whether your session, i.e.
+
+```python
+print(f"Session is active: {o.is_session_active()} and token is {o.token}")
+o.logout()
+print(f"Session is active: {o.is_session_active()}")
+```
+
+### Personal access token (PAT)
+
+As a newer alternative to logging in every time you run a script, you can create tokens which
+
+- once issued, do **not need username or password**
+- are **valid much longer** than session tokens (default is one year)
+- **survive restarts** of an openBIS instance
+
+To create a token, you first need a valid session – either through classic login or by assigning an existing valid session token:
+
+```python
+from pybis import Openbis
+o = Openbis('https://test-openbis-instance.com')
+
+o.login("username", "password")
+# or
+o.set_token("your_username-220808165456793xA3D0357C5DE66A5BAD647E502355FE2C")
+```
+
+Then you can create a new personal access token (PAT) and use it for all further pyBIS queries:
+
+```python
+pat = o.get_or_create_personal_access_token(sessionName="Project A")
+o.set_token(pat, save_token=True)
+```
+
+You may also use the permId directly:
+
+```python
+pat = o.get_or_create_personal_access_token(sessionName="Project A")
+o.set_token(pat.permId, save_token=True)
+```
+
+**Note:** If there is an existing PAT with the same _sessionName_ which is still valid and the validity is within the warning period (defined by the server), then this existing PAT is returned instead. However, you can enforce creating a new PAT by passing the argument `force=True`.
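+
+For example, a minimal sketch that forces a fresh token even though a valid one with the same _sessionName_ may still exist (`o` is the logged-in `Openbis` object from above):
+
+```python
+# force=True skips re-using an existing, still valid PAT
+pat = o.get_or_create_personal_access_token(sessionName="Project A", force=True)
+o.set_token(pat, save_token=True)
+```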
+
+**Note:** Most operations are permitted using the PAT, _except_:
+
+- all operations on personal access tokens themselves
+- i.e. the create, list and delete operations on tokens
+
+For these operations, you need to use a session token instead.
+
+To get a list of all currently available tokens:
+
+```python
+o.get_personal_access_tokens()
+o.get_personal_access_tokens(sessionName="APPLICATION_1")
+```
+
+To delete the first token shown in the list:
+
+```python
+o.get_personal_access_tokens()[0].delete('some reason')
+```
+
+### Caching
+
+With `pyBIS 1.17.0`, a lot of caching has been introduced to improve the speed of object lookups that do not change often. If you encounter any problems, you can turn it off like this:
+
+```python
+o = Openbis('https://example.com', use_cache=False)
+
+# or later in the script
+o.use_cache = False
+o.clear_cache()
+o.clear_cache('sampleType')
+```
+
+## Mount openBIS dataStore server
+
+### Prerequisites: FUSE / SSHFS
+
+Mounting an openBIS dataStore server requires FUSE / SSHFS to be installed (the installation requires root privileges). The mounting itself requires no root privileges.
+
+**Mac OS X**
+
+Follow the installation instructions on
+https://osxfuse.github.io
+
+**Linux CentOS 7**
+
+```
+$ sudo yum install epel-release
+$ sudo yum --enablerepo=epel -y install fuse-sshfs
+$ user="$(whoami)"
+$ sudo usermod -a -G fuse "$user"
+```
+
+After the installation, an `sshfs` command should be available.
+
+### Mount dataStore server with pyBIS
+
+Because the mount/unmount procedure differs from platform to platform, pyBIS offers two simple methods:
+
+```
+o.mount()
+o.mount(username, password, hostname, mountpoint, volname)
+o.is_mounted()
+o.unmount()
+o.get_mountpoint()
+```
+
+Currently, mounting is supported for Linux and Mac OS X only.
+
+All attributes, if not provided, are re-used from a previous login() command. If no mountpoint is provided, the default mountpoint will be `~/hostname`. If this directory does not exist, it will be created. The directory must be empty before mounting.
+
+# Masterdata
+
+openBIS stores quite a lot of meta-data along with your dataSets. The collection of data that describes this meta-data (i.e. meta-meta-data) is called masterdata. It consists of:
+
+- sample types
+- dataSet types
+- material types
+- experiment types
+- property types
+- vocabularies
+- vocabulary terms
+- plugins (jython scripts that allow complex data checks)
+- tags
+- semantic annotations
+
+## browse masterdata
+
+```
+sample_types = o.get_sample_types()  # get a list of sample types
+sample_types.df                      # DataFrame object
+st = o.get_sample_types()[3]         # get 4th element of that list
+st = o.get_sample_type('YEAST')
+st.code
+st.generatedCodePrefix
+st.attrs.all()                       # get all attributes as a dict
+st.get_validationPlugin()            # returns a plugin object
+
+st.get_property_assignments()        # show the list of properties
+                                     # for that sample type
+o.get_material_types()
+o.get_dataset_types()
+o.get_experiment_types()
+o.get_collection_types()
+
+o.get_property_types()
+pt = o.get_property_type('BARCODE_COMPLEXITY_CHECKER')
+pt.attrs.all()
+
+o.get_plugins()
+pl = o.get_plugin('Diff_time')
+pl.script  # the Jython script that processes this property
+
+o.get_vocabularies()
+o.get_vocabulary('BACTERIAL_ANTIBIOTIC_RESISTANCE')
+o.get_terms(vocabulary='STORAGE')
+o.get_tags()
+```
+
+## create property types
+
+**Samples** (objects), **experiments** (collections) and **dataSets** contain type-specific **properties**. When you create a new sample, experiment or dataSet of a given type, the set of properties is well defined. Also, the values of these properties are type-checked.
+
+The first step in creating a new entity type is to create a so-called **property type**:
+
+```
+pt_text = o.new_property_type(
+    code        = 'MY_NEW_PROPERTY_TYPE',
+    label       = 'yet another property type',
+    description = 'my first property',
+    dataType    = 'VARCHAR',
+)
+pt_text.save()
+
+pt_int = o.new_property_type(
+    code     = 'MY_NUMBER',
+    label    = 'property contains a number',
+    dataType = 'INTEGER',
+)
+pt_int.save()
+
+pt_voc = o.new_property_type(
+    code        = 'MY_CONTROLLED_VOCABULARY',
+    label       = 'label me',
+    description = 'give me a description',
+    dataType    = 'CONTROLLEDVOCABULARY',
+    vocabulary  = 'STORAGE',
+)
+pt_voc.save()
+
+pt_richtext = o.new_property_type(
+    code        = 'MY_RICHTEXT_PROPERTY',
+    label       = 'richtext data',
+    description = 'property contains rich text',
+    dataType    = 'MULTILINE_VARCHAR',
+    metaData    = {'custom_widget' : 'Word Processor'}
+)
+pt_richtext.save()
+
+pt_spread = o.new_property_type(
+    code        = 'MY_TABULAR_DATA',
+    label       = 'data in a table',
+    description = 'property contains a spreadsheet',
+    dataType    = 'XML',
+    metaData    = {'custom_widget': 'Spreadsheet'}
+)
+pt_spread.save()
+```
+
+The `dataType` attribute can contain any of these values:
+
+- `INTEGER`
+- `VARCHAR`
+- `MULTILINE_VARCHAR`
+- `REAL`
+- `TIMESTAMP`
+- `BOOLEAN`
+- `HYPERLINK`
+- `XML`
+- `CONTROLLEDVOCABULARY`
+- `MATERIAL`
+
+When choosing `CONTROLLEDVOCABULARY`, you must specify a `vocabulary` attribute (see example). Likewise, when choosing `MATERIAL`, a `materialType` attribute must be provided.
+
+To create a **richtext property**, use `MULTILINE_VARCHAR` as `dataType` and set `metaData` to `{'custom_widget' : 'Word Processor'}` as shown in the example above.
+
+To create a **tabular, spreadsheet-like property**, use `XML` as `dataType` and set `metaData` to `{'custom_widget' : 'Spreadsheet'}` as shown in the example above.
+
+**Note**: PropertyTypes that start with a \$ are by definition `managedInternally` and therefore this attribute must be set to True.
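+
+For instance, a minimal sketch of creating such an internal property type (the property code is illustrative, and it assumes `managedInternally` is accepted like the other attributes shown above):
+
+```
+pt_internal = o.new_property_type(
+    code              = '$MY_INTERNAL_PROPERTY',
+    label             = 'internal property',
+    dataType          = 'VARCHAR',
+    managedInternally = True,   # mandatory for codes starting with $
+)
+pt_internal.save()
+```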
+
+## create sample types / object types
+
+The second step (after creating a property type, see above) is to create the **sample type**. The new name for **sample** is **object**. You can use both methods interchangeably:
+
+- `new_sample_type()` == `new_object_type()`
+
+```
+sample_type = o.new_sample_type(
+    code                = 'my_own_sample_type',  # mandatory
+    generatedCodePrefix = 'S',                   # mandatory
+    description         = '',
+    autoGeneratedCode   = True,
+    subcodeUnique       = False,
+    listable            = True,
+    showContainer       = False,
+    showParents         = True,
+    showParentMetadata  = False,
+    validationPlugin    = 'Has_Parents'          # see plugins below
+)
+sample_type.save()
+```
+
+When the `autoGeneratedCode` attribute is set to `True`, you don't need to provide a value for `code` when you create a new sample. You can get the next autoGeneratedCode like this:
+
+```
+sample_type.get_next_sequence()  # eg. 67
+sample_type.get_next_code()      # e.g. FLY77
+```
+
+From pyBIS 1.31.0 onwards, you can provide a `code` even for samples whose sample type has `autoGeneratedCode=True`, to offer the same functionality as ELN-LIMS. In earlier versions of pyBIS, providing a code in this situation caused an error.
+
+## assign and revoke properties to sample type / object type
+
+The third step, after saving the sample type, is to **assign or revoke properties** to the newly created sample type. This assignment procedure applies to all entity types (dataset type, experiment type).
+
+```
+sample_type.assign_property(
+    prop                            = 'diff_time',  # mandatory
+    section                         = '',
+    ordinal                         = 5,
+    mandatory                       = True,
+    initialValueForExistingEntities = 'initial value',
+    showInEditView                  = True,
+    showRawValueInForms             = True
+)
+sample_type.revoke_property('diff_time')
+sample_type.get_property_assignments()
+```
+
+## create a dataset type
+
+The second step (after creating a **property type**, see above) is to create the **dataset type**. The third step is to **assign or revoke the properties** to the newly created dataset type.
+
+```
+dataset_type = o.new_dataset_type(
+    code               = 'my_dataset_type',  # mandatory
+    description        = None,
+    mainDataSetPattern = None,
+    mainDataSetPath    = None,
+    disallowDeletion   = False,
+    validationPlugin   = None,
+)
+dataset_type.save()
+dataset_type.assign_property('property_name')
+dataset_type.revoke_property('property_name')
+dataset_type.get_property_assignments()
+```
+
+## create an experiment type / collection type
+
+The second step (after creating a **property type**, see above) is to create the **experiment type**.
+
+The new name for **experiment** is **collection**. You can use both methods interchangeably:
+
+- `new_experiment_type()` == `new_collection_type()`
+
+```
+experiment_type = o.new_experiment_type(
+    code,
+    description      = None,
+    validationPlugin = None,
+)
+experiment_type.save()
+experiment_type.assign_property('property_name')
+experiment_type.revoke_property('property_name')
+experiment_type.get_property_assignments()
+```
+
+## create material types
+
+Materials and material types are deprecated in newer versions of openBIS.
+
+```
+material_type = o.new_material_type(
+    code,
+    description      = None,
+    validationPlugin = None,
+)
+material_type.save()
+material_type.assign_property('property_name')
+material_type.revoke_property('property_name')
+material_type.get_property_assignments()
+```
+
+## create plugins
+
+Plugins are Jython scripts that can accomplish more complex data-checks than ordinary types and vocabularies can achieve. They are assigned to entity types (dataset type, sample type etc.). [Documentation and examples can be found here](https://wiki-bsse.ethz.ch/display/openBISDoc/Properties+Handled+By+Scripts)
+
+```
+pl = o.new_plugin(
+    name       = 'my_new_entry_validation_plugin',
+    pluginType = 'ENTITY_VALIDATION',     # or 'DYNAMIC_PROPERTY' or 'MANAGED_PROPERTY',
+    entityKind = None,                    # or 'SAMPLE', 'MATERIAL', 'EXPERIMENT', 'DATA_SET'
+    script     = 'def calculate(): pass'  # a JYTHON script
+)
+pl.save()
+```
+
+## Users, Groups and RoleAssignments
+
+Users can only log in to the openBIS system when:
+
+- they are present in the authentication system (e.g. LDAP)
+- the username/password is correct
+- the user's email address is present
+- the user is already added to the openBIS user list (see below)
+- the user is assigned a role which allows a login, either directly assigned or indirectly assigned via a group membership
+
+```
+o.get_groups()
+group = o.new_group(code='group_name', description='...')
+group = o.get_group('group_name')
+group.save()
+group.assign_role(role='ADMIN', space='DEFAULT')
+group.get_roles()
+group.revoke_role(role='ADMIN', space='DEFAULT')
+
+group.add_members(['admin'])
+group.get_members()
+group.del_members(['admin'])
+group.delete()
+
+o.get_persons()
+person = o.new_person(userId='username')
+person.space = 'USER_SPACE'
+person.save()
+# person.delete() is currently not possible.
+
+person.assign_role(role='ADMIN', space='MY_SPACE')
+person.assign_role(role='OBSERVER')
+person.get_roles()
+person.revoke_role(role='ADMIN', space='MY_SPACE')
+person.revoke_role(role='OBSERVER')
+
+o.get_role_assignments()
+o.get_role_assignments(space='MY_SPACE')
+o.get_role_assignments(group='MY_GROUP')
+ra = o.get_role_assignment(techId)
+ra.delete()
+```
+
+## Spaces
+
+Spaces are the fundamental way in openBIS to divide access between groups. Within a space, data can be easily shared. Between spaces, people need to be given specific access rights (see section above). The structure in openBIS is as follows:
+
+- space
+  - project
+    - experiment / collection
+      - sample / object
+        - dataset
+
+```
+space = o.new_space(code='space_name', description='')
+space.save()
+o.get_spaces(
+    start_with = 0,   # start_with and count
+    count      = 10,  # enable paging
+)
+space = o.get_space('MY_SPACE')
+
+# get individual attributes
+space.code
+space.description
+space.registrator
+space.registrationDate
+space.modifier
+space.modificationDate
+
+# set individual attribute
+# most of the attributes above are set automatically and cannot be modified.
+space.description = '...'
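+# a changed attribute must be saved to take effect
+space.save()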
+
+# get all attributes as a dictionary
+space.attrs.all()
+
+space.delete('reason for deletion')
+```
+
+## Projects
+
+Projects live within spaces and usually contain experiments (aka collections):
+
+- space
+  - project
+    - experiment / collection
+      - sample / object
+        - dataset
+
+```
+project = o.new_project(
+    space       = space,
+    code        = 'project_name',
+    description = 'some project description'
+)
+project = space.new_project(
+    code        = 'project_code',
+    description = 'project description'
+)
+project.save()
+
+o.get_projects(
+    space      = 'MY_SPACE',  # show only projects in MY_SPACE
+    start_with = 0,           # start_with and count
+    count      = 10,          # enable paging
+)
+o.get_projects(space='MY_SPACE')
+space.get_projects()
+
+project.get_experiments()  # see details and limitations in Section 'search for experiments'
+
+project.get_attachments()  # deprecated, as attachments are not compatible with ELN-LIMS.
+                           # Attachments are an old concept and should not be used anymore.
+project.add_attachment(    # deprecated, see above
+    fileName='testfile',
+    description= 'another file',
+    title= 'one more attachment'
+)
+project.download_attachments(<path or cwd>)  # deprecated, see above
+
+# get individual attributes
+project.code
+project.description
+
+# set individual attribute
+project.description = '...'
+
+# get all attributes as a dictionary
+project.attrs.all()
+
+project.freeze = True
+project.freezeForExperiments = True
+project.freezeForSamples = True
+```
+
+## Experiments / Collections
+
+Experiments live within projects:
+
+- space
+  - project
+    - experiment / collection
+      - sample / object
+        - dataset
+
+The new name for **experiment** is **collection**. You can use both names interchangeably:
+
+- `get_experiment()` = `get_collection()`
+- `new_experiment()` = `new_collection()`
+- `get_experiments()` = `get_collections()`
+
+### create a new experiment
+
+```
+exp = o.new_experiment(
+    code='MY_NEW_EXPERIMENT',
+    type='DEFAULT_EXPERIMENT',
+    space='MY_SPACE',
+    project='YEASTS'
+)
+exp.save()
+```
+
+### search for experiments
+
+```
+experiments = o.get_experiments(
+    project       = 'YEASTS',
+    space         = 'MY_SPACE',
+    type          = 'DEFAULT_EXPERIMENT',
+    tags          = '*',
+    finished_flag = False,
+    props         = ['name', 'finished_flag']
+)
+experiments = project.get_experiments()
+experiment = experiments[0]     # get first experiment of result list
+for experiment in experiments:  # iterate over search results
+    print(experiment.props.all())
+dataframe = experiments.df      # get Pandas DataFrame of result list
+
+exp = o.get_experiment('/MY_SPACE/MY_PROJECT/MY_EXPERIMENT')
+```
+
+***Note: Attributes download***
+
+The `get_experiments()` method, by default, returns fewer details to make the download process faster.
+However, if you want to include specific attributes in the results, you can do so by using the `attrs` parameter.
+
+The `get_experiments()` method results include only `identifier`, `permId`, `type`, `registrator`, `registrationDate`, `modifier`, `modificationDate`
+
+```get attributes
+experiments = o.get_experiments(
+    project = 'YEASTS',
+    space   = 'MY_SPACE',
+    type    = 'DEFAULT_EXPERIMENT',
+    attrs   = ["parents", "children"]
+)
+
+    identifier             permId                type                registrator  registrationDate     modifier  modificationDate     parents                    children
+--  ---------------------  --------------------  ------------------  -----------  -------------------  --------  -------------------  -------------------------  ----------
+ 0  /MY_SPACE/YEASTS/EXP1  20230407070122991-46  DEFAULT_EXPERIMENT  admin        2023-04-07 09:01:23  admin     2023-04-07 09:02:22  ['/MY_SPACE/YEASTS/EXP2']  []
+
+```
+
+### Experiment attributes
+
+```
+exp.attrs.all()  # returns all attributes as a dict
+
+exp.attrs.tags = ['some', 'tags']
+exp.tags = ['some', 'tags']  # same thing
+exp.save()
+
+exp.code
+exp.description
+exp.registrator
+...
+
+exp.project = 'my_project'
+exp.space = 'my_space'
+exp.freeze = True
+exp.freezeForDataSets = True
+exp.freezeForSamples = True
+
+exp.save()  # needed to save/update the changed attributes and properties
+```
+
+### Experiment properties
+
+**Getting properties**
+
+```
+experiment.props == experiment.p  # you can use either .props or .p to access the properties
+experiment.p                      # in Jupyter: show all properties in a nice table
+experiment.p()                    # get all properties as a dict
+experiment.props.all()            # get all properties as a dict
+experiment.p('prop1','prop2')     # get some properties as a dict
+experiment.p.get('$name')         # get the value of a property
+experiment.p['property']          # get the value of a property
+```
+
+**Setting properties**
+
+```
+experiment.project = 'my_project'          # assign experiment to a project
+
+experiment.p. + TAB                        # in Jupyter/IPython: show list of available properties
+experiment.p.my_property_ + TAB            # in Jupyter/IPython: show datatype or controlled vocabulary
+experiment.p['my_property'] = "value"      # set the value of a property
+experiment.p.set('my_property', 'value')   # set the value of a property
+experiment.p.my_property = "some value"    # set the value of a property
+experiment.p.set({'my_property':'value'})  # set the values of some properties
+experiment.set_props({ key: value })       # set the values of some properties
+
+experiment.save()                          # needed to save/update the changed attributes and properties
+```
+
+## Samples / Objects
+
+Samples usually live within experiments/collections:
+
+- space
+  - project
+    - experiment / collection
+      - sample / object
+        - dataset
+
+The new name for **sample** is **object**. You can use both names interchangeably:
+
+- `get_sample()` = `get_object()`
+- `new_sample()` = `new_object()`
+- `get_samples()` = `get_objects()`
+
+etc.
+
+```
+sample = o.new_sample(
+    type       = 'YEAST',
+    space      = 'MY_SPACE',
+    experiment = '/MY_SPACE/MY_PROJECT/EXPERIMENT_1',
+    parents    = [parent_sample, '/MY_SPACE/YEA66'],  # you can use either permId, identifier
+    children   = [child_sample],                      # or sample object
+    props      = {"name": "some name", "description": "something interesting"}
+)
+sample = space.new_sample( type='YEAST' )
+sample.save()
+
+sample = o.get_sample('/MY_SPACE/MY_SAMPLE_CODE')
+sample = o.get_sample('20170518112808649-52')
+samples = o.get_samples(type='UNKNOWN')  # see details and limitations in Section 'search for samples / objects'
+
+# get individual attributes
+sample.space
+sample.code
+sample.permId
+sample.identifier
+sample.type  # once the sample type is defined, you cannot modify it
+
+# set attribute
+sample.space = 'MY_OTHER_SPACE'
+
+sample.experiment  # a sample can belong to one experiment only
+sample.experiment = '/MY_SPACE/MY_PROJECT/MY_EXPERIMENT'
+
+sample.project
+sample.project = '/MY_SPACE/MY_PROJECT'  # only works if project samples are enabled
+
+sample.tags
+sample.tags = ['guten_tag', 'zahl_tag' ]
+
+sample.attrs.all()  # returns all attributes as a dict
+sample.props.all()  # returns all properties as a dict
+
+sample.get_attachments()  # deprecated, as attachments are not compatible with ELN-LIMS.
+                          # Attachments are an old concept and should not be used anymore.
+sample.download_attachments(<path or cwd>)  # deprecated, see above
+sample.add_attachment('testfile.xls')       # deprecated, see above
+
+sample.delete('deleted for some reason')
+```
+
+## create/update/delete many samples in a transaction
+
+Creating a single sample takes some time. If you need to create many samples, you might want to create them in one transaction. This will transfer all your sample data at once. The upside of this is the **gain in speed**. The downside: this is an **all-or-nothing** operation, which means either all samples will be registered or none (if any error occurs).
+
+**create many samples in one transaction**
+
+```
+trans = o.new_transaction()
+for i in range (0, 100):
+    sample = o.new_sample(...)
+    trans.add(sample)
+
+trans.commit()
+```
+
+**update many samples in one transaction**
+
+```
+trans = o.new_transaction()
+for sample in o.get_samples(count=100):
+    sample.props.some_property = 'different value'
+    trans.add(sample)
+
+trans.commit()
+```
+
+**delete many samples in one transaction**
+
+```
+trans = o.new_transaction()
+for sample in o.get_samples(count=100):
+    sample.mark_to_be_deleted()
+    trans.add(sample)
+
+trans.reason('go what has to go')
+trans.commit()
+```
+
+**Note:** You can use the `mark_to_be_deleted()`, `unmark_to_be_deleted()` and `is_marked_to_be_deleted()` methods to set and read the internal flag.
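+
+A minimal sketch of reading and resetting that flag on a single sample:
+
+```
+sample = o.get_samples(count=1)[0]  # any sample will do for this demo
+sample.mark_to_be_deleted()
+sample.is_marked_to_be_deleted()    # True
+sample.unmark_to_be_deleted()       # the sample will no longer be deleted on commit
+```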
+
+### parents, children, components and container
+
+```
+sample.get_parents()
+sample.set_parents(['/MY_SPACE/PARENT_SAMPLE_NAME'])
+sample.add_parents('/MY_SPACE/PARENT_SAMPLE_NAME')
+sample.del_parents('/MY_SPACE/PARENT_SAMPLE_NAME')
+
+sample.get_children()
+sample.set_children('/MY_SPACE/CHILD_SAMPLE_NAME')
+sample.add_children('/MY_SPACE/CHILD_SAMPLE_NAME')
+sample.del_children('/MY_SPACE/CHILD_SAMPLE_NAME')
+
+# A Sample may belong to another Sample, which acts as a container.
+# As opposed to DataSets, a Sample may only belong to one container.
+sample.container  # returns a sample object
+sample.container = '/MY_SPACE/CONTAINER_SAMPLE_NAME'  # watch out, this will change the identifier of the sample to:
+                                                      # /MY_SPACE/CONTAINER_SAMPLE_NAME:SAMPLE_NAME
+sample.container = ''  # this will remove the container.
+
+# A Sample may contain other Samples, in order to act like a container (see above)
+# caveat: containers are NOT compatible with ELN-LIMS
+# The Sample-objects inside that Sample are called «components» or «contained Samples»
+# You may also use the xxx_contained() functions, which are just aliases.
+sample.get_components()
+sample.set_components('/MY_SPACE/COMPONENT_NAME')
+sample.add_components('/MY_SPACE/COMPONENT_NAME')
+sample.del_components('/MY_SPACE/COMPONENT_NAME')
+```
+
+### sample tags
+
+```
+sample.get_tags()
+sample.set_tags('tag1')
+sample.add_tags(['tag2','tag3'])
+sample.del_tags('tag1')
+```
+
+### Sample attributes and properties
+
+**Getting properties**
+
+```
+sample.attrs.all()         # returns all attributes as a dict
+sample.attribute_name      # return the attribute value
+
+sample.props == sample.p   # you can use either .props or .p to access the properties
+sample.p                   # in Jupyter: show all properties in a nice table
+sample.p()                 # get all properties as a dict
+sample.props.all()         # get all properties as a dict
+sample.p('prop1','prop2')  # get some properties as a dict
+sample.p.get('$name')      # get the value of a property
+sample.p['property']       # get the value of a property
+```
+
+**Setting properties**
+
+```
+sample.experiment = 'first_exp'        # assign sample to an experiment
+sample.project = 'my_project'          # assign sample to a project
+
+sample.p. + TAB                        # in Jupyter/IPython: show list of available properties
+sample.p.my_property_ + TAB            # in Jupyter/IPython: show datatype or controlled vocabulary
+sample.p['my_property'] = "value"      # set the value of a property
+sample.p.set('my_property', 'value')   # set the value of a property
+sample.p.my_property = "some value"    # set the value of a property
+sample.p.set({'my_property':'value'})  # set the values of some properties
+sample.set_props({ key: value })       # set the values of some properties
+
+sample.save()                          # needed to save/update the attributes and properties
+```
+
+### search for samples / objects
+
+The result of a search is always a list, even when no items are found. The `.df` attribute returns
+the Pandas dataFrame of the results.
+
+```
+samples = o.get_samples(
+    space ='MY_SPACE',
+    type  ='YEAST',
+    tags  =['*'],                    # only samples with existing tags
+    start_with = 0,                  # start_with and count
+    count      = 10,                 # enable paging
+    where = {
+        "$SOME.WEIRD-PROP": "hello"  # only receive samples where properties match
+    },
+
+    registrationDate = "2020-01-01",   # date format: YYYY-MM-DD
+    modificationDate = "<2020-12-31",  # use > or < to search for specified date and later / earlier
+    attrs=[                            # show these attributes in the dataFrame
+        'sample.code',
+        'registrator.email',
+        'type.generatedCodePrefix'
+    ],
+    parent_property = 'value',           # search in a parent's property
+    child_property = 'value',            # search in a child's property
+    container_property = 'value',        # search in a container's property
+    parent = '/MY_SPACE/PARENT_SAMPLE',  # sample has this as its parent
+    parent = '*',                        # sample has at least one parent
+    child = '/MY_SPACE/CHILD_SAMPLE',
+    child = '*',                         # sample has at least one child
+    container = 'MY_SPACE/CONTAINER',
+    container = '*',                     # sample lives in a container
+    props=['$NAME', 'MATING_TYPE']       # show these properties in the result
+)
+
+sample = samples[9]              # get the 10th sample
+                                 # of the search results
+sample = samples['/SPACE/AABC']  # same, fetched by identifier
+for sample in samples:           # iterate over the
+    print(sample.code)           # search results
+
+
+samples.df                       # returns a Pandas DataFrame object
+
+samples = o.get_samples(props="*")  # retrieve all properties of all samples
+```
+
+***Note: Attributes download***
+
+The `get_samples()` method, by default, returns fewer details to make the download process faster.
+However, if you want to include specific attributes in the results, you can do so by using the `attrs` parameter.
+
+The `get_samples()` method results include only `identifier`, `permId`, `type`, `registrator`, `registrationDate`, `modifier`, `modificationDate`
+
+```get attributes
+samples = o.get_samples(
+    space = 'MY_SPACE',
+    type  = 'YEAST',
+    attrs = ["parents", "children"]
+)
+
+    identifier                permId                type   registrator  registrationDate     modifier  modificationDate     parents                    children
+--  ------------------------  --------------------  -----  -----------  -------------------  --------  -------------------  -------------------------  ----------
+ 0  /MY_SPACE/YEASTS/SAMPLE1  20230407070121337-47  YEAST  admin        2023-04-07 09:06:23  admin     2023-04-07 09:06:22  ['/MY_SPACE/YEASTS/EXP2']  []
+
+```
+
+### freezing samples
+
+```
+sample.freeze = True
+sample.freezeForComponents = True
+sample.freezeForChildren = True
+sample.freezeForParents = True
+sample.freezeForDataSets = True
+```
+
+## Datasets
+
+Datasets are by all means the most important openBIS entity. The actual files are stored as datasets; all other openBIS entities mainly are necessary to annotate and to structure the data:
+
+- space
+  - project
+    - experiment / collection
+      - sample / object
+        - dataset
+
+### working with existing dataSets
+
+**search for datasets**
+
+This example does the following:
+
+- search for all datasets of type `SCANS`, retrieve the first 10 entries
+- print out all properties
+- print the list of all files in this dataset
+- download the dataset
+
+```
+datasets = sample.get_datasets(type='SCANS', start_with=0, count=10)
+for dataset in datasets:
+    print(dataset.props())
+    print(dataset.file_list)
+    dataset.download()
+dataset = datasets[0]
+```
+
+***Note: Attributes download***
+
+The `get_datasets()` method, by default, returns fewer details to make the download process faster.
+However, if you want to include specific attributes in the results, you can do so by using the `attrs` parameter.
+
+The `get_datasets()` method results include only `permId`, `type`, `experiment`, `sample`, `registrationDate`, `modificationDate`,
+`location`, `status`, `presentInArchive`, `size`
+
+```get attributes
+datasets = o.get_datasets(
+    space = 'MY_SPACE',
+    attrs = ["parents", "children"]
+)
+
+    permId                type      experiment                 sample                  registrationDate     modificationDate     location                                 status     presentInArchive  size  parents                   children
+--  --------------------  --------  -------------------------  ----------------------  -------------------  -------------------  ---------------------------------------  ---------  ----------------  ----  ------------------------  ------------------------
+ 0  20230526101657295-48  RAW_DATA  /MY_SPACE/DEFAULT/DEFAULT  /MY_SPACE/DEFAULT/EXP1  2023-05-26 12:16:58  2023-05-26 12:17:37  1F60C7DC-63D8-4C07/20230526101657295-48  AVAILABLE  False             469   []                        ['20230526101737019-49']
+ 1  20230526101737019-49  RAW_DATA  /MY_SPACE/DEFAULT/DEFAULT  /MY_SPACE/DEFAULT/EXP1  2023-05-26 12:17:37  2023-05-26 12:17:37  1F60C7DC-63D8-4C07/20230526101737019-49  AVAILABLE  False             127   ['20230526101657295-48']  []
+```
+
+**More dataset functions:**
+
+```
+ds = o.get_dataset('20160719143426517-259')
+ds.get_parents()
+ds.get_children()
+ds.sample
+ds.experiment
+ds.physicalData
+ds.status       # AVAILABLE LOCKED ARCHIVED
+                # ARCHIVE_PENDING UNARCHIVE_PENDING
+                # BACKUP_PENDING
+ds.archive()    # archives a dataset, i.e. moves it to a slower but cheaper diskspace (tape).
+                # archived datasets cannot be downloaded, they need to be unarchived first.
+                # This is an asynchronous process,
+                # check ds.status regularly until the dataset becomes 'ARCHIVED'
+ds.unarchive()  # this starts an asynchronous process which gets the dataset from the tape.
+                # Check ds.status regularly until it becomes 'AVAILABLE'
+
+ds.attrs.all()  # returns all attributes as a dict
+ds.props.all()  # returns all properties as a dict
+
+ds.add_attachment()   # Deprecated. Attachments usually contain meta-data
+ds.get_attachments()  # about the dataSet, not the data itself.
+ds.download_attachments(<path or cwd>)  # Deprecated, as attachments are not compatible with ELN-LIMS.
+                                        # Attachments are an old concept and should not be used anymore.
+```
+
+### download dataSets
+
+```
+o.download_prefix               # used for download() and symlink() method.
+                                # Is set to data/hostname by default, but can be changed.
+ds.get_files(start_folder="/")  # get file list as Pandas dataFrame
+ds.file_list                    # get file list as array
+ds.file_links                   # file list as a dict containing direct https links
+
+ds.download()                   # simply download all files to data/hostname/permId/
+ds.download(
+    destination = 'my_data',         # download files to folder my_data/
+    create_default_folders = False,  # ignore the /original/DEFAULT folders made by openBIS
+    wait_until_finished = False,     # download in background, continue immediately
+    workers = 10                     # 10 downloads parallel (default)
+)
+ds.download_path  # returns the relative path (destination) of the files after a ds.download()
+ds.is_physical()  # True if the dataset is physical
+```
+
+### link dataSets
+
+Instead of downloading a dataSet, you can create a symbolic link to a dataSet in the openBIS dataStore. To do that, the openBIS dataStore needs to be mounted first (see mount method above). **Note:** Symbolic links and the mount() feature currently do not work with Windows.
+
+```
+o.download_prefix               # used for download() and symlink() method.
+                                # Is set to data/hostname by default, but can be changed.
+ds.symlink()                    # creates a symlink for this dataset: data/hostname/permId
+                                # tries to mount the openBIS instance
+                                # in case it is not mounted yet
+ds.symlink(
+    target_dir = 'data/dataset_1/',  # default target_dir is: data/hostname/permId
+    replace_if_symlink_exists=True
+)
+ds.is_symlink()
+```
+
+### dataSet attributes and properties
+
+**Getting properties**
+
+```
+ds.attrs.all()         # returns all attributes as a dict
+ds.attribute_name      # return the attribute value
+
+ds.props == ds.p       # you can use either .props or .p to access the properties
+ds.p                   # in Jupyter: show all properties in a nice table
+ds.p()                 # get all properties as a dict
+ds.props.all()         # get all properties as a dict
+ds.p('prop1','prop2')  # get some properties as a dict
+ds.p.get('$name')      # get the value of a property
+ds.p['property']       # get the value of a property
+```
+
+**Setting properties**
+
+```
+ds.experiment = 'first_exp'        # assign dataset to an experiment
+ds.sample = 'my_sample'            # assign dataset to a sample
+
+ds.p. + TAB                        # in Jupyter/IPython: show list of available properties
+ds.p.my_property_ + TAB            # in Jupyter/IPython: show datatype or controlled vocabulary
+ds.p['my_property'] = "value"      # set the value of a property
+ds.p.set('my_property', 'value')   # set the value of a property
+ds.p.my_property = "some value"    # set the value of a property
+ds.p.set({'my_property':'value'})  # set the values of some properties
+ds.set_props({ key: value })       # set the values of some properties
+```
+
+### search for dataSets
+
+- The result of a search is always a list, even when no items are found
+- The `.df` attribute returns the Pandas dataFrame of the results
+
+```
+datasets = o.get_datasets(
+    type ='MY_DATASET_TYPE',
+    **{ "SOME.WEIRD:PROP": "value"},  # property name contains a dot or a
+                                      # colon: cannot be passed as an argument
+    start_with = 0,                   # start_with and count
+    count      = 10,                  # enable paging
+    registrationDate = "2020-01-01",  # date format: YYYY-MM-DD
+    modificationDate = "<2020-12-31", # use > or < to search for specified date and later / earlier
+    parent_property = 'value',        # search in a parent's property
+    child_property = 'value',         # search in a child's property
+    container_property = 'value',     # search in a container's property
+    parent = '/MY_SPACE/PARENT_DS',   # has this dataset as its parent
+    parent = '*',                     # has at least one parent dataset
+    child = '/MY_SPACE/CHILD_DS',
+    child = '*',                      # has at least one child dataset
+    container = 'MY_SPACE/CONTAINER_DS',
+    container = '*',                  # belongs to a container dataset
+    attrs=[                           # show these attributes in the dataFrame
+        'sample.code',
+        'registrator.email',
+        'type.generatedCodePrefix'
+    ],
+    props=['$NAME', 'MATING_TYPE']    # show these properties in the result
+)
+datasets = o.get_datasets(props="*")  # retrieve all properties of all dataSets
+dataset = datasets[0]                 # get the first dataset in the search result
+for dataset in datasets:              # iterate over the datasets
+    ...
+df = datasets.df                      # returns a Pandas dataFrame object of the search results
+```
+
+In some cases, you might want to retrieve precisely certain datasets. This can be achieved by
+method chaining (but be aware that it might not be very performant):
+
+```
+datasets = o.get_experiments(project='YEASTS')\
+           .get_samples(type='FLY')\
+           .get_datasets(
+               type='ANALYZED_DATA',
+               props=['MY_PROPERTY'],
+               MY_PROPERTY='some analyzed data'
+           )
+```
+
+Another example:
+
+```
+datasets = o.get_experiment('/MY_NEW_SPACE/MY_PROJECT/MY_EXPERIMENT4')\
+           .get_samples(type='UNKNOWN')\
+           .get_parents()\
+           .get_datasets(type='RAW_DATA')
+```
+
+### freeze dataSets
+
+- once a dataSet has been frozen, it cannot be changed by anyone anymore
+- so be careful!
+
+```
+ds.freeze = True
+ds.freezeForChildren = True
+ds.freezeForParents = True
+ds.freezeForComponents = True
+ds.freezeForContainers = True
+ds.save()
+```
+
+### create a new dataSet
+
+```
+ds_new = o.new_dataset(
+    type       = 'ANALYZED_DATA',
+    experiment = '/SPACE/PROJECT/EXP1',
+    sample     = '/SPACE/SAMP1',
+    files      = ['my_analyzed_data.dat'],
+    props      = {'name': 'some good name', 'description': '...' }
+)
+ds_new.save()
+```
+
+### create dataSet with zipfile
+
+A dataSet containing one zipfile which will be unzipped in openBIS:
+
+```python
+ds_new = o.new_dataset(
+    type    = 'RAW_DATA',
+    sample  = '/SPACE/SAMP1',
+    zipfile = 'my_zipped_folder.zip',
+)
+ds_new.save()
+```
+
+### create dataSet with mixed content
+
+- mixed content means: folders and files are provided
+- a folder specified with a relative path (and all its content) will end up in the root, while keeping its structure
+  - `../measurements/` --> `/measurements/`
+  - `some/folder/somewhere/` --> `/somewhere/`
+- relative files will also end up in the root
+  - `my_file.txt` --> `/my_file.txt`
+  - `../somewhere/else/my_other_file.txt` --> `/my_other_file.txt`
+  - `some/folder/file.txt` --> `/file.txt`
+- useful if a dataSet contains files and folders
+- the content of the folder will be zipped (on-the-fly) and uploaded to openBIS
+- openBIS will keep the folder structure intact
+- relative paths will be shortened to their basename. For example:
+
+| local                      | openBIS    |
+| -------------------------- | ---------- |
+| `../../myData/`            | `myData/`  |
+| `some/experiment/results/` | `results/` |
+
+```
+ds_new = o.new_dataset(
+    type   = 'RAW_DATA',
+    sample = '/SPACE/SAMP1',
+    files  = ['../measurements/', 'my_analyis.ipynb', 'results/']
+)
+ds_new.save()
+```
+
+### create dataSet container
+
+A DataSet of kind=CONTAINER contains other DataSets, but no files:
+
+```
+ds_new = o.new_dataset(
+    type       = 'ANALYZED_DATA',
+    experiment = '/SPACE/PROJECT/EXP1',
+    sample     = '/SPACE/SAMP1',
+    kind       = 'CONTAINER',
+    props      = {'name': 'some good name', 'description': '...' }
+)
+ds_new.save()
+```
+
+### get, set, add and remove parent datasets
+
+```
+dataset.get_parents()
+dataset.set_parents(['20170115220259155-412'])
+dataset.add_parents(['20170115220259155-412'])
+dataset.del_parents(['20170115220259155-412'])
+```
+
+### get, set, add and remove child datasets
+
+```
+dataset.get_children()
+dataset.set_children(['20170115220259155-412'])
+dataset.add_children(['20170115220259155-412'])
+dataset.del_children(['20170115220259155-412'])
+```
+
+### dataSet containers
+
+- A DataSet may belong to other DataSets, which must be of kind=CONTAINER
+- As opposed to Samples, DataSets may belong to (be contained in) more than one DataSet container
+- caveat: containers are NOT compatible with ELN-LIMS
+
+```
+dataset.get_containers()
+dataset.set_containers(['20170115220259155-412'])
+dataset.add_containers(['20170115220259155-412'])
+dataset.del_containers(['20170115220259155-412'])
+```
+
+- a DataSet of kind=CONTAINER may contain other DataSets, to act like a folder (see above)
+- the DataSet-objects inside that DataSet are called components or contained DataSets
+- you may also use the xxx_contained() functions, which are just aliases.
+- caveat: components are NOT compatible with ELN-LIMS
+
+```
+dataset.get_components()
+dataset.set_components(['20170115220259155-412'])
+dataset.add_components(['20170115220259155-412'])
+dataset.del_components(['20170115220259155-412'])
+```
+
+## Semantic Annotations
+
+**Create** semantic annotation for sample type 'UNKNOWN':
+
+```
+sa = o.new_semantic_annotation(
+    entityType = 'UNKNOWN',
+    predicateOntologyId = 'po_id',
+    predicateOntologyVersion = 'po_version',
+    predicateAccessionId = 'pa_id',
+    descriptorOntologyId = 'do_id',
+    descriptorOntologyVersion = 'do_version',
+    descriptorAccessionId = 'da_id'
+)
+sa.save()
+```
+
+**Create** semantic annotation for property type (predicate and descriptor values omitted for brevity):
+
+```
+sa = o.new_semantic_annotation(propertyType = 'DESCRIPTION', ...)
+sa.save()
+```
+
+**Create** semantic annotation for sample property assignment (predicate and descriptor values omitted for brevity):
+
+```
+sa = o.new_semantic_annotation(
+    entityType = 'UNKNOWN',
+    propertyType = 'DESCRIPTION',
+    ...
+)
+sa.save()
+```
+
+**Create** a semantic annotation directly from a sample type. Will also create sample property assignment annotations when propertyType is given:
+
+```
+st = o.get_sample_type("ORDER")
+st.new_semantic_annotation(...)
+```
+
+**Get all** semantic annotations
+
+```
+o.get_semantic_annotations()
+```
+
+**Get** semantic annotation by perm id
+
+```
+sa = o.get_semantic_annotation("20171015135637955-30")
+```
+
+**Update** semantic annotation
+
+```
+sa.predicateOntologyId = 'new_po_id'
+sa.descriptorOntologyId = 'new_do_id'
+sa.save()
+```
+
+**Delete** semantic annotation
+
+```
+sa.delete('reason')
+```
+
+## Tags
+
+```
+new_tag = o.new_tag(
+    code        = 'my_tag',
+    description = 'some descriptive text'
+)
+new_tag.description = 'some new description'
+new_tag.save()
+o.get_tags()
+o.get_tag('/username/TAG_Name')
+o.get_tag('TAG_Name')
+
+tag.get_experiments()
+tag.get_samples()
+tag.get_owner()  # returns a person object
+tag.delete('why?')
+```
+
+## Vocabulary and VocabularyTerms
+
+An entity such as Sample (Object), Experiment (Collection), Material or DataSet can be of a specific _entity type_:
+
+- Sample Type (Object Type)
+- Experiment Type (Collection Type)
+- DataSet Type
+- Material Type
+
+Every type defines which **Properties** may be defined. Properties act like **Attributes**, but they are type-specific. Properties can contain all sorts of information, such as free text, XML, Hyperlink, Boolean and also **Controlled Vocabulary**. Such a Controlled Vocabulary consists of many **VocabularyTerms**. These terms are used to allow only certain values to be entered in a Property field.
+
+So, for example, you want to add a property called **Animal** to a Sample, and you want to control which terms are entered in this Property field. For this you need to do a couple of steps (a condensed sketch follows this list):
+
+1. create a new vocabulary _AnimalVocabulary_
+2. add terms to that vocabulary: _Cat, Dog, Mouse_
+3. create a new PropertyType (e.g. _Animal_) of DataType _CONTROLLEDVOCABULARY_ and assign the _AnimalVocabulary_ to it
+4. create a new SampleType (e.g. _Pet_) and _assign_ the created PropertyType to that Sample type.
+5. If you now create a new Sample of type _Pet_ you will be able to add a property _Animal_ to it which only accepts the terms _Cat, Dog_ or _Mouse_.
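+
+A minimal sketch of steps 3-5, assuming a vocabulary with code `ANIMAL_VOCABULARY` was created in steps 1-2 (all codes are illustrative; the individual methods are described in the Masterdata sections above):
+
+```
+pt = o.new_property_type(
+    code       = 'ANIMAL',
+    label      = 'Animal',
+    dataType   = 'CONTROLLEDVOCABULARY',
+    vocabulary = 'ANIMAL_VOCABULARY',
+)
+pt.save()
+
+pet_type = o.new_sample_type(
+    code                = 'PET',
+    generatedCodePrefix = 'PET',
+)
+pet_type.save()
+pet_type.assign_property('ANIMAL')  # new PET samples now only accept the vocabulary's terms
+```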
+
+**create new Vocabulary with three VocabularyTerms**
+
+```
+voc = o.new_vocabulary(
+    code        = 'BBB',
+    description = 'description of vocabulary aaa',
+    urlTemplate = 'https://ethz.ch',
+    terms = [
+        { "code": 'term_code1', "label": "term_label1", "description": "term_description1"},
+        { "code": 'term_code2', "label": "term_label2", "description": "term_description2"},
+        { "code": 'term_code3', "label": "term_label3", "description": "term_description3"}
+    ]
+)
+voc.save()
+
+voc.description = 'description of vocabulary BBB'
+voc.chosenFromList = True
+voc.save()  # update
+```
+
+**create additional VocabularyTerms**
+
+```
+term = o.new_term(
+    code='TERM_CODE_XXX',
+    vocabularyCode='BBB',
+    label='here comes a label',
+    description='here might appear a meaningful description'
+)
+term.save()
+```
+
+**update VocabularyTerms**
+
+To change the ordinal of a term, it has to be moved either to the top with the `.move_to_top()` method or after another term using the `.move_after_term('TERM_BEFORE')` method.
+
+```
+voc = o.get_vocabulary('STORAGE')
+term = voc.get_terms()['RT']
+term.label = "Room Temperature"
+term.official = True
+term.move_to_top()
+term.move_after_term('-40')
+term.save()
+term.delete()
+```
+
+## Change ELN Settings via pyBIS
+
+### Main Menu
+
+The ELN settings are stored as a **JSON string** in the `$eln_settings` property of the `GENERAL_ELN_SETTINGS` sample.
+You can show the **Main Menu settings** like this:
+
+```python
+import json
+settings_sample = o.get_sample("/ELN_SETTINGS/GENERAL_ELN_SETTINGS")
+settings = json.loads(settings_sample.props["$eln_settings"])
+print(settings["mainMenu"])
+{'showLabNotebook': True,
+ 'showInventory': True,
+ 'showStock': True,
+ 'showObjectBrowser': True,
+ 'showExports': True,
+ 'showStorageManager': True,
+ 'showAdvancedSearch': True,
+ 'showUnarchivingHelper': True,
+ 'showTrashcan': False,
+ 'showVocabularyViewer': True,
+ 'showUserManager': True,
+ 'showUserProfile': True,
+ 'showZenodoExportBuilder': False,
+ 'showBarcodes': False,
+ 'showDatasets': True}
+```
+
+To modify the **Main Menu settings**, you have to change the settings dictionary, convert it back to JSON and save the sample:
+
+```python
+settings['mainMenu']['showTrashcan'] = False
+settings_sample.props['$eln_settings'] = json.dumps(settings)
+settings_sample.save()
+```
+
+### Storages
+
+The **ELN storages settings** can be found in the samples of project `/ELN_SETTINGS/STORAGES`:
+
+```python
+o.get_samples(project='/ELN_SETTINGS/STORAGES')
+```
+
+To change the settings, just change the sample's properties and save the sample:
+
+```python
+sto = o.get_sample('/ELN_SETTINGS/STORAGES/BENCH')
+sto.props()
+{'$name': 'Bench',
+ '$storage.row_num': '1',
+ '$storage.column_num': '1',
+ '$storage.box_num': '9999',
+ '$storage.storage_space_warning': '80',
+ '$storage.box_space_warning': '80',
+ '$storage.storage_validation_level': 'BOX_POSITION',
+ '$xmlcomments': None,
+ '$annotations_state': None}
+sto.props['$storage.box_space_warning'] = '80'
+sto.save()
+```
+
+### Templates
+
+The **ELN templates settings** can be found in the samples of project `/ELN_SETTINGS/TEMPLATES`:
+
+```python
+o.get_samples(project='/ELN_SETTINGS/TEMPLATES')
+```
+
+To change the settings, use the same technique as shown above with the storages settings.
+
+### Custom Widgets
+
+To change the **Custom Widgets settings**, get the `property_type` and set the `metaData` attribute:
+
+```python
+pt = o.get_property_type('YEAST.SOURCE')
+pt.metaData = {'custom_widget': 'Spreadsheet'}
+pt.save()
+```
+
+Currently, the value of the `custom_widget` key can be set to either
+
+- `Spreadsheet` (for tabular, Excel-like data)
+- `Word Processor` (for rich text data)
diff --git a/docs/software-developer-documentation/client-side-extensions/eln-lims-web-ui-extensions.md b/docs/software-developer-documentation/client-side-extensions/eln-lims-web-ui-extensions.md
new file mode 100644
index 0000000000000000000000000000000000000000..5aad91e7cc31544fa48f19e80c7ebcc29cff4c05
--- /dev/null
+++ b/docs/software-developer-documentation/client-side-extensions/eln-lims-web-ui-extensions.md
@@ -0,0 +1,205 @@
+ELN-LIMS WEB UI extensions
+==========================
+
+## Introduction
+
+The current aim of these extensions is to accommodate two groups of
+modifications:
+
+- Pure configuration: enabling/disabling some features to clean up the
+  interface and make it less confusing for non-expert users, and, very
+  often, adding type extensions for types specified with another master
+  data extension.
+- Extending the interface to accommodate additional functionality
+  without needing to deal with the internals.
+
+## Plugin structure
+
+### plugins folder
+
+Each folder inside this folder is an ELN UI extension.
+
+Each extension currently contains a single file named "plugin.js".
+
+### config.js file
+
+Contains a section called PLUGINS\_CONFIGURATION indicating the plugins
+to be loaded from the plugins folder.
+
+    var PLUGINS_CONFIGURATION = {
+        extraPlugins : ["life-sciences", "flow", "microscopy"]
+    }
+
+### plugin.js file
+
+Contains the actual source of the plugin. We can distinguish three clear
+sections/patterns in the skeleton of the interface:
+
+- Interface:
+  <https://sissource.ethz.ch/sispub/openbis/-/blob/master/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/config/ELNLIMSPlugin.js>
+
+1. Configuring views through the use of a JSON structure. Parts of this
+structure are:
+
+- forcedDisableRTF (Deprecated in favour of Custom Widgets
+  configurable from the Instance Settings on the UI)
+- forceMonospaceFont (Deprecated in favour of Custom Widgets
+  configurable from the Instance Settings on the UI)
+- experimentTypeDefinitionsExtension
+- sampleTypeDefinitionsExtension
+- dataSetTypeDefinitionsExtension
+
+These are used extensively since they require very little development
+effort. The best examples of how to use these definition extensions can
+be found in the technologies that ship with the ELN:
+
+- Generic Technology:
+  <https://sissource.ethz.ch/sispub/openbis/-/blob/master/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/plugins/generic/plugin.js>
+- Life Sciences Technology:
+  <https://sissource.ethz.ch/sispub/openbis/-/blob/master/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/plugins/life-sciences/plugin.js>
+
+2\. Extending views through the use of the [Interceptor
+Pattern](https://en.wikipedia.org/wiki/Interceptor_pattern)
+
+- Template Methods: **ONLY** allow adding content in certain portions
+  of the interface. **ONLY** available for Experiment, Sample and
+  DataSet form views. These template methods are easy to use; they
+  allow adding custom components while isolating the programmer from
+  the rest of the form.
+  - experimentFormTop
+  - experimentFormBottom
+  - sampleFormTop
+  - sampleFormBottom
+  - dataSetFormTop
+  - dataSetFormBottom
+- Event Listeners: Allow listening to the before/after paint events for
+  **ALL** form views and list views. They allow the programmer to change
+  the model before it is displayed, and any part of the view afterwards.
+  They provide versatility, but with the added complexity of dealing
+  with the complete form.
+  - beforeViewPaint
+  - afterViewPaint
+
+- Template methods are only needed to add custom components to form
+  views. The best examples of how to use these can be found in the
+  technologies that ship with the ELN:
+  - Microscopy Technology:
+    <https://sissource.ethz.ch/sispub/openbis/-/blob/master/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/plugins/microscopy/plugin.js>
+
+3\. Other Extensions:
+
+- onSampleSave: Reserved for internal use; its use is discouraged. It
+  is tricky to use properly.
+- getExtraUtilities: Allows extending the utilities menu. A great
+  example is this template:
+  <https://sissource.ethz.ch/sispub/openbis/-/blob/master/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/plugins/template-extra-utilities/plugin.js>
+
+## Source Code Examples (plugin.js)
+
+### Configuration Only Extensions
+
+An example with only type configuration extensions is shown below.
+
+    function MyTechnology() {
+        this.init();
+    }
+
+    $.extend(MyTechnology.prototype, ELNLIMSPlugin.prototype, {
+        init: function() {
+
+        },
+        experimentTypeDefinitionsExtension : {
+            "FOLDER": {
+                "TOOLBAR": { CREATE: false, FREEZE: false, EDIT: false, MOVE: false, DELETE: false, UPLOAD_DATASET: false, UPLOAD_DATASET_HELPER: false, EXPORT_ALL: false, EXPORT_METADATA: true }
+            }
+        },
+        sampleTypeDefinitionsExtension : {
+            "SAMPLE_TYPE" : {
+                "TOOLBAR": { CREATE : true, EDIT : true, FREEZE : true, MOVE : true, COPY: true, DELETE : true, PRINT: true, HIERARCHY_GRAPH : true, HIERARCHY_TABLE : true, UPLOAD_DATASET : true, UPLOAD_DATASET_HELPER : true, EXPORT_ALL : true, EXPORT_METADATA : true, TEMPLATES : true, BARCODE : true },
+                "SHOW" : false,
+                "SAMPLE_CHILDREN_DISABLED": false,
+                "SAMPLE_CHILDREN_ANY_TYPE_DISABLED" : false,
+                "SAMPLE_PARENTS_DISABLED": false,
+                "SAMPLE_PARENTS_ANY_TYPE_DISABLED": true,
+                "SAMPLE_PARENTS_HINT": [{
+                    "LABEL": "Parent Label",
+                    "TYPE": "PARENT_TYPE",
+                    "ANNOTATION_PROPERTIES": []
+                }],
+                "SAMPLE_CHILDREN_HINT" : [{
+                    "LABEL": "Children Label",
+                    "TYPE": "CHILDREN_TYPE",
+                    "MIN_COUNT" : 0,
+                    "ANNOTATION_PROPERTIES": [{"TYPE" : "ANNOTATION.SYSTEM.COMMENTS", "MANDATORY" : false }]
+                }],
+                "ENABLE_STORAGE" : false,
+                "SHOW_ON_NAV": false,
+                "SHOW_ON_NAV_FOR_PARENT_TYPES": undefined,
+                extraToolbar : undefined
+            },
+        },
+        dataSetTypeDefinitionsExtension : {
+            "DATASET_TYPE" : {
+                "TOOLBAR": { EDIT : true, FREEZE : true, MOVE : true, ARCHIVE : true, DELETE : true, HIERARCHY_TABLE : true, EXPORT_ALL : true, EXPORT_METADATA : true },
+                "DATASET_PARENTS_DISABLED" : false,
+                extraToolbar : undefined
+            },
+        }
+    });
+
+    profile.plugins.push(new MyTechnology());
+
+### Toolbar Extensions
+
+An example with only toolbar extensions is shown below; variables with a
+dollar sign '$' indicate jQuery components:
+
+    function MyTechnology() {
+        this.init();
+    }
+
+    $.extend(MyTechnology.prototype, ELNLIMSPlugin.prototype, {
+        init: function() {
+
+        },
+        sampleTypeDefinitionsExtension : {
+            "SAMPLE_TYPE" : {
+                extraToolbar : function(mode, sample) {
+                    var toolbarModel = [];
+                    if(mode === FormMode.VIEW) {
+                        var $demoButton = FormUtil.getButtonWithIcon("glyphicon-heart", function () {
+                            // This empty function could be a call to do something in particular
+                        });
+                        toolbarModel.push({ component : $demoButton, tooltip: "Demo" });
+                    }
+                    return toolbarModel;
+                }
+            },
+        },
+        dataSetTypeDefinitionsExtension : {
+            "DATASET_TYPE" : {
+                extraToolbar : function(mode, dataset) {
+                    var toolbarModel = [];
+                    if(mode === FormMode.VIEW) {
+                        var $demoButton = FormUtil.getButtonWithIcon("glyphicon-heart", function () {
+                            // This empty function could be a call to do something in particular
+                        });
+                        toolbarModel.push({ component : $demoButton, tooltip: "Demo" });
+                    }
+                    return toolbarModel;
+                }
+            },
+        }
+    });
+
+    profile.plugins.push(new MyTechnology());
+
+### Extra Views as Utilities
+
+Please check the provided example:
+<https://sissource.ethz.ch/sispub/openbis/-/blob/master/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/plugins/template-extra-utilities/plugin.js>
\ No newline at end of file
diff --git a/docs/software-developer-documentation/client-side-extensions/img/122.png b/docs/software-developer-documentation/client-side-extensions/img/122.png
new file mode 100644
index 0000000000000000000000000000000000000000..a9181d4a675aa07174687ab0d146f48c6d699c78
Binary files /dev/null and b/docs/software-developer-documentation/client-side-extensions/img/122.png differ
diff --git a/docs/software-developer-documentation/client-side-extensions/img/128.png b/docs/software-developer-documentation/client-side-extensions/img/128.png
new file mode 100644
index 0000000000000000000000000000000000000000..1584c02406792ccc2cb1b057c8de26bb3ba24545
Binary files /dev/null and b/docs/software-developer-documentation/client-side-extensions/img/128.png differ
diff --git a/docs/software-developer-documentation/client-side-extensions/img/473.png b/docs/software-developer-documentation/client-side-extensions/img/473.png
new file mode 100644
index 0000000000000000000000000000000000000000..5bfc1da58a7e1d3144e1d523b5b6a5ec955328aa
Binary files /dev/null and b/docs/software-developer-documentation/client-side-extensions/img/473.png differ
diff --git a/docs/software-developer-documentation/client-side-extensions/img/771.png b/docs/software-developer-documentation/client-side-extensions/img/771.png
new file mode 100644
index 0000000000000000000000000000000000000000..5985617c00c52a67ec9ec9348810b57717d00026
Binary files /dev/null and b/docs/software-developer-documentation/client-side-extensions/img/771.png differ
diff --git a/docs/software-developer-documentation/client-side-extensions/index.rst b/docs/software-developer-documentation/client-side-extensions/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..573bf51924ae0060d4cb102640213cb585dccd3d
--- /dev/null
+++ b/docs/software-developer-documentation/client-side-extensions/index.rst
@@ -0,0 +1,8 @@
+Client-Side Extensions
+======================
+
+.. toctree::
+   :maxdepth: 4
+
+   eln-lims-web-ui-extensions
+   openbis-webapps
\ No newline at end of file
diff --git a/docs/software-developer-documentation/client-side-extensions/openbis-webapps.md b/docs/software-developer-documentation/client-side-extensions/openbis-webapps.md
new file mode 100644
index 0000000000000000000000000000000000000000..3889340c5384be77b8237872176ffdb62fd963af
--- /dev/null
+++ b/docs/software-developer-documentation/client-side-extensions/openbis-webapps.md
@@ -0,0 +1,437 @@
+openBIS webapps
+===============
+
+## Introduction
+
+Webapps are HTML5 apps that interact with openBIS. Webapps can be
+distributed as core-plugins. To supply a webapp plugin, create a folder
+called `webapps` in the `as` folder. Each subfolder of the `webapps`
+folder is treated as a webapp plugin. A webapp plugin requires two
+things, a `plugin.properties` file, as with all plugins, and a folder
+containing the content of the webapp. This folder can have any name and
+needs to be referenced in the `plugin.properties` file with the key
+**webapp-folder**.
+
+It is recommended to name the webapp folder `html` as done in the
+examples below. This has the advantage that an existing subfolder named
+`etc` will not be changed after an upgrade of the plugin. That is, the
+content of the folder `html/etc` will be completely untouched by
+upgrades. This feature allows providing an initial configuration (say
+in `html/etc/config.js`) with some default settings which can be
+overridden by the customer.
+
+The webapp is then served by the same web server (jetty) that serves
+openBIS. The name of the webapp defines the URL used to access it. See
+the example below. The file index.html is used as a welcome page if the
+user does not specifically request a particular page.
+
+An openBIS webapp is *not* a J2EE webapp. It has more in common with an
+app for mobile devices.
+
+### Example
+
+This is an example of a webapp.
In a real webapp, the name of the webapp
+can be any valid folder name. The same goes for the folder in the webapp
+containing the code. The name of the webapp folder is what is used
+to define the URL. The name of the folder containing the code is neither
+shown nor available to the user.
+
+#### Directory Structure
+
+- \[module\]
+    - \[version\]
+        - `as`
+            - `webapps`
+                - example-webapp
+                    - `plugin.properties`
+                    - html
+                        - `index.html`
+                - fun-viewer
+                    - `plugin.properties`
+                    - html
+                        - code
+                            - `index.html`
+
+#### plugin.properties
+
+    # The properties file for an example webapp plugin.
+    # Only the webapp folder needs to be defined.
+    webapp-folder = html
+
+#### URL
+
+If openBIS is served at the URL <https://my.domain.com:8443/openbis>,
+the above webapps will be available under the following URLs:
+
+- <https://my.domain.com:8443/openbis/webapp/example-webapp>
+- <https://my.domain.com:8443/openbis/webapp/fun-viewer>
+
+Server Configuration
+--------------------
+
+There are two things to consider in the server configuration. The
+injection of webapps is done through Jetty, which is the web server we
+use for openBIS. If you use the default provided jetty.xml
+configuration, then you do not need to do anything extra; if, on the
+other hand, you have a custom jetty.xml configuration, then you will
+need to update your jetty.xml file to support webapps.
+
+### Jetty Configuration
+
+If your openBIS server has a custom jetty.xml file, you will need to
+modify the file to include support for injecting web apps. To do this,
+you will need to replace
+org.eclipse.jetty.deploy.providers.WebAppProvider with
+ch.systemsx.cisd.openbis.generic.server.util.OpenbisWebAppProvider in
+the `addAppProvider` call in your jetty.xml.
+
+**jetty.xml**
+
+    <Call name="addBean">
+      <Arg>
+        <New id="DeploymentManager" class="org.eclipse.jetty.deploy.DeploymentManager">
+          <Set name="contexts">
+            <Ref id="Contexts" />
+          </Set>
+          <Call name="addAppProvider">
+            <Arg>
+              <New class="ch.systemsx.cisd.openbis.generic.server.util.OpenbisWebAppProvider">
+                <Set name="monitoredDir"><Property name="jetty.home" default="." />/webapps</Set>
+                <Set name="scanInterval">0</Set>
+                <Set name="extractWars">true</Set>
+              </New>
+            </Arg>
+          </Call>
+        </New>
+      </Arg>
+    </Call>
+
+Embedding webapps in the openBIS UI
+-----------------------------------
+
+#### Introduction
+
+Webapps can be used both as standalone applications and embedded in the
+openBIS web UI. Standalone webapps are built to completely replace the
+original openBIS web interface with a customer-adjusted layout and
+functionality. Users of standalone webapps are usually completely
+unaware of the default openBIS look and feel. The webapp itself provides
+them with all the functionality they need: login pages, web forms,
+searches, images, charts etc. The standalone webapp is the right choice
+when you want to build a very specific and fully featured web interface
+from scratch. If you want to use the default openBIS UI but extend it
+with some custom functionality, then embedding a webapp in the openBIS
+UI is probably the way to go. To make a webapp visible as a part of the
+default openBIS UI you have to define where the webapp should be shown
+using the "openbisui-contexts" property. Moreover, some of the contexts
+also require additional information describing when the webapp should
+be shown. For instance, to embed a webapp in the
For instance, to embed a webapp in the +experiment details view that will be displayed for experiments with type +"MY\_EXPERIMENT\_TYPE" your plugin.properties file should look like: + +**plugin.propeties** + + webapp-folder = html + openbisui-contexts = experiment-details-view + experiment-entity-types = MY_EXPERIMENT_TYPE + +#### Configuring embedded webapps + +A full list of supported properties is presented below. + +|Property Key |Description |Allowed values | +|-----------------------|------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +|openbisui-contexts |Place where the webapp is shown in the OpenBIS UI. |modules-menu <ul><li>webapp is an item in the modules top menu</li></ul> experiment-details-view <ul><li>webapp is a tab in the experiment details view</li><li>requires experiment-entity-types to be defined</li></ul> sample-details-view <ul><li>webapp is a tab in the sample details view</li><li>requires sample-entity-types to be defined</li></ul> data-set-details-view <ul><li>webapp is a tab in the data set details view </li><li>requires data-set-entity-types to be defined</li></ul> material-details-view <ul><li>webapp is a tab in the material details view</li><li>requires material-entity-types to be defined</li></ul> Accepts a comma separated list of values with regular expressions, e.g. "modules-menu, .*-details-view"| +|label |The label. It will be shown in the GUI. |String | +|sorting |Sorting of the webapp. Webapps are sorted by "sorting" and "folder name" ascending with nulls last (webapps without sorting are presented last).|Integer | +|experiment-entity-types|Types of experiments the webapp should be displayed for. |Accepts a comma separated list of values with regular expressions, e.g. "TYPE_A_1, TYPE_A_2, TYPE_B_.*" | +|sample-entity-types |Types of samples the webapp should be displayed for. |Accepts a comma separated list of values with regular expressions, e.g. "TYPE_A_1, TYPE_A_2, TYPE_B_.*" | +|data-set-entity-types |Types of data sets the webapp should be displayed for. |Accepts a comma separated list of values with regular expressions, e.g. "TYPE_A_1, TYPE_A_2, TYPE_B_.*" | +|material-entity-types |Types of materials the webapp should be displayed for. |Accepts a comma separated list of values with regular expressions, e.g. "TYPE_A_1, TYPE_A_2, TYPE_B_.*" | + + +#### Creating embedded webapps + +Embedded webapps similar to the standalone counterparts are HTML5 +applications that interact with OpenBIS. Because embedded webapps are +shown inside the OpenBIS UI they have access to additional information +about the context they are displayed in. For instance, a webapp that is +displayed in the experiment-details-view context knows that it is +displayed for an experiment entity, with a given type, identifier and +permid. 
With this information the webapp can adjust itself and display
+only data related to the currently chosen entity. Apart from the entity
+details, a webapp also receives the current sessionId, which can be used
+for calling openBIS JSON RPC services. This way embedded webapps can
+reuse the current session that was created when a user logged in to
+openBIS rather than provide their own login pages for authentication. A
+sample webapp that makes use of this context information is presented
+below:
+
+**webapp.html**
+
+    <html>
+    <head>
+        <!-- include jquery library required by openbis.js -->
+        <script src="/openbis/resources/js/jquery.js"></script>
+        <!-- include openbis library to gain access to the openbisWebAppContext and openbis objects -->
+        <script src="/openbis/resources/js/openbis.js"></script>
+    </head>
+    <body>
+
+        <div id="log"></div>
+
+        <script>
+            $(document).ready(function(){
+
+                // create a context object to access the context information
+                var c = new openbisWebAppContext();
+                $("#log").append("SessionId: " + c.getSessionId() + "<br/>");
+                $("#log").append("EntityKind: " + c.getEntityKind() + "<br/>");
+                $("#log").append("EntityType: " + c.getEntityType() + "<br/>");
+                $("#log").append("EntityIdentifier: " + c.getEntityIdentifier() + "<br/>");
+                $("#log").append("EntityPermId: " + c.getEntityPermId() + "<br/>");
+
+                // create an openBIS facade to call JSON RPC services
+                var o = new openbis();
+
+                // reuse the current sessionId that we received in the context for all the facade calls
+                o.useSession(c.getSessionId());
+
+                // call one of the openBIS facade methods
+                o.listProjects(function(response){
+                    $("#log").append("<br/>Projects:<br/>");
+                    $.each(response.result, function(index, value){
+                        $("#log").append(value.code + "<br/>");
+                    });
+                });
+            });
+        </script>
+    </body>
+    </html>
+
+#### Linking to subtabs of other entity detail views
+
+A link from a webapp to an entity subtab looks like this:
+
+    <a href="#" onclick="window.top.location.hash='#entity=[ENTITY_KIND]&permId=[PERM_ID]&ui-subtab=[SECTION]'; return false;">Link Text</a>
+
+For example:
+
+    <a href="#" onclick="window.top.location.hash='#entity=EXPERIMENT&permId=20140716095938913-1&ui-subtab=webapp-section_test-webapp'; return false;">Experiment webapp</a>
+
+ENTITY\_KIND = 'EXPERIMENT' / 'SAMPLE' / 'DATA\_SET' / 'MATERIAL'
+
+PERM\_ID = Entity permid
+
+SECTION = Subtab identifier.
+
+Notes about subtab identifiers:
+
+- The valid subtab identifiers can be found in
+  ch.systemsx.cisd.openbis.generic.client.web.client.application.framework.DisplayTypeIDGenerator.java
+- Managed property subtab identifiers are of the format
+  'managed\_property\_section\_\[MANAGED\_PROPERTY\_TYPE\_CODE\]'
+- Webapp subtab identifiers are of the format
+  'webapp-section\_\[WEBAPP\_CODE\]' (the webapp code is the name of the
+  webapp core-plugin folder, i.e.
+  \[technology\]/\[version\]/as/webapps/\[WEBAPP\_CODE\])
+
+Cross communication openBIS > DSS
+------------------------------------
+
+### Background
+
+It is sometimes required for a web app started in openBIS to make calls
+to the DSS. This is often needed to upload files or to navigate data
+sets, among other things.
+
+Making calls to a different domain is forbidden by the web security
+sandbox and is a common client-side issue.
+
+To make the clients accept the responses without additional
+configuration by the users, the server should set a special header
+"Access-Control-Allow-Origin" on the response when it is accessed from
+a different domain or port.
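+
+For illustration (this is standard CORS behaviour, not an
+openBIS-specific format), a response that a browser will accept from a
+different origin carries the header like this; the origin shown is a
+placeholder for the web app's URL:
+
+    HTTP/1.1 200 OK
+    Access-Control-Allow-Origin: https://my.openbis.host:8443
+    Content-Type: application/json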
+
+### Default Configuration
+
+This is done automatically by the DSS for any requests coming from well
+known openBIS web apps.
+
+A well known openBIS web app is a web app running under the same URL
+that is configured for openBIS in the DSS service.properties.
+
+**DSS service.properties**
+
+    # The URL of the openBIS server
+    server-url = https://sprint-openbis.ethz.ch:8446
+
+Even if the web app is accessible from other URLs, not using the URL
+configured in the DSS service.properties will lead to the DSS not
+recognizing the app.
+
+As a consequence, the DSS will not set the necessary header and the
+client will reject the responses.
+
+### Basic Configuration
+
+This is required very often in enterprise environments where the
+reachable openBIS URL is not necessarily the one configured in the DSS
+service.properties.
+
+It is possible to add additional URLs by configuring the AS
+service.properties.
+
+**AS service.properties**
+
+    trusted-cross-origin-domains = https://195.176.122.56:8446
+
+The first time the DSS needs to check the valid URLs after a startup,
+it will contact the AS to retrieve the additional trusted domain list.
+
+### Advanced Configuration
+
+A very typical approach is to run both the AS and DSS on the same port
+using a reverse proxy like Apache or NGINX. This way the web security
+sandbox is respected. In this case the "Access-Control-Allow-Origin"
+header is unnecessary and everything works out of the box.
+
+Even with this configuration, it sometimes happens that a web app calls
+the DSS using an auto-detected URL given by openBIS. This auto-detected
+URL does not necessarily respect your proxy configuration and may give
+a different port or hostname for the DSS.
+
+In this case you will need to solve the problem with one of the methods
+explained above or modify your web app.
+
+Embedding openBIS Grids in Web Apps
+-----------------------------------
+
+Users of openBIS will have encountered the advanced and powerful table
+views used in the application. These views allow for sorting and
+filtering. It is possible to take advantage of these views in custom web
+UIs.
+
+### Requirements
+
+It is possible to use openBIS table views in a web UI when the data for
+the table comes from an aggregation service. The parameters to the
+aggregation service are passed as URL query parameters, thus an
+additional requirement is that all the aggregation service parameters
+can be passed this way. A final requirement is that the web UI be
+exposed as an embedded webapp (this is necessary because of the way
+openBIS keeps track of the user of the system). If these requirements
+are met, then it is possible to embed an openBIS table view displaying
+the aggregation service data in a web UI.
+
+### Use
+
+To embed a table view, add an iframe to the web UI. The URL of the
+iframe should have the following form:
+
+    {openbis url}?viewMode=GRID#action=AGGREGATION_SERVICE&serviceKey={aggregation service key}&dss={data store server code}[&gridSettingsId][&gridHeaderText][&service parameters]
+
+Parameters:
+
+|Parameter|Description|Required|
+|--- |--- |--- |
+|serviceKey|An aggregation service that will be used for generating the data for the grid.|true|
+|dss|A code of a data store that will be used for generating the data for the grid.|true|
+|gridSettingsId|An identifier of the grid that will be used for storing the grid settings (visibility of columns, sorting, filters etc.). If not specified then the serviceKey parameter is used.|false|
+|gridHeaderText|A header of the grid.
If not specified then the header is not shown.|false|
+
+Example:
+
+[http://localhost:8888/openbis-test/index.html?viewMode=GRID\#action=AGGREGATION\_SERVICE&serviceKey=sp-233&dss=standard&gridSettingsId=myTestGridSettingsId&gridHeaderText=myTestGridHeaderText&name=hello](http://localhost:8888/openbis-test/index.html?viewMode=GRID#action=AGGREGATION_SERVICE&serviceKey=sp-233&dss=standard&gridSettingsId=myTestGridSettingsId&gridHeaderText=myTestGridHeaderText&name=hello)
+
+Full Example
+
+    <!DOCTYPE html>
+    <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US" lang="en-US">
+    <head>
+        <meta http-equiv="content-type" content="text/html; charset=utf-8" />
+        <title>Embedded Grid Example</title>
+    </head>
+    <body>
+        <iframe src="http://localhost:8888/openbis-test/index.html?viewMode=GRID#action=AGGREGATION_SERVICE&serviceKey=sp-233&dss=standard&gridSettingsId=myTestGridSettingsId&gridHeaderText=myTestGridHeaderText&name=hello" width="100%" height="95%" style="border: none"></iframe>
+    </body>
+    </html>
+
+Image Viewer component
+----------------------
+
+Image viewer screenshot:
+
+Example usage of the image viewer component:
+
+    <!DOCTYPE html>
+    <html>
+    <head>
+        <meta charset="utf-8">
+        <title>Image Viewer Example</title>
+
+        <link rel="stylesheet" href="/openbis/resources/lib/bootstrap/css/bootstrap.min.css">
+        <link rel="stylesheet" href="/openbis/resources/lib/bootstrap-slider/css/bootstrap-slider.min.css">
+        <link rel="stylesheet" href="/openbis/resources/components/imageviewer/css/image-viewer.css">
+
+        <script type="text/javascript" src="/openbis/resources/config.js"></script>
+        <script type="text/javascript" src="/openbis/resources/require.js"></script>
+    </head>
+    <body>
+        <script>
+            // ask for the jquery library, the openbis-screening facade and the image viewer component
+            require([ "jquery", "openbis-screening", "components/imageviewer/ImageViewerWidget" ], function($, openbis, ImageViewerWidget) {
+
+                $(document).ready(function() {
+                    var facade = new openbis();
+                    facade.login("admin", "password", function(response) {
+
+                        // create the image viewer component for the specific data sets
+                        var widget = new ImageViewerWidget(facade, [ "20140513145946659-3284", "20140415140347875-53", "20140429125231346-56",
+                                "20140429125614418-59", "20140506132344798-146" ]);
+
+                        // do the customization once the component is loaded
+                        widget.addLoadListener(function() {
+                            var view = widget.getDataSetChooserWidget().getView();
+
+                            // example of how to customize a widget
+                            view.getDataSetText = function(dataSetCode) {
+                                return "My data set: " + dataSetCode;
+                            };
+
+                            // example of how to add a change listener to a widget
+                            widget.getDataSetChooserWidget().addChangeListener(function(event) {
+                                console.log("data set changed from: " + event.getOldValue() + " to: " + event.getNewValue());
+                            });
+                        });
+
+                        // render the component and add it to the page
+                        $("#container").append(widget.render());
+                    });
+                });
+            });
+        </script>
+
+        <div id="container" style="padding: 20px"></div>
+    </body>
+    </html>
diff --git a/docs/software-developer-documentation/development-environment/architectural-overview.md b/docs/software-developer-documentation/development-environment/architectural-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..dc65bec12a58b485e18016f359e669698718410b --- /dev/null +++ b/docs/software-developer-documentation/development-environment/architectural-overview.md @@ -0,0 +1,4 @@
+Architectural Overview
+======================
+
+hello world
\ No newline at end of file
diff --git
a/docs/software-developer-documentation/development-environment/index.rst b/docs/software-developer-documentation/development-environment/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..5e790e68677c58b293929ac041c57b11eacf08a1 --- /dev/null +++ b/docs/software-developer-documentation/development-environment/index.rst @@ -0,0 +1,9 @@
+Development Environment
+=======================
+
+.. toctree::
+   :maxdepth: 4
+
+   system-requirements
+   architectural-overview
+   installation-and configuration-guide
\ No newline at end of file
diff --git a/docs/software-developer-documentation/development-environment/installation-and configuration-guide.md b/docs/software-developer-documentation/development-environment/installation-and configuration-guide.md new file mode 100644 index 0000000000000000000000000000000000000000..6cfd30cd4b087f93740f2c4134a6e47bd72564d1 --- /dev/null +++ b/docs/software-developer-documentation/development-environment/installation-and configuration-guide.md @@ -0,0 +1,4 @@
+Installation And Configuration Guide
+====================================
+
+hello world
\ No newline at end of file
diff --git a/docs/software-developer-documentation/development-environment/system-requirements.md b/docs/software-developer-documentation/development-environment/system-requirements.md new file mode 100644 index 0000000000000000000000000000000000000000..a1deff5b1a8c9c36173da30ec70e70c9f91c0fb9 --- /dev/null +++ b/docs/software-developer-documentation/development-environment/system-requirements.md @@ -0,0 +1,4 @@
+System Requirements
+===================
+
+hello world
\ No newline at end of file
diff --git a/docs/software-developer-documentation/legacy-server-side-extensions/custom-import.md b/docs/software-developer-documentation/legacy-server-side-extensions/custom-import.md new file mode 100644 index 0000000000000000000000000000000000000000..8cc4d6d15081135868ad5aed30b0ce5201506431 --- /dev/null +++ b/docs/software-developer-documentation/legacy-server-side-extensions/custom-import.md @@ -0,0 +1,60 @@
+Custom Import
+=============
+
+### Introduction
+
+`Custom Import` is a feature designed to give web users a chance to
+import a file via `Jython Dropboxes`.
+
+### Usage
+
+To upload a file via `Custom Import`, the user should
+choose `Import -> Custom Import` in the openBIS top menu. The
+`Custom Import` tab will be opened, and the user will get a combo box
+filled with the list of configured imports. After selecting the desired
+`Custom Import`, the user will be asked to select a file. After
+selecting a file and clicking the `Save` button, the import will start.
+The user should be aware that the import is done in a synchronous way;
+sometimes it might take a while to import data (it depends on the
+dropbox code).
+
+If a template file has been configured, a download link will appear. The
+downloaded template file can be used to create the file to be imported.
+
+### Configuration
+
+To be able to use the `Custom Import` functionality, an AS [core
+plugin](/display/openBISDoc2010/Core+Plugins) of type
+custom-imports is needed.
The `plugin.properties` file of each plugin has several
+parameters:
+
+|parameter name|description|
+|--- |--- |
+|name|The value of this parameter will be used as the name of the Custom Import in the web UI.|
+|dss-code|This parameter needs to specify the code of the data store server running the dropbox which should be used by the Custom Import.|
+|dropbox-name|The value is the name of the dropbox that is used by the Custom Import.|
+|description|Specifies a description of the Custom Import. The description is shown as a tooltip in the web UI.|
+|template-entity-kind|Custom import templates are represented in openBIS as entity attachments. To make a given file available as a custom import template, create an attachment with this file and refer to this attachment with the template-entity-kind, template-entity-permid, and template-attachment-name parameters, where: template-entity-kind is the kind of the entity the attachment has been added to (allowed values: PROJECT, EXPERIMENT, SAMPLE), template-entity-permid is the perm id of that entity and template-attachment-name is the file name of the attachment.|
+|template-entity-permid| |
+|template-attachment-name| |
+
+#### Example configuration
+
+**plugin.properties**
+
+    name = Example custom import
+    dss-code = DSS1
+    dropbox-name = jython-dropbox-1
+    description = This is an example custom import
+    template-entity-kind = PROJECT
+    template-entity-permid = 20120814111307034-82319
+    template-attachment-name = project_custom_import_template.txt
+
+The dropbox needs to be defined on the DSS side as an `RPC dropbox`:
+
+**service.properties**
+
+    dss-rpc.put.<DATA_SET_TYPE> = jython-dropbox-1
\ No newline at end of file
diff --git a/docs/software-developer-documentation/legacy-server-side-extensions/index.rst b/docs/software-developer-documentation/legacy-server-side-extensions/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..2066db32b9fb633fe0520410f427d3a1c816c69b --- /dev/null +++ b/docs/software-developer-documentation/legacy-server-side-extensions/index.rst @@ -0,0 +1,10 @@
+Legacy Server-Side Extensions
+=============================
+
+.. toctree::
+   :maxdepth: 4
+
+   custom-import
+   processing-plugins
+   reporting-plugins
+   search-domain-services
\ No newline at end of file
diff --git a/docs/software-developer-documentation/legacy-server-side-extensions/processing-plugins.md b/docs/software-developer-documentation/legacy-server-side-extensions/processing-plugins.md new file mode 100644 index 0000000000000000000000000000000000000000..063ce7e45860e153cf42052a3e16f1e7751d1246 --- /dev/null +++ b/docs/software-developer-documentation/legacy-server-side-extensions/processing-plugins.md @@ -0,0 +1,247 @@
+Processing Plugins
+==================
+
+## Introduction
+
+A processing plugin runs on the DSS. It processes a specified set of
+data sets. The user can trigger a processing plugin in the openBIS Web
+application. After processing, an e-mail is sent to the user.
+
+A processing plugin is best configured on the DSS by introducing a [core
+plugin](/display/openBISDoc2010/Core+Plugins) of type
+`processing-plugins`. All processing plugins have the following
+properties in common:
+
+|Property Key|Description|
+|--- |--- |
+|class|The fully-qualified Java class name of the processing plugin. The class has to implement IProcessingPluginTask.|
+|label|The label. It will be shown in the GUI.|
+|dataset-types|Comma-separated list of regular expressions. The plugin can process only data sets of types matching one of the regular expressions. If new data set types are registered with openBIS, the DSS will need to be restarted before the new data set types are known to the processing plugins.|
+|properties-file|Path to an optional file with additional properties.|
+|allowed-api-parameter-classes|A comma-separated list of regular expressions for fully-qualified class names. Any class matching one of the regular expressions is allowed as a class of a Java parameter object of a remote API call. For more details see API Security.|
+|disallowed-api-parameter-classes|A comma-separated list of regular expressions for fully-qualified class names. Any class matching one of the regular expressions is not allowed as a class of a Java parameter object of a remote API call. For more details see API Security.|
+
+## Multiple Processing Queues
+
+By default only one processing plugin task is processed at a time. All
+other scheduled tasks have to wait in a queue. This can be inconvenient
+if there is a mixture of long tasks (taking hours or even days) and
+short tasks (taking only seconds or minutes).
+
+The DSS can be configured to run more than one processing queue. Each
+queue (except the default one) has a name (which also appears in the log
+file). A regular expression is also associated with the queue. When a
+processing plugin task is scheduled, the appropriate queue is selected
+by the ID of the processing plugin (this is either a name in the
+property `processing-plugins` of `service.properties` of the DSS or the
+name of the core-plugin folder). If the ID matches the regular
+expression, the task is added to the corresponding queue. If none of the
+regular expressions matches, the default queue is used.
+
+The queues have to be specified by the
+property `data-set-command-queue-mapping`. It contains a comma-separated
+list of queue definitions. Each definition has the form
+
+`<queue name>:<regular expression>`
+
+For example, the (hypothetical) mapping `slow:copy-.*` would route all
+processing plugins whose IDs start with `copy-` to a queue named `slow`.
+
+### Archiving
+
+If archiving is enabled (i.e. `archiver.class` in `service.properties`
+of the DSS is defined or a core-plugin of type `miscellaneous` with
+ID `archiver` is defined), there will be three processing plugins with
+the following IDs: `Archiving`, `Copying data sets to archive`, and
+`Unarchiving`.
+
+## Generic Processing Plugins
+
+### RevokeLDAPUserAccessMaintenanceTask
+
+**NOTE: This Maintenance Task should only be used if the server uses
+LDAP only; it will treat users from other authentication services as
+missing.**
+
+**Description**: Renames, deactivates, and deletes all roles from users
+that are no longer available on LDAP, following the algorithm below.
+
+- Grabs all active users.
+- Users that meet all of the following criteria are
+  renamed to userId-YYYY.MM.DD and deactivated:
+    - Are not a system user.
+    - Don't have the ETL\_SERVER role.
+    - Don't have an LDAP principal.
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|server-url|LDAP server URL.|
+|security-principal-distinguished-name|LDAP principal distinguished name.|
+|security-principal-password|LDAP principal password.|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.generic.server.task.RevokeLDAPUserAccessMaintenanceTask
+    interval = 60 s
+    server-url = ldap://d.ethz.ch/DC=d,DC=ethz,DC=ch
+    security-principal-distinguished-name = CN=cisd-helpdesk,OU=EthUsers,DC=d,DC=ethz,DC=ch
+    security-principal-password = ******
+
+### DataSetCopier
+
+**Description**: Copies all files of the specified data sets to another
+(remote) folder. The actual copying is done by the rsync command.
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|destination|Path to the destination folder. This can be a path to a local/mounted folder or to a remote folder accessible via SSH. In this case the name of the host has to appear as a prefix. General syntax: [<host>:][<rsync module>:]<path>|
+|hard-link-copy|If true hard links are created for each file of the data sets. This works only if the share which stores the data set is in the same local file system as the destination folder. Default: false.|
+|rename-to-dataset-code|If true the copied data set will be renamed to the data set code. Default: false.|
+|rsync-executable|Optional path to the executable command rsync.|
+|rsync-password-file|Path to the rsync password file. It is only needed if an rsync module is used.|
+|ssh-executable|Optional path to the executable command ssh. SSH is only needed for not-mounted folders which are accessible via SSH.|
+|ln-executable|Optional path to the executable command ln. The ln command is only needed when hard-link-copy = true.|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.DataSetCopier
+    label = Copy to analysis incoming folder
+    dataset-types = MS_DATA, UNKNOWN
+    destination = analysis-server:analysis-incoming-data
+    rename-to-dataset-code = true
+
+### DataSetCopierForUsers
+
+**Description**: Copies all files of the specified data sets to a
+(remote) user folder. The actual copying is done by the rsync command.
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|destination|Path template to the destination folder. It should contain ${user} as a placeholder for the user ID. The path can point to a local/mounted folder or to a remote folder accessible via SSH. In this case the name of the host has to appear as a prefix. General syntax: [<host>:][<rsync module>:]<path>|
+|hard-link-copy|If true hard links are created for each file of the data sets. This works only if the share which stores the data set is in the same local file system as the destination folder. Default: false.|
+|rename-to-dataset-code|If true the copied data set will be renamed to the data set code. Default: false.|
+|rsync-executable|Optional path to the executable command rsync.|
+|rsync-password-file|Path to the rsync password file. It is only needed if an rsync module is used.|
+|ssh-executable|Optional path to the executable command ssh. SSH is only needed for not-mounted folders which are accessible via SSH.|
+|ln-executable|Optional path to the executable command ln.
The ln command is only needed when hard-link-copy = true.|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.DataSetCopierForUsers
+    label = Copy to user playground
+    dataset-types = MS_DATA, UNKNOWN
+    destination = tmp/playground/${user}/data-sets
+    hard-link-copy = true
+    rename-to-dataset-code = true
+
+### JythonBasedProcessingPlugin
+
+**Description**: Invokes a Jython script to do the processing. For more
+details see [Jython-based Reporting and Processing
+Plugins](/display/openBISDoc2010/Jython-based+Reporting+and+Processing+Plugins).
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|script-path|Path to the jython script.|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.jython.JythonBasedProcessingPlugin
+    label = Calculate some numbers
+    dataset-types = MS_DATA, UNKNOWN
+    script-path = script.py
+
+### ReportingBasedProcessingPlugin
+
+**Description**: Runs a Jython-based reporting plugin of type
+TABLE\_MODEL and sends the result table as a TSV file to the user.
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|script-path|Path to the jython script.|
+|single-report|If true only one report will be sent. Otherwise a report for each data set will be sent. Default: false|
+|email-subject|Subject of the e-mail to be sent. Default: None|
+|email-body|Body of the e-mail to be sent. Default: None|
+|attachment-name|Name of the attached TSV file. Default: report.txt|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.jython.ReportingBasedProcessingPlugin
+    label = Create monthly report
+    dataset-types = MS_DATA, UNKNOWN
+    script-path = script.py
+    email-subject = DSS Monthly Report
+
+### DataSetAndPathInfoDBConsistencyCheckProcessingPlugin
+
+**Description**: This processing task checks the consistency between the
+data store and the meta information stored in the `PathInfoDB`. It will
+check for:
+
+- existence (i.e. exists in PathInfoDB but not on file system or
+  exists on file system but not in PathInfoDB)
+- file size
+- CRC32 checksum
+
+If it finds any deviations, it will send out an email which contains all
+differences found.
+
+**Configuration**: Properties common for all processing plugins (see
+Introduction)
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.DataSetAndPathInfoDBConsistencyCheckProcessingPlugin
+    label = Check consistency between data store and path info database
+    dataset-types = .*
+
+## Screening Processing Plugins
+
+### ScreeningReportingBasedProcessingPlugin
+
+**Description**: Runs a Jython-based reporting plugin of type
+TABLE\_MODEL and sends the result table as a TSV file to the user. There
+is some extra support for screening.
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|script-path|Path to the jython script.|
+|single-report|If true only one report will be sent. Otherwise a report for each data set will be sent. Default: false|
+|email-subject|Subject of the e-mail to be sent. Default: None|
+|email-body|Body of the e-mail to be sent. Default: None|
+|attachment-name|Name of the attached TSV file.
Default: report.txt|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.screening.server.plugins.jython.ScreeningReportingBasedProcessingPlugin
+    label = Create monthly report
+    dataset-types = HCS_IMAGE
+    script-path = script.py
+    email-subject = DSS Monthly Report
\ No newline at end of file
diff --git a/docs/software-developer-documentation/legacy-server-side-extensions/reporting-plugins.md b/docs/software-developer-documentation/legacy-server-side-extensions/reporting-plugins.md new file mode 100644 index 0000000000000000000000000000000000000000..fd962f4f88eaea860f6ba42471ccf3fa37422d82 --- /dev/null +++ b/docs/software-developer-documentation/legacy-server-side-extensions/reporting-plugins.md @@ -0,0 +1,449 @@
+Reporting Plugins
+=================
+
+Introduction
+------------
+
+A reporting plugin runs on the DSS. It creates a report as a table or a
+URL for a specified set of data sets or key-value pairs. The user can
+invoke a reporting plugin in the openBIS Web application. The result
+will be shown as a table or a link.
+
+A reporting plugin is one of the three following types. The differences
+are the type of input and output:
+
+- TABLE\_MODEL: *Input*: A set of data sets. *Output*: A table
+- DSS\_LINK: *Input*: One data set. *Output*: A URL
+- AGGREGATION\_TABLE\_MODEL: *Input*: A set of key-value pairs.
+  *Output*: A table
+
+A reporting plugin is best configured on the DSS by introducing a [core
+plugin](/display/openBISDoc2010/Core+Plugins) of type
+`reporting-plugins`. All reporting plugins have the following properties
+in common:
+
+|Property Key|Description|
+|--- |--- |
+|class|The fully-qualified Java class name of the reporting plugin. The class has to implement IReportingPluginTask.|
+|label|The label. It will be shown in the GUI.|
+|dataset-types|Comma-separated list of regular expressions. The plugin can create a report only for the data sets of types matching one of the regular expressions. If new data set types are registered with openBIS, the DSS will need to be restarted before the new data set types are known to the reporting plugins. This is a mandatory property for reporting plugins of type TABLE_MODEL and DSS_LINK. It will be ignored if the type is AGGREGATION_TABLE_MODEL.|
+|properties-file|Path to an optional file with additional properties.|
+|servlet.<property>|Properties for an optional servlet. It provides resources referred to by URLs in the output of the reporting plugin. This should be used if the servlet is only needed by this reporting plugin. If other plugins also need this servlet it should be configured as a core plugin of type services.|
+|allowed-api-parameter-classes|A comma-separated list of regular expressions for fully-qualified class names. Any class matching one of the regular expressions is allowed as a class of a Java parameter object of a remote API call. For more details see API Security.|
+|disallowed-api-parameter-classes|A comma-separated list of regular expressions for fully-qualified class names. Any class matching one of the regular expressions is not allowed as a class of a Java parameter object of a remote API call.
For more details see API Security.|
+
+Generic Reporting Plugins
+-------------------------
+
+### DecoratingTableModelReportingPlugin
+
+**Type**: TABLE\_MODEL
+
+**Description**: Modifies the output of a reporting plugin of type
+TABLE\_MODEL.
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|reporting-plugin.class|The fully-qualified Java class name of the wrapped reporting plugin of type TABLE_MODEL|
+|reporting-plugin.<property>|Property of the wrapped reporting plugin.|
+|transformation.class|The fully-qualified Java class name of the transformation. It has to implement ITableModelTransformation.|
+|transformation.<property>|Property of the transformation to be applied.|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.DecoratingTableModelReportingPlugin
+    label = Analysis Summary
+    dataset-types = HCS_IMAGE_ANALYSIS_DATA
+    reporting-plugin.class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.TSVViewReportingPlugin
+    reporting-plugin.separator = ,
+    transformation.class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.EntityLinksDecorator
+    transformation.link-columns = BARCODE, GENE
+    transformation.BARCODE.entity-kind = SAMPLE
+    transformation.BARCODE.default-space = DEMO
+    transformation.GENE.entity-kind = MATERIAL
+    transformation.GENE.material-type = GENE
+
+##### Transformations
+
+###### EntityLinksDecorator
+
+**Description**: Changes plain columns into entity links.
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|link-columns|Comma-separated list of column keys.|
+|<column key>.entity-kind|Entity kind of column <column key>. Possible values are MATERIAL and SAMPLE.|
+|<column key>.default-space|Optional space code for SAMPLE columns. It will be used if the column value contains only the sample code.|
+|<column key>.material-type|Mandatory type code for MATERIAL columns.|
+
+### GenericDssLinkReportingPlugin
+
+**Type**: DSS\_LINK
+
+**Description**: Creates a URL for a file inside the data set.
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|download-url|Base URL. Contains protocol, domain, and port.|
+|data-set-regex|Optional regular expression which specifies the file.|
+|data-set-path|Optional relative path in the data set to narrow down the search.|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.GenericDssLinkReportingPlugin
+    label = Summary
+    dataset-types = MS_DATA
+    download-url = https://my.domain.org:8443
+    data-set-regex = summary.*
+    data-set-path = report
+
+### AggregationService
+
+**Important Note on Authorization**
+
+In AggregationServices and IngestionServices, the service programmers
+need to ensure proper authorization themselves. They can do so by using
+the methods from
+[IAuthorizationService](http://svnsis.ethz.ch/doc/openbis/current/ch/systemsx/cisd/openbis/dss/generic/shared/api/internal/authorization/IAuthorizationService.html).
+The user id, which is needed when calling these methods, can be obtained
+from `DataSetProcessingContext` (when using Java), or the
+variable `userId` (when using Jython).
+
+**Type:** AGGREGATION\_TABLE\_MODEL
+
+**Description**: An abstract superclass for aggregation service
+reporting plugins. An aggregation service reporting plugin takes a hash
+map containing user parameters as an argument and returns tabular data
+(in the form of a TableModel).
The
+JythonAggregationService below is a subclass that
+allows for implementation of the logic in Jython.
+
+**Configuration**: Dependent on the subclass.
+
+To implement an aggregation service in Java, define a subclass
+of `ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.AggregationService`.
+This subclass must implement the method
+
+    TableModel createReport(Map<String, Object>, DataSetProcessingContext)
+
+**Example**:
+
+**ExampleAggregationServicePlugin**
+
+    package ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard;
+
+    import java.io.File;
+    import java.util.Map;
+    import java.util.Properties;
+
+    import ch.systemsx.cisd.openbis.dss.generic.shared.DataSetProcessingContext;
+    import ch.systemsx.cisd.openbis.generic.shared.basic.dto.TableModel;
+    import ch.systemsx.cisd.openbis.generic.shared.util.IRowBuilder;
+    import ch.systemsx.cisd.openbis.generic.shared.util.SimpleTableModelBuilder;
+
+    /**
+     * @author Chandrasekhar Ramakrishnan
+     */
+    public class ExampleAggregationServicePlugin extends AggregationService
+    {
+        private static final long serialVersionUID = 1L;
+
+        /**
+         * Create a new plugin.
+         *
+         * @param properties
+         * @param storeRoot
+         */
+        public ExampleAggregationServicePlugin(Properties properties, File storeRoot)
+        {
+            super(properties, storeRoot);
+        }
+
+        @Override
+        public TableModel createReport(Map<String, Object> parameters, DataSetProcessingContext context)
+        {
+            SimpleTableModelBuilder builder = new SimpleTableModelBuilder(true);
+            builder.addHeader("String");
+            builder.addHeader("Integer");
+            IRowBuilder row = builder.addRow();
+            row.setCell("String", "Hello");
+            row.setCell("Integer", 20);
+            row = builder.addRow();
+            row.setCell("String", parameters.get("name").toString());
+            row.setCell("Integer", 30);
+            return builder.getTableModel();
+        }
+    }
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.ExampleAggregationServicePlugin
+    label = My Report
+
+#### JythonAggregationService
+
+**Type:** AGGREGATION\_TABLE\_MODEL
+
+**Description**: Invokes a Jython script to create an aggregation
+service report. For more details see [Jython-based Reporting and
+Processing
+Plugins](/display/openBISDoc2010/Jython-based+Reporting+and+Processing+Plugins).
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|script-path|Path to the jython script.|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.jython.JythonAggregationService
+    label = My Report
+    script-path = script.py
+
+### IngestionService
+
+**Type:** AGGREGATION\_TABLE\_MODEL
+
+**Description**: An abstract superclass for aggregation service
+reporting plugins that modify entities in the database. A db-modifying
+aggregation service reporting plugin takes a hash map containing user
+parameters and a transaction as arguments and returns tabular data (in
+the form of a TableModel). The transaction is an
+[IDataSetRegistrationTransactionV2](http://svnsis.ethz.ch/doc/openbis/current/ch/systemsx/cisd/etlserver/registrator/api/v2/IDataSetRegistrationTransactionV2.html),
+the same interface that is used by
+[dropboxes](/display/openBISDoc2010/Dropboxes) to register and modify
+entities. The JythonIngestionService below is a subclass that allows for
+implementation of the logic in Jython.
+
+**Configuration**: Dependent on the subclass.
+
+To implement an ingestion service in Java, define a subclass
+of `ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.IngestionService`.
+This subclass must implement the method
+
+    TableModel process(IDataSetRegistrationTransactionV2 transaction, Map<String, Object> parameters, DataSetProcessingContext context)
+
+**Example**:
+
+**ExampleDbModifyingAggregationService.java**
+
+    package ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard;
+
+    import java.io.File;
+    import java.util.Map;
+    import java.util.Properties;
+
+    import ch.systemsx.cisd.etlserver.registrator.api.v2.IDataSetRegistrationTransactionV2;
+    import ch.systemsx.cisd.openbis.dss.generic.shared.DataSetProcessingContext;
+    import ch.systemsx.cisd.openbis.dss.generic.shared.dto.DataSetInformation;
+    import ch.systemsx.cisd.openbis.generic.shared.basic.dto.TableModel;
+    import ch.systemsx.cisd.openbis.generic.shared.util.IRowBuilder;
+    import ch.systemsx.cisd.openbis.generic.shared.util.SimpleTableModelBuilder;
+
+    /**
+     * An example aggregation service
+     *
+     * @author Chandrasekhar Ramakrishnan
+     */
+    public class ExampleDbModifyingAggregationService extends IngestionService<DataSetInformation>
+    {
+        private static final long serialVersionUID = 1L;
+
+        /**
+         * @param properties
+         * @param storeRoot
+         */
+        public ExampleDbModifyingAggregationService(Properties properties, File storeRoot)
+        {
+            super(properties, storeRoot);
+        }
+
+        @Override
+        public TableModel process(IDataSetRegistrationTransactionV2 transaction,
+                Map<String, Object> parameters, DataSetProcessingContext context)
+        {
+            transaction.createNewSpace("NewDummySpace", null);
+            SimpleTableModelBuilder builder = new SimpleTableModelBuilder(true);
+            builder.addHeader("String");
+            builder.addHeader("Integer");
+            IRowBuilder row = builder.addRow();
+            row.setCell("String", "Hello");
+            row.setCell("Integer", 20);
+            row = builder.addRow();
+            row.setCell("String", parameters.get("name").toString());
+            row.setCell("Integer", 30);
+            return builder.getTableModel();
+        }
+    }
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.ExampleDbModifyingAggregationService
+    label = My Report
+
+#### JythonIngestionService
+
+**Type:** AGGREGATION\_TABLE\_MODEL
+
+**Description**: Invokes a Jython script to register and modify entities
+and create an aggregation service report. The script receives a
+transaction as an argument. For more details see [Jython-based Reporting
+and Processing
+Plugins](/display/openBISDoc2010/Jython-based+Reporting+and+Processing+Plugins).
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|script-path|Path to the jython script.|
+|share-id|Optional, defaults to 1 when not stated otherwise.|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.jython.JythonIngestionService
+    label = My Report
+    script-path = script.py
+
+### JythonBasedReportingPlugin
+
+**Type:** TABLE\_MODEL
+
+**Description**: Invokes a Jython script to create the report. For more
+details see [Jython-based Reporting and Processing
+Plugins](/display/openBISDoc2010/Jython-based+Reporting+and+Processing+Plugins).
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|script-path|Path to the jython script.|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.jython.JythonBasedReportingPlugin
+    label = My Report
+    dataset-types = MS_DATA, UNKNOWN
+    script-path = script.py
+
+### TSVViewReportingPlugin
+
+**Type:** TABLE\_MODEL
+
+**Description**: Presents the main data set file as a table. The main
+file is specified by the Main Data Set Pattern and the Main Data Set
+Path of the data set type. The file can be a CSV/TSV file or an Excel
+file. This reporting plugin works only for one data set.
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|separator|Separator character. This property will be ignored if the file is an Excel file. Default: TAB character|
+|ignore-comments|If true all rows starting with '#' will be ignored. Default: true|
+|ignore-trailing-empty-cells|If true trailing empty cells will be ignored. Default: false|
+|excel-sheet|Name or index of the Excel sheet used. This property will only be used if the file is an Excel file. Default: 0|
+|transpose|If true transpose the original table, that is exchange rows with columns. Default: false|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.TSVViewReportingPlugin
+    label = My Report
+    dataset-types = MS_DATA, UNKNOWN
+    separator = ;
+
+Screening Reporting Plugins
+---------------------------
+
+### ScreeningJythonBasedAggregationServiceReportingPlugin
+
+**Type:** AGGREGATION\_TABLE\_MODEL
+
+**Description**: Invokes a Jython script to create an aggregation
+service report. For more details see [Jython-based Reporting and
+Processing
+Plugins](/display/openBISDoc2010/Jython-based+Reporting+and+Processing+Plugins).
+There is some extra support for screening.
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|script-path|Path to the jython script.|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.screening.server.plugins.jython.ScreeningJythonBasedAggregationServiceReportingPlugin
+    label = My Report
+    script-path = script.py
+
+### ScreeningJythonBasedDbModifyingAggregationServiceReportingPlugin
+
+**Type:** AGGREGATION\_TABLE\_MODEL
+
+**Description**: Invokes a Jython script to register and modify entities
+and create an aggregation service report. The screening-specific version
+has access to the screening facade for queries to the imaging database
+and is given a screening transaction that supports registering plate
+images and feature vectors. For more details see [Jython-based Reporting
+and Processing
+Plugins](/display/openBISDoc2010/Jython-based+Reporting+and+Processing+Plugins).
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|script-path|Path to the jython script.|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.screening.server.plugins.jython.ScreeningJythonBasedDbModifyingAggregationServiceReportingPlugin
+    label = My Report
+    script-path = script.py
+
+### ScreeningJythonBasedReportingPlugin
+
+**Type:** TABLE\_MODEL
+
+**Description**: Invokes a Jython script to create the report. For more
+details see [Jython-based Reporting and Processing
+Plugins](/display/openBISDoc2010/Jython-based+Reporting+and+Processing+Plugins).
+There is some extra support for screening.
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|script-path|Path to the jython script.|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.screening.server.plugins.jython.ScreeningJythonBasedReportingPlugin
+    label = My Report
+    dataset-types = HCS_IMAGE
+    script-path = script.py
\ No newline at end of file
diff --git a/docs/software-developer-documentation/legacy-server-side-extensions/search-domain-services.md b/docs/software-developer-documentation/legacy-server-side-extensions/search-domain-services.md new file mode 100644 index 0000000000000000000000000000000000000000..80bc9f8f467eacee73aa0b467e52ff22b60a90c9 --- /dev/null +++ b/docs/software-developer-documentation/legacy-server-side-extensions/search-domain-services.md @@ -0,0 +1,120 @@
+Search Domain Services
+======================
+
+A search domain service is a DSS plugin which allows querying
+domain-specific search services, for example a search service on a
+database of nucleic acid sequences. Currently only one search service
+is supported: searching local BLAST databases for nucleotide and/or
+protein sequences.
+
+## Configuring a Service
+
+To configure a service, a
+[core-plugin](/display/openBISDoc2010/Core+Plugins) of
+type `search-domain-services` has to be created. The minimum
+configuration for `plugin.properties` reads:
+
+|Property Key|Description|
+|--- |--- |
+|class|Fully qualified name of a Java class implementing ch.systemsx.cisd.openbis.dss.generic.shared.api.internal.v2.ISearchDomainService|
+|label|The label. Can be used in user interfaces.|
+
+## Querying a Service
+
+Search domain services can be accessed via `IGeneralInformationService`.
+The method `listAvailableSearchDomains` returns all available services.
+
+A service can be queried by the method `searchOnSearchDomain`. Besides
+the `sessionToken`, it has the following parameters:
+
+- `preferredSearchDomainOrNull`: This can be `null` if there is only
+  one service configured. Otherwise the name of the core-plugin
+  specifies the preferred service. If no such service has been
+  configured or it isn't available, the first available service will
+  be used. If there is no available service, the search will return an
+  empty list.
+- `searchString`: This is the string to search for.
+- `optionalParametersOrNull`: This is a map of string-string key-value
+  pairs of optional parameters. Can be `null`. The semantics of these
+  parameters depend on the used service.
+
+The method returns a list of `SearchDomainSearchResult` instances which
+contain the following attributes: a description of the search domain
+(class `SearchDomain`), the location
+(interface `ISearchDomainResultLocation`), and a score. The result list
+is sorted by score in descending order. The location has information
+about where the sequence is stored in openBIS and where it matches the
+search string.
+
+## Service Implementations
+
+### BlastDatabase
+
+**Description**: This implementation requires the
+[BLAST+](http://blast.ncbi.nlm.nih.gov/Blast.cgi) tools. The latest
+versions can be downloaded from
+[here](ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/LATEST/).
+Note that this service is only available if the BLAST+ tools have been
+installed. Only the tools `blastn` (for nucleotide search) and `blastp`
+(for protein search) are used.
+
+In order to build up a local BLAST database, the maintenance task
+[BlastDatabaseCreationMaintenanceTask](/display/openBISDoc2010/Maintenance+Tasks#MaintenanceTasks-BlastDatabaseCreationMaintenanceTask)
+has to be configured.
+
+Because the maintenance task that creates the BLAST databases often
+runs only once per day, a change in entity properties or a registration
+of data sets will not immediately be reflected in the search results.
+That is, new sequences aren't found and changed/deleted sequences are
+still found.
+
+**Configuration**:
+
+|Property Key|Description|
+|--- |--- |
+|blast-tools-directory|Path to the directory with the BLAST+ command line tools. If defined it will be prepended to the commands blastn and blastp. If undefined it is assumed that the path is in the PATH environment variable.|
+|blast-databases-folder|Path to the folder where all BLAST databases are stored. Default: <data store root>/blast-databases|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.api.v2.sequencedatabases.BlastDatabase
+    label = BLAST database
+
+#### Optional Query Parameters
+
+The following optional query parameters (i.e. the service method
+parameter `optionalParametersOrNull` as described above) are understood
+and used as command line parameters of the BLAST+ tools:
+
+|Name|Description|
+|--- |--- |
+|`evalue`|Defines the threshold of the so-called "Expect Value" of found matches (for details see http://www.ncbi.nlm.nih.gov/blast/Blast.cgi?CMD=Web&PAGE_TYPE=BlastDocs&DOC_TYPE=FAQ#expect and http://homepages.ulb.ac.be/~dgonze/TEACHING/stat_scores.pdf). Higher values mean more found matches. Default value is 10.|
+|`word_size`|Word size for the initial match. Decreasing the word size results in an increasing number of matches. Default values (if the `task` parameter hasn't been specified): 11 for `blastn` and 3 for `blastp`.|
+|`task`|Defines values for a set of parameters of the tools blastn and blastp. Possible values are<ul><li>blastn: Default value blastn</li></ul><table> <thead> <tr> <th>Value</th> <th>Description</th> <th>Default value of word_size</th> </tr> </thead> <tbody> <tr> <td>blastn-short</td> <td>`blastn` program optimized for sequences shorter than 50 bases</td> <td>7</td> </tr> <tr> <td>blastn</td> <td>Traditional `blastn` requiring an exact match of 11</td> <td>11</td> </tr> <tr> <td>dc-megablast</td> <td>Discontiguous megablast used to find more distant (e.g., interspecies) sequences</td> <td>11</td> </tr> <tr> <td>megablast</td> <td>Traditional megablast used to find very similar (e.g., intraspecies or closely related species) sequences</td> <td>28</td> </tr> </tbody> </table><ul><li>blastp: Default value blastp</li></ul><table> <thead> <tr> <th>Value</th> <th>Description</th> <th>Default value of word_size</th> </tr> </thead> <tbody> <tr> <td>blastp</td> <td>Traditional `blastp` to compare a protein query to a protein database</td> <td>3</td> </tr> <tr> <td>blastp-short</td> <td>`blastp` optimized for queries shorter than 30 residues</td> <td>2</td> </tr> </tbody> </table>|
+|`ungapped`|If specified (with an empty string value) only ungapped matches are returned. Will be ignored for `blastp`.|
+
+For more details about these parameters see
+<http://www.ncbi.nlm.nih.gov/books/NBK1763/>.
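+
+Putting the pieces together, a client-side query through
+`IGeneralInformationService` could look roughly like the following
+sketch. The preferred search domain name `blast-db` is a hypothetical
+core-plugin name, and the result accessors follow the attribute
+descriptions on this page, so treat this as a sketch rather than
+verbatim API usage:
+
+    // sessionToken and generalInformationService are assumed to be
+    // already available (e.g. after a login call).
+    Map<String, String> parameters = new HashMap<String, String>();
+    parameters.put("task", "blastn-short"); // optimized for short queries
+    parameters.put("evalue", "1");          // stricter expect-value threshold
+
+    List<SearchDomainSearchResult> results = generalInformationService
+            .searchOnSearchDomain(sessionToken, "blast-db", "GATTACAGATTACA", parameters);
+    for (SearchDomainSearchResult result : results)
+    {
+        // The result list is sorted by score in descending order.
+        System.out.println(result.getScore().getScore());
+    }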
+
+#### Search Results
+
+A search result has either a `DataSetFileBlastSearchResultLocation` or
+an `EntityPropertyBlastSearchResultLocation` instance depending on
+whether the result has been found in a sequence of a FASTA or FASTQ file
+of a data set or in a sequence stored as a property of an experiment, a
+sample or a data set. In any case the following information can be
+retrieved for each match:
+
+|BLAST output column|Access in Java |Description |
+|-------------------|----------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------|
+|`score` |`SearchDomainSearchResult.getScore().getScore()` |Score. See http://homepages.ulb.ac.be/~dgonze/TEACHING/stat_scores.pdf for an explanation of score, bit-score and evalue.|
+|`bitscore` |`SearchDomainSearchResult.getScore().getBitScore()` | |
+|`evalue` |`SearchDomainSearchResult.getScore().getEvalue()` | |
+|`sstart` |`SearchDomainSearchResult.getResultLocation().getAlignmentMatch().getSequenceStart()` |Start of alignment in found sequence |
+|`send` |`SearchDomainSearchResult.getResultLocation().getAlignmentMatch().getSequenceEnd()` |End of alignment in found sequence |
+|`qstart` |`SearchDomainSearchResult.getResultLocation().getAlignmentMatch().getQueryStart()` |Start of alignment in search string. |
+|`qend` |`SearchDomainSearchResult.getResultLocation().getAlignmentMatch().getQueryEnd()` |End of alignment in search string. |
+|`mismatch` |`SearchDomainSearchResult.getResultLocation().getAlignmentMatch().getNumberOfMismatches()`|Number of mismatches. |
+|`gaps` |`SearchDomainSearchResult.getResultLocation().getAlignmentMatch().getTotalNumberOfGaps()` |Total number of gaps. |
diff --git a/docs/software-developer-documentation/server-side-extensions/as-api-listener.md b/docs/software-developer-documentation/server-side-extensions/as-api-listener.md
new file mode 100644
index 0000000000000000000000000000000000000000..4333859d805da8b2a855f247585d5c13cba9924c
--- /dev/null
+++ b/docs/software-developer-documentation/server-side-extensions/as-api-listener.md
@@ -0,0 +1,160 @@
+API Listener Core Plugin (V3 API)
+=================================
+
+Introduction
+------------
+
+The V3 API listener core plugin is an implementation of the interceptor
+pattern: <https://en.wikipedia.org/wiki/Interceptor_pattern>
+
+It intercepts twice: right before an operation is executed, and
+right after.
+
+
+
+Its main focus is to help integrations. It gives integrators an
+opportunity to execute additional functionality before or after an API
+call for the following purposes:
+
+- Modify the API call inputs/outputs immediately before/after they
+  reach their executor.
+- Trigger additional internal logic.
+- Notify third party systems.
+
+Core Plugin
+-----------
+
+To achieve these goals it is necessary to provide a core plugin of the
+type 'api-listener' to the AS:
+
+### Plugin.properties
+
+It is required to provide an 'operation-listener.class' indicating the
+class name of the listener that will be loaded.
+
+Additionally any number of properties following the
+pattern 'operation-listener.<your-custom-name>' can be provided.
+Custom properties help maintainability: they give the integrator the
+opportunity to compile the listener only once and configure it
+differently for different instances.
+
+**plugin.properties**
+
+    operation-listener.class = ch.ethz.sis.openbis.generic.server.asapi.v3.executor.operation.OperationListenerExample
+    operation-listener.your-config-property = Your Config Message
+
+### lib
+
+The core plugin should contain a lib folder with a jar containing a
+class that implements the interface IOperationListener. This interface
+is provided with the V3 API jar and declares 3 methods:
+
+- setup: Runs on startup. Gives an opportunity to read the
+  configuration provided to the core plugin.
+- beforeOperation: Runs before each operation occurs. In addition to
+  the intercepted operation it also provides access to the API and the
+  session token used for the operation.
+- afterOperation: Runs after the operation occurs. In addition
+  to the intercepted operation it also provides access to the API, the
+  session token used for the operation, the operation result and any
+  exception that happened during the operation.
+
+
+
+### Implicit Requirements
+
+**Requirement 1: The Listener should be Thread-Safe**
+
+A single instance of the Listener is created during the server startup.
+
+Since a single instance is used to serve all requests, thread-safe code
+is a requirement.
+
+We strongly suggest not keeping any state.
+
+**Requirement 2: The Listener should not throw Exceptions**
+
+If the listener throws an exception it will make the API call fail.
+
+**Requirement 3: The Listener should use IOperation and IOperationResult
+as indicated below**
+
+All API Operations go through every listener, so the method signatures
+should use IOperation and IOperationResult.
+
+Please use instanceof for safe casting.
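+
+For instance, a listener that only reacts to sample creations could
+guard its logic as in the following sketch (CreateSamplesOperation is
+the V3 operation class for sample creation; the logging via
+operationLog, as in the example below, is just a placeholder for your
+own code):
+
+    @Override
+    public void beforeOperation(IApplicationServerApi api, String sessionToken, IOperation operation)
+    {
+        if (operation instanceof CreateSamplesOperation)
+        {
+            // Safe cast: only reached if the operation really is a sample creation
+            CreateSamplesOperation createSamples = (CreateSamplesOperation) operation;
+            operationLog.info("creating " + createSamples.getCreations().size() + " sample(s)");
+        }
+    }
+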
+
+
+**IOperationListener**
+
+    package ch.ethz.sis.openbis.generic.asapi.v3.plugin.listener;
+
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.operation.IOperation;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.operation.IOperationResult;
+    import java.util.Properties;
+
+    public interface IOperationListener<OPERATION extends IOperation, RESULT extends IOperationResult>
+    {
+        public static final String LISTENER_PROPERTY_KEY = "operation-listener";
+        public static final String LISTENER_CLASS_KEY = LISTENER_PROPERTY_KEY + ".class";
+        public abstract void setup(Properties properties);
+        public abstract void beforeOperation(IApplicationServerApi api, String sessionToken, OPERATION operation);
+        public abstract void afterOperation(IApplicationServerApi api, String sessionToken, OPERATION operation,
+                RESULT result, RuntimeException runtimeException);
+    }
+
+### Example - Logging
+
+The following example implementation intercepts the calls and logs the
+operation name to the standard openBIS log:
+
+**OperationListenerExample**
+
+    package ch.ethz.sis.openbis.generic.server.asapi.v3.executor.operation;
+
+    import ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.operation.IOperation;
+    import ch.ethz.sis.openbis.generic.asapi.v3.dto.common.operation.IOperationResult;
+    import ch.ethz.sis.openbis.generic.asapi.v3.plugin.listener.IOperationListener;
+    import ch.systemsx.cisd.common.logging.LogCategory;
+    import ch.systemsx.cisd.common.logging.LogFactory;
+    import org.apache.log4j.Logger;
+
+    import java.util.Properties;
+
+    public class OperationListenerExample implements IOperationListener<IOperation, IOperationResult>
+    {
+
+        private static final Logger operationLog = LogFactory.getLogger(LogCategory.OPERATION,
+                OperationListenerExample.class);
+
+        private String yourConfigProperty = null;
+
+        @Override
+        public void setup(Properties properties)
+        {
+            yourConfigProperty = properties.getProperty("operation-listener.your-config-property");
+            operationLog.info("setup: " + yourConfigProperty);
+        }
+
+        @Override
+        public void beforeOperation(IApplicationServerApi api, String sessionToken, IOperation operation)
+        {
+            operationLog.info("beforeOperation: " + operation.getClass().getSimpleName());
+        }
+
+        @Override
+        public void afterOperation(IApplicationServerApi api, String sessionToken, IOperation operation,
+                IOperationResult result, RuntimeException runtimeException)
+        {
+            operationLog.info("afterOperation: " + operation.getClass().getSimpleName());
+        }
+    }
+
+### Example - Logging Sources
+
+You can download a complete example with sources
+[here](/download/attachments/132286253/api-listener-example.zip?version=1&modificationDate=1663665058217&api=v2) to
+use as a template to make your own.
\ No newline at end of file
diff --git a/docs/software-developer-documentation/server-side-extensions/as-services.md b/docs/software-developer-documentation/server-side-extensions/as-services.md
new file mode 100644
index 0000000000000000000000000000000000000000..2017588c25cc13af5bd8617e1791b6462267ab1d
--- /dev/null
+++ b/docs/software-developer-documentation/server-side-extensions/as-services.md
@@ -0,0 +1,94 @@
+Custom Application Server Services
+==================================
+
+## Introduction
+
+On the Data Store Server (DSS), aggregation/ingestion services based on
+Jython scripts can be used to extend openBIS by custom services. These
+services have full access to the data store and the Application Server
+(AS).
+
+Often only access to the AS is needed; going over the DSS is a detour.
+For such cases it is better to write an AS core plugin of
+type `services`.
+
+## How to write a custom AS service core plugin
+
+Here is the recipe to create an AS core plugin of type `services`:
+
+1.  The
+    folder `<core plugin folder>/<module>/<version>/as/services/<core plugin name>`
+    has to be created.
+
+2.  In this folder two files have to be created: `plugin.properties`
+    and `script.py`. The properties file should contain:
+
+    **plugin.properties**
+
+        class = ch.ethz.sis.openbis.generic.server.asapi.v3.helper.service.JythonBasedCustomASServiceExecutor
+        script-path = script.py
+
+3.  The script file should have the function `process` with two
+    arguments. The first argument is the context. It contains the
+    methods `getSessionToken()` and `getApplicationService()`, the
+    latter returning an instance
+    of `ch.ethz.sis.openbis.generic.asapi.v3.IApplicationServerApi`. The
+    second argument is a map of key-value pairs. The keys are strings
+    and the values are arbitrary objects.
+
+    Anything returned by the script will be returned to the caller of
+    the service. Here is an example of a script which creates a space:
+
+    **script.py**
+
+        from ch.ethz.sis.openbis.generic.asapi.v3.dto.space.create import SpaceCreation
+
+        def process(context, parameters):
+            space_creation = SpaceCreation()
+            space_creation.code = parameters.get('space_code')
+            result = context.applicationService.createSpaces(context.sessionToken, [space_creation])
+            return "Space created: %s" % result
+
+    Note that all changes on the AS database will be done in one
+    transaction.
+
+## How to use a custom AS service
+
+The application API version 3 offers the following method to search for
+existing services:
+
+    SearchResult<CustomASService> searchCustomASServices(String sessionToken, CustomASServiceSearchCriteria searchCriteria,
+            CustomASServiceFetchOptions fetchOptions)
+
+The following Java code example returns all available services:
+
+    SearchResult<CustomASService> services = service.searchCustomASServices(sessionToken, new CustomASServiceSearchCriteria(), new CustomASServiceFetchOptions());
+
+
+
+With the following method of the API version 3 a specified service can
+be executed:
+
+    public Object executeCustomASService(String sessionToken, ICustomASServiceId serviceId, CustomASServiceExecutionOptions options);
+
+The `serviceId` can be obtained from a `CustomASService` object (as
+returned by the `searchCustomASServices` method) by the getter method
+`getCode()`. It can also be created as an instance of
+`CustomASServiceCode`. Note that the service code is just the core
+plugin name.
+
+Parameter bindings (i.e. key-value pairs) are specified in the
+`CustomASServiceExecutionOptions` object by invoking the method
+`withParameter()` for each binding.
+
+Here is a code example:
+
+    CustomASServiceExecutionOptions options = new CustomASServiceExecutionOptions().withParameter("space_code", "my-space");
+    Object result = service.executeCustomASService(sessionToken, new CustomASServiceCode("space-creator"), options);
+    System.out.println(result);
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/software-developer-documentation/server-side-extensions/core-plugins.md b/docs/software-developer-documentation/server-side-extensions/core-plugins.md
new file mode 100644
index 0000000000000000000000000000000000000000..6ca96076a1e20d203fde1b80dad49520eb835031
--- /dev/null
+++ b/docs/software-developer-documentation/server-side-extensions/core-plugins.md
@@ -0,0 +1,307 @@
+Core Plugins
+============
+
+## Motivation
+
+The `service.properties` file of the openBIS Application Server (AS) and
+Data Store Server (DSS) can be quite big because of all the
+configuration data for maintenance tasks, drop-boxes, reporting and
+processing plugins, etc. Making this configuration more modular
+improves the structure. It also allows core plugins shipped with the
+distribution to be kept separate from customized plugins, which makes
+maintenance of these plugins more independent. For example, a new
+maintenance task plugin can be added in an update without any need for
+an admin to put the configuration data manually into the
+`service.properties` file.
+
+## Core Plugins Folder Structure
+
+All plugins, whether they are part of the distribution or added and
+maintained separately, are stored in the folder usually called
+`core-plugins`. Standard (i.e. core) plugins are part of the
+distribution. During installation the folder `core-plugins` is unpacked
+as a sibling folder of `openBIS-server` and `datastore_server`.
+
+The folder structure is organized as follows:
+
+- The file `core-plugins.properties` containing the following
+  properties (see the example after this list):
+  - `enabled-modules`: comma-separated list of regular expressions
+    for all enabled modules.
+  - `disabled-core-plugins`: comma-separated list of disabled
+    plugins. All plugins are disabled whose full plugin ID begins
+    with one of the terms of this list. To disable the initialization
+    of master data of a module, disable its core
+    plugin "initialize-master-data".
+- The children of `core-plugins` are folders denoting modules like the
+  standard technologies, `proteomics` and `screening`. For
+  customization, any module can be added.
+- Each module folder has children which are numbered folders. The
+  number denotes the version of the plugins of that module. The
+  version with the largest number will be used. Different modules can
+  have different largest version numbers.
For more + details see [Custom Database + Queries](/display/openBISDoc2010/Custom+Database+Queries). + - `custom-imports`: Custom file imports to DSS via Web + interface. For more details see [Custom + Import](/display/openBISDoc2010/Custom+Import). + - `services`: Custom services. For more details see [Custom + Application Server + Services](/display/openBISDoc2010/Custom+Application+Server+Services). + - `webapps`: HTML5 applications that use the openBIS API. For + more details see [openBIS + webapps](/display/openBISDoc2010/openBIS+webapps). + - `miscellaneous`: Any additional properties. + - `DSS:` + - `drop-boxes`: ETL server threads for registration of data + sets.` ` + - `reporting-plugins`: Reports visible in openBIS. + Property `class` denotes fully-qualified class name of a + class implementing + `ch.systemsx.cisd.openbis.dss.generic.server.plugins.tasks.IReportingPluginTask`. + For more details see [Reporting + Plugins](/display/openBISDoc2010/Reporting+Plugins). + - `processing-plugins`: Processing tasks triggered by + users.` `Property `class` denotes + fully-qualified class name of a class implementing + `ch.systemsx.cisd.openbis.dss.generic.server.plugins.tasks.IProcessingPluginTask`.` `For + more details see [Processing + Plugins](/display/openBISDoc2010/Processing+Plugins).` ` + - `maintenance-tasks`: Maintenance tasks triggered by some + time schedule.` `Property `class` denotes + fully-qualified class name of a class implementing + `ch.systemsx.cisd.common.maintenance.IMaintenanceTask`.` `For + more details see [Maintenance + Tasks](/display/openBISDoc2010/Maintenance+Tasks). + - `search-domain-services`: Services for variaous search + domains (e.g. search on sequence databases using BLAST). + Property `class` denotes fully-qualified class name of a + class implementing + `ch.systemsx.cisd.openbis.dss.generic.shared.api.internal.v2.ISearchDomainService`. + - `data-sources`: Internal or external database sources. + - `services`: Services based on servlets. + Property `class` denotes fully-qualified class name of a + class implementing `javax.servlet.Servlet`. + - `imaging-overview-plugins`: Data set type specific provider + of the overview image of a data set. + Property `class` denotes fully-qualified class name of a + class implementing + `ch.systemsx.cisd.openbis.dss.generic.server.IDatasetImageOverviewPlugin`. + - `file-system-plugins`: Provider of a custom DSS file system + (FTP/SFTP) view hierarchy. + Property `class` denotes fully-qualified class name of a + class + implementing `ch.systemsx.cisd.openbis.dss.generic.server.fs.IResolverPlugin` + Property code denotes the name of the top-level directory + under which the custom hierarchy will be visible + - `miscellaneous`: Any additional + properties.` ` +- Folders of each of these types can have an arbitrary number of + subfolders. But if the type folder is present it should have at + least one subfolder. Each defining one plugin. The name of these + subfolders define the plugin ID. It has to be unique over all + plugins independent of module and plugin type. It should not contain + the characters space ' ', comma '`,`', and equal sign '`=`'. +- Each plugin folder should contain at least the file + `plugin.properties`. There could be additional files (referred in + `plugin.properties`) but no subfolders. 
+
+Here is an example of a typical structure of a core plugins folder:
+
+    core-plugins
+      core-plugins.properties
+      proteomics
+        1
+          as
+            initialize-master-data.py
+          dss
+            drop-boxes
+              ms-injection
+                plugin.properties
+            maintenance-tasks
+              data-set-clean-up
+                plugin.properties
+      screening
+        1
+          core-plugin.properties
+          as
+            initialize-master-data.py
+            maintenance-tasks
+              material-reporting
+                mapping.txt
+                plugin.properties
+            custom-imports
+              myCustomImport
+                plugin.properties
+          dss
+            drop-boxes
+              hcs-dropbox
+                lib
+                  custom-lib.jar
+                hcs-dropbox.py
+                plugin.properties
+
+You might have noticed the file `initialize-master-data.py` in the AS
+core plugin sections in this example. It is a script to register master
+data in the openBIS core database. For more details see [Installation
+and Administrator Guide of the openBIS
+Server](/display/openBISDoc2010/Installation+and+Administrator+Guide+of+the+openBIS+Server).
+
+Each plugin can refer to any number of files. These files are part of
+the plugin folder. In `plugin.properties` they are referred to relative
+to the plugin folder, that is, by file name. Example:
+
+**plugin.properties**
+
+    incoming-dir = ${incoming-root-dir}/incoming-hcs
+    incoming-data-completeness-condition = auto-detection
+    top-level-data-set-handler = ch.systemsx.cisd.openbis.dss.etl.jython.JythonPlateDataSetHandler
+    script-path = hcs-dropbox.py
+    storage-processor = ch.systemsx.cisd.openbis.dss.etl.PlateStorageProcessor
+    storage-processor.data-source = imaging-db
+    storage-processor.define-channels-per-experiment = false
+
+## Merging Configuration Data
+
+At start up, AS and DSS merge the content of `service.properties`
+with the content of all `plugin.properties` of the latest version per
+enabled module. Plugin properties can be deleted by adding
+`<plugin ID>.<plugin property key> = __DELETED__` to service.properties.
+Example:
+
+    simple-dropbox.incoming-data-completeness-condition = __DELETED__
+
+This leads to a deletion of the property
+`incoming-data-completeness-condition` specified in `plugin.properties`
+of the plugin `simple-dropbox`.
+
+Merging is done by injecting the properties of `plugin.properties` into
+`service.properties` by adding the plugin ID as a prefix to the property
+key (not for `miscellaneous`). For example, the property `script-path`
+of plugin `hcs-dropbox` becomes `hcs-dropbox.script-path`. References to
+files inside the plugin are replaced by a path relative to the working
+directory. For the various plugin types (except `miscellaneous`) the
+plugin ID is appended to the related property in `service.properties`
+for this plugin type. For example, plugins of type `drop-boxes` are
+added to the property `inputs`.
+
+## Enabling Modules and Disabling Plugins
+
+There are three methods to control which plugins are available and
+which not:
+
+- enabling by the property `enabled-modules` in `core-plugins.properties`:
+  This enables all plugins of certain modules.
+- disabling by the property `disabled-core-plugins` in
+  `core-plugins.properties`: This allows specific plugins to be
+  disabled at a fine-grained level.
+- disabling by marker file: Plugin developers should use this method
+  when developing new plugins.
+
+### Enabling Modules
+
+The property `enabled-modules` in `core-plugins.properties` is a
+comma-separated list of regular expressions denoting modules. All
+plugins in a module folder of the `core-plugins` folder are enabled if
+the module name matches one of these regular expressions. If this list
+is empty or the property hasn't been specified, no core plugin will be
+used. Note that this property is manipulated by the openBIS Installer
+for Standard Technologies. Example:
+
+**core-plugins.properties**
+
+    enabled-modules = screening, proteomics, dev-module-.*
+
+### Disabling Core Plugins by Property
+
+The property `disabled-core-plugins` in `core-plugins.properties` allows
+plugins to be disabled selectively, either by module name, module
+combined with plugin type, or full plugin ID. Example:
+
+**core-plugins.properties**
+
+    disabled-core-plugins = screening, proteomics:reporting-plugins, proteomics:maintenance-tasks:data-set-clean-up
+
+### Disabling Core Plugins by Marker File
+
+The empty marker file `disabled` in a certain plugin folder disables the
+particular plugin.
+
+## Core Plugin Dependency
+
+A core plugin can depend on another core plugin. The dependency is
+specified in `<module>/<version>/core-plugin.properties`. It has a
+property named `required-plugins`. Its value is a comma-separated list
+of core-plugins on which it depends. The dependency can be specified
+selectively, either by module name, module combined with plugin type, or
+full plugin ID. Example:
+
+**core-plugin.properties**
+
+    required-plugins = module-a, module-b:initialize-master-data, module-b:reporting-plugins, module-a:drop-boxes:generic
+
+## Rules for Plugin Writers
+
+As a consequence of the way plugins are merged with
+`service.properties`, writers of plugins have to obey the following
+rules:
+
+- Plugin IDs have to be unique among all plugins, whether they are
+  defined in `service.properties` or as core plugins. The only
+  exceptions are plugins of type `miscellaneous`.
+- In `plugin.properties` other properties can be referred to by the
+  usual `${<property key>}` notation. The referred property can be in
+  `service.properties` or in any `plugin.properties`.
+- As a convention, use `${incoming-root-dir}` when defining the incoming
+  folder for a drop box.
+- Refer to files in `plugin.properties` only by name and add them as
+  siblings of `plugin.properties` to the plugin folder. Note that
+  different plugins can refer to files with the same name; there will be
+  no ambiguity about which file is meant.
+- In order to be completely independent from updates of the core
+  plugins which are part of the distribution, create your own module,
+  like `my-plugins`, and put all your plugins there. Do not forget to
+  add your module to the property `enabled-modules` in
+  `core-plugins.properties`.
+
+## Using Java libraries in Core Plugins
+
+OpenBIS allows you to include Java libraries in core plugin folders. The
+\*.jar files have to be stored in the "<core plugin folder>/lib"
+folder. For instance, in order to use "my-lib.jar" in "my-dropbox" the
+following file structure is needed:
+
+    my-technology
+      1
+        dss
+          drop-boxes
+            my-dropbox
+              lib
+                my-lib.jar
+              dropbox.py
+              plugin.properties
+
+Having this structure, Java classes from "my-lib.jar" can be imported
+and used in the "dropbox.py" script.
+
+NOTICE: Currently this feature is only supported for DSS core
+plugins. Under the hood, a symbolic link to the jar file is created in
+the "datastore\_server/lib" folder during DSS startup.
\ No newline at end of file
diff --git a/docs/software-developer-documentation/server-side-extensions/dss-dropboxes.md b/docs/software-developer-documentation/server-side-extensions/dss-dropboxes.md
new file mode 100644
index 0000000000000000000000000000000000000000..5ef6ed40be76ba60541fd143f0a2da7b02ad98a7
--- /dev/null
+++ b/docs/software-developer-documentation/server-side-extensions/dss-dropboxes.md
@@ -0,0 +1,1017 @@
+Dropboxes
+=========
+
+Jython Dropboxes
+----------------
+
+### Introduction
+
+The jython dropbox feature makes it possible for a script written in the
+Python language to control the data set registration process of the
+openBIS Data Store Server. A script can modify the files in the dropbox
+and register data sets, samples, and experiments as part of its
+processing. The framework provides tools to track file operations and,
+if necessary, revert them, ensuring that the incoming file or directory
+is returned to its original state in the event of an error.
+
+By default Python 2.5 is used, but it's possible to use Python version
+2.7.
+
+Dropboxes are DSS core plugins: [Core
+Plugins](/display/openBISDoc2010/Core+Plugins)
+
+### Simple Example
+
+Here is an example that registers files that arrive in the drop box as
+data sets. They are explicitly attached to the experiment "JYTHON" in
+the project "TESTPROJ" and space "TESTGROUP".
+
+**data-set-handler-basic.py**
+
+    def process(transaction):
+        # Create a data set
+        dataSet = transaction.createNewDataSet()
+
+        # Reference the incoming file that was placed in the dropbox
+        incoming = transaction.getIncoming()
+        # Add the incoming file into the data set
+        transaction.moveFile(incoming.getAbsolutePath(), dataSet)
+
+        # Get an experiment for the data set
+        exp = transaction.getExperiment("/TESTGROUP/TESTPROJ/JYTHON")
+
+        # Set the owner of the data set -- the specified experiment
+        dataSet.setExperiment(exp)
+
+This example is unrealistically simple, but it contains all the elements
+necessary to implement a jython drop box. The main idea is to perform
+several operations on the data and metadata within the bounds of a
+transaction. The transaction is used to track the changes made so they
+can be executed together or all reverted if a problem occurs.
+
+### More Realistic Example
+
+The above example demonstrates the concept, but it is unrealistically
+simple. In general, we want to be able to determine and specify the
+experiment/sample for a data set and explicitly set the data set type as
+well.
+
+In this example, we handle a usage scenario where there is one
+experiment done every day. All data produced on a single day is
+associated with the experiment for that date. If the experiment for a
+given day does not exist, it is created.
+
+**data-set-handler-experiment-reg.py**
+
+    from datetime import datetime
+
+    def process(transaction):
+
+        # Try to get the experiment for today
+        now_str = datetime.today().strftime('%Y%m%d')
+        expid = "/TESTGROUP/TESTPROJ/" + now_str
+        exp = transaction.getExperiment(expid)
+
+        # Create an experiment if necessary
+        if None == exp:
+            exp = transaction.createNewExperiment(expid, "COMPOUND_HCS")
+            exp.setPropertyValue("DESCRIPTION", "An experiment created on " + datetime.today().strftime('%Y-%m-%d'))
+            exp.setPropertyValue("COMMENT", now_str)
+
+        dataSet = transaction.createNewDataSet()
+
+        incoming = transaction.getIncoming()
+        transaction.moveFile(incoming.getAbsolutePath(), dataSet)
+        dataSet.setDataSetType("HCS_IMAGE")
+        dataSet.setExperiment(exp)
+
+More complex processing is also possible. In the following sections, we
+explain how to configure a jython dropbox and describe the API in
+greater detail.
+
+### Model
+
+The model underlying dropbox registration is the following: when a new
+file or folder is found in the dropbox folder, the process function of
+the script file is invoked with a [data set registration
+transaction](#Dropboxes-IDataSetRegistrationTransaction) as an argument.
+The process function has the responsibility of looking at the incoming
+file or folder and determining what needs to be registered or modified
+in the metadata database and what data needs to be stored on the file
+system. The
+[IDataSetRegistrationTransaction](#Dropboxes-IDataSetRegistrationTransaction) interface
+defines the API for specifying entities to register and update.
+
+Committing a transaction is actually a two-part process. The metadata is
+stored in the openBIS application server's database; the data is kept on
+the file system in a sharded directory structure beneath the data store
+server's *store* directory. All modifications requested as part of a
+transaction are committed atomically — they either all succeed or all
+fail.
+
+Several [Events](#Dropboxes-Events) occur in the process of committing a
+transaction. By defining jython functions, it is possible to be notified
+and intervene when an event occurs. Because the infrastructure reserves
+the right to delay or retry actions if resources become unavailable, the
+process function and event functions cannot use global variables to
+communicate with each other. Instead, they should use the registration
+context object to communicate. Anything stored in the registration
+context must, however, be serializable by Java serialization.
+
+Details
+-------
+
+### Dropbox Configuration
+
+A jython dropbox is typically distributed as a [core
+plugin](/display/openBISDoc2010/Core+Plugins) and configured in its
+plugin.properties file. A dropbox is configured to run a jython script,
+which is kept in the same directory as plugin.properties. The
+configuration requires a storage processor and the name of the script (a
+full path is not necessary if the script is in the same directory as the
+plugin.properties). Here is an example configuration for a dropbox that
+uses the jython handler.
+
+**plugin.properties**
+
+    #
+    # REQUIRED PARAMETERS
+    #
+    # The directory to watch for new data sets
+    incoming-dir = ${root-dir}/incoming-jython
+
+    # The handler class. Must be either ch.systemsx.cisd.etlserver.registrator.api.v2.JythonTopLevelDataSetHandlerV2 or a subclass thereof
+    top-level-data-set-handler = ch.systemsx.cisd.etlserver.registrator.api.v2.JythonTopLevelDataSetHandlerV2
+
+    # The script to execute, reloaded and recompiled each time a file/folder is placed in the dropbox
+    script-path = ${root-dir}/data-set-handler.py
+
+    # The appropriate storage processor
+    storage-processor = ch.systemsx.cisd.etlserver.DefaultStorageProcessor
+
+    # Specify the jython version. Default is whatever is specified in the datastore server service.properties under the property "jython-version"
+    plugin-jython-version=2.5
+
+    #
+    # OPTIONAL PARAMETERS
+    #
+
+    # False if incoming directory is assumed to exist.
+    # Default - true: Incoming directory will be created on start up if it doesn't exist.
+    incoming-dir-create = true
+
+    # Defines how the drop box decides if a folder is ready to process: either by a 'marker-file' or a time out which is called 'auto-detection'
+    # The time out is set globally in the service.properties and is called 'quiet-period'. This means when the number of seconds is over and no changes have
+    # been made to the incoming folder the drop box will start to register. The marker file must have the following naming schema: '.MARKER_is_finished_<incoming_folder_name>'
+    incoming-data-completeness-condition = marker-file
+
+    # Defines whether the dropbox should handle .h5 archives as folders (true) or as files (false). Default is true.
+    h5-folders = true
+
+    # Defines whether the dropbox should handle .h5ar archives as folders (true) or as files (false). Default is true.
+    h5ar-folders = true
+
+#### Development mode
+
+Set the property `development-mode = true` in your dropbox to enable a
+quick feedback loop when developing your dropbox. By default dropboxes
+have a complex auto-recovery mechanism in place which, on errors, waits
+and retries the registration several times. This can be useful in case
+of short network problems or other unexpected turbulence, but it can
+mean a long delay between the moment the dropbox tries to register
+something and the actual error report. During development it is
+essential to get quick feedback on whether your dropbox does what it
+should. Thus, set the development mode while you are modifying your
+script and remember to unset it when you are done.
+
+#### Jython version
+
+Set the property `plugin-jython-version=2.7` in your dropbox
+plugin.properties to change the default jython version for the single
+dropbox. Available versions are 2.5 and 2.7.
+
+Jython API
+----------
+
+When a new file is placed in the dropbox, the framework compiles and
+executes the script, checks that the signatures of the `process`
+function and any defined event-handling functions are correct, and then
+invokes its `process` function.
+
+### IDataSetRegistrationTransaction
+
+Have a look
+at [IDataSetRegistrationTransactionV2](https://openbis.ch/javadoc/20.10.x/javadoc-dropbox-api/ch/systemsx/cisd/etlserver/registrator/api/v2/IDataSetRegistrationTransactionV2.html)
+for the calls available in a transaction. Note that you need to use the
+file methods in the transaction, e.g. `moveFile()`, rather than
+manipulating the file system directly to get fully transactional
+behavior.
+
+#### TransDatabase queries
+
+The query object returned
+by `getDatabaseQuery(String dataSourceName)` allows performing any query
+and executing any statement on the given query database in the context
+of a database transaction.
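+
+As an illustration, a Java dropbox (see the Java Dropboxes section
+below) could use such a query object as in the following sketch, which
+assumes a query database named "my-db" has been configured and contains
+a table named "tracking" (both names are made up):
+
+    // inside process(IDataSetRegistrationTransactionV2 transaction)
+    DynamicQuery query = transaction.getDatabaseQuery("my-db");
+    // Each row comes back as a Map<String, Object>
+    List<Map<String, Object>> rows =
+            query.select("SELECT id, name FROM tracking WHERE name = ?", "example");
+    for (Map<String, Object> row : rows)
+    {
+        System.out.println(row.get("id") + " " + row.get("name"));
+    }
+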
+Here are the methods available from the query interface:
+
+    public interface DynamicQuery {
+
+        /**
+         * Performs a SQL query. The returned List is connected to the database and
+         * updateable.
+         *
+         * @param query The SQL query template.
+         * @param parameters The parameters to fill into the SQL query template.
+         *
+         * @return The result set as List; each row is represented as one Map<String,Object>.
+         */
+        List<Map<String, Object>> select(final String query,
+                final Object... parameters);
+
+        /**
+         * Performs a SQL query. The returned List is connected to the database and
+         * updateable.
+         *
+         * @param type The Java type representing one row in the returned
+         *            result set.
+         * @param query The SQL query template.
+         * @param parameters The parameters to fill into the SQL query template.
+         *
+         * @return The result set as List; each row is represented as one instance of the given type.
+         */
+        <T> List<T> select(final Class<T> type, final String query,
+                final Object... parameters);
+
+        /**
+         * Executes a SQL statement.
+         *
+         * @param query The SQL query template.
+         * @param parameters The parameters to fill into the SQL query template.
+         *
+         * @return The number of rows updated by the SQL statement, or -1 if not
+         *         applicable. <b>Note:</b> Not all JDBC drivers support this
+         *         cleanly.
+         */
+        int update(final String query, final Object... parameters);
+
+        /**
+         * Executes a SQL statement as a batch for all parameter values provided.
+         *
+         * @param query The SQL query template.
+         * @param parameters The parameters to fill into the SQL query template. At least
+         *            one of the parameters needs to be an array or
+         *            <code>Collection</code>. If multiple parameters are arrays or
+         *            <code>Collection</code>, all of them need to have the same
+         *            size.
+         *
+         * @return The number of rows updated by the SQL statement, or -1 if not
+         *         applicable. <b>Note:</b> Not all JDBC drivers support this
+         *         cleanly.
+         */
+        int batchUpdate(final String query, final Object... parameters);
+
+        /**
+         * Executes a SQL statement. Supposed to be used for INSERT statements with
+         * an automatically generated integer key.
+         *
+         * @param query The SQL query template.
+         * @param parameters The parameters to fill into the SQL query template.
+         *
+         * @return The automatically generated key. <b>Note:</b> Not all JDBC
+         *         drivers support this cleanly.
+         */
+        long insert(final String query, final Object... parameters);
+
+        /**
+         * Executes a SQL statement. Supposed to be used for INSERT statements with
+         * one or more automatically generated keys.
+         *
+         * @param query The SQL query template.
+         * @param parameters The parameters to fill into the SQL query template.
+         *
+         * @return The automatically generated keys. <b>Note:</b> Not all JDBC
+         *         drivers support this cleanly and it is in general driver-dependent
+         *         what keys are present in the returned map.
+         */
+        Map<String, Object> insertMultiKeys(final String query,
+                final Object... parameters);
+
+        /**
+         * Executes a SQL statement as a batch for all parameter values provided.
+         * Supposed to be used for INSERT statements with an automatically generated
+         * integer key.
+         *
+         * @param query The SQL query template.
+         * @param parameters The parameters to fill into the SQL query template. At least
+         *            one of the parameters needs to be an array or
+         *            <code>Collection</code>. If multiple parameters are arrays or
+         *            <code>Collection</code>, all of them need to have the same
+         *            size.
+         *
+         * @return The automatically generated key for each element of the batch.
+         *         <b>Note:</b> Not all JDBC drivers support this cleanly.
+         */
+        long[] batchInsert(final String query, final Object... parameters);
+
+        /**
+         * Executes a SQL statement as a batch for all parameter values provided.
+         * Supposed to be used for INSERT statements with one or more automatically
+         * generated keys.
+         *
+         * @param query The SQL query template.
+         * @param parameters The parameters to fill into the SQL query template. At least
+         *            one of the parameters needs to be an array or
+         *            <code>Collection</code>. If multiple parameters are arrays or
+         *            <code>Collection</code>, all of them need to have the same
+         *            size.
+         *
+         * @return The automatically generated keys for each element of the batch.
+         *         <b>Note:</b> Not all JDBC drivers support this cleanly and it is
+         *         in general driver-dependent what keys are present in the returned map.
+         */
+        Map<String, Object>[] batchInsertMultiKeys(final String query,
+                final Object... parameters);
+    }
+
+### Events / Registration Process Hooks
+
+
+The script can be informed of events that occur during the registration
+process. To be informed of an event, define a function in the script
+file with the name specified in the table. The script can do anything it
+wants within an event function. Typical things to do in event functions
+include sending emails or registering data in secondary databases. Some
+of the event functions can be used to control the behavior of the
+registration.
+
+This table summarizes the supported events.
+
+#### Events Table
+
+|Function Name|Return Value|Description|
+|--- |--- |--- |
+|pre_metadata_registration(DataSetRegistrationContext context)|void|Called before the openBIS AS is informed of the metadata modifications. Throwing an exception in this method aborts the transaction.|
+|post_metadata_registration(DataSetRegistrationContext context)|void|The metadata has been successfully stored in the openBIS AS. This can also be a place to register data in a secondary transaction, with the semantics that any errors are ignored.|
+|rollback_pre_registration(DataSetRegistrationContext context, Exception exception)|void|Called if the metadata was not successfully stored in the openBIS AS.|
+|post_storage(DataSetRegistrationContext context)|void|Called once the data has been placed in the appropriate sharded directory of the store. This can only happen if the metadata was successfully registered with the AS.|
+|should_retry_processing(DataSetRegistrationContext context, Exception problem)|boolean|A problem occurred with the process function; should the operation be retried? A retry happens only if this method returns true.|
+
+Note: the `rollback_pre_registration` function is intended to handle
+cases where the dropbox code finished properly, but the registration of
+the data in openBIS failed. These kinds of problems are impossible to
+handle from inside the `process` function. The exceptions raised during
+the call to the `process` function should be handled by the function
+itself by catching exceptions.
+
+#### Typical Usage Table
+
+|Function Name|Usage|
+|--- |--- |
+|pre_metadata_registration(DataSetRegistrationContext context)|This event can be used as a place to register information in a secondary database. If the transaction in the secondary database does not commit, false can be returned to prevent the data from entering openBIS.|
+|post_metadata_registration(DataSetRegistrationContext context)|This event can be used as a place to register information in a secondary database. Errors encountered are ignored.|
+|rollback_pre_registration(DataSetRegistrationContext context, Exception exception)|Undoing a commit to a secondary transaction. Sending an email to the admin that the data set could not be stored.|
+|post_storage(DataSetRegistrationContext context)|Sending an email to tell the user that the data has been successfully registered. Notifying an external system that a data set has been registered.|
+|should_retry_processing(DataSetRegistrationContext context, Exception problem)|Informing openBIS if it should retry processing a data set.|
+
+Example Scripts
+---------------
+
+A simple script that registers the incoming file as a data set
+associated with a particular experiment.
+
+**data-set-handler-basic.py**
+
+    def process(transaction):
+        dataSet = transaction.createNewDataSet()
+        incoming = transaction.getIncoming()
+        transaction.moveFile(incoming.getAbsolutePath(), dataSet)
+        dataSet.setExperiment(transaction.getExperiment("/TESTGROUP/TESTPROJ/JYTHON"))
+
+A script that registers the incoming file and associates it to a daily
+experiment, which is created if necessary.
+
+**data-set-handler-experiment-reg.py**
+
+    from datetime import datetime
+
+    def process(transaction):
+        # Try to get the experiment for today
+        now_str = datetime.today().strftime('%Y%m%d')
+        expid = "/TESTGROUP/TESTPROJ/" + now_str
+        exp = transaction.getExperiment(expid)
+        # Create an experiment
+        if None == exp:
+            exp = transaction.createNewExperiment(expid, "COMPOUND_HCS")
+            exp.setPropertyValue("DESCRIPTION", "An experiment created on " + datetime.today().strftime('%Y-%m-%d'))
+            exp.setPropertyValue("COMMENT", now_str)
+        dataSet = transaction.createNewDataSet()
+        incoming = transaction.getIncoming()
+        transaction.moveFile(incoming.getAbsolutePath(), dataSet)
+        dataSet.setDataSetType("HCS_IMAGE")
+        dataSet.setExperiment(exp)
+
+Delete, Move, or Leave Alone on Error
+-------------------------------------
+
+When a problem occurs processing a file in the dropbox, the processing
+is retried. This behavior can be controlled (see
+[\#Errors](#Dropboxes-Errors)). If openBIS determines that it should not
+retry after an error or that it cannot successfully register the
+entities requested, the registration fails. It is possible to configure
+what happens to a file in the dropbox if a registration fails. The
+configuration can specify a behavior – delete the file, move it to an
+error folder, or leave it untouched – for each of several possible
+sources of errors.
+
+By default, the file is left untouched in every case. To change this
+behavior, specify an on-error-decision property on the drop box. This
+has one required sub-key, "class"; other sub-keys are determined by the
+class.
+
+### Summary
+
+- Main Key:
+  - on-error-decision
+
+- Required Sub Keys:
+  - class : The class that implements the decision
+
+There is currently one class available:
+ch.systemsx.cisd.etlserver.registrator.ConfiguredOnErrorActionDecision
+
+This class has the following sub keys:
+
+- invalid-data-set (a data set that fails validation)
+- validation-script-error (the validation script did not execute
+  correctly)
+- registration-error (openBIS failed to register the data set)
+- registration-script-error (the registration script did not
+  execute correctly)
+- storage-processor-error (the storage processor reports an error)
+- post-registration-error (an error happened after the data set
+  had been registered and stored)
+
+### Example
+
+**plugin.properties**
+
+    #
+    # On Error Decision
+    #
+    # The class that implements the decision
+    on-error-decision.class = ch.systemsx.cisd.etlserver.registrator.ConfiguredOnErrorActionDecision
+
+    # What to do if the data set fails validation
+    on-error-decision.invalid-data-set = MOVE_TO_ERROR
+
+    # What to do if the validation script has problems
+    on-error-decision.validation-script-error = MOVE_TO_ERROR
+
+    # What to do if openBIS does not accept the entities
+    on-error-decision.registration-error = MOVE_TO_ERROR
+
+    # What to do if the registration script has problems
+    on-error-decision.registration-script-error = MOVE_TO_ERROR
+
+    # What to do if the storage processor does not run correctly
+    on-error-decision.storage-processor-error = MOVE_TO_ERROR
+
+    # What to do if an error occurs after the entities have been registered in openBIS
+    on-error-decision.post-registration-error = MOVE_TO_ERROR
+
+### Search
+
+The transaction provides an interface for listing and searching for the
+core entities: experiment, sample, and data set.
+
+#### API
+
+To use the search capability, one must first retrieve the search service
+from the transaction. By default the search service returns the entities
+filtered to only those accessible by the user on whose behalf the script
+is running. It is still possible to search all existing entities by
+using the unfiltered search service, accessible from the transaction via
+the method getSearchServiceUnfiltered().
+
+#### Experiment
+
+For experiment, there is a facility for listing all experiments that
+belong to a specified project.
+
+#### Sample and Data Set
+
+For sample and data set, a more powerful search capability is available.
+This requires a bit more knowledge of the Java classes, but is very
+flexible. For each entity, there is a simplified method that performs a
+search for samples or data sets, respectively, with a specified value
+for a particular property, optionally restricted by entity type (sample
+type or data set type). This provides an easy-to-use interface for a
+common case. More complex searches, however, need to use the more
+powerful API.
+
+### Authorization Service
+
+The transaction provides an interface for querying the access privileges
+of a user and for filtering collections of entities down to those
+visible to a user.
+
+#### API
+
+To use the authorization service, one must first retrieve it from the
+transaction.
+
+### Example
+
+#### Combined Example
+
+In this example, we create a data set, list experiments belonging to a
+project, search for samples, search for data sets, and assign the
+experiment, sample, and parent data sets based on the results of the
+searches.
+ +**data-set-handler-with-search.py** + + def process(tr): + data_set = tr.createNewDataSet() + incoming = tr.getIncoming() + tr.moveFile(incoming.getAbsolutePath(), data_set) + # Get the search service + search_service = tr.getSearchService() + + # List all experiments in a project + experiments = search_service.listExperiments("/cisd/noe") + + # Search for all samples with a property value determined by the file name; we don't care about the type + samplePropValue = incoming.getName() + samples = search_service.searchForSamples("ORGANISM", samplePropValue, None) + + # If possible, set the owner to the first sample, otherwise the first experiment + if samples.size() > 0: + data_set.setSample(samples[0]) + else: + data_set.setExperiment(experiments[0]) + + # Search for any potential parent data sets and use them as parents + parent_data_sets = search_service.searchForDataSets("COMMENT", "no comment", "HCS_IMAGE") + parent_data_set_codes = map(lambda each : each.getDataSetCode(), parent_data_sets) + data_set.setParentDatasets(parent_data_set_codes) + +An example from the Deep Sequencing environment handling BAM files: + +**data-set-handler-alignment.py** + + ''' + This is handling bowtie-BAM files and extracts some properties from the BAM header and + the samtools flagstat command. The results are formatted and attached as a property + to the openBIS DataSet. + Prerequisites are the DataSetType: ALIGNMENT and + the following properties assigned to the DataSetType mentioned above: + ALIGNMENT_SOFTWARE, ISSUED_COMMAND, SAMTOOLS_FLAGSTAT, + TOTAL_READS, MAPPED_READS + Obviously you need a working samtools binary + Note: + print statements go to: ~openbis/sprint/datastore_server/log/startup_log.txt + ''' + import os + from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchCriteria + FOLDER='/net/bs-dsu-data/array0/dsu/dss/incoming-jython-alignment/' + SAMTOOLS='/usr/local/dsu/samtools/samtools' + def process(transaction): + incoming = transaction.getIncoming() + # Create a data set and set type + dataSet = transaction.createNewDataSet("ALIGNMENT") + dataSet.setMeasuredData(False) + incomingPath = incoming.getAbsolutePath() + # Get the incoming name + name = incoming.getName() + # expected incoming Name, e.g.:ETHZ_BSSE_110429_63558AAXX_1_sorted.bam + split = name.split("_") + sample=split[2]+ '_'+ split[3] + ':' + split[4] + # Extract values from a samtools view and set the results as DataSet properties + # Command: samtools view -H ETHZ_BSSE_110429_63558AAXX_1_sorted.bam + arguments = SAMTOOLS + ' view -H ' + FOLDER + name + #print('Arguments: '+ arguments) + cmdResult = os.popen(arguments).read() + properties = cmdResult.split("\n")[-2].split('\t') + aligner = (properties[1].split(':')[1].upper() + '_' + properties[2].split(':')[1]) + command = properties[3] + arguments = SAMTOOLS + ' flagstat ' + FOLDER + name + cmdResult = os.popen(arguments).read() + totalReads = cmdResult.split('\n')[0].split(' ')[0] + mappedReads = cmdResult.split('\n')[2].split(' ')[0] + dataSet.setPropertyValue("ALIGNMENT_SOFTWARE", aligner) + dataSet.setPropertyValue("ISSUED_COMMAND", command) + dataSet.setPropertyValue("SAMTOOLS_FLAGSTAT", cmdResult) + dataSet.setPropertyValue("TOTAL_READS", totalReads) + dataSet.setPropertyValue("MAPPED_READS", mappedReads) + # Add the incoming file into the data set + transaction.moveFile(incomingPath, dataSet) + # Get the search service + search_service = transaction.getSearchService() + # Search for the sample + sc = SearchCriteria() + 
sc.addMatchClause(SearchCriteria.MatchClause.createAttributeMatch(SearchCriteria.MatchClauseAttribute.CODE, sample));
+        foundSamples = search_service.searchForSamples(sc)
+        if foundSamples.size() > 0:
+            dataSet.setSample(foundSamples[0])
+
+Error Handling
+--------------
+
+### Automatic Retry (auto recovery)
+
+OpenBIS has a complex mechanism to ensure that data registration via
+dropboxes is atomic. When an error occurs during data registration, the
+dropbox will retry several times before it gives up on the process. The
+retries can happen for the initial processing of the data, as well as
+for the registration in the application server. Even if these fail there
+is still a chance to finish the registration: when the registration
+reaches a certain stage, it stores a checkpoint on the disk. If at any
+point the process fails, or the DSS goes down, it tries to recover from
+the checkpoint.
+
+There are two types of checkpoint files: state files and marker files.
+They are stored in two different directories. The default location for
+the state files is `datastore_server/recovery-state`. This can be
+changed by the property `dss-recovery-state-dir` in DSS
+`service.properties`. The default location for the marker files was
+`<store location>/<share id>/recovery-marker`. This may lead to problems
+if this location is remote. Since version 20.10.6 the default location
+is `datastore_server/recovery-marker-dir`. This can be changed by the
+property `dss-recovery-marker-dir` in DSS `service.properties`.
+
+The `process` function will be retried if a
+`should_retry_processing` function is defined in the dropbox script and
+it returns true. There are two configuration settings that affect this
+behavior. The setting `process-max-retry-count` limits the number of
+times the process function can be retried. The number of times to retry
+before giving up and the waiting periods are defined using the
+properties shown in the table below.
+
+IMPORTANT NOTE: Please note that the registration is considered failed
+only after the whole retrying / recovery process has failed. This means
+that it can take a long time before the .faulty\_paths file is created,
+even when there is a simple dropbox error.
+
+Therefore, during development of a dropbox we recommend
+using **[development mode](#Dropboxes-Developmentmode)**, which
+basically sets all retry values to 0, thus disabling the auto-recovery
+feature.
+
+|Key|Default Value|Meaning|
+|--- |--- |--- |
+|process-max-retry-count|6|The maximum number of times the process function can be retried.|
+|process-retry-pause-in-sec|300|The amount of time to wait between retries of the process function.|
+|metadata-registration-max-retry-count|6|The number of times registering metadata with the server can be retried.|
+|metadata-registration-retry-pause-in-sec|300|The amount of time to wait between retries of registering metadata with the server.|
+|recovery-max-retry-count|50|The number of times the recovery from a checkpoint can be retried.|
+|recovery-min-retry-period|60|The amount of time to wait between recovery-from-checkpoint retries.|
+
+
+
+### Manual Recovery
+
+The registration of data sets with Jython dropboxes has been designed to
+be quite robust. Nonetheless, there are situations in which problems may
+arise. This can especially be a problem during the development of
+dropboxes. Here are the locations and semantics of several important
+files and folders that can be useful for debugging a dropbox.
+
+|File or Folder|Meaning|
+|--- |--- |
+|datastore_server/log-registrations|Keeps logs of registrations. See the registration log documentation for more information.|
+|[store]/[share]/pre-staging|Contains hard-link copies of the original data. Dropbox processes operate on these hard-link copies.|
+|[store]/[share]/staging|The location used to prepare data sets for registration.|
+|[store]/[share]/pre-commit|Where data from data sets are kept while the metadata is registered with the AS. Once metadata registration succeeds, files are moved from this folder into the final store directory.|
+|[store]/[share]/recovery-marker (before version 20.10.6)
+datastore_server/recovery-marker-dir (since version 20.10.6)|Directories, one per dropbox, where marker files are kept that indicate that a recovery should happen on an incoming file if it is reprocessed. Deleting a marker file will force the incoming file to be processed as a new file, not a recovery.|
+
+Classpath / Configuration
+-------------------------
+
+If you want other jython modules to be available to the code that
+implements the drop box, you will need to modify the
+datastore\_server.conf file and add something like
+
+    -Dpython.path=data/dropboxes/scripts:lib/jython-lib
+
+to the JAVA\_OPTS environment variable. The line should now look
+something like this:
+
+    JAVA_OPTS=${JAVA_OPTS:=-server -d64 -Dpython.path=data/dropboxes/scripts:lib/jython-lib}
+
+If the Jython dropbox needs third-party JAR files, they have to be added
+to the core plugin in a sub-folder `lib/`.
+
+Validation scripts
+------------------
+
+See [Jython
+DataSetValidator](/display/openBISDoc2010/Jython+DataSetValidator).
+
+Global Thread Parameters
+------------------------
+
+If you want to write a drop box which uses some parameters defined in
+the service.properties, you can access those properties via
+`getGlobalState`. Here we show an example of how to use it:
+
+**Global thread properties**
+
+    def getThreadProperties(transaction):
+        threadPropertyDict = {}
+        threadProperties = transaction.getGlobalState().getThreadParameters().getThreadProperties()
+        for key in threadProperties:
+            try:
+                threadPropertyDict[key] = threadProperties.getProperty(key)
+            except:
+                pass
+        return threadPropertyDict
+
+    # You can later access the thread properties like this:
+    threadPropertyDict = getThreadProperties(transaction)
+    incomingRootDir = threadPropertyDict[u'incoming-root-dir']
+
+Sending Emails from a Drop box
+------------------------------
+
+    def post_storage(context):
+        mailClient = context.getGlobalState().getMailClient()
+        results = context.getPersistentMap().get(PERSISTANT_KEY_MAP)
+        sendEmail(mailClient, results[0])
+
+    def process(transaction):
+        transaction.getRegistrationContext().getPersistentMap().put(PERSISTANT_KEY_MAP, [fcId])
+
+Java Dropboxes
+--------------
+
+The above examples show how to implement dropboxes in Python. Python,
+however, is not the only language option: it is also possible to write
+dropboxes in Java. Whereas Python has the advantage of short turnaround
+and less verbose syntax, Java is a good choice if the dropbox employs
+complex logic and/or does not need to be modified frequently. A natural
+progression is to use Python at the beginning, when creating a new
+dropbox, to take advantage of the short turnaround cycle, and then move
+to Java once the dropbox implementation becomes more stable. Since the
+API is the same, this language transition process is quite painless.
+
+Java Dropboxes
+--------------
+
+The above examples show how to implement dropboxes in Python. Python,
+however, is not the only language option: it is also possible to write
+dropboxes in Java. Whereas Python has the advantage of short turnaround
+and less verbose syntax, Java is a good choice if the dropbox employs
+complex logic and/or does not need to be modified frequently. A natural
+progression is to use Python at the beginning, when creating a new
+dropbox, to take advantage of the short turnaround cycle, and then move
+to Java once the dropbox implementation becomes more stable. Since the
+API is the same, this language transition is quite painless.
+
+### Configuration
+
+As with other dropboxes, a Java dropbox should be deployed as a
+core-plugin.
+
+**plugin.properties**
+
+    #
+    # REQUIRED PARAMETERS
+    #
+    # The directory to watch for new data sets
+    incoming-dir = ${root-dir}/incoming-java-dropbox
+
+    # The handler class. Must be either ch.systemsx.cisd.etlserver.registrator.api.v2.JavaTopLevelDataSetHandlerV2 or a subclass thereof
+    top-level-data-set-handler = ch.systemsx.cisd.etlserver.registrator.api.v2.JavaTopLevelDataSetHandlerV2
+
+    # The class that implements the dropbox (must implement ch.systemsx.cisd.etlserver.registrator.api.v2.IJavaDataSetRegistrationDropboxV2)
+    program-class = ch.systemsx.cisd.etlserver.registrator.api.v2.ExampleJavaDataSetRegistrationDropboxV2
+
+    # The appropriate storage processor
+    storage-processor = ch.systemsx.cisd.etlserver.DefaultStorageProcessor
+
+    #
+    # OPTIONAL PARAMETERS
+    #
+
+    # False if the incoming directory is assumed to exist.
+    # Default - true: the incoming directory will be created on start up if it doesn't exist.
+    incoming-dir-create = true
+
+The program-class parameter specifies the class that implements the
+logic of the dropbox. This class must implement the
+IJavaDataSetRegistrationDropboxV2 interface. This class, and any other
+code it uses, should be packaged in a jar file that is provided with the
+core-plugin. The name of the jar file can be freely chosen.
+
+### Implementation
+
+To implement a dropbox in Java, implement
+the IJavaDataSetRegistrationDropboxV2 interface, which codifies the
+interaction between the datastore server and the dropbox. We recommend
+subclassing AbstractJavaDataSetRegistrationDropboxV2 to bootstrap the
+implementation of this interface.
+
+**IJavaDataSetRegistrationDropboxV2**
+
+    /**
+     * The interface that V2 dropboxes must implement. Defines the process method, which is called to
+     * handle new data in the dropbox's incoming folder, and various event methods called as the
+     * registration process progresses.
+     *
+     * @author Pawel Glyzewski
+     */
+    public interface IJavaDataSetRegistrationDropboxV2
+    {
+        /**
+         * Invoked when new data is found in the incoming folder. Implements the logic of registering
+         * and modifying entities.
+         *
+         * @param transaction The transaction that offers methods for registering and modifying
+         *            entities and performing operations on the file system.
+         */
+        public void process(IDataSetRegistrationTransactionV2 transaction);
+
+        /**
+         * Invoked just before the metadata is registered with the openBIS AS. Gives dropbox
+         * implementations an opportunity to perform additional operations. If an exception is thrown
+         * in this method, the transaction is rolled back.
+         *
+         * @param context Context of the registration. Offers access to the global state and
+         *            persistent map.
+         */
+        public void preMetadataRegistration(DataSetRegistrationContext context);
+
+        /**
+         * Invoked if the transaction is rolled back before the metadata is registered with the
+         * openBIS AS.
+         *
+         * @param context Context of the registration. Offers access to the global state and
+         *            persistent map.
+         * @param throwable The throwable that triggered rollback.
+         */
+        public void rollbackPreRegistration(DataSetRegistrationContext context, Throwable throwable);
+
+        /**
+         * Invoked just after the metadata is registered with the openBIS AS. Gives dropbox
+         * implementations an opportunity to perform additional operations. If an exception is thrown
+         * in this method, it is logged but otherwise ignored.
+         *
+         * @param context Context of the registration. Offers access to the global state and
+         *            persistent map.
+         */
+        public void postMetadataRegistration(DataSetRegistrationContext context);
+
+        /**
+         * Invoked after the data has been stored in its final location on the file system and the
+         * storage has been confirmed with the AS.
+         *
+         * @param context Context of the registration. Offers access to the global state and
+         *            persistent map.
+         */
+        public void postStorage(DataSetRegistrationContext context);
+
+        /**
+         * Is a function defined that can be used to check if a failed registration should be
+         * retried? Primarily for use by implementations of this interface that dispatch to dynamic
+         * languages.
+         *
+         * @return true if shouldRetryProcessing is defined, false otherwise.
+         */
+        public boolean isRetryFunctionDefined();
+
+        /**
+         * Given the problem with registration, should it be retried?
+         *
+         * @param context Context of the registration. Offers access to the global state and
+         *            persistent map.
+         * @param problem The exception that caused the registration to fail.
+         * @return true if the registration should be retried.
+         */
+        public boolean shouldRetryProcessing(DataSetRegistrationContext context, Exception problem)
+                throws NotImplementedException;
+    }
+
+Sending Emails in a drop box (simple)
+-------------------------------------
+
+    from ch.systemsx.cisd.common.mail import EMailAddress
+
+    def process(transaction):
+        replyTo = EMailAddress("manuel.kohler@id.ethz.ch")
+        fromAddress = replyTo
+        recipient1 = EMailAddress("recipient1@ethz.ch")
+        recipient2 = EMailAddress("recipient2@ethz.ch")
+
+        transaction.getGlobalState().getMailClient().sendEmailMessage("This is the subject", \
+            "This is the body", replyTo, fromAddress, recipient1, recipient2)
+
+### Java Dropbox Example
+
+This is a simple example of a pure-Java dropbox that creates a sample
+and registers the incoming file as a data set of this sample.
+
+**ExampleJavaDataSetRegistrationDropboxV2.java**
+
+    package ch.systemsx.cisd.etlserver.registrator.api.v2;
+
+    import ch.systemsx.cisd.etlserver.registrator.api.v1.IDataSet;
+    import ch.systemsx.cisd.etlserver.registrator.api.v1.ISample;
+    import ch.systemsx.cisd.openbis.dss.generic.shared.api.internal.v1.IExperimentImmutable;
+
+    /**
+     * An example dropbox implemented in Java.
+     *
+     * @author Chandrasekhar Ramakrishnan
+     */
+    public class ExampleJavaDataSetRegistrationDropboxV2 extends
+            AbstractJavaDataSetRegistrationDropboxV2
+    {
+        @Override
+        public void process(IDataSetRegistrationTransactionV2 transaction)
+        {
+            String sampleId = "/CISD/JAVA-TEST";
+            ISample sample = transaction.createNewSample(sampleId, "DYNAMIC_PLATE");
+            IExperimentImmutable exp = transaction.getExperiment("/CISD/NEMO/EXP-TEST-1");
+            sample.setExperiment(exp);
+            IDataSet dataSet = transaction.createNewDataSet();
+            dataSet.setSample(sample);
+            transaction.moveFile(transaction.getIncoming().getAbsolutePath(), dataSet);
+        }
+    }
+
+### Java Code Location
+
+The Java file should go into a `lib` folder and should be wrapped as a
+`jar`. The name does not matter.
+
+While building the jar, the project should have the following
+dependencies: `openBIS-API-dropbox-<version>.jar`,
+`lib-commonbase-<version>.jar` and `cisd-hotdeploy-13.01.0.jar`. The
+first two are available in the distribution in the archives
+`openBIS-API-commonbase-<version>.zip` and
+`openBIS-API-dropbox-<version>.zip`; the third one is available in [the
+Ivy
+repo](https://sissource.ethz.ch/openbis/openbis-public/openbis-ivy/-/blob/main/cisd/cisd-hotdeploy/13.01.0/cisd-hotdeploy-13.01.0.jar).
+
+Example path where the created `jar` should reside:
+
+`servers/core-plugins/illumina-ngs/2/dss/drop-boxes/register-cluster-alignment-java/lib`
+
+Create a `jar` from your Java dropbox file:
+
+`jar cvf foo.jar foo.java`
+
+Restart the DSS.
+
+Calling an Aggregation Service from a drop box
+----------------------------------------------
+
+**drop box code**
+
+    '''
+    @author:
+    Manuel Kohler
+    '''
+    from ch.systemsx.cisd.openbis.dss.generic.server.EncapsulatedOpenBISService import createQueryApiServer
+
+    def process(transaction):
+        # use the etl server session token
+        session_token = transaction.getOpenBisServiceSessionToken()
+
+        # To find out, run SQL on the openBIS DB: select code from data_stores;
+        dss = "STANDARD"
+
+        # folder name under the reporting_plugins
+        service_key = "reporting_experimental"
+
+        # some parameters which are handed over
+        d = {"param1": "hello", "param2": "from a drop box"}
+
+        # connection to the openBIS server, returns an IQueryApiServer
+        s = createQueryApiServer("http://127.0.0.1:8888/openbis/openbis/", "600")
+
+        # Actual call
+        # Parameters: String sessionToken, String dataStoreCode, String serviceKey, Map<String, Object> parameters
+        s.createReportFromAggregationService(session_token, dss, service_key, d)
+
+Known limitations
+-----------------
+
+### Blocking
+
+Registering/updating a large number of entities can cause other
+concurrent operations that try to modify the same or related entities to
+be blocked. This limitation applies to both dropboxes and batch
+operations triggered from the web UI. Lists of operations that are
+blocked are presented below. Each list contains operations that cannot
+be performed while a specific kind of entity is being registered/updated.
+
+Experiment:
+
+- creating/updating an experiment in the same project
+- updating the same space
+- updating the same project
+- updating the same experiment
+
+Sample:
+
+- creating/updating an experiment in the same project
+- creating/updating a sample in the same experiment
+- updating the same space
+- updating the same project
+- updating the same experiment
+- updating the same sample
+
+Data set:
+
+- creating/updating an experiment in the same project
+- creating/updating a sample in the same experiment
+- creating a dataset in the same experiment
+- updating the same space
+- updating the same project
+- updating the same experiment
+- updating the same sample
+
+Material:
+
+- updating the same material
diff --git a/docs/software-developer-documentation/server-side-extensions/img/122.png b/docs/software-developer-documentation/server-side-extensions/img/122.png
new file mode 100644
index 0000000000000000000000000000000000000000..a9181d4a675aa07174687ab0d146f48c6d699c78
Binary files /dev/null and b/docs/software-developer-documentation/server-side-extensions/img/122.png differ
diff --git a/docs/software-developer-documentation/server-side-extensions/img/771.png b/docs/software-developer-documentation/server-side-extensions/img/771.png
new file mode 100644
index 0000000000000000000000000000000000000000..5985617c00c52a67ec9ec9348810b57717d00026
Binary files /dev/null and b/docs/software-developer-documentation/server-side-extensions/img/771.png differ
diff --git a/docs/software-developer-documentation/server-side-extensions/index.rst b/docs/software-developer-documentation/server-side-extensions/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..d8e89e3e4946b6472e4ab8de7a18b428173668ab
--- /dev/null
+++ b/docs/software-developer-documentation/server-side-extensions/index.rst
@@ -0,0 +1,10 @@
+Server-Side Extensions
+======================
+
+.. toctree::
+   :maxdepth: 4
+
+   core-plugins
+   as-services
+   as-api-listener
+   dss-dropboxes
\ No newline at end of file
diff --git a/docs/system-admin-documentation/advanced-features/archive-datasets.md b/docs/system-admin-documentation/advanced-features/archive-datasets.md
new file mode 100644
index 0000000000000000000000000000000000000000..190d4e6aa6ca9f82c41f3759db840bc92cc5a9ab
--- /dev/null
+++ b/docs/system-admin-documentation/advanced-features/archive-datasets.md
@@ -0,0 +1,106 @@
+Archiving Datasets
+==================
+
+## Manual archiving
+
+### openBIS core UI
+
+Archiving can be triggered by doing the following steps:
+
+- go to an experiment/collection or an object.
+- switch to the tab "Data Sets". In the lower right corner there will
+  be the button 'Archiving'.
+- click on the button and choose either 'Copy to Archive' or 'Move to
+  Archive'.
+- if you did not select any data set, all data sets will be archived.
+  If you have selected some data sets, you can choose whether you want
+  to archive only them or all the data sets accessible in the table.
+
+Because archiving does not happen immediately, the status (called
+'Archiving Status' in data set tables) of the data sets will be changed
+to BACKUP\_PENDING or ARCHIVE\_PENDING.
+
+To make archived data sets available again repeat the steps, but choose
+'Unarchive'.
+
+If you want to disallow archiving, choose 'Lock'. Remember that you can
+do this only for available data sets. The 'Archiving Status' will change
+to 'AVAILABLE (LOCKED)'. To make archiving possible again, choose
+'Unlock'.
+
+### ELN-LIMS
+
+In ELN-LIMS, archiving can only be requested, not triggered directly.
+The maintenance task
+[ArchivingByRequestTask](/display/openBISDoc2010/Maintenance+Tasks#MaintenanceTasks-ArchivingByRequestTask)
+is required; it triggers the actual archiving.
+
+## Automatic archiving
+
+Archiving can be automated by the Auto Archiver. This is a [maintenance
+task](/display/openBISDoc2010/Maintenance+Tasks) which triggers
+archiving of data sets fulfilling some conditions (e.g. not accessed
+for a while). Note that the Auto Archiver doesn't perform the archiving
+itself; it just automates the selection of data sets to be archived.
+For all configuration parameters see
+[AutoArchiverTask](/display/openBISDoc2010/Maintenance+Tasks#MaintenanceTasks-AutoArchiverTask).
+
+### Archiving Policies
+
+An archiving policy selects the data sets to be archived from the
+candidates (which are either data sets not accessed for some days or
+data sets marked by a tag). If no policy is specified, all candidates
+will be archived.
+
+The policy can be specified by the `policy.class` property. It has to
+be the fully-qualified name of a Java class implementing
+`ch.systemsx.cisd.etlserver.IAutoArchiverPolicy`. All properties
+starting with `policy.` specify the policy further.
+
+#### ch.systemsx.cisd.etlserver.plugins.GroupingPolicy
+
+**Description**: Policy which tries to find a group of data sets with a
+total size within a specified interval. This is important in case of
+[Multi Data Set
+Archiving](/display/openBISDoc2010/Multi+data+set+archiving). Grouping
+can be defined by space, project, experiment, sample, data set type or a
+combination of those. Groups can be merged if they are too small.
+Several grouping keys can be specified.
+
+Searching for an appropriate group of data sets for auto archiving is
+logged. If no group could be found, an admin is notified via email
+(email address specified in `log.xml`). The email contains the search
+log.
+
+**Configuration**:
+
+|Property Key |Description |
+|--------------------|------------|
+|minimal-archive-size|The total size (in bytes) of the selected data sets has to be equal to or larger than this value. Default: 0 |
+|maximal-archive-size|The total size (in bytes) of the selected data sets has to be equal to or less than this value. Default: Unlimited |
+|grouping-keys |Comma-separated list of grouping keys. A grouping key has the following form: <basic key 1>#<basic key 2>#...#<basic key n>[:merge] A basic key is from the following vocabulary: All, Space, Project, Experiment, Sample, DataSetType, DataSet. All basic keys of a grouping key define a grouping of all data set candidates. In each group all data sets have all the attributes defined by the basic keys in common. Note that the basic key All means no grouping. For example: Experiment#DataSetType means that the candidates are grouped according to experiment and data set type. The optional :merge is used when no group fulfills the total size condition and there are at least two groups with total size below minimal-archive-size. In this case groups which are too small will be merged until the total size condition is fulfilled. If a grouping key doesn't lead to a group of data sets fulfilling the total size condition, the next grouping key is used until a matching group is found. If more than one matching group is found for a grouping key, the oldest one will be chosen. If merging applies to more than two groups, the oldest groups will be merged first. The age of a group is defined by the most recent access time stamp. Examples: Grouping policy by experiment: DataSetType#Experiment, DataSetType#Project, DataSetType#Experiment#Sample Grouping policy by space: DataSetType#Space, DataSetType#Project:merge, DataSetType#Experiment:merge, DataSetType#Experiment#Sample:merge, DataSet:merge|
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.plugins.AutoArchiverTask
+    interval = 10 days
+    archive-candidate-discoverer.class = ch.systemsx.cisd.etlserver.plugins.TagArchiveCandidateDiscoverer
+    archive-candidate-discoverer.tags = /admin-user/archive
+    policy.class = ch.systemsx.cisd.etlserver.plugins.GroupingPolicy
+    policy.minimal-archive-size = 30000000000
+    policy.maximal-archive-size = 150000000000
+    policy.grouping-keys = Space#DataSetType, Experiment#Sample:merge
+
+In this example the candidates are unarchived data sets which have been
+tagged by the user `admin-user` with the tag `archive`. The policy tries
+to find a group of data sets with a total size between 30 GB and 150 GB.
+It first looks for groups where all data sets are of the same type and
+from the same space. If no group is found, it tries to find groups where
+all data sets are from the same experiment and sample (data sets with no
+sample are assigned to `no_sample`). If no matching groups are found and
+at least two groups are below the minimum, the policy tries to merge
+groups into a bigger group until the bigger group matches the size
+condition. If no group can be found, an email will be sent describing in
+detail the steps of searching for a matching group.
\ No newline at end of file
diff --git a/docs/system-admin-documentation/advanced-features/authentication-systems.md b/docs/system-admin-documentation/advanced-features/authentication-systems.md
new file mode 100644
index 0000000000000000000000000000000000000000..e60b24d6e1d65d206cacc5c106fd77621bb060c8
--- /dev/null
+++ b/docs/system-admin-documentation/advanced-features/authentication-systems.md
@@ -0,0 +1,4 @@
+Authentication Systems
+======================
+
+To be written
\ No newline at end of file
diff --git a/docs/system-admin-documentation/advanced-features/index.rst b/docs/system-admin-documentation/advanced-features/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..b2fafeac1298fd3be8c2c653b2cf4b3b745825aa
--- /dev/null
+++ b/docs/system-admin-documentation/advanced-features/index.rst
@@ -0,0 +1,11 @@
+Advanced Features
+=================
+
+.. toctree::
+   :maxdepth: 4
+
+   archive-datasets
+   authentication-systems
+   share-ids
+   maintenance-tasks
+   synchronization-of-openbis-databases
\ No newline at end of file
diff --git a/docs/system-admin-documentation/advanced-features/maintenance-tasks.md b/docs/system-admin-documentation/advanced-features/maintenance-tasks.md
new file mode 100644
index 0000000000000000000000000000000000000000..5cb3691c072c33f40fe735bc234e48977ef206ea
--- /dev/null
+++ b/docs/system-admin-documentation/advanced-features/maintenance-tasks.md
@@ -0,0 +1,1270 @@
+Maintenance Tasks
+=================
+
+## Maintenance Task Classification
+
+| Category                                 |
+|------------------------------------------|
+| Feature                                  |
+| Consistency and other Reports            |
+| Consistency Repair and Manual Migrations |
+
+| Relevancy  |
+|------------|
+| Default    |
+| Relevant   |
+| Rare       |
+| Deprecated |
+
+## Introduction
+
+A maintenance task is a process which runs once or in regular time
+intervals. It is defined by a [core
+plugin](/pages/viewpage.action?pageId=80699503) of type
+`maintenance-tasks`.
+Usually a maintenance task can run only on AS or DSS but not in both
+environments.
+
+The following properties are common for all maintenance tasks:
+
+| Property Key | Description |
+|-------------------------------|-------------|
+| class | The fully-qualified Java class name of the maintenance task. The class has to implement IMaintenanceTask. |
+| execute-only-once | A flag which has to be set to true if the task should be executed only once. Default value: false |
+| interval | A time interval (in seconds) which defines the pace of execution of the maintenance task. Can be specified with one of the following time units: ms, msec, s, sec, m, min, h, hours, d, days. Default time unit is sec. Default value: one day. |
+| start | A time at which the task should be executed the first time. Format: HH:mm, where HH is a two-digit hour (in 24h notation) and mm is a two-digit minute. By default the task is executed at server startup. |
+| run-schedule | Scheduling plan for task execution. Properties execute-only-once, interval, and start will be ignored if specified.
+Crontab syntax:
+cron: <second> <minute> <hour> <day> <month> <weekday>
+Examples:
+cron: 0 0 * * * *: the top of every hour of every day.
+cron: */10 * * * * *: every ten seconds.
+cron: 0 0 8-10 * * *: 8, 9 and 10 o'clock of every day.
+cron: 0 0 6,19 * * *: 6:00 AM and 7:00 PM every day.
+cron: 0 0/30 8-10 * * *: 8:00, 8:30, 9:00, 9:30, 10:00 and 10:30 every day.
+cron: 0 0 9-17 * * MON-FRI: on the hour nine-to-five weekdays.
+cron: 0 0 0 25 12 ?: every Christmas Day at midnight.
+Non-crontab syntax:
+Comma-separated list of definitions with the following syntax:
+[[<counter>.]<week day>] [<month day>[.<month>]] <hour>[:<minute>]
+where <counter> counts the specified week day of the month. <week day> is MO, MON, TU, TUE, WE, WED, TH, THU, FR, FRI, SA, SAT, SU, or SUN (ignoring case). <month> is either the month number (followed by an optional '.') or JAN, FEB, MAR, APR, MAY, JUN, JUL, AUG, SEP, OCT, NOV, or DEC (ignoring case).
+Examples:
+6, 18: every day at 6 AM and 6 PM.
+3.FR 22:15: every third Friday of a month at 22:15.
+1. 15:50: every first day of a month at 3:50 PM.
+SAT 1:30: every Saturday at 1:30 AM.
+1.Jan 5:15, 1.4. 5:15, 1.7 5:15, 1. OCT 5:15: every first day of a quarter at 5:15 AM. |
+| run-schedule-file | File where the timestamp for the next execution is stored. It is used if run-schedule is specified. Default: <installation folder>/<plugin name>_<class name> |
+| retry-intervals-after-failure | Optional comma-separated list of time intervals (format as for interval) after which a failed execution will be retried. Note that a maintenance task will always be executed when the next scheduled time point occurs. This feature makes it possible to execute a task much earlier in case of temporary errors (e.g. temporary unavailability of another server). |
+
+## Feature
+
+### ArchivingByRequestTask
+
+**Environment**: AS
+
+**Relevancy:** Relevant
+
+**Description**: Triggers archiving for data sets where the 'requested
+archiving' flag is set. Waits with archiving until enough data sets for
+a group come together. This is necessary for tape-based archiving where
+the files to be stored have to be larger than a minimum size.
+
+**Configuration**:
+
+| Property Key | Description |
+|---------------------------------|-------------|
+| keep-in-store | If true the archived data set will not be removed from the store. That is, only a backup will be created. Default: false |
+| minimum-container-size-in-bytes | Minimum size of an archive container which has one or more data sets. This is important for Multi Data Set Archiving. Default: 10 GB |
+| maximum-container-size-in-bytes | Maximum size of an archive container which has one or more data sets. This is important for Multi Data Set Archiving. Default: 80 GB |
+| configuration-file-path | Path to the configuration file as used by User Group Management. Here only the group keys are needed. They define a set of groups. If there is no configuration file at the specified path this set is empty.
+A data set requested for archiving belongs to a specified group if its space starts with the group key followed by an underscore character '_'. Otherwise it belongs to no group. This maintenance task triggers archiving of an archive container with one or more data sets from the same group if the container fits the specified minimum and maximum size. Note that data sets which do not belong to a group are handled as a group too. If a data set is larger than the maximum container size it will be archived even though the container is too large. The group key (in lower case) is provided to the archiver. The Multi Data Set Archiver will use this for storing the archive container in a sub folder of the same name.
+
+Default: etc/user-management-maintenance-config.json |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.generic.server.task.ArchivingByRequestTask
+    interval = 1 d
+    minimum-container-size-in-bytes = 20000000000
+    maximum-container-size-in-bytes = 200000000000
+    configuration-file-path = ../../../data/groups.json
+
+**Notes:** In practice, every instance using the multi data set
+archiving feature, and also the ELN-LIMS, should have this enabled.
+
+### AutoArchiverTask
+
+**Environment**: DSS
+
+**Relevancy:** Rare
+
+**Description**: Triggers archiving of data sets that have not been
+archived yet.
+
+**Configuration**:
+
+| Property Key | Description |
+|------------------------------------|-------------|
+| remove-datasets-from-store | If true the archived data set will be removed from the store. Default: false |
+| data-set-type | Data set type of the data sets to be archived. If undefined, data sets of all types might be archived. |
+| older-than | Minimum number of days a data set to be archived hasn't been accessed. Default: 30 |
+| archive-candidate-discoverer.class | Discoverer of candidates to be archived:
+
+ch.systemsx.cisd.etlserver.plugins.AgeArchiveCandidateDiscoverer: All data sets with an access time stamp older than specified by property older-than are candidates. This is the default discoverer.
+ch.systemsx.cisd.etlserver.plugins.TagArchiveCandidateDiscoverer: All data sets which are marked by one of the tags specified by the property archive-candidate-discoverer.tags are candidates.
+ |
+| policy.class | A policy specifies which data set candidates should be archived. If undefined, all candidates will be archived. Has to be a fully-qualified name of a Java class implementing ch.systemsx.cisd.etlserver.IAutoArchiverPolicy. |
+| policy.* | Properties specific to the policy specified by policy.class. More about policies can be found here. |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.plugins.AutoArchiverTask
+    interval = 10 days
+    archive-candidate-discoverer.class = ch.systemsx.cisd.etlserver.plugins.TagArchiveCandidateDiscoverer
+    archive-candidate-discoverer.tags = /admin-user/archive
+    policy.class = ch.systemsx.cisd.etlserver.plugins.GroupingPolicy
+    policy.minimal-archive-size = 1500000
+    policy.maximal-archive-size = 3000000
+    policy.grouping-keys = Space#DataSetType, Space#Experiment:merge
+
+### BlastDatabaseCreationMaintenanceTask
+
+**Environment**: DSS
+
+**Relevancy:** Default (ELN-LIMS)
+
+**Description**: Creates BLAST databases from FASTA and FASTQ files of
+data sets and/or properties of experiments, samples, and data sets.
+
+The title of all entries of the FASTA and FASTQ files will be extended
+by the string `[Data set: <data set code>, File: <path>]`. Sequences
+provided by an entity property will have identifiers of the form
+`<entity kind>+<perm id>+<property type>+<time stamp>`. This makes it
+possible to determine where the matching sequences are stored in
+openBIS.
+A sequence can be a nucleic acid sequence or an amino acid sequence.
+
+For each data set, BLAST nucl and prot databases will be created (if
+not empty) by the tool `makeblastdb`. For all entities of a specified
+kind and type, two BLAST databases (one for nucleic acid sequences and
+one for amino acid sequences) will be created from the plain sequences
+stored in the specified property (white spaces will be removed). In
+addition an index is created by the tool `makembindex` if the sequence
+file of the database (file type `.nsq`) is larger than 1MB. The names
+of the databases are `<data set code>-nucl/prot`
+and `<entity kind>+<entity type code>+<property type code>+<time stamp>-nucl/prot`.
+These databases are referenced in the virtual databases `all-nucl`
+(file: `all-nucl.nal`) and `all-prot` (file: `all-prot.pal`).
+
+If a data set is deleted, the corresponding BLAST nucl and prot
+databases will be automatically removed the next time this maintenance
+task runs. If an entity of a specified type has been modified, the
+BLAST databases will be recalculated the next time this maintenance
+task runs.
+
+This task works only if the BLAST+ tool suite has been installed. BLAST+
+can be downloaded from
+<ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/LATEST/>
+
+**Notes:** It comes pre-configured with the ELN-LIMS, but if additional
+properties need to be scanned they should be added to the
+plugin.properties.
+
+**Configuration**:
+
+| Property Key | Description |
+|----------------------------|-------------|
+| dataset-types | Comma-separated list of regular expressions of data set types. All FASTA and FASTQ files from those data sets are handled. All data sets of types not matching at least one of the regular expressions are not handled. |
+| entity-sequence-properties | Comma-separated list of descriptions of entity properties with sequences. A description is of the form
+<entity kind>+<entity type code>+<property type code>
+where <entity kind> is either EXPERIMENT, SAMPLE or DATA_SET (Materials are not supported). |
+| file-types | Space-separated list of file types. Data set files of those file types have to be FASTA or FASTQ files. Default: .fasta .fa .fsa .fastq |
+| blast-tools-directory | Path in the file system where all BLAST tools are located. If it is not specified or empty, the tools directory has to be in the PATH environment variable. |
+| blast-databases-folder | Path to the folder where all BLAST databases are stored. Default: <data store root>/blast-databases |
+| blast-temp-folder | Path to the folder where temporary FASTA files are stored. Default: <blast-databases-folder>/tmp |
+| last-seen-data-set-file | Path to the file which stores the id of the last seen data set. Default: <data store root>/last-seen-data-set-for-BLAST-database-creation |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.plugins.BlastDatabaseCreationMaintenanceTask
+    interval = 1 h
+    dataset-types = BLAST-.+
+    entity-sequence-properties = SAMPLE+OLIGO+SEQUENCE, EXPERIMENT+YEAST+PLASMID_SEQUENCE
+    blast-tools-directory = /usr/local/ncbi/blast/bin
+
+### DeleteDataSetsAlreadyDeletedInApplicationServerMaintenanceTask
+
+**Environment**: DSS
+
+**Relevancy:** Default
+
+**Description**: Deletes data sets which have been deleted on AS.
+
+If this task is configured neither in service.properties nor as a core
+plugin, it will be established automatically with the default
+configuration, running every 5 minutes.
+
+**Configuration**:
+
+| Property Key | Description |
+|------------------------------------|-------------|
+| last-seen-data-set-file | Path to a file which will store the code of the last data set handled. Default: deleteDatasetsAlreadyDeletedFromApplicationServerTaskLastSeen |
+| timing-parameters.max-retries | Maximum number of retries in case the filesystem of the share containing the data set is currently not available. Default: 11 |
+| timing-parameters.failure-interval | Waiting time (in seconds) between retries. Default: 10 |
+| chunk-size | Number of data sets deleted together. The task is split into deletion tasks with at most this number of data sets. Default: no chunk size; that is, all data sets to be deleted are deleted in one go. |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.plugins.DeleteDataSetsAlreadyDeletedInApplicationServerMaintenanceTask
+    interval = 60
+    last-seen-data-set-file = lastSeenDataSetForDeletion.txt
+
+### DeleteFromArchiveMaintenanceTask
+
+**Environment**: DSS
+
+**Relevancy:** Rare
+
+**Description**: Deletes archived data sets which have been deleted on
+AS. This task needs the archive plugin to be configured in
+`service.properties`. This task only works with non-multi-data-set
+archivers.
+
+**Configuration**:
+
+| Property Key | Description |
+|-----------------|-------------|
+| status-filename | Path to a file which will store the technical ID of the last data set deletion event on AS. |
+| chunk-size | Maximum number of entries deleted in one maintenance task run. Default: Unlimited |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.plugins.DeleteFromArchiveMaintenanceTask
+    interval = 3600
+    status-filename = ../archive-cleanup-status.txt
+
+### DeleteFromExternalDBMaintenanceTask
+
+**Environment**: DSS
+
+**Relevancy:** Rare
+
+**Description**: Deletes database entries which are related to data sets
+deleted in AS. The database can be any relational database accessible
+by DSS.
+
+**Configuration**:
+
+| Property Key | Description |
+|---------------------------|-------------|
+| data-source | Key of a data source configured in service.properties or in a core plugin of type 'data-sources'. A data source defines the credentials to access the database. |
+| synchronization-table | Name of the table which stores the technical ID of the last data set deletion event on AS. This ID is used to ask AS for all new data set deletion events. Default value: EVENTS |
+| last-seen-event-id-column | Name of the column in the database table defined by property synchronization-table which stores the ID of the last data set deletion event. Default value: LAST_SEEN_DELETION_EVENT_ID |
+| data-set-table-name | Comma-separated list of table names which contain data related to the data sets to be deleted. In case of cascading deletion only the tables at the beginning of the cascade should be mentioned. Default value: image_data_sets, analysis_data_sets. |
+| data-set-perm-id | Name of the column in all tables defined by data-set-table-name which stores the data set code. Default value: PERM_ID |
+| chunk-size | Maximum number of entries deleted in one maintenance task run. Default: Unlimited |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.plugins.DeleteFromExternalDBMaintenanceTask
+    interval = 300
+    data-source = proteomics-db
+    data-set-table-name = data_sets
+
+### EventsSearchMaintenanceTask
+
+**Environment**: AS
+
+**Relevancy:** Default
+
+**Description**: Populates the EVENTS\_SEARCH database table based on
+entries from the EVENTS database table. The EVENTS\_SEARCH table
+contains the same information as the EVENTS table but in a more
+search-friendly format (e.g. a single entry in the EVENTS table may
+represent a deletion of multiple objects deleted at the same time; in
+the EVENTS\_SEARCH table such an entry is split into separate entries,
+one for each deleted object). This is set up automatically.
+
+**Configuration:**
+
+There are no specific configuration parameters for this task.
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.generic.server.task.events_search.EventsSearchMaintenanceTask
+    interval = 1 day
+
+### ExperimentBasedArchivingTask
+
+**Environment**: DSS
+
+**Relevancy:** Rare, used when no MultiDataSetArchiver is used and
+AutoArchiverTask is too complex.
+
+**Description**: Archives all data sets of experiments which fulfill
+some criteria. This task needs the archive plugin to be configured in
+`service.properties`.
+
+**Configuration**:
+
+| Property Key | Description |
+|-----------------------------------------------|-------------|
+| excluded-data-set-types | Comma-separated list of data set types. Data sets of such types are not archived. Default: No data set type is excluded. |
+| estimated-data-set-size-in-KB.<data set type> | Specifies for the data set type <data set type> the average size in KB. If <data set type> is DEFAULT it will be used for all data set types with unspecified estimated size. |
+| free-space-provider.class | Fully qualified class name of the free space provider (implementing ch.systemsx.cisd.common.filesystem.IFreeSpaceProvider). Depending on the free space provider, additional properties, all starting with prefix free-space-provider., might be needed. Default: ch.systemsx.cisd.common.filesystem.SimpleFreeSpaceProvider |
+| monitored-dir | Path to the directory to be monitored by the free space provider. |
+| minimum-free-space-in-MB | Minimum free space in MB. If the free space is below this limit the task archives data sets. Default: 1 GB |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.plugins.ExperimentBasedArchivingTask
+    interval = 86400
+    minimum-free-space-in-MB = 2048
+    monitored-dir = /my-data/
+    estimated-data-set-size-in-KB.RAW_DATA = 12000
+    estimated-data-set-size-in-KB.DEFAULT = 35000
+
+If there is not enough free space, the task archives data sets
+experiment by experiment until the free space is above the specified
+limit. The oldest experiments are archived first. The age of an
+experiment is determined by the youngest modification/registration time
+stamp of all its data sets which are not excluded by data set type or
+archiving status.
+
+The free space is only calculated once, when the task starts, to figure
+out whether archiving is necessary or not. This value is then used
+together with the estimated data set sizes to get an estimated free
+space which is used for the stopping criterion. Why not calculate the
+free space again with the free space provider after the data sets of an
+experiment have been archived? The reason is that providing the free
+space might be an expensive operation. This is the case when archiving
+means removing data from a database which has been fed with data from
+data sets of a certain type. In this case archiving (i.e. deleting)
+those data in the database does not automatically free disk space,
+because freeing disk space is often an expensive operation for
+databases.
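+
+A sketch of the selection logic just described, with hypothetical
+experiment and data set objects (illustrative pseudologic, not the
+actual openBIS implementation):
+
+    def select_experiments_to_archive(experiments, free_space_mb,
+                                      minimum_free_space_mb, estimated_sizes_kb):
+        selected = []
+        estimated_free_mb = free_space_mb  # measured once, at task start
+        # oldest first; the age of an experiment is the youngest time
+        # stamp of its non-excluded data sets
+        for experiment in sorted(experiments, key=lambda e: e.age, reverse=True):
+            if estimated_free_mb >= minimum_free_space_mb:
+                break  # stopping criterion uses the estimate, not a re-measurement
+            for data_set in experiment.data_sets:
+                size_kb = estimated_sizes_kb.get(data_set.type,
+                                                 estimated_sizes_kb["DEFAULT"])
+                estimated_free_mb += size_kb / 1024.0
+            selected.append(experiment)
+        return selected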
+
+The DSS admin will be informed by an e-mail about which experiments
+have been archived.
+
+### HierarchicalStorageUpdater
+
+**Environment**: DSS
+
+**Description**: Creates/updates a mirror of the data store. Data sets
+are organized hierarchically according to their experiments and
+samples.
+
+**Relevancy:** Deprecated
+
+**Configuration**:
+
+| Property Key | Description |
+|---------------------------------------------|-------------|
+| storeroot-dir-link-path | Path to the root directory of the store as to be used for creating symbolic links. This should be used if the path to the store as seen by clients is different from the one seen by DSS. |
+| storeroot-dir | Path to the root directory of the store. Used if storeroot-dir-link-path is not specified. |
+| hierarchy-root-dir | Path to the root directory of the mirrored store. |
+| link-naming-strategy.class | Fully qualified class name of the strategy to generate the hierarchy (implementing ch.systemsx.cisd.etlserver.plugins.IHierarchicalStorageLinkNamingStrategy). Depending on the actual strategy, additional properties, all starting with prefix link-naming-strategy., might be needed. Default: ch.systemsx.cisd.etlserver.plugins.TemplateBasedLinkNamingStrategy |
+| link-source-subpath.<data set type> | Link source subpath for the specified data set type. Only files and folders in this relative path inside a data set will be mirrored. Default: The complete data set folder will be mirrored. |
+| link-from-first-child.<data set type> | Flag which specifies whether only the first child or the complete folder (either the data set folder or the one specified by link-source-subpath.<data set type>) is linked. Default: False |
+| with-meta-data | Flag which specifies whether directories with meta-data.tsv and a link should be created, or only links. The default behavior is to create links only. Default: false |
+| link-naming-strategy.template | The exact form of link paths produced by TemplateBasedLinkNamingStrategy is defined by this template.
+The variables dataSet, dataSetType, sample, experiment, project and space will be recognized and replaced in the actual link path.
+Default: ${space}/${project}/${experiment}/${dataSetType}+${sample}+${dataSet} |
+| link-naming-strategy.component-template | If defined, specifies the form of link paths for component datasets. If undefined, component dataset links are formatted with link-naming-strategy.template.
+Works as link-naming-strategy.template, but has these additional variables: containerDataSetType, containerDataSet, containerSample.
+Default: Undefined. |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.plugins.HierarchicalStorageUpdater
+    storeroot-dir = ${root-dir}
+    hierarchy-root-dir = ../../mirror
+    link-naming-strategy.template = ${space}/${project}/${experiment}/${sample}/${dataSetType}-${dataSet}
+    link-naming-strategy.component-template = ${space}/${project}/${experiment}/${containerSample}/${containerDataSetType}-${containerDataSet}/${dataSetType}-${dataSet}
+
+### MultiDataSetDeletionMaintenanceTask
+
+**Environment**: DSS
+
+**Relevancy:** Relevant
+
+**Description**: Deletes data sets which have already been deleted on AS
+from multi-data-set archives as well. This maintenance task works only
+if the [Multi Data Set
+Archiver](/pages/viewpage.action?pageId=80699422) is configured. It does
+the following:
+
+1. Extracts the not-deleted data sets of a TAR container with deleted
+   data sets into the store.
+2. Marks them as *not present in archive*.
+3. Deletes the TAR containers with deleted data sets.
+4. Requests archiving of the non-deleted data sets.
+
+The last step requires that the maintenance task
+[ArchivingByRequestTask](#MaintenanceTasks-ArchivingByRequestTask) is
+configured.
+
+**Configuration**:
+
+| Property Key | Description |
+|-------------------------|-------------|
+| last-seen-event-id-file | File which contains the last seen event id. |
+| mapping-file | Optional file which maps data sets to share ids and archiving folders (for details see Mapping File for Share Ids and Archiving Folders). If not specified, the first share which has enough free space and which isn't an unarchiving scratch share will be used for extracting the not-deleted data sets. |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.archiver.MultiDataSetDeletionMaintenanceTask
+    interval = 1 d
+    last-seen-event-id-file = ${storeroot-dir}/MultiDataSetDeletionMaintenanceTask-last-seen-event-id.txt
+    mapping-file = etc/mapping.tsv
+
+**NOTE**: Should be configured on any instance using the multi data set
+archiver where the archived data should be deletable.
+
+### MultiDataSetUnarchivingMaintenanceTask
+
+**Environment**: DSS
+
+**Relevancy:** Relevant
+
+**Description**: Triggers unarchiving of multi data set archives. It is
+only needed if the configuration property `delay-unarchiving` of the
+[Multi Data Set Archiver](/pages/viewpage.action?pageId=80699422) is
+set to `true`.
+
+This maintenance task makes it possible to reduce the stress on the
+tape system caused by otherwise random unarchiving events triggered by
+users.
+
+**Configuration**: No specific properties.
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.archiver.MultiDataSetUnarchivingMaintenanceTask
+    interval = 1 d
+    start = 01:00
+
+### MultiDataSetArchiveSanityCheckMaintenanceTask
+
+**Environment**: DSS
+
+**Relevancy:** Default
+
+**Description**: Task that verifies checksums of data sets archived
+within a specific time window. It reads archives from the final
+destination and checks if they are consistent with path info database
+entries.
+
+WARNING: the task assumes the MultiDataSetArchiver task is configured
+(the task uses some of the multi data set archiver configuration
+properties, e.g. the final destination location).
+
+**Configuration**:
+
+| Property Key | Description |
+|-----------------|-------------|
+| status-file | Path to a JSON file that keeps a list of already checked archive containers |
+| notify-emails | List of emails to notify about problematic archive containers |
+| interval | Interval in seconds |
+| check-to-date | "To date" of the time window to be checked. Date in format yyyy-MM-dd HH:mm |
+| check-from-date | "From date" of the time window to be checked. Date in format yyyy-MM-dd HH:mm |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.archiver.MultiDataSetArchiveSanityCheckMaintenanceTask
+    interval = 3600
+    check-from-date = 2022-09-01 00:00
+    check-to-date = 2022-10-01 00:00
+    notify-emails = test1@email.com, test2@email.com
+    status-file = ../../multi-dataset-sanity-check-statuses.json
+
+### PathInfoDatabaseFeedingTask
+
+**Environment**: DSS
+
+**Relevancy:** Default, is part of the post-registration task
+
+**Description**: Feeds the pathinfo database with the file paths of all
+data sets in the store. It can be used as a maintenance task as well as
+a post-registration task. As a maintenance task it needs to run only
+once if a **PostRegistrationMaintenanceTask** is configured. This task
+assumes a data source configured for 'path-info-db'.
+
+If used as a maintenance task, the data sets are processed in the order
+they are registered. The registration time stamp of the last processed
+data set is the starting point when the task is executed the next time.
+
+**Configuration**:
+
+| Property Key | Description |
+|----------------------|-------------|
+| compute-checksum | If true the CRC32 checksum (and optionally a checksum of the type specified by checksum-type) of all files will be calculated and stored in the pathinfo database. Default value: false |
+| checksum-type | Optional checksum type. If specified and compute-checksum = true, two checksums are calculated: the CRC32 checksum and the checksum of the specified type. The type and the checksum are stored in the pathinfo database. An allowed type has to be supported by MessageDigest.getInstance(<checksum type>). For more details see http://docs.oracle.com/javase/8/docs/api/java/security/MessageDigest.html#getInstance-java.lang.String-. |
+| data-set-chunk-size | Number of data sets requested from AS in one chunk if it is used as a maintenance task. Default: 1000 |
+| max-number-of-chunks | Maximum number of chunks of size data-set-chunk-size that are processed if it is used as a maintenance task. If it is <= 0 and time-limit isn't defined, all data sets are processed. Default: 0 |
+| time-limit | Limit of the execution time of this task if it is used as a maintenance task. The task is stopped before reading the next chunk if the time has been used up. If it is specified, it is an alternative way to limit the number of data sets to be processed instead of specifying max-number-of-chunks. This parameter can be specified with one of the following time units: ms, msec, s, sec, m, min, h, hours, d, days. Default time unit is sec. |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.path.PathInfoDatabaseFeedingTask
+    execute-only-once = true
+    compute-checksum = true
+
+### PostRegistrationMaintenanceTask
+
+**Environment**: DSS
+
+**Relevancy:** Default
+
+**Description**: A task which runs a sequence of so-called
+post-registration tasks for each freshly registered data set.
+
+**Configuration**:
+
+| Property Key | Description |
+|------------------------------|-------------|
+| ignore-data-sets-before-date | Defines a registration date. All data sets registered before this date are ignored. Format: yyyy-MM-dd, where yyyy is a four-digit year, MM is a two-digit month, and dd is a two-digit day. Default value: no restriction. |
+| last-seen-data-set-file | Path to a file which stores the code of the last data set successfully post-registered. Default value: last-seen-data-set.txt |
+| cleanup-tasks-folder | Path to a folder which stores serialized clean-up tasks, always created before a post-registration task is executed. These clean-up tasks are executed on start up of DSS after a server crash. Default value: clean-up-tasks |
+| post-registration-tasks | Comma-separated list of keys of post-registration task configurations. Each key defines (together with a '.') the prefix of all property keys defining the post-registration task. The tasks are executed in the order their keys appear in the list. |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.postregistration.PostRegistrationMaintenanceTask
+    interval = 60
+    cleanup-tasks-folder = ../cleanup-tasks
+    ignore-data-sets-before-date = 2011-01-27
+    last-seen-data-set-file = ../last-seen-data-set
+    post-registration-tasks = eager-shuffling, eager-archiving
+    eager-shuffling.class = ch.systemsx.cisd.etlserver.postregistration.EagerShufflingTask
+    eager-shuffling.share-finder.class = ch.systemsx.cisd.openbis.dss.generic.shared.ExperimentBasedShareFinder
+    eager-archiving.class = ch.systemsx.cisd.etlserver.postregistration.ArchivingPostRegistrationTask
+
+### RevokeUserAccessMaintenanceTask
+
+**Environment**: AS
+
+**Relevancy:** Relevant
+
+**Description**: Checks if users are still available on the configured
+authentication services. Users that are no longer available are
+automatically disabled and their ids are renamed with the disable date.
+
+For this to work the services should be able to list the available
+users.
+If you use any service that doesn't allow it, the task automatically
+disables itself, because it is impossible to know whether the users are
+active or not.
+
+| Service | Compatible |
+|----------------------------|------------|
+| CrowdAuthenticationService | NO |
+| DummyAuthenticationService | NO |
+| NullAuthenticationService | NO |
+| FileAuthenticationService | YES |
+| LDAPAuthenticationService | YES |
+
+**Configuration**:
+
+This maintenance task automatically uses the services already
+configured on the server.
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.generic.server.task.RevokeUserAccessMaintenanceTask
+    interval = 60 s
+
+### UserManagementMaintenanceTask
+
+**Environment**: AS
+
+**Relevancy:** Relevant
+
+**Description**: Creates users, spaces, samples, projects and
+experiments for all members of an LDAP authorization group or an
+explicit list of user ids. A configuration file (in JSON format) will be
+read each time this task is executed. All actions are logged in an audit
+log file. For more details see [User Group Management for Multi-groups
+openBIS
+Instances](https://unlimited.ethz.ch/display/openBISDoc2010/User+Group+Management+for+Multi-groups+openBIS+Instances)
+
+**Configuration:**
+
+| Property Key | Description |
+|---------------------------|-------------|
+| configuration-file-path | Relative or absolute path to the configuration file. Default: etc/user-management-maintenance-config.json |
+| audit-log-file-path | Relative or absolute path to the audit log file. Default: logs/user-management-audit_log.txt |
+| shares-mapping-file-path | Relative or absolute path to the mapping file for data store shares. This is optional. If not specified, the mapping file will not be managed by this maintenance task. |
+| filter-key | Key which is used to filter LDAP results. Will be ignored if ldap-group-query-template is specified. Default value: ou |
+| ldap-group-query-template | Direct LDAP query template. It should have a '%' character which will be replaced by an LDAP key as specified in the configuration file. |
+| deactivate-unknown-users | If true a user unknown to the authentication service will be deactivated. It should be set to false if no authentication service can be asked (like in Single-Sign-On). Default: true |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.generic.server.task.UserManagementMaintenanceTask
+    start = 02:42
+    interval = 1 day
+
+## Consistency and other Reports
+
+### DataSetArchiverOrphanFinderTask
+
+**Environment**: DSS
+
+**Relevancy:** Rare
+
+**Description**: Finds archived data sets which are no longer in openBIS
+(at least not marked as present-in-archive). A report will be created
+and sent to the specified list of e-mail addresses (mandatory
+property `email-addresses`). The task also looks for data sets which are
+present-in-archive but actually not found in the archive.
+
+This orphan finder task only works for the Multi Data Set Archiver. It
+doesn't work for RsyncArchiver, TarArchiver or ZipArchiver.
+
+**Configuration**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.plugins.DataSetArchiverOrphanFinderTask
+    interval = 60 s
+    email-addresses = email1@bsse.ethz.ch, email2@bsse.ethz.ch
+
+**Notes:** This is a consistency check task.
It checks consistency for
+data sets with the flag present-in-archive.
+
+### DataSetAndPathInfoDBConsistencyCheckTask
+
+**Environment**: DSS
+
+**Relevancy:** Rare
+
+**Description**: Checks that the file information in the pathinfo database
+is consistent with the information the file system provides. This is
+done for all recently registered data sets. Note that archived data sets
+are skipped. After all data sets (in the specified checking time interval)
+have been checked, the task checks them again.
+
+**Configuration**:
+
+| Property Key | Description |
+|------------------------|-------------|
+| checking-time-interval | Time interval in the past which defines the range of data sets to be checked. That is, all data sets with a registration date between now minus checking-time-interval and now will be checked. Can be specified with one of the following time units: ms, msec, s, sec, m, min, h, hours, d, days. Default time unit is sec. Default value: one day. |
+| pausing-time-point | Optional time point. Format: HH:mm, where HH is a two-digit hour (in 24h notation) and mm is a two-digit minute. When specified, this task stops checking after the specified pausing time point and continues when executed the next time, or the next day if start or continuing-time-point is specified. After all data sets have been checked, the task checks all data sets again, starting with the oldest one as specified by checking-time-interval. |
+| continuing-time-point | Time point where checking continues. Format: HH:mm, where HH is a two-digit hour (in 24h notation) and mm is a two-digit minute. Ignored when pausing-time-point isn't specified. Default value: time when the task is executed. |
+| chunk-size | Maximum number of data sets retrieved from AS. Ignored when pausing-time-point isn't specified. Default value: 1000 |
+| state-file | File to store the registration time stamp and code of the last considered data set. This is only used when pausing-time-point has been specified. Default: <store root>/DataSetAndPathInfoDBConsistencyCheckTask-state.txt |
+
+**Example**: The following example checks all data sets of the last ten
+years. It does the check only during the night and continues the next
+night.
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.path.DataSetAndPathInfoDBConsistencyCheckTask
+    interval = 1 days
+    start = 23:15
+    pausing-time-point = 5:00
+    checking-time-interval = 3653 days
+
+### MaterialExternalDBSyncTask
+
+**Environment**: AS
+
+**Relevancy:** Deprecated
+
+**Description**: Feeds a report database with recently added or modified
+materials.
+
+**Configuration**:
+
+| Property Key | Description |
+|----------------------|-------------|
+| database-driver | Fully qualified name of the JDBC driver class. |
+| database-url | URL to access the database server. |
+| database-username | User name of the database. Default: User who started openBIS AS.
|
+| database-password | Optional password of the database user. |
+| mapping-file | Path to the file containing configuration information for mapping material types and material properties to tables and columns in the report database. |
+| read-timestamp-sql | The SQL select statement which returns one column of type time stamp for the time stamp of the last report. If the result set is empty, the time stamp is assumed to be 1970-01-01. If the result set has more than one row, the first row is used. |
+| update-timestamp-sql | The SQL statement which updates or adds a time stamp. The statement has to contain a '?' symbol as the placeholder for the actual time stamp. |
+| insert-timestamp-sql | The SQL statement to add a time stamp the first time. The statement has to contain a '?' symbol as the placeholder for the actual time stamp. Default: same as update-timestamp-sql. |
+
+**Example**:
+
+**service.properties of AS**
+
+    <task id>.class = ch.systemsx.cisd.openbis.generic.server.task.MaterialExternalDBSyncTask
+    <task id>.interval = 120
+    <task id>.read-timestamp-sql = select timestamp from timestamp
+    <task id>.update-timestamp-sql = update timestamp set timestamp = ?
+    <task id>.insert-timestamp-sql = insert into timestamp values(?)
+    <task id>.mapping-file = ../report-mapping.txt
+    <task id>.database-driver = org.postgresql.Driver
+    <task id>.database-url = jdbc:postgresql://localhost/material_reporting
+
+#### Mapping File
+
+The mapping file is a text file describing the mapping of the data (i.e.
+material codes and material properties) onto the report database. It
+makes several assumptions about the database schema:
+
+- One table per material type. There are only tables for materials to be
+  reported.
+- Each table has a column which contains the material code.
+  - The entries are unique.
+  - The material code is a string not longer than 60 characters.
+- Each table has one column for each property type. Again, there are
+  only columns for properties to be reported.
+- The data type of each column should match the data type of the
+  property:
+  - MATERIAL: only the material code (string) will be reported.
+    Maximum length: 60
+  - CONTROLLEDVOCABULARY: the label (if defined) or the code will be
+    reported. Maximum length: 128
+  - TIMESTAMP: timestamp
+  - INTEGER: integer of any number of bits (maximum 64).
+  - REAL: fixed or floating point number
+  - all other data types are mapped to text.
+
+The general format of the mapping file is as follows:
+
+    [<Material Type Code>: <table name>, <code column name>]
+    <Property Type Code>: <column name>
+    <Property Type Code>: <column name>
+    ...
+    [<Material Type Code>: <table name>, <code column name>]
+    <Property Type Code>: <column name>
+    <Property Type Code>: <column name>
+    ...
+
+**Example**:
+
+**mapping.txt**
+
+    # Some comments
+    [GENE: GENE, GENE_ID]
+    GENE_SYMBOLS: symbol
+
+    [SIRNA: si_rna, code]
+    INHIBITOR_OF: suppressed_gene
+    SEQUENCE: Nucleotide_sequence
+
+Some rules:
+
+- Empty lines and lines starting with '#' will be ignored.
+- Table and column names can be upper case, lower case or mixed.
+- Material type codes and property type codes have to be in upper
+  case.
+
+If you put a foreign key constraint on the material code of one of the
+material properties, you need to define the constraint checking as
+DEFERRED in order not to get a constraint violation. The reason is that
+this task will *not* order the `INSERT` statements by their dependencies,
+but alphabetically.
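+
+For example, with PostgreSQL such a constraint could be declared as in
+the following sketch, which assumes the tables from the mapping example
+above (with GENE_ID as the unique code column of the GENE table); the
+constraint name is arbitrary:
+
+    ALTER TABLE si_rna
+        ADD CONSTRAINT fk_suppressed_gene
+        FOREIGN KEY (suppressed_gene) REFERENCES gene (gene_id)
+        DEFERRABLE INITIALLY DEFERRED;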
+
+### UsageReportingTask
+
+**Environment**: AS
+
+**Relevancy:** Relevant
+
+**Description**: Creates a daily/weekly/monthly report for a list of
+e-mail recipients about the usage (i.e. creation of experiments, samples
+and data sets) by users or groups. For more details see [User Group
+Management for Multi-groups openBIS
+Instances](/pages/viewpage.action?pageId=80699449).
+
+In order to be able to send an e-mail the following properties in
+`service.properties` have to be defined:
+
+    mail.from = openbis@<host>
+    mail.smtp.host = <SMTP host>
+    mail.smtp.user = <can be empty>
+    mail.smtp.password = <can be empty>
+
+**Configuration**:
+
+| Property Key | Description |
+|-------------------------|-------------|
+| interval | Determines the length of the period: daily if less than or equal to one day, weekly if less than or equal to seven days, monthly if above seven days. The actual period is always the day/week/month before the execution day. |
+| email-addresses | Comma-separated e-mail addresses which will receive the report as an attached text file (format: TSV). |
+| user-reporting-type | Type of reporting of individual user activities. Possible values are NONE (no reporting), ALL (activities inside and outside groups and for all users) and OUTSIDE_GROUP_ONLY (activities outside groups and of users belonging to no group). Default: ALL |
+| spaces-to-be-ignored | Optional list of comma-separated space codes of all the spaces which should be ignored for the report. |
+| configuration-file-path | Optional configuration file defining groups. |
+| count-all-entities | If true, shows the number of all entities (collections, objects, data sets) in an additional column. Default: false |
+
+**Example**:
+
+    class = ch.systemsx.cisd.openbis.generic.server.task.UsageReportingTask
+    interval = 7 days
+    email-addresses = ab@c.de, a@bc.de
+
+## Consistency Repair and Manual Migrations
+
+### BatchSampleRegistrationTempCodeUpdaterTask
+
+**Environment**: AS
+
+**Relevancy:** Rare
+
+**Description**: Replaces temporary sample codes (i.e. codes matching
+the regular expression `TEMP\.[a-zA-Z0-9\-]+\.[0-9]+`) by normal
+codes (prefix specified by sample type plus a number). This maintenance
+task is only needed when `create-continuous-sample-codes` is set to `true`
+in `service.properties` of AS.
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.generic.server.task.BatchSampleRegistrationTempCodeUpdaterTask
+
+### CleanUpUnarchivingScratchShareTask
+
+**Environment**: DSS
+
+**Relevancy:** Default
+
+**Description**: Removes data sets from the unarchiving scratch share
+which have status ARCHIVED and which are present in the archive. For more
+details see [Multi data set
+archiving](/pages/viewpage.action?pageId=80699422).
+
+**Configuration**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.generic.server.plugins.standard.archiver.CleanUpUnarchivingScratchShareTask
+    interval = 60 s
+
+**Notes:** Recommended clean-up task to run on every instance.
+
+### DataSetRegistrationSummaryTask
+
+**Environment**: AS
+
+**Relevancy:** Rare
+
+**Description**: Sends a data set summary report to a list of e-mail
+recipients at regular time intervals. The report contains all new data
+sets registered since the last report. Selected properties can be
+included in the report.
The data sets are grouped by the data set
+type.
+
+In order to be able to send an e-mail the following properties in
+`service.properties` have to be defined:
+
+    mail.from = openbis@<host>
+    mail.smtp.host = <SMTP host>
+    mail.smtp.user = <can be empty>
+    mail.smtp.password = <can be empty>
+
+**Configuration:**
+
+| Property Key | Description |
+|---------------------------|-------------|
+| interval | Interval (in seconds) between regular checks whether to create a report or not. This value should be set to 86400 (1 day). Otherwise the same report might be sent twice, or no report will be sent. |
+| start | Time the report will be created. A good value for this parameter is an early time in the morning, as in the example below. |
+| days-of-week | Comma-separated list of numbers denoting days of the week (Sunday=1, Monday=2, etc.). This parameter should be used if reports should be sent weekly or more often. |
+| days-of-month | Comma-separated list of numbers denoting days of the month. Default value of this parameter is 1. |
+| email-addresses | Comma-separated list of e-mail addresses. |
+| shown-data-set-properties | Optional comma-separated list of data set properties to be included in the report. |
+| data-set-types | Restricts the report to the specified comma-separated data set types. |
+| configured-content | Use the specified content as the body of the email. |
+
+A report is sent on each day which is either a specified day of the week
+or day of the month. If only weekly reports are needed, the parameter
+`days-of-month` should be set to an empty string.
+
+**Example**:
+
+**service.properties of AS**
+
+    <task id>.class = ch.systemsx.cisd.openbis.generic.server.task.DataSetRegistrationSummaryTask
+    <task id>.interval = 86400
+    <task id>.start = 1:00
+    <task id>.data-set-types = RAW_DATA, MZXML_DATA
+    <task id>.email-addresses = albert.einstein@princeton.edu, charles.darwin@evolution.org
+
+This means that on the 1st day of every month at 1:00 AM openBIS sends
+to the specified e-mail recipients a report about the data sets of types
+RAW\_DATA and MZXML\_DATA that have been uploaded in the previous month.
+
+### DynamicPropertyEvaluationMaintenanceTask
+
+**Environment**: AS
+
+**Relevancy:** Rare
+
+**Description**: Re-evaluates dynamic properties of all entities.
+
+**Configuration**:
+
+| Property Key | Description |
+|--------------|-------------|
+| class | ch.systemsx.cisd.openbis.generic.server.task.DynamicPropertyEvaluationMaintenanceTask |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.generic.server.task.DynamicPropertyEvaluationMaintenanceTask
+    interval = 3600
+
+### DynamicPropertyEvaluationTriggeredByMaterialChangeMaintenanceTask
+
+**Environment**: AS
+
+**Relevancy:** Deprecated
+
+**Description**: Re-evaluates dynamic properties of all samples which
+refer, via properties of type MATERIAL, directly or indirectly to
+materials changed since the last re-evaluation.
+
+**Configuration**:
+
+| Property Key | Description |
+|-------------------|-------------|
+| class | ch.systemsx.cisd.openbis.generic.server.task.DynamicPropertyEvaluationTriggeredByMaterialChangeMaintenanceTask |
+| timestamp-file | Path to a file which will store the timestamp of the last evaluation. Default value: ../../../data/DynamicPropertyEvaluationTriggeredByMaterialChangeMaintenanceTask-timestamp.txt. |
+| initial-timestamp | Initial timestamp of the form YYYY-MM-DD (e.g. 2013-09-15) which will be used the first time, when the timestamp file doesn't exist or has an invalid value. This is a mandatory property. |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.generic.server.task.DynamicPropertyEvaluationTriggeredByMaterialChangeMaintenanceTask
+    interval = 7 days
+    initial-timestamp = 2012-12-31
+
+### FillUnknownDataSetSizeInOpenbisDBFromPathInfoDBMaintenanceTask
+
+**Environment**: DSS
+
+**Relevancy:** Rare
+
+**Description**: Queries the openBIS database to find data sets without a
+size filled in, then queries the pathinfo DB to see if the size info is
+available there; if it is available, it fills in the size from the
+pathinfo information. If it is not available, it does nothing. Data sets
+from the openBIS database are fetched in chunks (see the data-set-chunk-size
+property). After each chunk the maintenance task checks whether a time
+limit has been reached (see the time-limit property). If so, it stops
+processing. The code of the last processed data set is stored in a file
+(see the last-seen-data-set-file property). The next run of the maintenance
+task will process data sets with a code greater than the one saved in
+the "last-seen-data-set-file". This file is deleted periodically (see
+delete-last-seen-data-set-file-interval) to handle the situation where
+codes of new data sets are lexicographically smaller than the codes of
+old data sets. Deleting the file is also needed when pathinfo
+database entries are added after a data set has already been processed
+by the maintenance task.
+
+**Configuration**:
+
+| Property Key | Description |
+|-----------------------------------------|-------------|
+| last-seen-data-set-file | Path to a file that will store the code of the last handled data set. Default value: "fillUnknownDataSetSizeTaskLastSeen" |
+| delete-last-seen-data-set-file-interval | A time interval (in seconds) which defines how often the "last-seen-data-set-file" file should be deleted. The parameter can be specified with one of the following time units: ms, msec, s, sec, m, min, h, hours, d, days. Default time unit is sec. Default value: 7 days. |
+| data-set-chunk-size | Number of data sets requested from AS in one chunk. Default: 100 |
+| time-limit | Limit on the execution time of this task. The task is stopped before reading the next chunk if the time has been used up. This parameter can be specified with one of the following time units: ms, msec, s, sec, m, min, h, hours, d, days. Default time unit is sec.
|
+
+**Example**:
+
+**plugin.properties**
+
+    <task id>.class = ch.systemsx.cisd.etlserver.plugins.FillUnknownDataSetSizeInOpenbisDBFromPathInfoDBMaintenanceTask
+    <task id>.interval = 86400
+    <task id>.data-set-chunk-size = 1000
+    <task id>.time-limit = 1h
+
+**NOTE**: Useful in scenarios where the path-info feeding sub-task of the
+post-registration task fails.
+
+### PathInfoDatabaseChecksumCalculationTask
+
+**Environment**: DSS
+
+**Relevancy:** Rare; the CRC32 checksum is often already calculated
+during post-registration.
+
+**Description**: Calculates the CRC32 checksum (and optionally a
+checksum of a specified type) of all files in the pathinfo database with
+unknown checksum. This task needs to run only once. It assumes a
+data source for the key 'path-info-db'.
+
+**Configuration**:
+
+| Property Key | Description |
+|---------------|-------------|
+| checksum-type | Optional checksum type. If specified, two checksums are calculated: the CRC32 checksum and the checksum of the specified type. The type and the checksum are stored in the pathinfo database. An allowed type has to be supported by MessageDigest.getInstance(<checksum type>). For more details see http://docs.oracle.com/javase/8/docs/api/java/security/MessageDigest.html#getInstance-java.lang.String-. |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.path.PathInfoDatabaseChecksumCalculationTask
+    execute-only-once = true
+    checksum-type = SHA-256
+
+### PathInfoDatabaseRefreshingTask
+
+**Environment**: DSS
+
+**Relevancy:** Rare
+
+**Description**: Refreshes the pathinfo database with file metadata of
+physical and available data sets in the store. This task assumes a data
+source for the key 'path-info-db'.
+
+The data sets are processed in the inverse order of their registration.
+Only a maximum number of data sets are processed in one run. This is
+specified by `chunk-size`.
+
+Under normal circumstances this maintenance task is **never** needed,
+because the content of a physical data set is **never** changed by
+openBIS itself.
+
+Only in the rare case that the content of physical data sets has to be
+changed does this maintenance task allow refreshing the file metadata in
+the pathinfo database.
+
+**Configuration**:
+
+| Property Key | Description |
+|---------------------------------|-------------|
+| time-stamp-of-youngest-data-set | Time stamp of the youngest data set to be considered. The format has to be <4 digit year>-<month>-<day> <hour>:<minute>:<second>. |
+| compute-checksum | If true, the CRC32 checksum (and optionally a checksum of the type specified by checksum-type) of all files will be calculated and stored in the pathinfo database. Default value: true |
+| checksum-type | Optional checksum type.
If specified, and compute-checksum = true, two checksums are calculated: the CRC32 checksum and the checksum of the specified type. The type and the checksum are stored in the pathinfo database. An allowed type has to be supported by MessageDigest.getInstance(<checksum type>). For more details see http://docs.oracle.com/javase/8/docs/api/java/security/MessageDigest.html#getInstance-java.lang.String-. |
+| chunk-size | Number of data sets requested from AS in one chunk. Default: 1000 |
+| data-set-type | Optional data set type. If specified, only data sets of the specified type are considered. Default: all data set types. |
+| state-file | File to store the registration time stamp and code of the last considered data set. Default: <store root>/PathInfoDatabaseRefreshingTask-state.txt |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.path.PathInfoDatabaseRefreshingTask
+    interval = 30 min
+    time-stamp-of-youngest-data-set = 2014-01-01 00:00:00
+    data-set-type = HCS_IMAGE
+
+### RemoveUnusedUnofficialTermsMaintenanceTask
+
+**Environment**: AS
+
+**Relevancy:** Rare
+
+**Description**: Removes unofficial unused vocabulary terms. For more
+details about unofficial vocabulary terms see [Ad Hoc Vocabulary
+Terms](/pages/viewpage.action?pageId=80699498).
+
+**Configuration:**
+
+| Property Key | Description |
+|-----------------|-------------|
+| older-than-days | Unofficial terms are only deleted if they have been registered more than the specified number of days ago. Default: 7 days. |
+
+**Example**:
+
+**service.properties of AS**
+
+    <task id>.class = ch.systemsx.cisd.openbis.generic.server.task.RemoveUnusedUnofficialTermsMaintenanceTask
+    <task id>.interval = 86400
+    <task id>.older-than-days = 30
+
+### ResetArchivePendingTask
+
+**Environment**: DSS
+
+**Relevancy:** Rare
+
+**Description**: For each data set which is not present in the archive
+but has status ARCHIVE\_PENDING, the status will be set to AVAILABLE if
+there is no command in the DSS data set command queues referring to it.
+
+**Configuration**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.etlserver.plugins.ResetArchivePendingTask
+    interval = 60 s
+
+### SessionWorkspaceCleanUpMaintenanceTask
+
+**Environment**: AS
+
+**Relevancy:** Default
+
+**Description**: Cleans up the session workspace folders of no longer
+active sessions. This maintenance plugin is automatically added by default
+with a default interval of 1 hour. If a manually configured version of the
+plugin is detected, the automatic configuration is skipped.
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.generic.server.task.SessionWorkspaceCleanUpMaintenanceTask
+    interval = 1 day
+
+### MaterialsMigration
+
+**Environment**: AS
+
+**Relevancy:** Relevant
+
+**Description**: Migrates the Materials entities and types to a
+Sample-based model using Sample properties. It automatically creates and
+assigns sample types, properties and entities.
+
+It allows executing the migration and deleting the old Materials
+model in separate steps.
+
+Deleting materials and material types requires the migration to have
+been a success; a validation check is run before the deletion.
+
+**Example**:
+
+This maintenance task can be configured directly in the AS
+service.properties:
+
+**service.properties**
+
+    maintenance-plugins = materials-migration
+
+    materials-migration.class = ch.systemsx.cisd.openbis.generic.server.task.MaterialsMigration
+    materials-migration.execute-only-once = true
+    materials-migration.doMaterialsMigrationInsertNew = true
+    materials-migration.doMaterialsMigrationDeleteOld = true
+
+## Microscopy Maintenance Tasks
+
+### MicroscopyThumbnailsCreationTask
+
+**Environment**: DSS
+
+**Relevancy:** Relevant
+
+**Description**: Creates thumbnails for already registered microscopy
+data sets.
+
+**Configuration:**
+
+| Property Key | Description |
+|-------------------------------|-------------|
+| data-set-container-type | Type of the data set container. Default: MICROSCOPY_IMG_CONTAINER |
+| data-set-thumbnail-type-regex | Regular expression for the type of data sets which have thumbnails. This is used to test whether there are already thumbnails or not. Default: MICROSCOPY_IMG_THUMBNAIL |
+| main-data-set-type-regex | Regular expression for the type of data sets which have the actual images. Default: MICROSCOPY_IMG |
+| max-number-of-data-sets | The maximum number of data sets to be handled in one run of this task. If zero or less, all data sets will be handled. Default: 1000 |
+| state-file | Name of the file which stores the registration time stamp of the last successfully handled data set. Default: MicroscopyThumbnailsCreationTask-state.txt |
+| maximum-number-of-workers | If specified, the creation will be parallelized among several workers. The actual number of workers depends on the number of CPUs; no more than 50% of the CPUs will be used. |
+| script-path | Path to the Jython script which specifies the thumbnails to be generated. The script should define the method process(transaction, parameters, tablebuilder) as for JythonIngestionService (see Jython-based Reporting and Processing Plugins). Note that tablebuilder will be ignored. In addition the global variables image_config and image_data_set_structure are defined: image_data_set_structure is an object of the class ImageDataSetStructure; information about channels, series numbers etc. can be requested from it. image_config is an object of the class SimpleImageContainerDataConfig; it should be used to specify the thumbnails to be created. Currently only setImageGenerationAlgorithm() is supported. |
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.etl.MicroscopyThumbnailsCreationTask
+    interval = 1 h
+    script-path = specify_thumbnail_generation.py
+
+with
+
+**specify\_thumbnail\_generation.py**
+
+    from ch.systemsx.cisd.openbis.dss.etl.dto.api.impl import MaximumIntensityProjectionGenerationAlgorithm
+
+    def _get_series_num():
+        # Collect the series numbers of all images and return one of them.
+        series_numbers = set()
+        for image_info in image_data_set_structure.getImages():
+            series_numbers.add(image_info.tryGetSeriesNumber())
+        return series_numbers.pop()
+
+    def process(transaction, parameters, tableBuilder):
+        # Create thumbnails only for data sets with an even series number.
+        seriesNum = _get_series_num()
+        if int(seriesNum) % 2 == 0:
+            image_config.setImageGenerationAlgorithm(
+                MaximumIntensityProjectionGenerationAlgorithm(
+                    "MICROSCOPY_IMG_THUMBNAIL", 256, 128, "thumbnail.png"))
+
+### DeleteFromImagingDBMaintenanceTask
+
+**Environment**: DSS
+
+**Relevancy:** Relevant
+
+**Description**: Deletes database entries from the imaging database.
+This is a special variant of
+[DeleteFromExternalDBMaintenanceTask](#MaintenanceTasks-DeleteFromExternalDBMaintenanceTask)
+with the same configuration parameters.
+
+**Configuration**: See
+[DeleteFromExternalDBMaintenanceTask](#MaintenanceTasks-DeleteFromExternalDBMaintenanceTask)
+
+**Example**:
+
+**plugin.properties**
+
+    class = ch.systemsx.cisd.openbis.dss.etl.DeleteFromImagingDBMaintenanceTask
+    data-source = imaging-db
+
+## Proteomics Maintenance Tasks
\ No newline at end of file
diff --git a/docs/system-admin-documentation/advanced-features/share-ids.md b/docs/system-admin-documentation/advanced-features/share-ids.md
new file mode 100644
index 0000000000000000000000000000000000000000..2eaf5ad0ea9a6db391525576e32fdc2901a4a635
--- /dev/null
+++ b/docs/system-admin-documentation/advanced-features/share-ids.md
@@ -0,0 +1,4 @@
+Share IDs
+=========
+
+To be written
\ No newline at end of file
diff --git a/docs/system-admin-documentation/docker-installation/docker-installation-and-configuration.md b/docs/system-admin-documentation/docker-installation/docker-installation-and-configuration.md
new file mode 100644
index 0000000000000000000000000000000000000000..1b5697bdad4e303de5190fb6b625e64d820d02fd
--- /dev/null
+++ b/docs/system-admin-documentation/docker-installation/docker-installation-and-configuration.md
@@ -0,0 +1,4 @@
+Docker Installation And Configuration
+=====================================
+
+To be written
\ No newline at end of file
diff --git a/docs/system-admin-documentation/docker-installation/docker-system-requirements.md b/docs/system-admin-documentation/docker-installation/docker-system-requirements.md
new file mode 100644
index 0000000000000000000000000000000000000000..b81b3c5b59f69fabd20c0936da04b971913138dc
--- /dev/null
+++ b/docs/system-admin-documentation/docker-installation/docker-system-requirements.md
@@ -0,0 +1,4 @@
+Docker System Requirements
+==========================
+
+To be written
diff --git a/docs/system-admin-documentation/docker-installation/index.rst b/docs/system-admin-documentation/docker-installation/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..dce95fc08134bd316e1e7bdc659d200c05248e8f
--- /dev/null
+++ b/docs/system-admin-documentation/docker-installation/index.rst
@@ -0,0 +1,8 @@
+Docker Installation
+===================
+
+.. 
toctree:: + :maxdepth: 4 + + docker-system-requirements + docker-installation-and-configuration \ No newline at end of file diff --git a/docs/system-admin-documentation/installation/architectural-overview.md b/docs/system-admin-documentation/installation/architectural-overview.md new file mode 100644 index 0000000000000000000000000000000000000000..e4cc8172296063063338b14975833a0cd2c09dd4 --- /dev/null +++ b/docs/system-admin-documentation/installation/architectural-overview.md @@ -0,0 +1,4 @@ +Architectural Overview +====================== + +To be written \ No newline at end of file diff --git a/docs/system-admin-documentation/installation/index.rst b/docs/system-admin-documentation/installation/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..ea71aed8af8cfb51503c6e6d2ed5b1fa1bcb5602 --- /dev/null +++ b/docs/system-admin-documentation/installation/index.rst @@ -0,0 +1,11 @@ +Installation +============ + +.. toctree:: + :maxdepth: 4 + + system-requirements + architectural-overview + installation-and-configuration-guide + optional-application-server-configuration + optional-datastore-server-configuration \ No newline at end of file diff --git a/docs/system-admin-documentation/installation/installation-and-configuration-guide.md b/docs/system-admin-documentation/installation/installation-and-configuration-guide.md new file mode 100644 index 0000000000000000000000000000000000000000..a6d2b8646fc72c618a0aaf15d5e73d25db11f8af --- /dev/null +++ b/docs/system-admin-documentation/installation/installation-and-configuration-guide.md @@ -0,0 +1,1463 @@ +Installation and Administrator Guide of the openBIS Server +========================================================== + +## System Requirements + +The minimal requirements of a system running openBIS are: + +- Operating System: Linux / MacOS X + (The binaries: `bash`, `awk`, `sed`, `unzip` need to be installed + and in the `PATH` of the openBIS user.) +- Java Runtime Environment: recent versions of Oracle JRE 11 +- PostgreSQL 11 + +We find Linux to be the easiest choice to get an openBIS server running +quickly. + +For recommended memory settings, see [Recommended CPU and memory +settings for openBIS +20.10](/display/openBISDoc2010/Recommended+CPU+and+memory+settings+for+openBIS+20.10). + +An SMTP server needs to be accessible and configured if you want to +obtain notifications. + +## Installation + +The server distribution is a `gzipped` `tar` file named +`openBIS-installation-standard-technologies-<version>` `.tar.gz`. It +contains: + +- `console.properties:` configuration file for a console/non-gui + installation + +- `extract.sh:` helper script for installation + +- `jul.config:` Log configuration for the openBIS install process + +- `openBIS-installer.jar` Java archive containing openBIS + +- `run-console.sh` Installation script for console/non-gui + installation + +- `run-ui.sh` Installation script for gui installation + +### Installation steps + +1. Create a service user account, i.e. an unprivileged, regular user + account. **Do not run openBIS as root!** + +2. Gunzip the distribution on the server machine into some temporary + folder. + +3. 
Run either the console/non-GUI installation script or the
+    GUI installation script:
+
+    **GUI-based installation**
+
+    > tar xvfz openBIS-installation-standard-technologies-S139.0-r26480.tar.gz
+    > cd openBIS-installation-standard-technologies-S139.0-r26480
+    > ./run-ui.sh
+
+    In the non-GUI version you have to edit the `console.properties`
+    file:
+
+    **Non-GUI installation**
+
+    > tar xvfz openBIS-installation-standard-technologies-S139.0-r26480.tar.gz
+    > cd openBIS-installation-standard-technologies-S139.0-r26480
+    > vi console.properties
+    > ./run-console.sh
+
+    **NOTE:** Please be aware that the directory where openBIS is
+    installed should not already exist. Users should specify the
+    directory where they want to install openBIS (in the
+    console.properties or in the graphical installer) and this directory
+    will be created by the installation procedure. If the directory
+    already exists, the installation will fail.
+
+After the successful installation you should have a look at the
+configuration file called `service.properties`. It is located in
+`<server_folder>/openBIS-server/jetty/etc/`
+
+This file is an [Extended Properties
+File](/display/openBISDoc2010/Extended+Configuration+Properties). Here
+is an example which can be used as a template:
+
+**service.properties**
+
+    # ---------------------------------------------------------------------------
+    # Database configuration
+    # ---------------------------------------------------------------------------
+    # The database instance local unique identifier. Used when the new database is created.
+    database-instance = DEFAULT
+
+    # Supported: currently only 'postgresql' is supported
+    database.engine = postgresql
+    database.url-host-part =
+    database.kind = prod
+    # User who owns the database. Default: Operating system user running the server.
+    database.owner =
+    database.owner-password =
+    # Superuser of the database. Default: database-dependent.
+    database.admin-user =
+    database.admin-password =
+    # Max. number of active database connections. Default: 20.
+    database.max-active-connections =
+    # Max. number of idle database connections to keep open. Default: 20.
+    database.max-idle-connections =
+    # Log interval (in seconds) between two regular log entries of the number of active database
+    # connections. Default: 3600s.
+    database.active-connections-log-interval =
+
+    # ---------------------------------------------------------------------------
+    # Master data by Excel sheets
+    # ---------------------------------------------------------------------------
+    # Path to the file which stores version information of master data imported from Excel sheets.
+    # Default value: ../../../xls-import-version-info.json. The file will be created.
+    # It should be <openbis installation path>/servers/openBIS-server.
+    # Note that the folder containing this file has to exist.
+    
+ # xls-import.version-data-file = ../../../xls-import-version-info.json + + # --------------------------------------------------------------------------- + # Authentication configuration + # --------------------------------------------------------------------------- + # Supported Authentication options are: + # 'file-authentication-service' + # 'ldap-authentication-service' + # 'crowd-authentication-service' + # 'file-crowd-authentication-service' + # 'file-ldap-authentication-service' + # 'ldap-crowd-authentication-service' + # 'file-ldap-crowd-caching-authentication-service' + # For a detailed description, have a look at the Installation and Administrator + # Guide of the openBIS Server: https://wiki-bsse.ethz.ch/x/oYIUBQ + authentication-service = file-ldap-crowd-caching-authentication-service + + # --------------------------------------------------------------------------- + # Caching configuration (only used with 'file-ldap-crowd-caching-authentication-service') + # --------------------------------------------------------------------------- + # The time that the authentication cache keeps entries. Default: 28h + authentication.cache.time = 28h + # The time that the authentication cache does not perform re-validation on a cache entry. + # Default: 1h + authentication.cache.time-no-revalidation = 1h + + # --------------------------------------------------------------------------- + # Crowd configuration + # --------------------------------------------------------------------------- + # + # The Crowd host. + # Mandatory. + crowd.service.host = + # The Crowd service port. Default: 443 + crowd.service.port = + # The timeout (in s) to wait for a Crowd query to return, -1 for "wait indefinitely". Default: 10s. + crowd.service.timeout = + # The Crowd application name. The value 'openbis' is just a suggestion. + # Mandatory. + crowd.application.name = openbis + # The Crowd application password. + # Mandatory. + crowd.application.password = + + # --------------------------------------------------------------------------- + # LDAP configuration + # --------------------------------------------------------------------------- + # The space-separated URLs of the LDAP servers, e.g. "ldap://d.ethz.ch/DC=d,DC=ethz,DC=ch". + # Mandatory. + ldap.server.url = + # The distinguished name of the security principal, e.g. "CN=carl,OU=EthUsers,DC=d,DC=ethz,DC=ch". + # Mandatory. + ldap.security.principal.distinguished.name = + # Password of the LDAP user account that will be used to login to the LDAP server to perform the queries. + # Mandatory. 
+ ldap.security.principal.password = + # The security protocol to use, use "ssl" or "none", default is "ssl" + ldap.security.protocol = + # The authentication method to use: "none" (no authentication), "simple", "strong" (SASL), defaults to "simple" + ldap.security.authentication-method = + # The referral mode: + # "follow" - follow referrals automatically (the default) + # "ignore" - ignore referrals + # "throw" - throw ReferralException when a referral is encountered + ldap.referral = + # The attribute name for the user id, defaults to "uid" + ldap.attributenames.user.id = + # The attribute name for the email, defaults to "mail" + ldap.attributenames.email = + # The attribute name for the first name, defaults to "givenName" + ldap.attributenames.first.name = + # The attribute name for the last name, defaults to "sn" + ldap.attributenames.last.name = + # Set to true to also query for email aliases + ldap.queryEmailForAliases = true + # The query template, needs to contain %s which will be filled with the query term, e.g. uid=username + # The default is: + # ldap.queryTemplate = (&(objectClass=organizationalPerson)(objectCategory=person)(objectClass=user)(%s)) + # which is known to work for many Active Directory installations. + # For OpenLDAP, replace by: + # ldap.queryTemplate = (&(%s)) + # For restriction to BSSE accounts in OpenLDAP, set to: + # ldap.queryTemplate = (&(objectClass=bssePosixAccount)(%s)) + ldap.queryTemplate = + # The number of times a failed LDAP query is retried at the max. Default: 1. + ldap.maxRetries = + # The timeout (in s) to wait for an LDAP query to return, -1 for "wait indefinitely". Default: 10s. + ldap.timeout = + # The time (in s) to wait after a failure before retrying the query. Default: 10s. + ldap.timeToWaitAfterFailure = + + # --------------------------------------------------------------------------- + # Anonymous login configuration (optional) + # --------------------------------------------------------------------------- + # Login of the existing user whose settings will be used for anonymous login + #user-for-anonymous-login = <user-login> + + # --------------------------------------------------------------------------- + # Project authorization + # --------------------------------------------------------------------------- + # Enabled if set to 'true'. Default: disabled + authorization.project-level.enabled = true + # Regular expression for user ids allowed to have a project role + authorization.project-level.users = .* + + # --------------------------------------------------------------------------- + # Project samples + # --------------------------------------------------------------------------- + # Enabled if set to 'true'. Default: disabled + # Note: Changing to 'true' turns experiment samples to project samples + # which can not be reverted after setting this flag back to 'false'. Also + # the sample identifier will change for such samples. + #project-samples-enabled = true + + # --------------------------------------------------------------------------- + # Client configuration + # --------------------------------------------------------------------------- + # Name of the file that stores Web Client configuration + web-client-configuration-file = etc/web-client.properties + + # A comma-separated list of trusted cross-origin domains, that are allowed to + # query openBIS content. Typically these are lightweight webapps that integrate with openBIS + # via JSON-RPC services, but are not directly hosted within the openBIS application. 
+ # + # Example 1 (two different domains configured): + # + # trusted-cross-origin-domains=https://myapp.domain.com:8443, http://other.domain.com + # + # Example 2 (match every domain): + # + # trusted-cross-origin-domains= * + # + # The '*' matches any arbitrary domain. It should be used with care as it opens openBIS + # for potential cross-site scripting attacks. + # + #trusted-cross-origin-domains= + + # --------------------------------------------------------------------------- + # Session configuration + # --------------------------------------------------------------------------- + # The time after which an inactive session is expired by the service (in minutes). + session-timeout = 720 + + # Session time (in minutes) in case of presents of file etc/nologin.html. Should be < 30. + #session-timeout-no-login = 10 + + # Maximum number of sessions allowed per user. Zero means unlimited number of sessions. Default value is 1. + # max-number-of-sessions-per-user = 1 + + # Comma separated list of users allowed to have unlimited number of sessions. Default: Empty list. + # Note: The DSS (user 'etlserver' by default, see property 'username' of DSS service.properties) + # should be added to this list. + # users-with-unrestricted-number-of-sessions = + + + # --------------------------------------------------------------------------- + # Business rules configuration + # --------------------------------------------------------------------------- + # When set to "true" enables the system to store material codes containing non-alphanumeric characters. + # Regardless of the value of this property no white spaces are allowed in the material codes. + #material-relax-code-constraints=false + + # Comma-separated list of regular expression of data set types which do not require that the data set + # is linked to an experiment. If not linked to an experiment a link to a sample with space is required. + data-set-types-with-no-experiment-needed = .* + + # When set to 'true' the sequence of sample codes is gap less for each type if all samples are created by + # batch registrations. + #create-continuous-sample-codes = false + + + # --------------------------------------------------------------------------- + # RPC Dropbox Default DSS configuration + # --------------------------------------------------------------------------- + # Set this to the DSS code of the DSS handling RPC Dropboxes for this user. + # Note: This is only required if more than one DSS is connected to this openBIS server. + dss-rpc.put.dss-code = + + # --------------------------------------------------------------------------- + # Hibernate Search + # --------------------------------------------------------------------------- + # The working directory. + hibernate.search.index-base = ./indices + # One of NO_INDEX, SKIP_IF_MARKER_FOUND, INDEX_FROM_SCRATCH. + # If not specified, default (SKIP_IF_MARKER_FOUND) is taken. + hibernate.search.index-mode = SKIP_IF_MARKER_FOUND + # Defines the maximum number of elements indexed before flushing the transaction-bound queue. + # Default is 1000. + hibernate.search.batch-size = 1000 + # Maximum number of search results + hibernate.search.maxResults = 100000 + # If 'async', the update of indices will be done in a separate thread. + hibernate.search.worker.execution=async + # How long fulltext searches can take (in seconds) before they are timed out. + # When not defined, there is no timeout. 
# fulltext-timeout = 30
+
+    # ---------------------------------------------------------------------------
+    # Online Help
+    # ---------------------------------------------------------------------------
+    # Online help is broken into two sections -- generic and specific. Generic help links back to
+    # the CISD. Specific help is provided by the host of the installation
+    #
+    # openBIS needs to know the root URL for the online help and a template for the individual pages.
+    # The template should have one parameter, called title, and should be constructed to automatically
+    # create the page if it does not already exist.
+    # The template can be created by going to the root page, adding a new link to the page, and
+    # replacing the title of the new page with the ${title}
+    onlinehelp.generic.root-url = https://wiki-bsse.ethz.ch/display/CISDDoc/OnlineHelp
+    onlinehelp.generic.page-template = https://wiki-bsse.ethz.ch/pages/createpage.action?spaceKey=CISDDoc&title=${title}&linkCreation=true&fromPageId=40633829
+    #onlinehelp.specific.root-url = https://wiki-bsse.ethz.ch/display/CISDDoc/OnlineHelp
+    #onlinehelp.specific.page-template = https://wiki-bsse.ethz.ch/pages/createpage.action?spaceKey=CISDDoc&title=${title}&linkCreation=true&fromPageId=40633829
+
+    # ---------------------------------------------------------------------------
+    # JMX memory monitor
+    # ---------------------------------------------------------------------------
+    # Interval between two runs of the memory monitor (in seconds).
+    # Set to -1 to disable the memory monitor.
+    memorymonitor-monitoring-interval = 60
+    # Interval between two regular log calls of the memory monitor (in seconds).
+    # Set to -1 to disable regular memory usage logging.
+    memorymonitor-log-interval = 3600
+    # The percentage of memory that, if exceeded, triggers a notify log of the memory manager.
+    # Set to 100 to disable.
+    memorymonitor-high-watermark-percent = 90
+
+    # ---------------------------------------------------------------------------
+    # Database Configurations for Query module (optional)
+    # ---------------------------------------------------------------------------
+    # Comma-separated keys of databases configured for the Query module.
+    # Each database should have configuration properties prefixed with its key.
+    # Mandatory properties for each <database> include:
+    # <database>.label - name shown to the openBIS user when adding or editing a customized query
+    # <database>.database-driver - JDBC Driver of the database (e.g. org.postgresql.Driver)
+    # <database>.database-url - JDBC URL to the database (e.g. jdbc:postgresql://localhost/openbis)
+    # Optional properties for each <database> include:
+    # <database>.database-user - name of the database user (default: user.name from system properties)
+    # <database>.database-password - password of the database user
+    # <database>.creator-minimal-role - minimal role required to create/edit queries on this database (default: POWER_USER)
+    # <database>.data-space - If NOT specified, OBSERVER of any space will be allowed to perform
+    #                         queries and <creator-minimal-role> of any space will be allowed
+    #                         to create/edit queries on this DB.
+    #                       - If specified, only OBSERVER of the space will be allowed to perform
+    #                         queries and <creator-minimal-role> of the space will be allowed
+    #                         to create/edit queries on this DB.
+    
+ #query-databases = openbisDB + # + #openbisDB.label = openBIS meta data + #openbisDB.data-space = CISD + #openbisDB.creator-minimal-role = SPACE_ADMIN + #openbisDB.database-driver = org.postgresql.Driver + #openbisDB.database-url = jdbc:postgresql://localhost/openbis_${database.kind} + #openbisDB.database-username = + #openbisDB.database-password = + + # --------------------------------------------------------------------------- + # Maintenance plugins configuration (optional) + # --------------------------------------------------------------------------- + # Comma separated names of maintenance plugins. + # Each plugin should have configuration properties prefixed with its name. + # Mandatory properties for each <plugin> include: + # <plugin>.class - Fully qualified plugin class name + # <plugin>.interval - The time between plugin executions (in seconds) + # Optional properties for each <plugin> include: + # <plugin>.start - Time of the first execution (HH:mm) + # <plugin>.execute-only-once - If true the task will be executed exactly once, + # interval will be ignored. By default set to false. + #maintenance-plugins = demo + # + #demo.class = ch.systemsx.cisd.openbis.generic.server.task.DemoMaintenanceTask + #demo.interval = 60 + #demo.property_1 = some value + #demo.property_2 = some value 2 + + # + # Internal - do not change + # + + # Authorization + # Supported: 'no-authorization' and 'active-authorization' + authorization-component-factory = active-authorization + + script-folder = . + + # + # Version of Jython to be used in plugins. 2.5 and 2.7 are supported + # + jython-version=2.7 + + ########## + # V3 API # + ########## + + # ------------------------------------------------------------------------- + # The configuration below reflects the default values used by the V3 API. + # Please uncomment and change the chosen values to overwrite the defaults. + # ------------------------------------------------------------------------- + # + # A path to a directory where operation execution details are stored. + # + # api.v3.operation-execution.store.path = operation-execution-store + # + # A thread pool that is used for executing all asynchronous operations. + # + # api.v3.operation-execution.thread-pool.name = operation-execution-pool + # api.v3.operation-execution.thread-pool.core-size = 10 + # api.v3.operation-execution.thread-pool.max-size = 10 + # api.v3.operation-execution.thread-pool.keep-alive-time = 0 + # + # A name of a thread that updates operation execution progress information. + # + # api.v3.operation-execution.progress.thread-name = operation-execution-progress + # + # An interval that controls how often operation execution progress information gets updated. The interval is defined in seconds. + # + # api.v3.operation-execution.progress.interval = 5 + # + # Availability times control for how long information about an operation execution is stored in the system. + # There are 3 levels of such information: + # + # * core information (code, state, owner, description, creation_date, start_date, finish_date) + # * summary information (summary of operations, progress, error, results) + # * detailed information (details of operations, progress, error, results) + # + # Each level of information can have a different availability time. + # The availability times can be defined at the moment of scheduling an operation execution. + # If a time is not provided explicitly then a corresponding 'default' value is used. 
# The maximum possible value that can be used for a given availability time is controlled with the 'max' property.
+    #
+    # All availability times are defined in seconds.
+    # Examples of values: 31536000 (1 year), 2592000 (30 days), 86400 (1 day), 3600 (1 hour).
+    #
+    # api.v3.operation-execution.availability-time.default = 31536000
+    # api.v3.operation-execution.availability-time.max = 31536000
+    # api.v3.operation-execution.availability-time.summary.default = 2592000
+    # api.v3.operation-execution.availability-time.summary.max = 2592000
+    # api.v3.operation-execution.availability-time.details.default = 86400
+    # api.v3.operation-execution.availability-time.details.max = 86400
+    #
+    # Maintenance tasks responsible for marking and deleting timed out operation executions. Intervals are defined in seconds.
+    #
+    # api.v3.operation-execution.availability-update.mark-timeout-pending-task.name = operation-execution-mark-timeout-pending-task
+    # api.v3.operation-execution.availability-update.mark-timeout-pending-task.interval = 60
+    #
+    # api.v3.operation-execution.availability-update.mark-timed-out-or-deleted-task.name = operation-execution-mark-timed-out-or-deleted-task
+    # api.v3.operation-execution.availability-update.mark-timed-out-or-deleted-task.interval = 300
+    #
+    # Maintenance task responsible for marking new, scheduled and running operation executions as failed after server restart.
+    #
+    # api.v3.operation-execution.state-update.mark-failed-after-server-restart-task.name = operation-execution-mark-failed-after-server-restart-task
+    #
+
+### Database Settings
+
+All properties starting with `database.` specify the settings for the
+openBIS database. They are all mandatory.
+
+| Property | Description |
+|------------------------------------|-------------|
+| `database.engine` | Type of database. Currently only postgresql is supported. |
+| `database.create-from-scratch` | If true, the database will be dropped and an empty database will be created. In productive use always set this value to false. |
+| `database.script-single-step-mode` | If true, all SQL scripts are executed in single step mode. Useful for localizing errors in SQL scripts. Should always be false in productive mode. |
+| `database.url-host-part` | Part of the JDBC URL denoting the host of the database server. If openBIS Application Server and database server are running on the same machine, this property should be an empty string. |
+| `database.kind` | Part of the name of the database. The full name reads openbis_<kind>. |
+| `database.admin-user` | ID of the user on the database server with admin rights, like creation of tables. Should be an empty string if the default admin user should be used. In case of PostgreSQL the default admin user is assumed to be postgres. |
+| `database.admin-password` | Password for the admin user. Usually an empty string. |
+| `database.owner` | ID of the user owning the data. This should generally be openbis. The openbis role and password need to be created. In case of an empty string it is the same user who started up openBIS Application Server. |
+| `database.owner-password` | Password of the owner. |
+
+The credentials for the database user with the privilege to create a new
+database depend on the installation and configuration of the PostgreSQL
+database.
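+
+For example, a typical production configuration could look like the
+following sketch, which assumes a local PostgreSQL server (empty
+`database.url-host-part`) and an already created openbis role:
+
+**service.properties**
+
+    database.engine = postgresql
+    database.create-from-scratch = false
+    database.script-single-step-mode = false
+    database.url-host-part =
+    database.kind = prod
+    database.owner = openbis
+    database.owner-password = <password of the openbis role>
+    database.admin-user =
+    database.admin-password =
+
+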
## Start Server
+
+The server is started as follows:
+
+    prompt> <installation folder>/bin/bisup.sh
+
+On startup the openBIS server creates the database on PostgreSQL and
+checks the connection with the remote authentication services (if they
+are configured). Log files can be found in
+`<installation folder>/servers/openBIS-server/jetty/logs`. The
+following command also shows the log: `<installation folder>/bin/bislog.sh`
+
+The first user to log into the system will have full administrator
+rights (role `INSTANCE_ADMIN`).
+
+## Stop Server
+
+The server is stopped as follows:
+
+    prompt> <installation folder>/bin/bisdown.sh
+
+## Authentication systems
+
+Generic openBIS currently supports four authentication systems: a
+self-contained system based on a UNIX-like passwd file, LDAP, the Crowd
+system (see <http://www.atlassian.com/software/crowd>) and Single Sign
+On (e.g. SWITCH AAI). Besides these, so-called stacked
+authentication methods are also available. Stacked authentication methods
+use multiple authentication systems in the order indicated by the name. The
+first authentication system that is able to provide an entry for a
+particular user id will be used. If you need full control over which
+authentication systems are used in which order, you can define your own
+stacked authentication service in the Spring application context file:
+`<server folder>/openBIS-server/jetty/webapps/openbis/WEB-INF/classes/genericCommonContext.xml`.
+
+### The default authentication configuration
+
+In the template service properties, we set
+`authentication-service = file-ldap-crowd-caching-authentication-service`,
+which means that file-based authentication, LDAP and Crowd are used for
+authentication, in this order. As LDAP and Crowd are not configured in
+the template service properties, this effectively corresponds
+to `file-authentication-service`; however, when LDAP and / or Crowd are
+configured, they are picked up on server start and are used to
+authenticate users when they are not found in the local `passwd` file.
+Furthermore, as it is a caching authentication service, it will cache
+authentication results from LDAP and / or Crowd in the file
+`<server folder>/jetty/etc/passwd_cache`. See section
+*Authentication Cache* below for details on this caching.
+
+### The file-based authentication system
+
+This authentication scheme uses the file
+`<server folder>/jetty/etc/passwd` to determine whether a login to the
+system is successful or not.
+
+The script `<server folder>/jetty/bin/passwd.sh` can be used to maintain
+this file. This script supports the options:
+
+    passwd list | [remove|show|test] <userid> | [add|change] <userid> [option [...]]
+     --help                 : Prints out a description of the options.
+     [-P,--change-password] : Read the new password from the console.
+     [-e,--email] VAL       : Email address of the user.
+     [-f,--first-name] VAL  : First name of the user.
+     [-l,--last-name] VAL   : Last name of the user.
+     [-p,--password] VAL    : The password.
+
+A new user can be added with
+
+    prompt> passwd.sh add [-f <first name>] [-l <last name>] [-e <email>] [-p <password>] <username>
+
+If no password is provided with the `-p` option, the system will ask for
+a password of the new user on the console. Please note that providing a
+password on the command line can be a security risk, because the
+password can be found in the shell history, and, for a short time, in
+the `ps` table. Thus `-p` is not recommended in normal operation.
+
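+For example, the following call (with an illustrative name and e-mail
+address) creates a new account and asks for its password on the console:
+
+    prompt> passwd.sh add -f Albert -l Einstein -e albert.einstein@princeton.edu einstein
+
+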
+
+The password of a user can be tested with
+
+    prompt> passwd.sh test <username>
+
+The system will ask for the current password on the console and then
+print whether the user was authenticated successfully or not.
+
+An account can be changed with
+
+    prompt> passwd.sh change [-f <first name>] [-l <last name>] [-e <email>] [-P] <username>
+
+An account can be removed with
+
+    prompt> passwd.sh remove <username>
+
+The details of an account can be queried with
+
+    prompt> passwd.sh show <username>
+
+All accounts can be listed with
+
+    prompt> passwd.sh list
+
+The password file contains each user in a separate line. The fields of
+each line are separated by colons and contain (in this order): *User Id*,
+*Email Address*, *First Name*, *Last Name* and *Password Hash*.
+The *Password Hash* field represents the
+[salted](http://en.wikipedia.org/wiki/Salted_hash)
+[SHA1](http://en.wikipedia.org/wiki/Sha1) hash of the user's password in
+[BASE64 encoding](http://en.wikipedia.org/wiki/Base64).
+
+### The interface to LDAP
+
+To work with an LDAP server, set
+`authentication-service = ldap-authentication-service` and provide the
+server URL, for example
+
+    ldap.server.url = ldap://d.ethz.ch/DC=d,DC=ethz,DC=ch
+
+and the details of an LDAP account that is allowed to make queries on the
+LDAP server, for example
+
+    ldap.security.principal.distinguished.name = CN=carl,OU=EthUsers,DC=d,DC=ethz,DC=ch
+    ldap.security.principal.password = Carls_LDAP_Password
+
+Note: A space-separated list of URLs can be provided if distinguished
+name and password are valid for all specified LDAP servers.
+
+### The interface to Crowd
+
+When setting `authentication-service = crowd-authentication-service` in
+`service.properties`, the `passwd` file has no effect. Instead, the
+following properties need to be configured.
+
+The URL (without port information):
+
+    crowd.service.url = https://crowd.your.org
+
+The port of the URL:
+
+    crowd.service.port = 443
+
+The name of the application account in Crowd:
+
+    crowd.application.name = openbis
+
+The password of the application account in Crowd:
+
+    crowd.application.password = <application password>
+
+If Crowd is used as an authentication service, the IP of the openBIS
+server and the name of the openBIS application have to be registered
+with the Crowd server.
+
+### Authentication Cache
+
+When a caching authentication service like
+`file-ldap-crowd-caching-authentication-service` is configured,
+authentication results from remote authentication services like LDAP
+and / or Crowd are cached locally in the openBIS Application Server. The
+advantage is a faster login time on repeated logins when one or more
+remote authentication services are slow. The disadvantage is that
+changes to data in the remote authentication system (like a changed
+password or email address) become known to openBIS only with a delay.
+In order to minimize this effect, the authentication caching performs
+"re-validation" of authentication requests asynchronously. That means it
+doesn't block the user from logging in, because it is performed in a
+different thread than the login.
+
+There are two service properties which give you control over the working
+of the authentication cache:
+
+- `authentication.cache.time` lets you set for how long (after putting
+  it into the cache) a cache entry (read: "user name and password")
+  will be kept if the user does not have a successful login to openBIS
+  in this period of time (as successful logins will trigger
+  re-validation and thus renewal of the cache entry). The default is
+  28h, which means that users logging into the system every day will
+  never experience a delay from slow remote authentication systems. A
+  non-positive value will disable caching.
+- `authentication.cache.time-no-revalidation` lets you set for how
+  long (after putting it into the cache) a cache entry will *not* be
+  re-validated if the login was successful. This allows you to reduce
+  the load that openBIS creates on the remote authentication servers
+  for successful logins of the same user. The default is 1h. Setting
+  it to 0 will always trigger re-validation; setting it to
+  `authentication.cache.time` will not perform re-validation at all
+  and thus expire every cache entry after that time.
+
+An administrator with shell access to the openBIS Application Server can
+see and change the current cache entries in the file
+`<server folder>/jetty/etc/passwd_cache`. The format is the same
+as for the file-based authentication system (see section *The file based
+authentication system* above), but has an additional field *Cached At*
+added to the end of each line. *Cached At* is the time (in milliseconds
+since the start of the Unix epoch, which is midnight *Coordinated
+Universal Time*, 1 January 1970) when the entry was cached. Removing a
+line from this file will remove the corresponding cache entry. The
+authentication cache survives openBIS Application Server restarts because
+of this persisted file. If you need to clear the cache altogether, it is
+sufficient to remove the `passwd_cache` file at any time. No server
+restart is needed to make changes to this file take effect.
+
+You can switch off authentication caching by either
+setting `authentication.cache.time = -1`, or by choosing an
+authentication service which does not have `caching` in its name.
+
+### Anonymous Login
+
+To allow anonymous login, a certain user known to openBIS (not
+necessarily to the authentication system) has to be specified. This is
+done by the property `user-for-anonymous-login`. The value is the user
+ID. The display settings and the authorization settings of this user are
+used for the anonymous login.
+
+Anonymous login is possible with the URL parameter `anonymous` set to
+`true` or by the property `default-anonymous-login` in the web
+configuration properties (see [Web Client
+Customization](#InstallationandAdministratorGuideoftheopenBISServer-WebClientCustomization)).
+Note that the ELN client does not use the property
+`default-anonymous-login`; it only needs the property
+`user-for-anonymous-login` set to an existing user with some rights.
+
+### Single Sign On Authentication
+
+Currently only Shibboleth SSO is supported. For more details see [Single
+Sign On
+Authentication](/display/openBISDoc2010/Single+Sign+On+Authentication).
+
+## Authorization
+
+openBIS authorization is described
+here: [Authorization](/display/openBISDoc2010/Authorization).
+
+## System Customization
+
+### Login Page - Banners
+
+To add banners to the openBIS login page, change the `loginHeader.html`
+page. It is stored in the same directory as `index.html`.
Note that if the height of
+`loginHeader.html` is too big, the content may overlap with the rest of
+the openBIS login page.
+
+Example of the `loginHeader.html`:
+
+    <center><img src="images/banner.gif"></center>
+
+For announcements you have to edit the `index.html` file. Here is an
+example showing the tail:
+
+    <input style="margin-left: 200px" type="submit" id="openbis_login_submit" value="Login">
+    <br>
+    <br>
+    <br>
+    <br>
+    <span style="color:red">
+    Due to server maintenance openBIS
+    <br>
+    will not be available on 24th of
+    <br>
+    December 2010 from 10 am to 3 pm!
+    <br>
+    </span>
+    </form>
+    </div>
+    </body>
+    </html>
+
+Note: the current workaround with `br` tags between the lines ensures
+that the login box is still centered.
+
+### Client Customization
+
+#### Configuration
+
+To reconfigure some parts of the openBIS Web Client and Data Set Upload
+Client, prepare the configuration file and add its path as the value of
+the `web-client-configuration-file` property in openBIS
+`service.properties`.
+
+    web-client-configuration-file = etc/web-client.properties
+
+#### Web client customizations
+
+- Enable the trashcan. When the trashcan is enabled, deleting entities
+  only marks them as deleted but does not delete them physically (this
+  is also called "logical deletion"). When clicking on the trashcan
+  icon in the Web GUI, users can see all of their deletion operations
+  and can revert them individually. Only an admin can empty the
+  trashcan and thus delete the entities physically. Only with the
+  trashcan enabled is it possible to delete complete hierarchies (e.g.
+  an experiment with samples and datasets attached).
+- Default view mode (`SIMPLE/NORMAL`) that should be used if the user
+  doesn't have it specified in the URL.
+- Replacement texts for 'Experiment' and 'Sample' by `experiment-text`
+  and `sample-text`, respectively.
+- Anonymous login by default.
+- Sample, material, experiment and data set detail views can be
+  customized by:
+  - hiding the sections (e.g. attachments)
+- Additionally, the data set detail view can be customized by:
+  - removing `Smart View` and `File View` from the list of available
+    reports in the `Data View` section
+- Technology-specific properties with the property `technologies`,
+  which is a comma-separated list of technologies. For each technology,
+  properties are defined whose names start with the technology name
+  followed by a dot character.
+
+#### Data Set Upload Client Customizations
+
+It is possible to restrict the set of data set types available to the
+user in the data set uploader. This is useful when there are some data
+set types that a user would never upload; for example, data set types
+that are used only internally to support third-party software.
+
+The restriction is specified in the web-client.properties file using
+either a whitelist or a blacklist. If both are specified, the whitelist
+is used. To specify a whitelist, use the key
+`creatable-data-set-types-whitelist`; for a blacklist, use the key
+`creatable-data-set-types-blacklist`. The value for the property should
+be a comma-separated list of regular-expression patterns to match. In
+the case of the whitelist, data set types that match the specified
+patterns are shown to the user, whereas for the blacklist, the data set
+types that match the specified patterns are those that are not shown to
+the user.
+
+##### Examples
+
+- Specifying a whitelist
+
+**web-client.properties.**
+
+    creatable-data-set-types-whitelist = .*IMAGE.*, ANALYSIS, THUMBNAIL[0-9]?
+
+Assume we have the following data set types in our system:
+
+*PROCESSED-DATA*, *MICROSCOPE-IMAGE*, *IMAGE-SCREENING*, *ANALYSIS*,
+*ANALYSIS-FEATURES*, *THUMBNAIL1*, *THUMBNAIL-BIG*
+
+In this case, the following data set types will be available to the user:
+
+*MICROSCOPE-IMAGE*, *IMAGE-SCREENING*, *ANALYSIS*, *THUMBNAIL1*
+
+- Specifying a blacklist
+
+**web-client.properties.**
+
+    creatable-data-set-types-blacklist = .*IMAGE.*, ANALYSIS, THUMBNAIL[0-9]?
+
+Assume we have the following data set types in our system:
+
+*PROCESSED-DATA*, *MICROSCOPE-IMAGE*, *IMAGE-SCREENING*, *ANALYSIS*,
+*ANALYSIS-FEATURES*, *THUMBNAIL1*, *THUMBNAIL-BIG*
+
+In this case, the following data set types will be available to the user:
+
+*PROCESSED-DATA*, *ANALYSIS-FEATURES*, *THUMBNAIL-BIG*
+
+#### Full web-client.properties Example
+
+**web-client.properties**
+
+    # Enable the trash can and logical deletion.
+    # Default value: false
+    enable-trash = true
+
+    # Replacement texts for 'Experiment' and 'Sample' in the UI
+    # sample-text = Object
+    # experiment-text = Collection
+
+    # Default view mode that should be used if user doesn't have it specified in URL.
+    # Options: 'NORMAL' (standard or application mode - default), 'SIMPLE' (read-only mode with simplified GUI)
+    #
+    default-view-mode = SIMPLE
+
+    # Flag specifying whether default login mode is anonymous or not.
+    # If true a user-for-anonymous-login has to be defined in service.properties
+    # Default value: false
+    default-anonymous-login = true
+
+    # Configuration of entity (experiment, sample, data set, material) detail views.
+    #
+    # Mandatory properties:
+    #   - view (entity detail view id)
+    #   - types (list of entity type codes)
+    # Optional properties:
+    #   - hide-sections (list of section ids)
+    #   - hide-smart-view (removes "Smart View" from Data Set Detail View -> Data View) (generic_dataset_viewer)
+    #   - hide-file-view (removes "File View" from Data Set Detail View -> Data View) (generic_dataset_viewer)
+    # Available sections in entity-detail-views:
+    #   generic_dataset_viewer
+    #     data-set-data-section
+    #     data-set-parents-section
+    #     data-set-children-section
+    #     query-section
+    #   generic_experiment_viewer
+    #     data-set-section
+    #     attachment-section
+    #     query-section
+    #     container-sample-section
+    #   generic_sample_viewer
+    #     container-sample-section
+    #     derived-samples-section
+    #     parent-samples-section
+    #     data-set-section
+    #     attachment-section
+    #     query-section
+    #   generic_material_viewer
+    #     query-section
+    #
+    # Example:
+    #
+    #detail-views = sample-view, experiment-view, data-view
+    #
+    #sample-view.view = generic_sample_viewer
+    #sample-view.types = STYPE1, STYPE2
+    #sample-view.hide-sections = derived-samples-section, container-sample-section
+    #
+    #experiment-view.view = generic_experiment_viewer
+    #experiment-view.types = ETYPE1, ETYPE2
+    #experiment-view.hide-sections = data-set-section
+    #
+    #data-view.view = generic_dataset_viewer
+    #data-view.types = DSTYPE
+    #data-view.hide-smart-view = false
+    #data-view.hide-file-view = true
+
+    #technologies = screening
+    #screening.image-viewer-enabled = true
+
+    #
+    # Only render these types when creating new data sets via the
+    # Data Set Upload Client
+    #
+    #creatable-data-set-types-whitelist=WHITELISTED_TYPE1, WHITELISTED_TYPE2
+
+    #
+    # Do not render these types in the Data Set Upload Client.
+    # The value of the property is only taken into account if
+    # creatable-data-set-types-whitelist is not configured
+    #
+    #creatable-data-set-types-blacklist=BLACKLISTED_TYPE1, BLACKLISTED_TYPE2
+
+### Configuring File Servlet
+
+This service is specially tailored for web applications that need to
+upload files to the system without using the DataSet concept. It is
+meant to be used for small images and rich-text editors like CKEditor.
+
+| Property Key | Default Value | Description |
+|-------------------------------------|----------------------------|------------------------------------------------------------------------------------------------------------------|
+| file-server.maximum-file-size-in-MB | 10 | Maximum size of files in MB. |
+| file-server.repository-path | ../../../data/file-server | Path where files will be stored; ideally this should be a folder on the same NAS where the DataSets are stored. |
+| file-server.download-check | true | Checks that the user is logged into the system before being allowed to download files. |
+
+### Configuring DSS Data Sources
+
+It is quite common that openBIS AS uses a database filled by a DSS.
+Depending on the DSS (specified by the DSS code) and the technology,
+different databases have to be used.
+
+Configuration is best done by AS [core
+plugins](/display/openBISDoc2010/Core+Plugins) of type
+`dss-data-sources`. The name of the plugin is just the DSS code. The
+following properties of `plugin.properties` are recognized:
+
+| Property Key | Description |
+|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| technology | Normally the technology/module folder of the core plugin specifies the technology/module for which this data source has to be configured. If this is not the case, this property allows specifying the technology/module independently. |
+| database-driver | Fully qualified class name of the database driver, e.g. `org.postgresql.Driver`. |
+| database-url | URL of the database, e.g. `jdbc:postgresql://localhost/imaging_dev` |
+| username | Optional user name needed to access the database. |
+| password | Optional password needed to access the database. |
+| validation-query | Optional SQL script to be executed to validate database connections. |
+| database-max-idle-connections | The maximum number of connections that can remain idle in the pool. A negative value indicates that there is no limit. Default: -1 |
+| database-max-active-connections | The maximum number of active connections that can be allocated at the same time. A negative value indicates that there is no limit. Default: -1 |
+| database-max-wait-for-connection | The maximum number of seconds that the pool will wait for a connection to be returned before throwing an exception. A value less than or equal to zero means the pool is set to wait indefinitely. Default: -1 |
+| database-active-connections-log-interval | The interval (in ms) between two regular log entries of currently active database connections if more than one connection is active. Default: Disabled |
+| database-active-number-connections-log-threshold | The number of active connections that will trigger a NOTIFY log and will switch on detailed connection logging. Default: Disabled |
+| database-log-stacktrace-on-connection-logging | If true and connection logging is enabled, stack traces are also logged. Default: `false` |
+
+Properties `database-driver` and `database-url` can be omitted if an
+`etc/dss-datasource-mapping` file is defined. For more details see [Sharing
+Databases](/display/openBISDoc2010/Sharing+Databases).
+
+### Changing the Capability-Role map
+
+openBIS uses a map of capabilities to roles to decide what role is
+needed to perform a given action. The defaults can be overridden by
+creating a file `etc/capabilities`. One line in this file has one of the
+following formats:
+
+    <Capability>: <Role>[,<ROLE>...]
+    <Capability>: <Role>[,<ROLE>...][; <Parameter> = <Role>[, <Role>...]][; <Parameter> = <Role>[, <Role>]] ...
+    <Capability>: <Parameter> = <Role>[, <Role>...][; <Parameter> = <Role>[, <Role>]] ...
+
+which sets a new (minimal) role for the given capability. There is a
+special role `INSTANCE_DISABLED` which allows completely disabling a
+capability for an instance. Note: to set multiple roles for a single
+capability, use multiple lines in the file.
+
+This is the default map:
+
+|Capability |Parameter|Default Role |Comment |
+|--------------------------------|---------|-----------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+|`WRITE_CUSTOM_COLUMN` | |`SPACE_POWER_USER` | |
+|`DELETE_CUSTOM_COLUMN` | |`SPACE_POWER_USER` | |
+|`WRITE_FILTER` | |`SPACE_POWER_USER` | |
+|`DELETE_FILTER` | |`SPACE_POWER_USER` | |
+|`WRITE_DATASET` | |`SPACE_POWER_USER` | |
+|`WRITE_DATASET_PROPERTIES` | |`SPACE_USER` | |
+|`DELETE_DATASET` | |`SPACE_POWER_USER` | |
+|`WRITE_EXPERIMENT_SAMPLE` | |`SPACE_USER` | |
+|`WRITE_EXPERIMENT_ATTACHMENT` | |`SPACE_USER` | |
+|`WRITE_EXPERIMENT_PROPERTIES` | |`SPACE_USER` | |
+|`DELETE_EXPERIMENT` | |`SPACE_POWER_USER` | |
+|`WRITE_SAMPLE` | |`SPACE_USER` | |
+|`WRITE_SAMPLE_ATTACHMENT` | |`SPACE_USER` | |
+|`WRITE_SAMPLE_PROPERTIES` | |`SPACE_USER` | |
+|`DELETE_SAMPLE` | |`SPACE_POWER_USER` | |
+|`DELETE_SAMPLE_ATTACHMENT` | |`SPACE_POWER_USER` | |
+|`WRITE_DATASET` | |`SPACE_POWER_USER` | |
+|`WRITE_DATASET_PROPERTIES` | |`SPACE_USER` | |
+|`DELETE_DATASET` | |`SPACE_POWER_USER` |Delete datasets (this capability IS NOT enough to delete datasets with deletion_disallow flag set to true in their type - see `FORCE_DELETE_DATASET`) |
+|`FORCE_DELETE_DATASET` | |`INSTANCE_DISABLED` |Delete datasets (this capability IS enough to delete datasets with deletion_disallow flag set to true in their type - see `DELETE_DATASET`) |
+|`ARCHIVE_DATASET` | |`SPACE_POWER_USER` |Move dataset from data store into archive |
+|`UNARCHIVE_DATASET` | |`SPACE_USER` |Copy back dataset from archive to data store |
+|`LOCK_DATA_SETS` | |`SPACE_ADMIN` |Prevent data sets from being archived |
+|`UNLOCK_DATA_SETS` | |`SPACE_ADMIN` |Release locked data sets |
+|`WRITE_EXPERIMENT_SAMPLE_MATERIAL`| |`INSTANCE_ADMIN` |Registration / update of experiments, samples and materials in one go |
+|`REGISTER_SPACE` | |`SPACE_ADMIN` |The user will become space admin of the freshly created space |
+|`DELETE_SPACE` | |`SPACE_ADMIN` | |
+|`UPDATE_SPACE` | |`SPACE_ADMIN` | |
+|`REGISTER_PROJECT` | |`SPACE_POWER_USER` | |
+|`WRITE_PROJECT` | |`SPACE_POWER_USER` | |
+|`WRITE_SAMPLE_ATTACHMENT` | |`SPACE_POWER_USER` | |
+|`DELETE_PROJECT` | |`SPACE_POWER_USER` | |
+|`WRITE_PROJECT_ATTACHMENT` | |`SPACE_POWER_USER` | |
+|`REGISTER_VOCABULARY` | |`INSTANCE_ADMIN` | |
+|`WRITE_VOCABULARY` | |`INSTANCE_ADMIN` | |
+|`DELETE_VOCABULARY` | |`INSTANCE_ADMIN` | |
+|`WRITE_VOCABULARY_TERM` | |`SPACE_POWER_USER` | |
+|`WRITE_UNOFFICIAL_VOCABULARY_TERM`| |`SPACE_USER` | |
+|`PURGE` | |`SPACE_ADMIN` |Permanently delete experiments, samples and datasets in the trashcan (this capability IS NOT enough to delete datasets with deletion_disallow flag set to true in their type - see `FORCE_PURGE`)|
+|`FORCE_PURGE` | |`INSTANCE_DISABLED` |Permanently delete experiments, samples and datasets in the trashcan (this capability IS enough to delete datasets with deletion_disallow flag set to true in their type - see `PURGE`) |
+|`RESTORE` | |`SPACE_USER` |Get back experiments, samples and datasets from the trashcan |
+|`ASSIGN_EXPERIMENT_TO_PROJECT` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`ASSIGN_PROJECT_TO_SPACE` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`ASSIGN_SAMPLE_TO_EXPERIMENT` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER`|Re-assign a sample to a new experiment (called in 'register experiment', 'update experiment', 'update sample') |
+|`UNASSIGN_SAMPLE_FROM_EXPERIMENT` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER`| |
+|`ASSIGN_SAMPLE_TO_SPACE` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` |Re-assign a sample to a new space (called in 'update sample') |
+|`ASSIGN_DATASET_TO_EXPERIMENT` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`ASSIGN_DATASET_TO_SAMPLE` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`SHARE_SAMPLE` | |`INSTANCE_ADMIN`, `INSTANCE_ETL_SERVER`| |
+|`UNSHARE_SAMPLE` | |`INSTANCE_ADMIN`, `INSTANCE_ETL_SERVER`| |
+|`ADD_PARENT_TO_SAMPLE` | |`SPACE_USER`, `SPACE_ETL_SERVER` | |
+|`ADD_PARENT_TO_SAMPLE` |SAMPLE |`SPACE_USER`, `SPACE_ETL_SERVER` | |
+|`ADD_PARENT_TO_SAMPLE` |PARENT |`SPACE_USER`, `SPACE_ETL_SERVER` | |
+|`REMOVE_PARENT_FROM_SAMPLE` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`REMOVE_PARENT_FROM_SAMPLE` |SAMPLE |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`REMOVE_PARENT_FROM_SAMPLE` |PARENT |`SPACE_USER`, `SPACE_ETL_SERVER` | |
+|`ADD_CONTAINER_TO_SAMPLE` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`REMOVE_CONTAINER_FROM_SAMPLE` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`ADD_PARENT_TO_DATASET` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`REMOVE_PARENT_FROM_DATASET` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`ADD_CONTAINER_TO_DATASET` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`REMOVE_CONTAINER_FROM_DATASET` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`ASSIGN_ROLE_TO_SPACE_VIA_DSS` | |`SPACE_ADMIN`, `INSTANCE_ETL_SERVER` | |
+|`CREATE_SPACES_VIA_DSS` | |`SPACE_ADMIN`, `INSTANCE_ETL_SERVER` | |
+|`CREATE_PROJECTS_VIA_DSS` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`UPDATE_PROJECTS_VIA_DSS` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`CREATE_EXPERIMENTS_VIA_DSS` | |`SPACE_USER`, `SPACE_ETL_SERVER` | |
+|`UPDATE_EXPERIMENTS_VIA_DSS` | |`SPACE_USER`, `SPACE_ETL_SERVER` | |
+|`CREATE_SPACE_SAMPLES_VIA_DSS` | |`SPACE_USER`, `SPACE_ETL_SERVER` | |
+|`UPDATE_SPACE_SAMPLES_VIA_DSS` | |`SPACE_USER`, `SPACE_ETL_SERVER` | |
+|`CREATE_INSTANCE_SAMPLES_VIA_DSS` | |`INSTANCE_ETL_SERVER` | |
+|`UPDATE_INSTANCE_SAMPLES_VIA_DSS` | |`INSTANCE_ETL_SERVER` | |
+|`CREATE_MATERIALS_VIA_DSS` | |`INSTANCE_ETL_SERVER` | |
+|`UPDATE_MATERIALS_VIA_DSS` | |`INSTANCE_ETL_SERVER` | |
+|`CREATE_DATA_SETS_VIA_DSS` | |`SPACE_USER`, `SPACE_ETL_SERVER` | |
+|`UPDATE_DATA_SETS_VIA_DSS` | |`SPACE_POWER_USER`, `SPACE_ETL_SERVER` | |
+|`SEARCH_ON_BEHALF_OF_USER` | |`INSTANCE_OBSERVER` |All search or list operations being performed on behalf of another user.
Supposed to be used by a service user for server-to-server communication tasks. | + + +Older versions of openBIS used to allow changing entity relationships to +regular `SPACE_USER`. If you want to get this behavior back, put these +lines into `etc/capabilities`: + + ASSIGN_EXPERIMENT_TO_PROJECT: SPACE_USER + ASSIGN_EXPERIMENT_TO_PROJECT: SPACE_ETL_SERVER + ASSIGN_SAMPLE_TO_EXPERIMENT: SPACE_USER + ASSIGN_SAMPLE_TO_EXPERIMENT: SPACE_ETL_SERVER + UNASSIGN_SAMPLE_FROM_EXPERIMENT: SPACE_USER + UNASSIGN_SAMPLE_FROM_EXPERIMENT: SPACE_ETL_SERVER + ASSIGN_SAMPLE_TO_SPACE: SPACE_USER + ASSIGN_SAMPLE_TO_SPACE: SPACE_ETL_SERVER + ASSIGN_DATASET_TO_EXPERIMENT: SPACE_USER + ASSIGN_DATASET_TO_EXPERIMENT: SPACE_ETL_SERVER + ASSIGN_DATASET_TO_SAMPLE: SPACE_USER + ASSIGN_DATASET_TO_SAMPLE: SPACE_ETL_SERVER + ADD_PARENT_TO_SAMPLE: SPACE_USER + ADD_PARENT_TO_SAMPLE: SPACE_ETL_SERVER + REMOVE_PARENT_FROM_SAMPLE: SPACE_USER + REMOVE_PARENT_FROM_SAMPLE: SPACE_ETL_SERVER + ADD_CONTAINER_TO_SAMPLE: SPACE_USER + ADD_CONTAINER_TO_SAMPLE: SPACE_ETL_SERVER + REMOVE_CONTAINER_FROM_SAMPLE: SPACE_USER + REMOVE_CONTAINER_FROM_SAMPLE: SPACE_ETL_SERVER + ADD_PARENT_TO_DATASET: SPACE_USER + ADD_PARENT_TO_DATASET: SPACE_ETL_SERVER + REMOVE_PARENT_FROM_DATASET: SPACE_USER + REMOVE_PARENT_FROM_DATASET: SPACE_ETL_SERVER + ADD_CONTAINER_TO_DATASET: SPACE_USER + ADD_CONTAINER_TO_DATASET: SPACE_ETL_SERVER + REMOVE_CONTAINER_FROM_DATASET: SPACE_USER + REMOVE_CONTAINER_FROM_DATASET: SPACE_ETL_SERVER + +#### Capability Role Map for V3 API + +| Method of IApplicationServerApi | Default Roles | Capability | +|------------------------------------------|---------------------------------------------------|-----------------------------------| +| archiveDataSets | PROJECT_POWER_USER, SPACE_ETL_SERVER | ARCHIVE_DATASET | +| confirmDeletions, forceDeletion == false | PROJECT_ADMIN, SPACE_ETL_SERVER | CONFIRM_DELETION | +| confirmDeletions, forceDeletion == true | disabled | CONFIRM_DELETION_FORCED | +| createAuthorizationGroups | INSTANCE_ADMIN | CREATE_AUTHORIZATION_GROUP | +| createCodes | PROJECT_USER, SPACE_ETL_SERVER | CREATE_CODES | +| createDataSetTypes | INSTANCE_ADMIN, INSTANCE_ETL_SERVER | CREATE_DATASET_TYPE | +| createDataSets | PROJECT_USER, SPACE_ETL_SERVER | CREATE_DATASET | +| createExperimentTypes | INSTANCE_ADMIN, INSTANCE_ETL_SERVER | CREATE_EXPERIMENT_TYPE | +| createExperiments | PROJECT_USER, SPACE_ETL_SERVER | CREATE_EXPERIMENT | +| createExternalDataManagementSystems | INSTANCE_ADMIN | CREATE_EXTERNAL_DMS | +| createMaterialTypes | INSTANCE_ADMIN, INSTANCE_ETL_SERVER | CREATE_MATERIAL_TYPE | +| createMaterials | INSTANCE_ADMIN, INSTANCE_ETL_SERVER | CREATE_MATERIAL | +| createPermIdStrings | PROJECT_USER, SPACE_ETL_SERVER | CREATE_PERM_IDS | +| createPersons | INSTANCE_ADMIN | CREATE_PERSON | +| createPlugins | INSTANCE_ADMIN | CREATE_PLUGIN | +| createProjects | SPACE_POWER_USER, SPACE_ETL_SERVER | CREATE_PROJECT | +| createPropertyTypes | INSTANCE_ADMIN | CREATE_PROPERTY_TYPE | +| createQueries | PROJECT_OBSERVER, SPACE_ETL_SERVER | CREATE_QUERY | +| createRoleAssignments, instance role | INSTANCE_ADMIN | CREATE_INSTANCE_ROLE | +| createRoleAssignments, space role | SPACE_ADMIN | CREATE_SPACE_ROLE | +| createRoleAssignments, project role | PROJECT_ADMIN | CREATE_PROJECT_ROLE | +| createSampleTypes | INSTANCE_ADMIN, INSTANCE_ETL_SERVER | CREATE_SAMPLE_TYPE | +| createSamples | PROJECT_USER, SPACE_ETL_SERVER | CREATE_SAMPLE | +| createSemanticAnnotations | INSTANCE_ADMIN, INSTANCE_ETL_SERVER | 
CREATE_SEMANTIC_ANNOTATION | +| createSpaces | SPACE_ADMIN, SPACE_ETL_SERVER | CREATE_SPACE | +| createTags | PROJECT_OBSERVER, SPACE_ETL_SERVER | CREATE_TAG | +| createVocabularies | INSTANCE_ADMIN | CREATE_VOCABULARY | +| createVocabularyTerms, official terms | PROJECT_POWER_USER, SPACE_ETL_SERVER | CREATE_OFFICIAL_VOCABULARY_TERM | +| createVocabularyTerms, unofficial terms | PROJECT_USER, SPACE_ETL_SERVER | CREATE_UNOFFICIAL_VOCABULARY_TERM | +| deleteAuthorizationGroups | INSTANCE_ADMIN | DELETE_AUTHORIZATION_GROUP | +| deleteDataSetTypes | INSTANCE_ADMIN | DELETE_DATASET_TYPE | +| deleteDataSets | PROJECT_POWER_USER, SPACE_ETL_SERVER | DELETE_DATASET | +| deleteExperimentTypes | INSTANCE_ADMIN | DELETE_EXPERIMENT_TYPE | +| deleteExperiments | PROJECT_POWER_USER, SPACE_ETL_SERVER | DELETE_EXPERIMENT | +| deleteExternalDataManagementSystems | INSTANCE_ADMIN | DELETE_EXTERNAL_DMS | +| deleteMaterialTypes | INSTANCE_ADMIN | DELETE_MATERIAL_TYPE | +| deleteMaterials | INSTANCE_ADMIN, INSTANCE_ETL_SERVER | DELETE_MATERIAL | +| deleteOperationExecutions | PROJECT_USER, SPACE_ETL_SERVER | DELETE_OPERATION_EXECUTION | +| deletePlugins | INSTANCE_ADMIN | DELETE_PLUGIN | +| deleteProjects | SPACE_POWER_USER, PROJECT_ADMIN, SPACE_ETL_SERVER | DELETE_PROJECT | +| deletePropertyTypes | INSTANCE_ADMIN | DELETE_PROPERTY_TYPE | +| deleteQueries | PROJECT_OBSERVER, SPACE_ETL_SERVER | DELETE_QUERY | +| deleteRoleAssignments, instance role | INSTANCE_ADMIN | DELETE_INSTANCE_ROLE | +| deleteRoleAssignments, space role | SPACE_ADMIN | DELETE_SPACE_ROLE | +| deleteRoleAssignments, project role | PROJECT_ADMIN | DELETE_PROJECT_ROLE | +| deleteSampleTypes | INSTANCE_ADMIN | DELETE_SAMPLE_TYPE | +| deleteSamples | PROJECT_POWER_USER, SPACE_ETL_SERVER | DELETE_SAMPLE | +| deleteSemanticAnnotations | INSTANCE_ADMIN, INSTANCE_ETL_SERVER | DELETE_SEMANTIC_ANNOTATION | +| deleteSpaces | SPACE_ADMIN, SPACE_ETL_SERVER | DELETE_SPACE | +| deleteTags | PROJECT_OBSERVER, SPACE_ETL_SERVER | DELETE_TAG | +| deleteVocabularies | INSTANCE_ADMIN | DELETE_VOCABULARY | +| deleteVocabularyTerms | PROJECT_POWER_USER, SPACE_ETL_SERVER | DELETE_VOCABULARY_TERM | +| executeAggregationService | PROJECT_OBSERVER | EXECUTE_AGGREGATION_SERVICES | +| executeCustomASService | PROJECT_OBSERVER, SPACE_ETL_SERVER | EXECUTE_CUSTOM_AS_SERVICE | +| executeProcessingService | PROJECT_USER | EXECUTE_PROCESSING_SERVICES | +| executeQuery | PROJECT_OBSERVER, SPACE_ETL_SERVER | EXECUTE_QUERY | +| executeReportingService | PROJECT_OBSERVER | EXECUTE_REPORTING_SERVICES | +| executeSearchDomainService | PROJECT_OBSERVER | EXECUTE_SEARCH_DOMAIN_SERVICES | +| executeSql | PROJECT_OBSERVER, SPACE_ETL_SERVER | EXECUTE_QUERY | +| getAuthorizationGroups | PROJECT_ADMIN | GET_AUTHORIZATION_GROUP | +| getDataSetTypes | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_DATASET_TYPE | +| getDataSets | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_DATASET | +| getExperimentTypes | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_EXPERIMENT_TYPE | +| getExperiments | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_EXPERIMENT | +| getExternalDataManagementSystems | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_EXTERNAL_DMS | +| getMaterialTypes | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_MATERIAL_TYPE | +| getMaterials | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_MATERIAL | +| getOperationExecutions | PROJECT_USER, SPACE_ETL_SERVER | GET_OPERATION_EXECUTION | +| getPersons | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_PERSON | +| getPlugins | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_PLUGIN | +| 
getProjects | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_PROJECT | +| getPropertyTypes | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_PROPERTY_TYPE | +| getQueries | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_QUERY | +| getRoleAssignments | PROJECT_ADMIN | GET_ROLE_ASSIGNMENT | +| getSampleTypes | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_SAMPLE_TYPE | +| getSamples | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_SAMPLE | +| getSemanticAnnotations | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_SEMANTIC_ANNOTATION | +| getSessionInformation | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_SESSION | +| getSpaces | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_SPACE | +| getTags | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_TAG | +| getVocabularies | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_VOCABULARY | +| getVocabularyTerms | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_VOCABULARY_TERM | +| lockDataSets | PROJECT_ADMIN | LOCK_DATASET | +| revertDeletions | PROJECT_USER, SPACE_ETL_SERVER | REVERT_DELETION | +| searchAggregationServices | PROJECT_OBSERVER | SEARCH_AGGREGATION_SERVICES | +| searchAuthorizationGroups | PROJECT_ADMIN | SEARCH_AUTHORIZATION_GROUP | +| searchCustomASServices | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_CUSTOM_AS_SERVICES | +| searchDataSetTypes | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_DATASET_TYPE | +| searchDataSets | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_DATASET | +| searchDataStores | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_DATASTORE | +| searchDeletions | PROJECT_USER, SPACE_ETL_SERVER | SEARCH_DELETION | +| searchExperimentTypes | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_EXPERIMENT_TYPE | +| searchExperiments | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_EXPERIMENT | +| searchExternalDataManagementSystems | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_EXTERNAL_DMS | +| searchGlobally | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_GLOBALLY | +| searchMaterialTypes | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_MATERIAL_TYPE | +| searchMaterials | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_MATERIAL | +| searchObjectKindModifications | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_OBJECT_KIND_MODIFICATION | +| searchOperationExecutions | PROJECT_USER, SPACE_ETL_SERVER | GET_OPERATION_EXECUTION | +| searchPersons | PROJECT_OBSERVER, SPACE_ETL_SERVER | GET_PERSON | +| searchPlugins | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_PLUGIN | +| searchProcessingServices | PROJECT_OBSERVER | SEARCH_PROCESSING_SERVICES | +| searchProjects | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_PROJECT | +| searchPropertyTypes | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_PROPERTY_TYPE | +| searchQueries | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_QUERY | +| searchReportingServices | PROJECT_OBSERVER | SEARCH_REPORTING_SERVICES | +| searchRoleAssignments | PROJECT_ADMIN | SEARCH_ROLE_ASSIGNMENT | +| searchSampleTypes | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_SAMPLE_TYPE | +| searchSamples | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_SAMPLE | +| searchSearchDomainServices | PROJECT_OBSERVER | SEARCH_SEARCH_DOMAIN_SERVICES | +| searchSemanticAnnotations | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_SEMANTIC_ANNOTATION | +| searchSpaces | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_SPACE | +| searchTags | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_TAG | +| searchVocabularies | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_VOCABULARY | +| searchVocabularyTerms | PROJECT_OBSERVER, SPACE_ETL_SERVER | SEARCH_VOCABULARY_TERM | +| unarchiveDataSets | PROJECT_USER, SPACE_ETL_SERVER | UNARCHIVE_DATASET | +| 
unlockDataSets | PROJECT_ADMIN | UNLOCK_DATASET |
+| updateAuthorizationGroups | INSTANCE_ADMIN | UPDATE_AUTHORIZATION_GROUP |
+| updateDataSetTypes | INSTANCE_ADMIN | UPDATE_DATASET_TYPE |
+| updateDataSets | PROJECT_POWER_USER, SPACE_ETL_SERVER | UPDATE_DATASET |
+| updateDataSets, properties | PROJECT_POWER_USER, SPACE_ETL_SERVER | UPDATE_DATASET_PROPERTY |
+| updateExperimentTypes | INSTANCE_ADMIN | UPDATE_EXPERIMENT_TYPE |
+| updateExperiments | PROJECT_USER, SPACE_ETL_SERVER | UPDATE_EXPERIMENT |
+| updateExperiments, attachments | PROJECT_USER, SPACE_ETL_SERVER | UPDATE_EXPERIMENT_ATTACHMENT |
+| updateExperiments, properties | PROJECT_USER, SPACE_ETL_SERVER | UPDATE_EXPERIMENT_PROPERTY |
+| updateExternalDataManagementSystems | INSTANCE_ADMIN | UPDATE_EXTERNAL_DMS |
+| updateMaterialTypes | INSTANCE_ADMIN | UPDATE_MATERIAL_TYPE |
+| updateMaterials | INSTANCE_ADMIN, INSTANCE_ETL_SERVER | UPDATE_MATERIAL |
+| updateMaterials, properties | INSTANCE_ADMIN, INSTANCE_ETL_SERVER | UPDATE_MATERIAL_PROPERTY |
+| updateOperationExecutions | PROJECT_USER, SPACE_ETL_SERVER | UPDATE_OPERATION_EXECUTION |
+| updatePersons, activate | INSTANCE_ADMIN | ACTIVATE_PERSON |
+| updatePersons, deactivate | INSTANCE_ADMIN | DEACTIVATE_PERSON |
+| updatePersons, set home space | SPACE_ADMIN | UPDATE_HOME_SPACE |
+| updatePlugins | INSTANCE_ADMIN | UPDATE_PLUGIN |
+| updateProjects | SPACE_POWER_USER, PROJECT_ADMIN, SPACE_ETL_SERVER | UPDATE_PROJECT |
+| updateProjects, attachments | SPACE_POWER_USER, PROJECT_ADMIN, SPACE_ETL_SERVER | UPDATE_PROJECT_ATTACHMENT |
+| updatePropertyTypes | INSTANCE_ADMIN | UPDATE_PROPERTY_TYPE |
+| updateQueries | PROJECT_OBSERVER, SPACE_ETL_SERVER | UPDATE_QUERY |
+| updateSampleTypes | INSTANCE_ADMIN | UPDATE_SAMPLE_TYPE |
+| updateSamples | PROJECT_USER, SPACE_ETL_SERVER | UPDATE_SAMPLE |
+| updateSamples, attachments | PROJECT_USER, SPACE_ETL_SERVER | UPDATE_SAMPLE_ATTACHMENT |
+| updateSamples, properties | PROJECT_USER, SPACE_ETL_SERVER | UPDATE_SAMPLE_PROPERTY |
+| updateSemanticAnnotations | INSTANCE_ADMIN, INSTANCE_ETL_SERVER | UPDATE_SEMANTIC_ANNOTATION |
+| updateSpaces | SPACE_ADMIN, SPACE_ETL_SERVER | UPDATE_SPACE |
+| updateTags | PROJECT_OBSERVER, SPACE_ETL_SERVER | UPDATE_TAG |
+| updateVocabularies | INSTANCE_ADMIN | UPDATE_VOCABULARY |
+| updateVocabularyTerms, official terms | PROJECT_POWER_USER, SPACE_ETL_SERVER | UPDATE_OFFICIAL_VOCABULARY_TERM |
+| updateVocabularyTerms, unofficial terms | PROJECT_USER, SPACE_ETL_SERVER | UPDATE_UNOFFICIAL_VOCABULARY_TERM |
+
+### Querying Project Database
+
+In some customized versions of openBIS an additional project-specific
+database stores data from registered data sets. This database can be
+queried via SQL SELECT statements in the openBIS Web application. In
+order to protect this database against modification by malicious SQL
+code, the openBIS Application Server should access it as a user which is
+a member of a read-only group. The name of this read-only group is
+project-specific.
+
+It is possible to configure openBIS to query multiple project-specific
+databases.
+
+#### Create Read-Only User in PostgreSQL
+
+A new user (aka role) is created by
+
+    CREATE ROLE <read-only user> LOGIN NOSUPERUSER INHERIT NOCREATEDB NOCREATEROLE;
+
+This new user is added to the read-only group by the following command:
+
+    GRANT <read-only group> TO <read-only user>;
+
+The name of the read-only group can be obtained by having a look at the
+list of all groups:
+
+    SELECT * from PG_GROUP;
+
+*Note that by default openBIS creates a user `openbis_readonly` which
+has read-only permissions to all database objects. You can use this user
+to access the openBIS meta database through the openBIS query
+interface.*
+
+#### Enable Querying
+
+To enable querying functionality for additional databases in the openBIS
+Web application, a [core plugin](/display/openBISDoc2010/Core+Plugins) of
+type `query-databases` has to be created. The following
+`plugin.properties` have to be specified:
+
+| Property | Description |
+|-------------------|-----------------------------------------------------------------------------------------------------------------------------|
+| label | Label of the database. It will be used in the Web application in drop-down lists for adding / editing customized queries. |
+| database-driver | JDBC driver of the database, e.g. org.postgresql.Driver for postgresql. |
+| database-url | JDBC URL to the database containing the full database name, e.g. jdbc:postgresql://localhost/database_name for postgresql. |
+| database-username | The read-only user defined above. |
+| database-password | Password of the read-only user. |
+
+#### Configure Authorization
+
+To configure authorization, two additional properties can be set:
+
+| Property | Description |
+|---------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| <database>.data-space | The data space this database belongs to (optional; a query database can be configured not to belong to any data space by leaving this configuration value empty). |
+| <database>.creator-minimal-role | The role required to be allowed to create / edit queries on this database (optional; default: INSTANCE_OBSERVER if data-space is not set, POWER_USER otherwise). |
+
+The given parameters data-space and creator-minimal-role are used by
+openBIS to enforce proper authorization.
+
+For example, if
+
+    data-space = CISD
+    creator-minimal-role = SPACE_ADMIN
+
+is configured, then for the query database configured with key `db1`:
+
+- only a `SPACE_ADMIN` on data space `CISD` and an `INSTANCE_ADMIN`
+  are allowed to create / edit queries,
+- only a user who has the `OBSERVER` role in data space `CISD` is
+  allowed to execute a query.
+
+For query databases that do not belong to a space but that have a column
+with any of the [magic column
+names](/display/openBISDoc2010/Custom+Database+Queries#CustomDatabaseQueries-Hyperlinks),
+the query result is filtered on a per-row basis according to what the
+user executing the query is allowed to see. In detail this means: if the
+user executing the query is not an instance admin, all rows which belong
+to a data space where the user doesn't have at least the observer role
+are filtered out. The relationship between a row and a data space is
+established by means of the experiment / sample / data set whose
+`permId` is given by one of the magic column names.
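+
+Putting the properties of this section together, a hypothetical
+`plugin.properties` for a query database belonging to data space `CISD`
+could look like this (driver class, URL and credentials are placeholder
+values):
+
+    label = Project Database
+    database-driver = org.postgresql.Driver
+    database-url = jdbc:postgresql://localhost/project_db
+    database-username = project_readonly
+    database-password = secret
+    data-space = CISD
+    creator-minimal-role = SPACE_ADMIN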
+
+For sensitive data where authorization needs to be enforced, two setups
+are possible:
+
+1. Configure a query database that **does not** belong to a data space
+   and set the creator-minimal-role to `INSTANCE_ADMIN`. Any instance
+   admin can be trusted to understand authorization issues and to ensure
+   that only queries containing a proper reference to an experiment /
+   sample / data set are added for this query database. This way, it
+   can be ensured that only properly filtered query results are
+   returned to the user running the query.
+2. Configure a query database that **does** belong to a specific data
+   space and set the creator-minimal-role to `POWER_USER`. The
+   datastore server (or whatever server maintains the query database)
+   ensures that only information related to the configured data space
+   is added to the query database. Thus whatever query the power user
+   writes for this database, it will only reveal information from this
+   data space. As only users with the `OBSERVER` role on this data space
+   are allowed to execute the query, authorization is enforced properly
+   without the need to filter query results.
+
+### Master data import/export
+
+The master data of openBIS comprises all entity/property types, property
+assignments, vocabularies etc. needed for your customized installation
+to work. The system offers a way to export/import master data via Jython
+scripts. For more information on how to create such scripts and run them
+manually, see the advanced guide [Jython Master Data
+Scripts](/display/openBISDoc2010/Jython+Master+Data+Scripts#JythonMasterDataScripts-Commandlinetools).
+
+A master data script can be run automatically at startup of the AS if
+it is defined in an AS core plugin. The script path should be
+`<installation directory>/servers/core-plugins/<module name>/<version number>/as/initialize-master-data.py`.
+For more details about the folder structure of core plugins see [Core
+Plugins](/display/openBISDoc2010/Core+Plugins). If there are several
+core plugins with master data scripts, the scripts will be executed in
+alphabetical order of the module names. For example, the master data
+script of module `screening-optional` will be executed after the master
+data script of module `screening` has been executed.
+
+Execution of a master data script can be suppressed by disabling
+the `initialize-master-data` core plugin. For more details see
+[Core Plugins](/display/openBISDoc2010/Core+Plugins).
+
+### Limit of open files
+
+When putting a lot of files in a drop box you might run into a
+'`too many open files`' error. Please consider changing the ulimit
+value to a higher value (for RHEL6, edit `/etc/security/limits.conf`).
+
+### Runtime changes to logging
+
+The script
+`<installation directory>/servers/openBIS-server/jetty/bin/configure.sh`
+can be used to change the logging behavior of the openBIS application
+server while the server is running.
+
+The script is used like this:
+
+    configure.sh [command] [argument]
+
+The table below describes the possible commands and their arguments.
+
+| Command | Argument(s) | Default Value | Description |
+|--------------------------------------|--------------------------------------------------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| log-service-calls | 'on', 'off' | 'off' | Turns on / off detailed service call logging. When this feature is enabled, openBIS logs the start and end of every service call it executes to the file <installation directory>/servers/openBIS-server/jetty/log/openbis_service_calls.txt. |
+| log-long-running-invocations | 'on', 'off' | 'on' | Turns on / off logging of long-running invocations. When this feature is enabled, openBIS periodically creates a report of all service calls that have been in execution for more than 15 seconds in the file <installation directory>/servers/openBIS-server/jetty/log/openbis_long_running_threads.txt. |
+| debug-db-connections | 'on', 'off' | 'off' | Turns on / off logging of database connection pool activity. When this feature is enabled, information about every borrow from and return to the database connection pool is logged to the openBIS main log in the file <installation directory>/servers/openBIS-server/jetty/log/openbis_log.txt. |
+| log-db-connections | no argument / minimum connection age (in milliseconds) | 5000 | When this command is executed without an argument, information about every database connection that has been borrowed from the connection pool is written into the openBIS main log in the file <installation directory>/servers/openBIS-server/jetty/log/openbis_log.txt. If the "minimum connection age" argument is specified, only connections that have been out of the pool longer than the specified time are logged. The minimum connection age value is given in milliseconds. |
+| record-stacktrace-db-connections | 'on', 'off' | 'off' | Turns on / off logging of stack traces. When this feature is enabled AND debug-db-connections is enabled, the full stack trace of the borrowing thread is recorded with the connection pool activity logs. |
+| log-db-connections-separate-log-file | 'on', 'off' | 'off' | Turns on / off database connection pool logging to a separate file. When this feature is disabled, the database connection pool activity logging is done only to the openBIS main log. When this feature is enabled, the activity logging is ALSO done to the file <installation directory>/servers/openBIS-server/jetty/log/openbis_db_connections.txt. |
+
+### Deleted Entity History
+
+Logging the history of deleted entities can be enabled / disabled in
+service.properties using the setting
+
+    entity-history.enabled = [true | false]
+
+Since 20.10.1 the default value is true (meaning entity history is
+enabled). Before 20.10.1 the default value was false.
+
+Deleted entity history can be queried with the script show-history.sh,
+which is located in `$OPENBIS_INSTALL_DIR/bin`.
+
+## Troubleshooting Problems
+
+### Samples with datasets and no experiments
+
+In the openBIS UI users could detach samples with container data sets
+from the experiment. This bug was fixed in version S176, released on
+14 March 2014.
+
+The following SQL script lists all samples with data sets but no
+experiments:
+
+    --
+    -- SELECT SAMPLES WITH DATASETS AND NO EXPERIMENTS
+    --
+    SELECT s.id, d.expe_id from samples_all s join data_all d on (d.samp_id=s.id) where s.expe_id is null ORDER by s.id
+
+If this query shows no output, the system is fine; if not, it can be
+repaired with the following update query.
+
+    --
+    -- FIX SAMPLES WITH DATASETS AND NO EXPERIMENTS ASSIGNING EXPERIMENT FROM DATASET
+    --
+    UPDATE samples_all
+    SET expe_id = subquery.expe_id
+    FROM (
+        SELECT s.id as samp_id, d.expe_id as expe_id from samples_all s join data_all d on (d.samp_id=s.id) where s.expe_id is null
+    ) as subquery
+    where id = subquery.samp_id
diff --git a/docs/system-admin-documentation/installation/optional-application-server-configuration.md b/docs/system-admin-documentation/installation/optional-application-server-configuration.md
new file mode 100644
index 0000000000000000000000000000000000000000..104437f36d295cc99d603a2303034eeabaf9d24e
--- /dev/null
+++ b/docs/system-admin-documentation/installation/optional-application-server-configuration.md
@@ -0,0 +1,4 @@
+Optional Application Server Configuration
+=========================================
+
+To be written
\ No newline at end of file
diff --git a/docs/system-admin-documentation/installation/optional-datastore-server-configuration.md b/docs/system-admin-documentation/installation/optional-datastore-server-configuration.md
new file mode 100644
index 0000000000000000000000000000000000000000..4cbbee2e0b822cb8b96e73673d9a33c38ef2a883
--- /dev/null
+++ b/docs/system-admin-documentation/installation/optional-datastore-server-configuration.md
@@ -0,0 +1,4 @@
+Optional Datastore Server Configuration
+=======================================
+
+To be written
\ No newline at end of file
diff --git a/docs/system-admin-documentation/installation/system-requirements.md b/docs/system-admin-documentation/installation/system-requirements.md
new file mode 100644
index 0000000000000000000000000000000000000000..62f9a29fe06623538b2ad88cd0f6edc1364dde9e
--- /dev/null
+++ b/docs/system-admin-documentation/installation/system-requirements.md
@@ -0,0 +1,4 @@
+System Requirements
+===================
+
+To be written
\ No newline at end of file
diff --git a/docs/user-documentation/advance-features/command-line-tool.md b/docs/user-documentation/advance-features/command-line-tool.md
new file mode 100644
index 0000000000000000000000000000000000000000..dac2b9a10aec5356acd1071e770b3de6df22b7ed
--- /dev/null
+++ b/docs/user-documentation/advance-features/command-line-tool.md
@@ -0,0 +1,657 @@
+# openBIS Command Line Tool (oBIS)
+
+oBIS is a command-line tool for handling data sets tracked by openBIS,
+where users have complete freedom to structure and manipulate the data
+as they wish, while retaining the benefits of openBIS.
+
+With oBIS, it is possible not only to handle datasets stored in openBIS,
+but also to keep only metadata in openBIS while the data itself is
+managed externally, by the user. In this case, openBIS is aware of its
+existence and the data can be used for provenance tracking.
+
+## 1. Prerequisites
+
+- python 3.6 or higher
+- git 2.11 or higher
+- git-annex 6 or higher [Installation guide](https://git-annex.branchable.com/install/)
+
+## 2. Installation
+
+```
+pip3 install obis
+```
+
+Since `obis` is based on `pybis`, the pip command will also install
+pybis and all its dependencies.
+
+## 3. Quick start guide
+
+**Configure your openBIS Instance**
+```
+# global settings to be used for all obis repositories
+obis config -g set openbis_url=https://localhost:8888
+obis config -g set user=admin
+```
+**Download Physical Dataset**
+```
+# create a physical (-p) obis repository with a folder name
+obis init -p data1
+cd data1
+# check configuration
+obis config get is_physical
+# download dataset giving a single permId
+obis download 20230228091119011-58
+```
+**Upload Physical Dataset**
+```
+# create a physical (-p) obis repository with a folder name
+obis init -p data1
+cd data1
+# check configuration
+obis config get is_physical
+# upload as many files or folders as you want (-f) to an existing object as type RAW_DATA
+obis upload 20230228133001314-59 RAW_DATA -f your_file_a -f your_file_b
+```
+
+## 4. Usage
+
+### 4.1 Help is your friend!
+
+$ obis --help
+
+```
+Usage: obis [OPTIONS] COMMAND [ARGS]...
+
+Options:
+  --version                Show the version and exit.
+  -q, --quiet              Suppress status reporting.
+  -s, --skip_verification  Do not verify certificates
+  -d, --debug              Show stack trace on error.
+  --help                   Show this message and exit.
+
+Commands:
+  addref         Add the given repository as a reference to openBIS.
+  clone          Clone the repository found in the given data set id.
+  collection     Get/set settings related to the collection.
+  commit         Commit the repository to git and inform openBIS.
+  config         Get/set configurations.
+  data_set       Get/set settings related to the data set.
+  download       Download files of a data set.
+  init           Initialize the folder as a data repository.
+  init_analysis  Initialize the folder as an analysis folder.
+  move           Move the repository found in the given data set id.
+  object         Get/set settings related to the object.
+  removeref      Remove the reference to the given repository from openBIS.
+  repository     Get/set settings related to the repository.
+  settings       Get all settings.
+  status         Show the state of the obis repository.
+  sync           Sync the repository with openBIS.
+  token          Create/show an openBIS token.
+  upload         Upload files to form a data set.
+```
+
+To show detailed help for a specific command, type `obis <command> --help`:
+
+```
+$ obis commit --help
+Usage: obis commit [OPTIONS] [REPOSITORY]
+
+Options:
+  -m, --msg TEXT               A message explaining what was done.
+  -a, --auto_add               Automatically add all untracked files.
+  -i, --ignore_missing_parent  If parent data set is missing, ignore it.
+  --help                       Show this message and exit.
+```
+
+## 5. Work modes
+
+The oBIS command-line tool can work in two modes, depending on how data
+is stored:
+
+1. Standard Data Store mode
+2. External Data Store mode
+
+**Warning:** Each repository can work in a single mode only! Mixing
+modes is not supported.
+
+Depending on the mode, some commands may be unavailable or behave
+differently. Please read the details in the relevant section.
+
+Here is a short summary of which commands are available in each mode:
+
+| Command | Standard Data Store | External Data Store |
+|------------------|:-------------------:|:-------------------:|
+| addref | ❌ | ✅ |
+| clone | ❌ | ✅ |
+| collection get | ✅ | ✅ |
+| collection set | ✅ | ✅ |
+| collection clear | ❌ | ✅ |
+| commit | ❌ | ✅ |
+| config get | ✅ | ✅ |
+| config set | ✅ | ✅ |
+| config clear | ❌ | ✅ |
+| data_set get | ❌ | ✅ |
+| data_set set | ❌ | ✅ |
+| data_set clear | ❌ | ✅ |
+| data_set search | ✅ | ❌ |
+| download | ✅ | ❌ |
+| init | ❌ | ✅ |
+| init -p | ✅ | ❌ |
+| init_analysis | ❌ | ✅ |
+| move | ❌ | ✅ |
+| object get | ✅ | ✅ |
+| object set | ✅ | ✅ |
+| object clear | ❌ | ✅ |
+| object search | ✅ | ❌ |
+| removeref | ❌ | ✅ |
+| repository get | ❌ | ✅ |
+| repository set | ❌ | ✅ |
+| repository clear | ❌ | ✅ |
+| settings get | ❌ | ✅ |
+| settings set | ❌ | ✅ |
+| settings clear | ❌ | ✅ |
+| status | ❌ | ✅ |
+| sync | ❌ | ✅ |
+| token | ✅ | ✅ |
+| upload | ✅ | ❌ |
+
+**Login**
+
+Some commands, like `download` or `upload`, connect to the openBIS
+instance. oBIS will use the username configured in `.obis/config.json`
+and will ask for a password whenever the session expires or the
+username changes.
+
+### 5.1 Standard Data Store
+
+Standard Data Store mode depicts a workflow where datasets are stored
+directly in the openBIS instance. In this mode the user can
+download/upload files to openBIS, search for objects/datasets fulfilling
+filtering criteria, and get/set properties of the objects/collections
+represented by datasets in the current repository.
+
+#### 5.1.1 Commands
+
+**collection**
+
+```
+obis collection get [key1] [key2] ...
+obis collection set [key1]=[value1], [key2]=[value2] ...
+```
+
+With the `collection` command, obis crawls through the current
+repository and gathers all data set ids; if a data set is connected
+directly to a collection, it gets or sets the given properties on it in
+openBIS.
+
+*Note: some property names may need to be enclosed in quotes, e.g.
+'$name'*
+
+**config**
+
+```
+obis config get [key]
+obis config set [key]=[value]
+```
+
+With the `config` command, obis can get/set the configuration of a local
+repository, e.g. the access link to the openBIS instance.
+
+The settings are saved within the obis repository, in the `.obis`
+folder, as JSON files, or in `~/.obis` for the global settings. They can
+be added/edited manually, which might be useful when it comes to
+integration with other tools.
+
+**Example `.obis/config.json`**
+
+```
+{
+  "fileservice_url": null,
+  "git_annex_hash_as_checksum": true,
+  "hostname": "bsse-bs-dock-5-160.ethz.ch",
+  "is_physical": true,
+  "openbis_url": "http://localhost:8888"
+}
+```
+
+**data_set**
+
+```
+obis data_set search [OPTIONS]
+
+Options:
+  -object_type, --object_type TEXT
+                        Object type code to filter by
+  -space, --space TEXT  Space code
+  -project, --project TEXT
+                        Full project identification code
+  -experiment, --experiment TEXT
+                        Full experiment code
+  -object, --object TEXT
+                        Object identification information, it can be permId or identifier
+  -type, --type TEXT    Type code
+  -registration-date, --registration-date TEXT
+                        Registration date, it can be in the format
+                        "oYYYY-MM-DD" (e.g. ">2023-01-31", "=2023-01-31", "<2023-01-31")
+  -modification-date, --modification-date TEXT
+                        Modification date, it can be in the format
+                        "oYYYY-MM-DD" (e.g. ">2023-01-31", "=2023-01-31", "<2023-01-31")
+  -property TEXT        Property code
+  -property-value TEXT  Property value
+  -save, --save TEXT    Directory name to save results
+  -r, --recursive       Search data recursively
+```
+
+With the `data_set search` command, obis connects to the configured
+openBIS instance and searches for all data sets that fulfill the given
+filtering criteria or match a given object identification string. At
+least one search option must be specified.
+
+Search results can be saved to a file by using the `save` option.
+
+The recursive option extends the search to data sets of child samples
+or data sets.
+
+*Note: Filtering by `-project` may not work when `Project Samples` are
+disabled in the openBIS configuration.*
+
+**download**
+
+```
+obis download [options] [data_set_id]
+
+Options:
+  -from-file, --from-file TEXT  An output .CSV file from `obis data_set search`
+                                command with the list of objects to download
+                                data sets from
+  -f, --file TEXT               File in the data set to download - downloading
+                                all if not given.
+  -s, --skip_integrity_check    Flag to skip file integrity check with
+                                checksums
+```
+
+The `download` command downloads the files of a given data set from the
+openBIS instance specified in `config`. This command requires the
+DownloadHandler / FileInfoHandler microservices to be running, and the
+`fileservice_url` needs to be configured.
+
+**init**
+
+```
+obis init -p [folder]
+```
+
+If a folder is given, obis will initialize that folder as an obis
+repository that works in the Standard Data Store mode. If not, it will
+use the current folder.
+
+**object get / set**
+
+```
+obis object get [key1] [key2] ...
+obis object set [key1]=[value1], [key2]=[value2] ...
+```
+
+With the `get` and `set` commands, obis crawls through the current
+repository and gathers all data set ids; if a data set is connected
+directly to an object, it gets or sets the given properties on it in
+openBIS.
+
+*Note: some property names may need to be enclosed in quotes, e.g.
+'$name'*
+
+**object search**
+
+```
+obis object search [OPTIONS]
+
+Options:
+  -type, --type TEXT    Type code to filter by
+  -space, --space TEXT  Space code
+  -project, --project TEXT
+                        Full project identification code
+  -experiment, --experiment TEXT
+                        Full experiment code
+  -object, --object TEXT
+                        Object identification information, it can be permId or identifier
+  -registration-date, --registration-date TEXT
+                        Registration date, it can be in the format
+                        "oYYYY-MM-DD" (e.g. ">2023-01-31", "=2023-01-31", "<2023-01-31")
+  -modification-date, --modification-date TEXT
+                        Modification date, it can be in the format
+                        "oYYYY-MM-DD" (e.g. ">2023-01-31", "=2023-01-31", "<2023-01-31")
+  -property TEXT        Property code
+  -property-value TEXT  Property value
+  -save, --save TEXT    File name to save results in csv format
+  -r, --recursive       Search data recursively
+```
+
+With the `object search` command, obis connects to the configured
+openBIS instance and searches for all objects/samples that fulfill the
+given filtering criteria or match a given object identification string.
+At least one search option must be specified.
+
+Search results can be saved to a file by using the `save` option.
+
+The recursive option extends the search to data sets of child samples
+or data sets.
+
+*Note: Filtering by `-project` may not work when `Project Samples` are
+disabled in the openBIS configuration.*
+
+**upload**
+
+```
+obis upload [sample_id] [data_set_type] [OPTIONS]
+```
+
+With the `upload` command, a new data set of type `data_set_type` will
+be created under the object `sample_id`.
Files and folders specified with the `-f` flag will be uploaded to the newly created
+data set.
+
+### 5.1.2 Examples
+
+**Create an obis repository to work in Standard Data Store mode**
+
+```
+# global settings to be used for all obis repositories
+obis config -g set openbis_url=https://localhost:8888
+obis config -g set user=admin
+# create an obis repository with a folder name
+obis init -p data1
+cd data1
+# check configuration
+obis config get is_physical
+# search for objects of type BACTERIA in space TESTID in openBIS
+obis object search -space TESTID -type BACTERIA
+# save search results in files
+obis object search -space TESTID -type BACTERIA -save results.csv
+obis object search -space TESTID -save results_space.csv
+# upload files to an existing object as type RAW_DATA
+obis upload 20230228133001314-59 RAW_DATA -f results.csv -f results_space.csv
+```
+
+**Download datasets of an object and set properties**
+
+```
+# assuming we are in a configured obis repository
+obis download 20230228091119011-58
+# set object name to XYZ
+obis object set '$name'=XYZ
+# set children of an object to /TESTID/PROJECT_101/PROJECT_101_EXP_3
+obis object set children=/TESTID/PROJECT_101/PROJECT_101_EXP_3
+```
+
+### 5.2 External Data Store
+
+External Data Store mode allows for orderly management of data in
+conditions that require great flexibility. oBIS makes it possible to track data on a file system,
+where users have complete freedom to structure and manipulate the data as they wish, while retaining
+the benefits of openBIS. With oBIS, only metadata is actually stored and managed by openBIS. The
+data itself is managed externally, by the user, but openBIS is aware of its existence and the data
+can be used for provenance tracking.
+
+Under the covers, obis takes advantage of publicly available and tested tools to manage data on the
+file system. In particular, it uses git and git-annex to track the content of a dataset. Using
+git-annex, even large binary artifacts can be tracked efficiently. For communication with openBIS,
+obis uses the openBIS API, which offers the power to register and track all metadata supported by
+openBIS.
+
+### 5.2.1 Settings
+
+With `get` you retrieve one or more settings. If the `key` is omitted, you retrieve all settings of
+the `type`:
+
+```
+obis [type] [options] get [key]
+```
+
+With `set` you set one or more settings:
+
+```
+obis [type] [options] set [key1]=[value1], [key2]=[value2], ...
+```
+
+With `clear` you unset one or more settings:
+
+```
+obis [type] [options] clear [key1]
+```
+
+With the type `settings` you can get all settings at once:
+
+```
+obis settings [options] get
+```
+
+The option `-g` can be used to interact with the global settings. The global settings are stored
+in `~/.obis` and are copied to an obis repository when it is created.
+
+The following settings exist:
+
+| type | setting | description |
+|------------|------------------------------|-----------------------------------------------------------|
+| collection | `id` | Identifier of the collection the created data set is attached to. Use either this or the object id. |
+| config | `allow_only_https` | Default is true. If false, http can be used to connect to openBIS. |
+| config | `fileservice_url` | URL for downloading files. See DownloadHandler / FileInfoHandler services. |
+| config | `git_annex_backend` | Git annex backend to be used to calculate file hashes. Supported backends are SHA256E (default), MD5 and WORM. |
+| config | `git_annex_hash_as_checksum` | Default is true. If false, a CRC32 checksum will be calculated for openBIS. Otherwise, the hash calculated by git-annex will be used. |
+| config | `hostname` | Hostname to be used when cloning / moving a data set to connect to the machine where the original copy is located. |
+| config | `openbis_url` | URL for connecting to openBIS (only protocol://host:port, without a path). |
+| config | `openbis_token` | Token to use when connecting to openBIS. Can be either a session token or a personal access token. Alternatively, it can be a path to a file containing the token. |
+| config | `session_name` | The name every personal access token is associated with. |
+| config | `obis_metadata_folder` | Absolute path to the folder which obis will use to store its metadata. If not set, the metadata will be stored in the same location as the data. This setting can be useful when dealing with read-only access to the data. The clone and move commands will not work when this is set. |
+| config | `user` | User for connecting to openBIS. |
+| data_set | `type` | Data set type of data sets created by obis. |
+| data_set | `properties` | Data set properties of data sets created by obis. |
+| object | `id` | Identifier of the object the created data set is attached to. Use either this or the collection id. |
+| repository | `data_set_id` | This is set by obis. It is the id of the most recent data set created by obis and will be used as the parent of the next one. |
+| repository | `external_dms_id` | This is set by obis. Id of the external dms in openBIS. |
+| repository | `id` | This is set by obis. Id of the obis repository. |
+
+The settings are saved within the obis repository, in the `.obis` folder, as JSON files, or
+in `~/.obis` for the global settings. They can be added/edited manually, which might be useful when
+it comes to integration with other tools.
+
+**Example `.obis/config.json`**
+
+```
+{
+    "fileservice_url": null,
+    "git_annex_hash_as_checksum": true,
+    "hostname": "bsse-bs-dock-5-160.ethz.ch",
+    "openbis_url": "http://localhost:8888"
+}
+```
+
+**Example `.obis/data_set.json`**
+
+```
+{
+    "properties": {
+        "K1": "v1",
+        "K2": "v2"
+    },
+    "type": "UNKNOWN"
+}
+```
+
+### 5.2.2 Commands
+
+**init**
+
+```
+obis init [folder]
+```
+
+If a folder is given, obis will initialize that folder as an obis repository that works in
+the External Data Store mode. If not, it will use the current folder.
+
+**init_analysis**
+
+```
+obis init_analysis [options] [folder]
+```
+
+With init_analysis, a repository can be created which is derived from a parent repository. If it is
+called from within a repository, that will be used as a parent. If not, the parent has to be given
+with the `-p` option.
+
+**commit**
+
+```
+obis commit [options]
+```
+
+The `commit` command adds files to a new data set in openBIS. If the `-m` option is not used to
+define a commit message, the user will be asked to provide one.
+
+**sync**
+
+```
+obis sync
+```
+
+When git commits have been done manually, the `sync` command creates the corresponding data set in
+openBIS. Note that, when interacting with git directly, you should use the git annex commands
+whenever applicable, e.g. use "git annex add" instead of "git add".
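+
+For instance, a minimal manual workflow could look like this (the file name is
+illustrative):
+
+```
+# track a large file with git-annex, commit it, then register the data set in openBIS
+git annex add big_scan.tiff
+git commit -m 'add raw scan data'
+obis sync
+```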
+
+**status**
+
+```
+obis status [folder]
+```
+
+This shows the status of the repository folder from which it is invoked, or of the one given as a
+parameter. It shows file changes and whether the repository needs to be synchronized with openBIS.
+
+**clone**
+
+```
+obis clone [options] [data_set_id]
+```
+
+The `clone` command copies a repository associated with a data set and registers the new copy in
+openBIS. In case there are already multiple copies of the repository, obis will ask from which copy
+to clone.
+
+- To avoid user interaction, the copy index can be chosen with the option `-c`.
+- With the option `-u` a user can be defined for copying the files from a remote system.
+- By default, the file integrity is checked by calculating the checksum. This can be skipped
+  with `-s`.
+
+_Note_: This command does not work when `obis_metadata_folder` is set.
+
+**move**
+
+```
+obis move [options] [data_set_id]
+```
+
+The `move` command works the same as `clone`, except that the old repository will be removed.
+
+_Note_: This command does not work when `obis_metadata_folder` is set.
+
+**addref / removeref**
+
+```
+obis addref
+obis removeref
+```
+
+Obis repository folders can be added to or removed from openBIS. This can be useful when a repository
+was moved or copied without using the `move` or `clone` commands.
+
+**token**
+
+```
+obis token get <session_name> [--validity-days] [--validity-weeks] [--validity-months]
+```
+
+Gets or creates a new personal access token (PAT) and stores it in the obis configuration. If
+no `session_name` is provided and none is stored in the configuration, you will be asked for one
+interactively. If no validity period is provided, the maximum (configured by the server) is used.
+If a PAT with this `session_name` already exists and it is going to expire soon (according to the
+server setting `personal_access_tokens_validity_warning_period`), a new PAT will be created, stored
+in the obis configuration and used for every subsequent request.
+
+### 5.2.3 Examples
+
+**Create an obis repository and commit to openBIS**
+
+```
+# global settings to be used for all obis repositories
+obis config -g set openbis_url=https://localhost:8888
+obis config -g set user=admin
+# create an obis repository with a file
+obis init data1
+cd data1
+echo content >> example_file
+# configure the repository
+obis data_set set type=UNKNOWN
+obis object set id=/DEFAULT/DEFAULT
+# commit to openBIS
+obis commit -m 'message'
+```
+
+**Commit to git and sync manually**
+
+```
+# assuming we are in a configured obis repository
+echo content >> example_file
+git annex add example_file
+git commit -m 'message'
+obis sync
+```
+
+**Create an analysis repository**
+
+```
+# assuming we have a repository 'data1'
+obis init_analysis -p data1 analysis1
+cd analysis1
+obis data_set set type=UNKNOWN
+obis object set id=/DEFAULT/DEFAULT
+echo content >> example_file
+obis commit -m 'message'
+```
+
+## 6. Authentication
+
+There are two ways to perform user authentication against openBIS.
+
+### 6.1 Login
+
+Internally, obis stores a session token which is used to connect to openBIS. Whenever this token
+is invalidated, obis will ask the user to provide credentials to log into openBIS again.
+
+### 6.2 Personal Access Token
+
+A session token is short-lived, and its interactive generation makes it unfeasible for use in
+automated scripts. An alternative way to authorize is to generate a personal access token (PAT),
+which can be configured to last for long periods of time.
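+
+As a sketch, creating a PAT for a scripted workflow could look like this (the session
+name is illustrative, and the validity flag is assumed to take a number of days):
+
+```
+# create a PAT bound to a session name; obis stores it in the configuration
+obis token get automation_session --validity-days 90
+```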
+
+PAT generation is explained in depth in the `token` command section.
+
+## 7. Big Data Link Services
+
+The Big Data Link Services can be used to download files which are contained in an obis repository.
+The services are included in the installation folder of openBIS,
+under `servers/big_data_link_services`. For how to configure and run them, consult
+the [README.md](https://sissource.ethz.ch/sispub/openbis/blob/master/big_data_link_server/README.md)
+file.
+
+## 8. Rationale for obis
+
+Data-provenance tracking tools like openBIS make it possible to understand and follow the research
+process. What was studied, what data was acquired and how, how was data analyzed to arrive at final
+results for publication -- this is information that is captured in openBIS. In the standard usage
+scenario, openBIS stores and manages data directly. This has the advantage that openBIS acts as a
+gatekeeper to the data, making it easy to keep backups or enforce access restrictions, etc. However,
+this way of working is not a good solution for all situations.
+
+Some research groups work with large amounts of data (e.g., multiple TB), which makes it inefficient
+and impractical to give openBIS control of the data. Other research groups require that data be
+stored on a shared file system under a well-defined directory structure, be it for historical
+reasons or because of the tools they use. In this case as well, it is difficult to give openBIS full
+control of the data.
+
+For situations like these, we have developed `obis`, a tool for orderly management of data in
+conditions that require great flexibility. `obis` makes it possible to track data on a file system,
+where users have complete freedom to structure and manipulate the data as they wish, while retaining
+the benefits of openBIS. With `obis`, only metadata is actually stored and managed by openBIS. The
+data itself is managed externally, by the user, but openBIS is aware of its existence and the data
+can be used for provenance tracking. `obis` is packaged as a stand-alone utility, which, to be
+available, only needs to be added to the `PATH` variable in a UNIX or UNIX-like environment.
+
+Under the covers, `obis` takes advantage of publicly available and tested tools to manage data on
+the file system. In particular, it uses `git` and `git-annex` to track the content of a dataset.
+Using `git-annex`, even large binary artifacts can be tracked efficiently. For communication with
+openBIS, `obis` uses the openBIS API, which offers the power to register and track all metadata
+supported by openBIS.
+
+## 9. Literature
+
+V. Korolev, A. Joshi, M.A. Grasso, et al., "PROB: A tool for
+tracking provenance and reproducibility of big data experiments", Reproduce '14. HPCA 2014, vol. 11,
+pp. 264-286, 2014.
+http://ebiquity.umbc.edu/_file_directory_/papers/693.pdf
diff --git a/docs/user-documentation/advance-features/excel-import-service.md b/docs/user-documentation/advance-features/excel-import-service.md
new file mode 100644
index 0000000000000000000000000000000000000000..83414821908a5db6a5c2906b42dfa90964f82b85
--- /dev/null
+++ b/docs/user-documentation/advance-features/excel-import-service.md
@@ -0,0 +1,560 @@
+# Excel Import Service
+
+- Created by Fuentes Serna Juan Mariano (ID), last modified on Dec 05, 2022
+
+## Introduction
+
+The Excel import service reads Excel (xls) definitions for both types and
+entities and sends them to openBIS. It replaces the old
+master data scripts, adding support for the creation of openBIS entities.
+
+**The goals are:**
+
+For common users, an import format with the following features, avoiding
+the shortcomings of the old format:
+
+- Recognisable labels as column names.
+- Multi-type imports.
+- Parents/Children creation and linking on a single import.
+
+For advanced users, like consultants and plugin developers, a tool that
+allows specifying on an Excel sheet:
+
+- Metadata model.
+- Basic entity structures used for navigation.
+
+## Modes
+
+To support different use cases, the import service supports the following
+modes; specifying one of them is mandatory.
+
+- UPDATE IF EXISTS: This should be the default mode, used to make
+  incremental updates.
+- IGNORE EXISTING: This mode should be used when the intention is to
+  ignore updates. Existing entities will be ignored. That way it is
+  possible to avoid unintentionally updating existing entities while at
+  the same time adding new ones.
+- FAIL IF EXISTS: This mode should be used when the intention is to
+  fail if anything already exists. That way it is possible to avoid
+  making any unintentional changes.
+
+## Organising Definition Files
+
+All data can be arranged according to the needs of the user, in any
+number of files and any number of worksheets. All files have to be in
+one directory.
+
+The names of the files and worksheets are ignored by the service; the
+user is advised to use descriptive names that they can quickly
+remember/refer to later.
+
+If there are dependencies between files, they should be submitted
+together or an error will be shown.
+
+**Example:**
+
+We want to define vocabularies and sample types with properties using
+these vocabularies. We can arrange our files in several ways:
+
+1. put vocabulary and sample types in separate files named e.g.
+   vocabulary.xls and sample\_types.xlsx respectively
+2. put vocabulary and sample types in different worksheets of the same
+   xls file
+3. put everything in one worksheet in the same file
+
+## Organising Definitions
+
+**Type definitions:**
+
+The order of type definitions is not important for the Excel import
+service, with the exception of Vocabularies, which need to be placed before
+the property types that use them.
+
+**Entity definitions:**
+
+Type definitions for the entities should already exist in the database
+at the time when entities are registered. Generally, entity definitions
+are placed at the end.
+
+### Text cell formatting (colours, fonts, font style, text decorations)
+
+All types of formatting are permitted, and users are encouraged to use
+them to make their excel files more readable. Adding any non-text
+element (table, clipart) will cause the import to fail.
+
+(A valid, but not easily readable, example)
+
+### Definition, rows and sheet formatting
+
+- A valid sheet has to start with a definition on the first row.
+- Each definition has to be separated by one empty row.
+- Two or more consecutive empty rows mark the end of the definitions.
+- Empty spaces at the beginning or end of headers are silently
+  eliminated.
+
+If any content is placed after two consecutive empty rows, it will result
+in an error. This is to alert the user and avoid silently ignoring
+content.
+
+Header rows **NEED TO BE** a valid attribute of the entity or entity
+type, a property label or a property code.
+
+Any unexpected header will result in an error. This is to avoid possible
+misspellings and avoid silently ignoring content.
+
+## Entity Types Definitions
+
+All entity types can be created. There are differences due to the
+nature of the defined elements themselves.
+
+### Vocabulary and Vocabulary Term
+
+Vocabulary
+
+|Headers|Mandatory|
+|--- |--- |
+|Version|Yes|
+|Code|Yes|
+|Description|Yes|
+
+Vocabulary Term
+
+|Headers|Mandatory|
+|--- |--- |
+|Version|Yes|
+|Code|Yes|
+|Label|Yes|
+|Description|Yes|
+
+**Example**
+
+|VOCABULARY_TYPE||||
+|--- |--- |--- |--- |
+|Version|Code|Description||
+|1|$STORAGE.STORAGE_VALIDATION_LEVEL|Validation Level||
+|Version|Code|Label|Description|
+|1|RACK|Rack Validation||
+|1|BOX|Box Validation||
+|1|BOX_POSITION|Box Position Validation||
+
+### Experiment Type
+
+|Headers|Mandatory|
+|--- |--- |
+|Version|Yes|
+|Code|Yes|
+|Description|Yes|
+|Validation script|Yes|
+|Ontology Id|No|
+|Ontology Version|No|
+|Ontology Annotation Id|No|
+
+**Example**
+
+|EXPERIMENT_TYPE|||||||||||
+|--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |
+|Version|Code|Description|Validation script||||||||
+|2|DEFAULT_EXPERIMENT||date_range_validation.py||||||||
+
+### Sample Type
+
+|Headers|Mandatory|
+|--- |--- |
+|Version|Yes|
+|Code|Yes|
+|Description|Yes|
+|Auto generate codes|Yes|
+|Validation script|Yes|
+|Generated code prefix|Yes|
+|Ontology Id|No|
+|Ontology Version|No|
+|Ontology Annotation Id|No|
+
+**Example**
+
+|SAMPLE_TYPE||||||
+|--- |--- |--- |--- |--- |--- |
+|Version|Code|Description|Auto generate codes|Validation script|Generated code prefix|
+|2|STORAGE_POSITION||TRUE|storage_position_validation.py|STO|
+
+### Dataset Type
+
+|Headers|Mandatory|
+|--- |--- |
+|Version|Yes|
+|Code|Yes|
+|Description|Yes|
+|Validation script|Yes|
+|Ontology Id|No|
+|Ontology Version|No|
+|Ontology Annotation Id|No|
+
+**Example**
+
+|DATASET_TYPE||||
+|--- |--- |--- |--- |
+|Version|Code|Description|Validation script|
+|1|RAW_DATA|||
+
+### Property Type
+
+A property type can exist unassigned to an entity type or assigned to an
+entity type.
+
+|Headers|Mandatory Assigned|Mandatory Unassigned|
+|--- |--- |--- |
+|Version|Yes|Yes|
+|Code|Yes|Yes|
+|Mandatory|No|Yes|
+|Show in edit views|No|Yes|
+|Section|No|Yes|
+|Property label|Yes|Yes|
+|Data type|Yes|Yes|
+|Vocabulary code|Yes|Yes|
+|Description|Yes|Yes|
+|Metadata|No|No|
+|Dynamic script|No|No|
+|Ontology Id|No|No|
+|Ontology Version|No|No|
+|Ontology Annotation Id|No|No|
+
+A property type requires a data type to be defined; the valid data types
+are:
+
+|Data type|Description|
+|--- |--- |
+|INTEGER||
+|REAL||
+|VARCHAR|Text of any length but displayed as a single line field.|
+|MULTILINE_VARCHAR|Text of any length but displayed as a multi line field.|
+|HYPERLINK||
+|BOOLEAN||
+|CONTROLLEDVOCABULARY||
+|XML||
+|TIMESTAMP||
+|DATE||
+|SAMPLE|Sample of any type.|
+|SAMPLE:<SAMPLE_TYPE>|Sample of the indicated type.|
+
+**Example Unassigned Property**
+
+In this case, the property is registered without being assigned to a
+type, and the block of property types is introduced by the PROPERTY\_TYPE
+keyword.
+
+|PROPERTY_TYPE|||||||||
+|--- |--- |--- |--- |--- |--- |--- |--- |--- |
+|Version|Code|Mandatory|Show in edit views|Section|Property label|Data type|Vocabulary code|Description|
+|1|$WELL.COLOR_ENCODED_ANNOTATION|FALSE|TRUE||Color Annotation|CONTROLLEDVOCABULARY|$WELL.COLOR_ENCODED_ANNOTATIONS|Color Annotation for plate wells|
+|1|ANNOTATION.SYSTEM.COMMENTS|FALSE|TRUE||Comments|VARCHAR||Comments|
+|1|ANNOTATION.REQUEST.QUANTITY_OF_ITEMS|FALSE|TRUE||Quantity of Items|INTEGER||Quantity of Items|
+|2|$BARCODE|FALSE|FALSE||Custom Barcode|VARCHAR||Custom Barcode|
+
+**Example Assigned**
+
+In this case the property types are assigned to a sample type, and the
+block of property types belongs to the entity type block (SAMPLE\_TYPE in
+this case).
+
+|SAMPLE_TYPE|||||||||||
+|--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |
+|Version|Code|Description|Auto generate codes|Validation script|Generated code prefix||||||
+|1|ENTRY||TRUE||ENTRY||||||
+|Version|Code|Mandatory|Show in edit views|Section|Property label|Data type|Vocabulary code|Description|Metadata|Dynamic script|
+|1|$NAME|FALSE|TRUE|General info|Name|VARCHAR||Name|||
+|1|$SHOW_IN_PROJECT_OVERVIEW|FALSE|TRUE|General info|Show in project overview|BOOLEAN||Show in project overview page|||
+|1|$DOCUMENT|FALSE|TRUE|General info|Document|MULTILINE_VARCHAR||Document|{ "custom_widget" : "Word Processor" }||
+|1|$ANNOTATIONS_STATE|FALSE|FALSE||Annotations State|XML||Annotations State|||
+
+### Entity Type Validation Script and Property Type Dynamic Script
+
+Scripts have to reside in *.py* files in the *scripts* directory within
+the folder that contains the Excel files.
+
+Within *scripts*, files can be organised in any suitable setup.
+
+In order to refer to a validation or dynamic script
+(e.g. *storage\_position\_validation.py* below), the relative path (from
+the *scripts* directory) to the file has to be provided in the relevant
+column. See the example columns below.
+
+**Example**
+
+|SAMPLE_TYPE|||||||||||
+|--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |--- |
+|Version|Code|Description|Auto generate codes|Validation script|Generated code prefix||||||
+|2|STORAGE_POSITION||TRUE|storage_position_validation.py|STO||||||
+|Version|Code|Mandatory|Show in edit views|Section|Property label|Data type|Vocabulary code|Description|Metadata|Dynamic script|
+|1|$STORAGE_POSITION.STORAGE_CODE|FALSE|TRUE|Physical Storage|Storage Code|VARCHAR||Storage Code|||
+|1|$STORAGE_POSITION.STORAGE_RACK_ROW|FALSE|TRUE|Physical Storage|Storage Rack Row|INTEGER||Number of Rows|||
+|1|$STORAGE_POSITION.STORAGE_RACK_COLUMN|FALSE|TRUE|Physical Storage|Storage Rack Column|INTEGER||Number of Columns|||
+|1|$STORAGE_POSITION.STORAGE_BOX_NAME|FALSE|TRUE|Physical Storage|Storage Box Name|VARCHAR||Box Name|||
+|1|$STORAGE_POSITION.STORAGE_BOX_SIZE|FALSE|TRUE|Physical Storage|Storage Box Size|CONTROLLEDVOCABULARY|$STORAGE_POSITION.STORAGE_BOX_SIZE|Box Size|||
+|1|$STORAGE_POSITION.STORAGE_BOX_POSITION|FALSE|TRUE|Physical Storage|Storage Box Position|VARCHAR||Box Position|||
+|1|$STORAGE_POSITION.STORAGE_USER|FALSE|TRUE|Physical Storage|Storage User Id|VARCHAR||Storage User Id|||
+|1|$XMLCOMMENTS|FALSE|FALSE||Comments|XML||Comments log|||
+|1|$ANNOTATIONS_STATE|FALSE|FALSE||Annotations State|XML||Annotations State|||
+
+### Entity Types Versioning
+
+#### General Usage
+
+Version is a mandatory field for entity types. It starts at 1, and
+updating a type definition is expected to increment it; otherwise,
+the system will ignore the changes.
+
+#### Explanation
+
+Additionally, the keyword FORCE can be used to reinstall the type even
+if it is present or has been deleted.
+
+The system keeps track of which versions of the entity types have been
+installed by storing this information, so when one later updates their
+types, the version specified in the spreadsheet is checked against the
+stored version.
+
+For every TYPE found in the Excel sheet the next algorithm is performed:
+
+    IF ENTITY OR (TYPE.Version > STORED_VERSION) OR (TYPE.Version == FORCE): // it is a new version
+        IF ITEM NOT EXISTS in openBIS:
+            CREATE ITEM
+        ELSE: // item already exists
+            IF FAIL_IF_EXISTS:
+                THROW EXCEPTION
+            ELSE IF UPDATE_IF_EXISTS:
+                UPDATE ITEM
+            ELSE IF IGNORE_EXISTING:
+                PASS // ignore as requested
+    ELSE:
+        PASS // ignore objects that have not been updated
+
+## Entity Definitions
+
+Most entities can be created, excluding DataSets. There are
+differences due to the nature of the defined elements themselves.
+
+General Rules:
+
+- Header order is arbitrary.
+- When referring to another entity, only Identifiers are allowed.
+  Sample Variables are the only exception.
+- Vocabulary values in property value rows can be referred to by
+  either the vocabulary term code or the vocabulary term label.
+
+If a mandatory header is missing, it results in an error.
+
+Repeated headers will result in an error; in case a Property shares its
+Label with an Attribute, it is encouraged to use the property code instead.
+
+### Space
+
+|Headers|Mandatory|
+|--- |--- |
+|Code|Yes|
+|Description|Yes|
+
+**Example**
+
+|SPACE||
+|--- |--- |
+|Code|Description|
+|ELN_SETTINGS|ELN Settings|
+|DEFAULT_LAB_NOTEBOOK|Default Lab Notebook|
+|METHODS|Folder for methods|
+|MATERIALS|Folder for the materials|
+|STOCK_CATALOG|Folder for the catalog|
+|STOCK_ORDERS|Folder for orders|
+|PUBLICATIONS|Folder for publications|
+
+### Project
+
+|Headers|Mandatory|
+|--- |--- |
+|Identifier|Yes on UPDATES, ignored on INSERT|
+|Code|Yes|
+|Space|Yes|
+|Description|Yes|
+
+**Example**
+
+|PROJECT||||
+|--- |--- |--- |--- |
+|Identifier|Code|Description|Space|
+|/DEFAULT_LAB_NOTEBOOK/DEFAULT_PROJECT|DEFAULT_PROJECT|Default Project|DEFAULT_LAB_NOTEBOOK|
+|/METHODS/PROTOCOLS|PROTOCOLS|Protocols|METHODS|
+|/STOCK_CATALOG/PRODUCTS|PRODUCTS|Products|STOCK_CATALOG|
+|/STOCK_CATALOG/SUPPLIERS|SUPPLIERS|Suppliers|STOCK_CATALOG|
+|/STOCK_CATALOG/REQUESTS|REQUESTS|Requests|STOCK_CATALOG|
+|/STOCK_ORDERS/ORDERS|ORDERS|Orders|STOCK_ORDERS|
+|/ELN_SETTINGS/TEMPLATES|TEMPLATES|Templates|ELN_SETTINGS|
+|/PUBLICATIONS/PUBLIC_REPOSITORIES|PUBLIC_REPOSITORIES|Public Repositories|PUBLICATIONS|
+
+### Experiment
+
+|Headers|Mandatory|
+|--- |--- |
+|Identifier|Yes on UPDATES, ignored on INSERT|
+|Code|Yes|
+|Project|Yes|
+|Property Code|No|
+|Property Label|No|
+
+**Example**
+
+|EXPERIMENT|||||
+|--- |--- |--- |--- |--- |
+|Experiment type|||||
+|COLLECTION|||||
+|Identifier|Code|Project|Name|Default object type|
+|/METHODS/PROTOCOLS/GENERAL_PROTOCOLS|GENERAL_PROTOCOLS|/METHODS/PROTOCOLS|General Protocols|GENERAL_PROTOCOL|
+|/STOCK_CATALOG/PRODUCTS/PRODUCT_COLLECTION|PRODUCT_COLLECTION|/STOCK_CATALOG/PRODUCTS|Product Collection|PRODUCT|
+|/STOCK_CATALOG/SUPPLIERS/SUPPLIER_COLLECTION|SUPPLIER_COLLECTION|/STOCK_CATALOG/SUPPLIERS|Supplier Collection|SUPPLIER|
+|/STOCK_CATALOG/REQUESTS/REQUEST_COLLECTION|REQUEST_COLLECTION|/STOCK_CATALOG/REQUESTS|Request Collection|REQUEST|
+|/STOCK_ORDERS/ORDERS/ORDER_COLLECTION|ORDER_COLLECTION|/STOCK_ORDERS/ORDERS|Order Collection|ORDER|
+|/ELN_SETTINGS/TEMPLATES/TEMPLATES_COLLECTION|TEMPLATES_COLLECTION|/ELN_SETTINGS/TEMPLATES|Template Collection||
+|/PUBLICATIONS/PUBLIC_REPOSITORIES/PUBLICATIONS_COLLECTION|PUBLICATIONS_COLLECTION|/PUBLICATIONS/PUBLIC_REPOSITORIES|Publications Collection|PUBLICATION|
+
+### Sample
+
+|Headers|Mandatory|
+|--- |--- |
+|$|No|
+|Identifier|Yes on UPDATES, ignored on INSERT|
+|Code|No|
+|Project|No|
+|Experiment|No|
+|Auto generate code|No|
+|Parents|No|
+|Children|No|
+|Property Code|No|
+|Property Label|No|
+
+**Example**
+
+|SAMPLE|||||||
+|--- |--- |--- |--- |--- |--- |--- |
+|Sample type|||||||
+|ORDER|||||||
+|$|Identifier|Code|Space|Project|Experiment|Order Status|
+||/ELN_SETTINGS/TEMPLATES/ORDER_TEMPLATE|ORDER_TEMPLATE|ELN_SETTINGS|/ELN_SETTINGS/TEMPLATES|/ELN_SETTINGS/TEMPLATES/TEMPLATES_COLLECTION|Not yet ordered|
+
+#### Defining Parent and Children in Samples
+
+Parent and child columns can be used to define relations between
+samples. Samples can be addressed by:
+
+1. $ : Variables, only really useful during batch inserts for samples
+   with autogenerated codes, since Identifiers can't be known in
+   advance. Variables SHOULD start with $.
+2. Identifiers
+
+Multiple parents or children SHOULD be separated by line breaks; each
+sample should be on its own line.
+
+|SAMPLE|||||||||
+|--- |--- |--- |--- |--- |--- |--- |--- |--- |
+|Sample type|||||||||
+|ORDER|||||||||
+|$|Parents|Children|Identifier|Code|Space|Project|Experiment|Order Status|
+||||/ELN_SETTINGS/TEMPLATES/ORDER_TEMPLATE_A|ORDER_TEMPLATE|ELN_SETTINGS|/ELN_SETTINGS/TEMPLATES|/ELN_SETTINGS/TEMPLATES/TEMPLATES_COLLECTION|Not yet ordered|
+|$B|||/ELN_SETTINGS/TEMPLATES/ORDER_TEMPLATE_B|ORDER_TEMPLATE|ELN_SETTINGS|/ELN_SETTINGS/TEMPLATES|/ELN_SETTINGS/TEMPLATES/TEMPLATES_COLLECTION|Not yet ordered|
+||/ELN_SETTINGS/TEMPLATES/ORDER_TEMPLATE_A $B|/ELN_SETTINGS/TEMPLATES/ORDER_TEMPLATE_D|/ELN_SETTINGS/TEMPLATES/ORDER_TEMPLATE_C|ORDER_TEMPLATE|ELN_SETTINGS|/ELN_SETTINGS/TEMPLATES|/ELN_SETTINGS/TEMPLATES/TEMPLATES_COLLECTION|Not yet ordered|
+||||/ELN_SETTINGS/TEMPLATES/ORDER_TEMPLATE_D|ORDER_TEMPLATE|ELN_SETTINGS|/ELN_SETTINGS/TEMPLATES|/ELN_SETTINGS/TEMPLATES/TEMPLATES_COLLECTION|Not yet ordered|
+
+### Properties and Sample Variables
+
+As a general rule, properties only accept data of the specified
+type.
+
+Sample properties typically require an Identifier to be given, but a
+variable '$' can be used instead for a sample declared at any point
+of the document, including cyclical dependencies. This is useful for
+scenarios where Sample codes are autogenerated and can't be known in
+advance.
+
+### Entity Versioning
+
+Entities don't have versioning; only entity types have versioning.
+
+## Master Data as a Core Plugin
+
+The master data plugin is an AS core plugin.
+
+Directory structure **(important)**:
+
+Use the standard initialize-master-data.py entry point, as it is ingested by
+openBIS on startup. **Excel files** should be organised in the
+***master-data* directory** in the same plugin, and **scripts** should
+be contained in the ***scripts* directory** under master-data.
+
+Contents of initialize-master-data.py:
+
+    from ch.ethz.sis.openbis.generic.server.asapi.v3 import ApplicationServerApi
+    from ch.systemsx.cisd.openbis.generic.server import CommonServiceProvider
+    from ch.ethz.sis.openbis.generic.asapi.v3.dto.service.id import CustomASServiceCode
+    from ch.ethz.sis.openbis.generic.asapi.v3.dto.service import CustomASServiceExecutionOptions
+    from ch.systemsx.cisd.openbis.generic.server.jython.api.v1.impl import MasterDataRegistrationHelper
+    import sys
+
+    helper = MasterDataRegistrationHelper(sys.path)
+    api = CommonServiceProvider.getApplicationContext().getBean(ApplicationServerApi.INTERNAL_SERVICE_NAME)
+    sessionToken = api.loginAsSystem()
+    props = CustomASServiceExecutionOptions().withParameter('xls', helper.listXlsByteArrays()) \
+        .withParameter('xls_name', 'ELN-LIMS-LIFE-SCIENCES').withParameter('update_mode', 'UPDATE_IF_EXISTS') \
+        .withParameter('scripts', helper.getAllScripts())
+    result = api.executeCustomASService(sessionToken, CustomASServiceCode("xls-import-api"), props)
+
+The following parameters have to be filled in (the easiest way is to use
+MasterDataRegistrationHelper to evaluate the parameter values):
+
+- 'xls': Array of excel files. It can be easily acquired by calling
+  helper.listXlsByteArrays or listCsvByteArrays.
+- 'xls\_name': Name for the batch; it is used by the versioning system.
+- 'update\_mode': See the "Modes" section.
+- 'scripts': if you have any scripts in your data, provide them here.
+  It is easiest to get them with the MasterDataRegistrationHelper
+  getAllScripts function.
+
+The 'result' object is a summary of what has been created.
+
+**Example**
+
+For a complete, up-to-date example, please check the
+eln-lims-life-sciences plugin that ships with the installer or on the
+official Git repository:
+
+<https://sissource.ethz.ch/sispub/openbis/-/tree/master/openbis_standard_technologies/dist/core-plugins/eln-lims-life-sciences/1/as>
+
+Or download the complete plugin using the following link:
+
+<https://sissource.ethz.ch/sispub/openbis/-/archive/master/openbis-master.zip?path=openbis_standard_technologies/dist/core-plugins/eln-lims-life-sciences>
+
+## Known Limitations
+
+- Property type assignments to entity types cannot be updated since
+  the current V3 API does not support this functionality. This means
+  that a change in the order of assignments or group names during an
+  update will be ignored.
diff --git a/docs/user-documentation/advance-features/img/125.png b/docs/user-documentation/advance-features/img/125.png new file mode 100644 index 0000000000000000000000000000000000000000..726f766e068431382b4e7980c55ed8a7ee4856f5 Binary files /dev/null and b/docs/user-documentation/advance-features/img/125.png differ diff --git a/docs/user-documentation/advance-features/img/1806.png b/docs/user-documentation/advance-features/img/1806.png new file mode 100644 index 0000000000000000000000000000000000000000..e885f2a6b42377474e2a3b8734e02d5fe1f40eaa Binary files /dev/null and b/docs/user-documentation/advance-features/img/1806.png differ diff --git a/docs/user-documentation/advance-features/img/199.png b/docs/user-documentation/advance-features/img/199.png new file mode 100644 index 0000000000000000000000000000000000000000..c32292af05118e695ad4e12987fd0f99ba759d26 Binary files /dev/null and b/docs/user-documentation/advance-features/img/199.png differ diff --git a/docs/user-documentation/advance-features/img/222.png b/docs/user-documentation/advance-features/img/222.png new file mode 100644 index 0000000000000000000000000000000000000000..edb8058b050a81801fac7e995d15dcafc788b595 Binary files /dev/null and b/docs/user-documentation/advance-features/img/222.png differ diff --git a/docs/user-documentation/advance-features/img/232.png b/docs/user-documentation/advance-features/img/232.png new file mode 100644 index 0000000000000000000000000000000000000000..53d1726355447f338d14c8c02e93b264e03a22bf Binary files /dev/null and b/docs/user-documentation/advance-features/img/232.png differ diff --git a/docs/user-documentation/advance-features/img/244.png b/docs/user-documentation/advance-features/img/244.png new file mode 100644 index 0000000000000000000000000000000000000000..bb1843007267efc62e55f3a793abdc0fe0aee032 Binary files /dev/null and b/docs/user-documentation/advance-features/img/244.png differ diff --git a/docs/user-documentation/advance-features/img/252.png b/docs/user-documentation/advance-features/img/252.png new file mode 100644 index 0000000000000000000000000000000000000000..11727e26a0c66f7f2c062a7757e3865feb19aedd Binary files /dev/null and b/docs/user-documentation/advance-features/img/252.png differ diff --git a/docs/user-documentation/advance-features/img/255.png b/docs/user-documentation/advance-features/img/255.png new file mode 100644 index 0000000000000000000000000000000000000000..63e8d98ec5f6707670e6767203f24af679b4c095 Binary files /dev/null and b/docs/user-documentation/advance-features/img/255.png differ diff --git a/docs/user-documentation/advance-features/img/259.png b/docs/user-documentation/advance-features/img/259.png new file mode 100644 index
0000000000000000000000000000000000000000..f6d3bef45ce0ccefe1e27d7b9dbf7e131e414300 Binary files /dev/null and b/docs/user-documentation/advance-features/img/259.png differ diff --git a/docs/user-documentation/advance-features/img/263.png b/docs/user-documentation/advance-features/img/263.png new file mode 100644 index 0000000000000000000000000000000000000000..8a7a0e1a193215176619caca735f28a2293f7c3a Binary files /dev/null and b/docs/user-documentation/advance-features/img/263.png differ diff --git a/docs/user-documentation/advance-features/img/266.png b/docs/user-documentation/advance-features/img/266.png new file mode 100644 index 0000000000000000000000000000000000000000..2eb922d016a04bd68fe9f7030d2769b62937f06e Binary files /dev/null and b/docs/user-documentation/advance-features/img/266.png differ diff --git a/docs/user-documentation/advance-features/img/268.png b/docs/user-documentation/advance-features/img/268.png new file mode 100644 index 0000000000000000000000000000000000000000..848189833c0d9fed1738b1536caa3972db3a2ee4 Binary files /dev/null and b/docs/user-documentation/advance-features/img/268.png differ diff --git a/docs/user-documentation/advance-features/img/272.png b/docs/user-documentation/advance-features/img/272.png new file mode 100644 index 0000000000000000000000000000000000000000..a82ad3e8a512cf724b8124cfb5691648477a315a Binary files /dev/null and b/docs/user-documentation/advance-features/img/272.png differ diff --git a/docs/user-documentation/advance-features/img/298.png b/docs/user-documentation/advance-features/img/298.png new file mode 100644 index 0000000000000000000000000000000000000000..7ce311112f076b106a02e7a5b27be7cc6ea06120 Binary files /dev/null and b/docs/user-documentation/advance-features/img/298.png differ diff --git a/docs/user-documentation/advance-features/img/312.png b/docs/user-documentation/advance-features/img/312.png new file mode 100644 index 0000000000000000000000000000000000000000..62b1cd0fd5a1af2155dbda5d0642bf824a5b7e6a Binary files /dev/null and b/docs/user-documentation/advance-features/img/312.png differ diff --git a/docs/user-documentation/advance-features/img/321.png b/docs/user-documentation/advance-features/img/321.png new file mode 100644 index 0000000000000000000000000000000000000000..370d9f27bec7bb1d736fca213359e823b8e9f550 Binary files /dev/null and b/docs/user-documentation/advance-features/img/321.png differ diff --git a/docs/user-documentation/advance-features/img/447.png b/docs/user-documentation/advance-features/img/447.png new file mode 100644 index 0000000000000000000000000000000000000000..e885f2a6b42377474e2a3b8734e02d5fe1f40eaa Binary files /dev/null and b/docs/user-documentation/advance-features/img/447.png differ diff --git a/docs/user-documentation/advance-features/img/514.png b/docs/user-documentation/advance-features/img/514.png new file mode 100644 index 0000000000000000000000000000000000000000..05cfd3c0a543804e2fc41a604974b3742de80daf Binary files /dev/null and b/docs/user-documentation/advance-features/img/514.png differ diff --git a/docs/user-documentation/advance-features/img/530.png b/docs/user-documentation/advance-features/img/530.png new file mode 100644 index 0000000000000000000000000000000000000000..1beb70bd256f605c6f9027d2163d39ce3686e270 Binary files /dev/null and b/docs/user-documentation/advance-features/img/530.png differ diff --git a/docs/user-documentation/advance-features/img/534.png b/docs/user-documentation/advance-features/img/534.png new file mode 100644 index 
0000000000000000000000000000000000000000..391920477ce6e041ed2f2565bf8ce41bf2c4d34e Binary files /dev/null and b/docs/user-documentation/advance-features/img/534.png differ diff --git a/docs/user-documentation/advance-features/img/538.png b/docs/user-documentation/advance-features/img/538.png new file mode 100644 index 0000000000000000000000000000000000000000..dff23459ade0693a4214371a54087d6a3e0419ef Binary files /dev/null and b/docs/user-documentation/advance-features/img/538.png differ diff --git a/docs/user-documentation/advance-features/img/542.png b/docs/user-documentation/advance-features/img/542.png new file mode 100644 index 0000000000000000000000000000000000000000..0cdff8df4e9bb8ad02095432cb8a3c5548447899 Binary files /dev/null and b/docs/user-documentation/advance-features/img/542.png differ diff --git a/docs/user-documentation/advance-features/img/547.png b/docs/user-documentation/advance-features/img/547.png new file mode 100644 index 0000000000000000000000000000000000000000..4b5cc08c1fbc7dd8e815c4ed521aba9a7aace726 Binary files /dev/null and b/docs/user-documentation/advance-features/img/547.png differ diff --git a/docs/user-documentation/advance-features/img/932.png b/docs/user-documentation/advance-features/img/932.png new file mode 100644 index 0000000000000000000000000000000000000000..62b1cd0fd5a1af2155dbda5d0642bf824a5b7e6a Binary files /dev/null and b/docs/user-documentation/advance-features/img/932.png differ diff --git a/docs/user-documentation/advance-features/img/94.png b/docs/user-documentation/advance-features/img/94.png new file mode 100644 index 0000000000000000000000000000000000000000..c32292af05118e695ad4e12987fd0f99ba759d26 Binary files /dev/null and b/docs/user-documentation/advance-features/img/94.png differ
diff --git a/docs/user-documentation/advance-features/index.rst b/docs/user-documentation/advance-features/index.rst
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..e9d0115e16abce35eda914f3657dbe47213f0114 100644
--- a/docs/user-documentation/advance-features/index.rst
+++ b/docs/user-documentation/advance-features/index.rst
@@ -0,0 +1,10 @@
+Advance Features
+================
+
+.. toctree::
+   :maxdepth: 4
+
+   jupiterhub-for-openbis
+   command-line-tool
+   openbis-data-modelling
+   excel-import-service
diff --git a/docs/user-documentation/advance-features/jupiterhub-for-openbis.md b/docs/user-documentation/advance-features/jupiterhub-for-openbis.md
new file mode 100644
index 0000000000000000000000000000000000000000..41e4d0f9f141125d84cd6e9859dceace8c869303
--- /dev/null
+++ b/docs/user-documentation/advance-features/jupiterhub-for-openbis.md
@@ -0,0 +1,425 @@
+JupyterHub for openBIS
+======================
+
+> :warning: **This guide is not meant to substitute the official Docker documentation. Standard Docker commands appear in sections that are not necessarily related to them.**
+
+## Overview
+
+SIS provides a Docker image for the installation of a JupyterHub server,
+available at <https://hub.docker.com/r/openbis/>.
+
+This guide explains how to modify JupyterHub docker images and save
+them. It is aimed at users who are not familiar with Docker, but it
+should not be considered a substitute for the official Docker
+documentation.
+
+> :warning: **We advise non-expert users to first test the instructions provided in this guide on their local machine, to familiarise themselves with the process, before making changes on the JupyterHub server.**
+
+Docker images are stateless, which means that after rebooting, all
+changes made will not be saved. This guarantees a stable environment,
+which is particularly desirable for running services.
+
+If a user wants to introduce changes, the docker image needs to be
+updated. There are two possibilities for this:
+
+- **For testing**: Log into the Docker container, modify it and save
+  the modified container as a new image. This method is not
+  recommended for production because it is not compatible with
+  official JupyterHub Docker image upgrades released by SIS.
+- **For correct maintenance**: Extend the current image using a Docker
+  recipe that includes only your changes. This method is recommended
+  for production, because when a new official JupyterHub Docker image
+  is released by SIS, it will be possible to quickly apply the changes
+  to this image from the Docker recipe.
+
+### Nomenclature
+
+**Docker**: A computer program that performs operating-system-level
+virtualisation, also known as containerisation. The official website can
+be found here: <https://www.docker.com/>.
+
+**Docker image**: Docker images describe the environment to virtualise.
+Docker images are stateless.
+
+**Docker container**: Docker containers provide the environment to
+execute the images.
+
+### Prerequisites for testing in a local environment
+
+1. **Docker environment**. All examples shown below require a working
+   docker environment. Please visit <https://www.docker.com> to
+   download the Docker Community Edition for your OS.
+2. **JupyterHub Docker image**. The jupyterhub-openbis images can be
+   found at <https://hub.docker.com/r/openbis/>. They can be installed
+   locally like any other Docker Hub image.
+3. **openBIS installation** (optional).
+
+How to run the official JupyterHub for openBIS image on your local machine
+---------------------------------------------------------------------------
+
+1\. After downloading the jupyterhub-openbis image, find its id.
+
+    $ docker images
+    REPOSITORY                                 TAG      IMAGE ID       CREATED        SIZE
+    openbis/jupyterhub-openbis-sis-20180405    latest   585a9adf333b   23 hours ago   4.75GB
+
+2\. Run the image with one of the two following commands:
+
+a\. If you want to connect to your productive openBIS instance (e.g.
+https://openbis-elnlims.ch), use the following command:
+
+    docker run -e OPENBIS_URL=https://openbis-elnlims.ch -e JUPYTERHUB_INTEGRATION_SERVICE_PORT=8002 -e JUPYTERHUB_PORT=8000 -e CERTIFICATE_KEY=/vagrant/config/certificates/default.key -e CERTIFICATE_CRT=/vagrant/config/certificates/default.crt -p 8000:8000 -p 8081:8081 -p 8001:8001 -p 8002:8002 585a9adf333b ./vagrant/initialize/start_jupyterhub.sh
+
+b\. If you have a local openBIS installation for testing, you can run
+the following command:
+
+    docker run -v /Users/juanf/jupyterhub-local/home:/home -v /Users/juanf/jupyterhub-local/config/certificates:/vagrant/config/certificates -e OPENBIS_URL=https://129.132.228.42:8443 -e JUPYTERHUB_INTEGRATION_SERVICE_PORT=8002 -e JUPYTERHUB_PORT=8000 -e CERTIFICATE_KEY=/vagrant/config/certificates/default.key -e CERTIFICATE_CRT=/vagrant/config/certificates/default.crt -p 8000:8000 -p 8081:8081 -p 8001:8001 -p 8002:8002 585a9adf333b ./vagrant/initialize/start_jupyterhub.sh
+
+> :warning: **Please note the following configuration options:**
+> 1. -v /Users/juanf/jupyterhub-local/home:/home -
+> This option is only required if you want to store the changes you are making. You need to have a home directory for this. It is not necessary for testing, as the image will provide a default one. This directory should contain a "vagrant" subdirectory.
+> 2.
-v /Users/juanf/jupyterhub-local/config/certificates:/vagrant/config/certificates -
+> This option is only required in production environments where you need valid certificates. It is not necessary for testing, as the image will provide a default one.
+> 3. OPENBIS_URL=https://129.132.228.42:8443 - By default docker is in bridge mode, which means that your docker container accesses your local machine's network directly. If you have a local openBIS installation, please use your IP address; if you use a server installation, use the typical address you use to access it.
+
+To stop a running docker container, run "**docker kill container\_ID**".
+
+The container\_ID can be found by running the command "**docker ps**".
+
+How to extend the official JupyterHub for openBIS image
+-------------------------------------------------------
+
+Modify a currently running container - From UI (for users)
+----------------------------------------------------------
+
+Please note that libraries installed in this way are NOT permanently
+saved. After an upgrade of the image, the libraries need to be
+re-installed.
+
+### Check Available Python 2 Libraries
+
+    help("modules")
+
+### Add Python 2 Library
+
+It can probably be done, but we are currently not supporting it.
+
+### Check Available Octave Libraries
+
+    pkg list
+
+### Add Octave Library
+
+It can probably be done, but we are currently not supporting it.
+
+### Check Available Python 3 Libraries
+
+    pip freeze
+
+### Add Python 3 Library
+
+1\. Use pip install as you would normally do. The Python 3 kernel often
+doesn't need to be restarted to pick up new libraries, but it is
+recommended to do so.
+
+### Check Available R Libraries
+
+    my_packages <- library()$results
+    head(my_packages, 1000000)
+
+### Add R Library
+
+1\. Use the install command as you would normally do. The R kernel needs
+to be restarted to pick up new libraries.
+
+Modify a currently running container - From Console (for admins)
+----------------------------------------------------------------
+
+1\. Find the container id of the image currently running.
+
+    $ docker ps
+    CONTAINER ID   IMAGE                             COMMAND                  CREATED         STATUS         PORTS                                                      NAMES
+    a2b76d1dd204   jupyterhub-openbis-sis-20180405   "./vagrant/initial..."   4 seconds ago   Up 2 seconds   0.0.0.0:8000-8002->8000-8002/tcp, 0.0.0.0:8081->8081/tcp   nervous_leakey
+
+2\. Log into the container.
+
+    $ docker exec -it a2b76d1dd204 bash
+
+### Add Python Library
+
+Add a new library to Python 3
+
+    # First we should move to the environment used by JupyterHub
+    [root@a2b76d1dd204 /]# export PATH=/vagrant_installation/miniconda3/bin:$PATH
+    [root@a2b76d1dd204 /]# export LC_ALL=en_US.utf8
+    [root@a2b76d1dd204 /]# export LANG=en_US.utf8
+    # Install a new python lib using pip
+    [root@a2b76d1dd204 /]# python --version
+    Python 3.6.4 :: Anaconda, Inc.
+    [root@a2b76d1dd204 /]# pip install prettytable
+
+This type of change can be validated straight away in JupyterHub, by
+just starting a Python 3 notebook. Other changes could require a reboot
+of JupyterHub.
+
+Please note that this approach should only be used for testing. To
+preserve the changes, the running container should be saved as a new
+image, otherwise these changes will be lost when the container is shut
+down.
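+
+A quick way to verify the change from inside the container is to import the freshly
+installed library (a sketch using the prettytable example above):
+
+    [root@a2b76d1dd204 /]# python -c "import prettytable"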
+
+### Add R Library
+
+Add a new library to R
+
+    # First we should move to the environment used by JupyterHub
+    [root@a2b76d1dd204 /]# export PATH=/vagrant_installation/miniconda3/bin:$PATH
+    [root@a2b76d1dd204 /]# export LC_ALL=en_US.utf8
+    [root@a2b76d1dd204 /]# export LANG=en_US.utf8
+    # Install a new r lib using conda
+    [root@a2b76d1dd204 /]# sudo conda list r-
+    [root@a2b76d1dd204 /]# sudo conda install -c r -y r-base64enc
+
+This type of change can be validated straight away in JupyterHub, by
+just starting an R notebook. Other changes could require a reboot of
+JupyterHub.
+
+### Save the state of a running container as a new image
+
+If you know that you have made significant changes that you want to keep
+until you build a new docker recipe, you have the option to save the
+running container as a new image.
+
+    bs-mbpr28:jupyterhub_reference_installation juanf$ docker ps
+    CONTAINER ID   IMAGE                             COMMAND                  CREATED          STATUS          PORTS                                                      NAMES
+    a2b76d1dd204   jupyterhub-openbis-sis-20180405   "./vagrant/initial..."   37 minutes ago   Up 37 minutes   0.0.0.0:8000-8002->8000-8002/tcp, 0.0.0.0:8081->8081/tcp   lucid_stonebraker
+
+    $ docker commit a2b76d1dd204 jupyterhub-openbis-sis-juanextensions-20180406
+    sha256:5dd0036664c75a21d6a62b80bf5780e70fcad345bb12a7ad248d01e29a3caa99
+    $ docker images
+    REPOSITORY                                       TAG      IMAGE ID       CREATED         SIZE
+    jupyterhub-openbis-sis-juanextensions-20180406   latest   5dd0036664c7   4 seconds ago   4.75GB
+    jupyterhub-openbis-sis-20180405                  latest   585a9adf333b   23 hours ago    4.75GB
+
+Extend a docker image using a docker recipe (for maintenance)
+-------------------------------------------------------------
+
+The recommended approach for maintenance purposes is to extend the
+latest official docker image distributed by SIS.
+
+Using our last example, let's create a file called "Dockerfile" with
+the content shown below.
+
+    # vim:set ft=dockerfile:
+    FROM openbis/jupyterhub-openbis-sis-20180405
+    ## Adding Python 3 library
+    RUN export PATH=/vagrant_installation/miniconda3/bin:$PATH && \
+        export LC_ALL=en_US.utf8 && \
+        export LANG=en_US.utf8 && \
+        pip install prettytable
+
+Please change the name of the image in the file to the one you are
+using.
+
+Now we can create a new image, using the latest image from the official
+repository as a starting point.
+
+> :warning: **It is best practice to include both the name of the user and the creation date in the image name. This will help when dealing with many versions created by different users at different times.**
+
+    $ docker build -t jupyterhub-openbis-sis-juanextensions-recipe-20180406 .
+    Sending build context to Docker daemon  4.957GB
+    Step 1/2 : FROM openbis/jupyterhub-openbis-sis-20180405
+    ....
+    Step 2/2 : RUN export PATH=/vagrant_installation/miniconda3/bin:$PATH && export LC_ALL=en_US.utf8 && export LANG=en_US.utf8 && pip install prettytable
+    ....
+    Successfully tagged jupyterhub-openbis-sis-juanextensions-recipe-20180406:latest
+
+The new image is now available and can be started as described above.
+
+    $ docker images
+    REPOSITORY                                              TAG      IMAGE ID       CREATED         SIZE
+    jupyterhub-openbis-sis-juanextensions-recipe-20180406   latest   a0106501b223   3 minutes ago   4.75GB
+    openbis/jupyterhub-openbis-sis-20180405                 latest   585a9adf333b   23 hours ago    4.75GB
+
+How to start a jupyterhub-openbis docker image on a productive JupyterHub server
+--------------------------------------------------------------------------------
+
+> :warning: **You can only have ONE jupyterhub-openbis image running on a server at any given time, since JupyterHub makes use of certain ports on the machine that are also configured in openBIS.**
+
+1\. Find the jupyterhub-openbis-start.sh file on your server (please ask
+your admin).
+
+2\. Find the container id of the image that is currently running.
+
+    $ docker ps
+    CONTAINER ID   IMAGE                             COMMAND                  CREATED         STATUS         PORTS                                                      NAMES
+    a2b76d1dd204   jupyterhub-openbis-sis-20180405   "./vagrant/initial..."   4 seconds ago   Up 2 seconds   0.0.0.0:8000-8002->8000-8002/tcp, 0.0.0.0:8081->8081/tcp   nervous_leakey
+
+3\. Stop the current container.
+
+    $ docker kill a2b76d1dd204
+    a2b76d1dd204
+
+4\. Edit the jupyterhub-openbis-start.sh file on your server and update
+the name of the image it runs to the one of your choice.
+
+    docker run -v /Users/juanf/Documents/programming/git/jupyter-openbis-integration/jupyterhub_reference_installation/home:/home -v /Users/juanf/Documents/programming/git/jupyter-openbis-integration/jupyterhub_reference_installation/vagrant/config/certificates:/vagrant/config/certificates -e OPENBIS_URL=https://129.132.229.37:8443 -e JUPYTERHUB_INTEGRATION_SERVICE_PORT=8002 -e JUPYTERHUB_PORT=8000 -e CERTIFICATE_KEY=/vagrant/config/certificates/default.key -e CERTIFICATE_CRT=/vagrant/config/certificates/default.crt -p 8000:8000 -p 8081:8081 -p 8001:8001 -p 8002:8002 jupyterhub-openbis-sis-20180405 ./vagrant/initialize/start_jupyterhub.sh
+
+5\. Start the new image.
+
+    $ ./jupyterhub-openbis-start.sh
+
+Other useful Docker commands
+----------------------------
+
+### Save an image as a tar file to share it
+
+> :warning: **It is best practice to include both the name of the user and the creation date in the image name. This will help when dealing with many versions created by different users at different times.**
+
+    $ docker save jupyterhub-openbis-sis-20180405 > jupyterhub-openbis-sis-20180405.tar
+    $ ls -lah
+    total 9681080
+    -rw-r--r--  1 juanf  1029   4.6G Apr  5 15:38 jupyterhub-openbis-sis-20180405.tar
+
+### Load an image from a tar file
+
+    $ docker load < jupyterhub-openbis-sis-20180405.tar
+    8feeda13d3ce: Loading layer [==================================================>]  27.65kB/27.65kB
+    622cd2c170f3: Loading layer [==================================================>]  152MB/152MB
+    633fa40a6caa: Loading layer [==================================================>]  2.048kB/2.048kB
+    7219a9159e4f: Loading layer [==================================================>]  223.9MB/223.9MB
+    678b55e862c7: Loading layer [==================================================>]  4.377GB/4.377GB
+    Loaded image: jupyterhub-openbis-sis-20180405:latest
+    $ docker images
+    REPOSITORY                        TAG                 IMAGE ID            CREATED             SIZE
+    jupyterhub-openbis-sis-20180405   latest              585a9adf333b        24 hours ago        4.75GB
+
+### Remove an image
+
+    $ docker rmi jupyterhub-openbis-sis-juanextensions-recipe-20180406
+
+### Remove all stopped containers
+
+    $ docker rm $(docker ps -aq)
+
+openBIS ELN Integration Configuration
+-------------------------------------
+
+On the openBIS side, append the following lines to your ELN instance
+profile:
+servers/core-plugins/eln-lims/1/as/webapps/eln-lims/html/etc/InstanceProfile.js
+
+    # Ansible yml syntax, replace the variables in the double curly braces with the appropriate values:
+    this.jupyterIntegrationServerEndpoint = "https://{{ openbis_jupyterhub_hostname }}:{{ openbis_jupyterhub_communication_port }}";
+    this.jupyterEndpoint = "https://{{ openbis_jupyterhub_hostname }}/";
+
+    # Example:
+    this.jupyterIntegrationServerEndpoint = "https://jupyterhub-demo.labnotebook.ch:80";
+    this.jupyterEndpoint = "https://jupyterhub-demo.labnotebook.ch/";
+
+On the JupyterHub side, the docker command would then look as follows:
+
+    docker run -e OPENBIS_URL=https://{{ openbis_public_hostname }} -e JUPYTERHUB_INTEGRATION_SERVICE_PORT=8002 -e JUPYTERHUB_PORT=8000 -e CERTIFICATE_KEY=/vagrant/config/certificates/default.key -e CERTIFICATE_CRT=/vagrant/config/certificates/default.crt -p 8000:8000 -p 8081:8081 -p 8001:8001 -p {{ openbis_jupyterhub_communication_port }}:8002 585a9adf333b ./vagrant/initialize/start_jupyterhub.sh
+
+    # Example:
+    openbis_public_hostname: openbis-test.ethz.ch
+    openbis_jupyterhub_hostname: jupyterhub-test.ethz.ch
+    openbis_jupyterhub_communication_port: 80
+
+The only port you need to open on your JupyterHub instance is the one
+matching `{{ openbis_jupyterhub_communication_port }}`. Using
+firewall-cmd this would look as follows:
+
+    firewall-cmd --permanent --zone=public --add-rich-rule='rule family="ipv4" source address="{{ openbis_jupyterhub_openbis_hostname }}" port protocol="tcp" port="{{ openbis_jupyterhub_communication_port }}" accept'
+
+Troubleshooting Connectivity to openBIS
+---------------------------------------
+
+Currently, connections are only supported to the openBIS server that was
+used to validate your login.
+
+### "Session is no longer valid. Please log in again" error
+
+This error can be shown for two reasons:
+
+- The openBIS server has a self-signed certificate for whatever
+  reason; typically this is true for test servers.
+- The session has timed out.
+
+For each of them a different fix needs to be applied.
+
+#### Session is no longer valid. The openBIS server has a self-signed certificate
+
+To fix this issue, allow self-signed certificates when connecting, as in
+the example shown below, using the `verify_certificates` modifier.
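+The example below is a minimal pybis sketch of such a connection (the URL
+and credentials are placeholders):
+
+    from pybis import Openbis
+
+    # verify_certificates=False accepts the server's self-signed certificate
+    o = Openbis('https://openbis-test.example.org:8443', verify_certificates=False)
+    o.login('username', 'password')
+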
+#### Session is no longer valid. The session has timed out
+
+The session token obtained during the login is stored by the Jupyter
+server during its startup. This session token has most likely timed out
+after a couple of days without use and needs to be refreshed. If you
+just logged in to JupyterHub, there is a new session token available that
+needs to be handed over to the Jupyter server. For that, just stop and
+start the server again.
+
+Step 1: Go to your control panel by clicking on the button in the top
+right corner.
+
+Step 2: Press Stop My Server.
+
+Step 3: Press Start My Server.
+
+Step 4: Press Launch My Server.
+
+Step 5: Wait for the server to start up; after the startup finishes, go
+back to your notebook. It should connect now.
diff --git a/docs/user-documentation/advance-features/openbis-data-modelling.md b/docs/user-documentation/advance-features/openbis-data-modelling.md
new file mode 100644
index 0000000000000000000000000000000000000000..7c55c552fbb628ac3b0902621f4e3abff15b5459
--- /dev/null
+++ b/docs/user-documentation/advance-features/openbis-data-modelling.md
@@ -0,0 +1,178 @@
+openBIS Data Modelling
+======================
+
+Overview
+--------
+
+openBIS has a hierarchical data structure:
+
+1. **Space**: folder with *Code* and *Description*
+2. **Project**: folder with *Code* and *Description*
+3. **Experiment/Collection**: folder with *user-defined properties*
+4. **Object**: entity with *user-defined properties*
+5. **Data set**: folder where data files are stored. A data set has
+   *user-defined properties*
+
+Access to openBIS is controlled either at the *Space* level or at the
+*Project* level.
+
+Data model in openBIS ELN-LIMS
+-------------------------------
+
+In the openBIS ELN-LIMS the following structure is used.
+
+## Inventory
+
+The inventory is usually conceived to be shared by all lab members. The
+inventory is used to store all materials and protocols (i.e. standard
+operating procedures) used in the lab. It is possible to create
+additional inventories, for example of instruments and equipment.
+
+The following structure is used in the Inventory:
+
+**Materials** (=*Space*)
+
+**Methods** (=*Space*)
+
+In the generic ELN-LIMS, the *Materials* folder is empty and everything
+has to be defined by an admin user.
+
+The *Methods* folder has default folders for general protocols defined:
+
+**Methods** (=*Space*)
+
+**Protocols** (=*Project*)
+
+**General Protocols** (=*Collection*)
+
+In the ELN-LIMS for life sciences, some folders are already predefined
+in the *Materials* folder. For example:
+
+**Materials** (=*Space*)
+
+**Reagents** (=*Project*)
+
+**Chemicals Collection** (=*Collection*)
+
+**Enzymes Collection** (=*Collection*)
+
+**Antibodies Collection** (=*Collection*)
+
+An openBIS Instance admin can customise the Inventory folders for the
+lab and create the needed Object types ([Register Master Data via the
+Admin Interface](#)).
+
+## Lab Notebook
+
+By default, the lab notebook is organised per user. Each user has a
+personal folder (=*Space*), in which they can create *Projects*,
+*Experiments* and *Experimental Steps* (=*Objects*). Data files can be
+uploaded to *Data Sets*. Example structure:
+
+**Username** (=*Space*)
+
+**Master thesis project** (=*Project*)
+
+**Experiment 1** (=*Experiment*)
+
+**Experimental step 1** (=*Object*)
+
+**Experimental step 2** (=*Object*)
+
+**Raw Data** (=*Data set*)
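+As an illustration, the same hierarchy can be created programmatically,
+for example with pybis. This is only a sketch: the identifiers are
+hypothetical and the type codes assume the ELN-LIMS defaults.
+
+    from pybis import Openbis
+
+    o = Openbis('https://openbis.example.org')  # placeholder server URL
+    o.login('username', 'password')
+
+    # Space -> Project -> Experiment -> Object
+    o.new_space(code='USERNAME').save()
+    o.new_project(space='USERNAME', code='MASTER_THESIS_PROJECT').save()
+    o.new_experiment(type='DEFAULT_EXPERIMENT',
+                     project='/USERNAME/MASTER_THESIS_PROJECT',
+                     code='EXPERIMENT_1').save()
+    o.new_sample(type='EXPERIMENTAL_STEP', space='USERNAME',
+                 experiment='/USERNAME/MASTER_THESIS_PROJECT/EXPERIMENT_1').save()
+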
+Some labs prefer to organise their lab notebook using a classification
+per project rather than per user. In this case an openBIS *Space* would
+correspond to a lab project and an openBIS *Project* could be a
+sub-project. Example structure:
+
+**SNF projects** (=*Space*)
+
+**Project 1** (=*Project*)
+
+**Experiment** (=*Experiment*)
+
+**Experimental Step** (=*Object*)
+
+**Raw Data** (=*Data set*)
+
+**Project 2** (=*Project*)
+
+**Experiment** (=*Experiment*)
+
+**Experimental step 1** (=*Object*)
+
+**Experimental step 2** (=*Object*)
+
+**Raw Data** (=*Data set*)
+
+openBIS parents and children
+----------------------------
+
+Objects can be linked to other objects, and data sets to other data
+sets, in N:N relationships. In openBIS these connections are known as
+*parents* and *children*.
+
+## Examples of parent-child relationships
+
+1. One or more samples are derived from one main sample. This is the
+   parent of the other samples.
+
+2. One Experimental Step is written following a protocol stored in the
+   Inventory and using a sample stored in the Inventory. The protocol
+   and the sample are the parents of the Experimental Step.
+
+3. One Experimental Step is done after another and we want to keep
+   track of the links between the steps.
+
+Protocols
+---------
+
+Protocols are standard procedures that can be followed to perform given
+experiments in a lab. Usually protocols are stored in the common
+inventory and are linked to Experimental Steps using the
+parent-child relationships described above.
+
+The protocol contains the standard steps to follow. The parameters
+measured during one experiment following a given protocol should be
+recorded in the Experimental Step.
+
+Not all labs have standard procedures in place. In this case, the
+*Methods* section of the Inventory does not need to be used.
\ No newline at end of file
diff --git a/docs/user-documentation/general-admin-users/admins-documentation/index.rst b/docs/user-documentation/general-admin-users/admins-documentation/index.rst
index 5ba878cbf5636c45fbfb4aa2ff8a9dfa3e6a5752..17acdf57b15629b31753afa989b95e855f6d523c 100644
--- a/docs/user-documentation/general-admin-users/admins-documentation/index.rst
+++ b/docs/user-documentation/general-admin-users/admins-documentation/index.rst
@@ -1,5 +1,5 @@
-General Users
-=============
+Admins Documentation
+====================
 
 .. toctree::
    :maxdepth: 4
diff --git a/docs/user-documentation/general-admin-users/custom-database-queries.md b/docs/user-documentation/general-admin-users/custom-database-queries.md
new file mode 100644
index 0000000000000000000000000000000000000000..d79a599df4ffe907e16c947678194c9e8902d1c7
--- /dev/null
+++ b/docs/user-documentation/general-admin-users/custom-database-queries.md
@@ -0,0 +1,297 @@
+Custom Database Queries
+=======================
+
+Introduction
+------------
+
+The openBIS application server can be configured to query any relational
+database server via SQL. There are three ways to use this feature in the
+openBIS Web application:
+
+- Running arbitrary SELECT statements.
+- Defining parametrized queries.
+- Running parametrized queries.
+
+The three features correspond to the three menu items of the **Queries**
+menu.
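+For instance, the first feature amounts to running an ad hoc statement
+such as the following (the table and column names are hypothetical):
+
+    select code, description from protocols order by code
+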
+
+The last feature can be used by any user having the OBSERVER role,
+whereas for the first two features the user needs a **query creator**
+role, which usually is at least the POWER_USER role and is
+[configured](/display/openBISDoc2010/Installation+and+Administrator+Guide+of+the+openBIS+Server#InstallationandAdministratorGuideoftheopenBISServer-ConfigureAuthorization)
+by the administrator of the openBIS server. The idea is that power users
+having the knowledge to write SQL queries define a query which can be
+used by everybody without knowing much about SQL.
+
+Multiple query databases may be configured for any openBIS Web
+application. Database labels specified in the configuration file will be
+shown in a combo box for database selection while defining new / editing
+existing queries.
+
+Note that only the first 100000 rows of the result set of a query are
+shown. This restriction should prevent ill-designed queries from
+consuming all the memory of the server. There is also a timeout of
+5 minutes, after which a query is cancelled if it hasn't returned any
+result.
+
+How it works
+------------
+
+Database:
+
+- is configured as a core-plugin of type "query-databases" (see the
+  sketch after this list)
+- can be assigned to a space:
+    - space == null : should be used for databases that contain data
+      from multiple spaces or data which is space unrelated
+    - space != null : should be used for databases that contain data
+      from one specific space only
+- can be assigned a minimal query creator role:
+    - database with space == null : by default the minimal query
+      creator role is INSTANCE_OBSERVER
+    - database with space != null : by default the minimal query
+      creator role is POWER_USER
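+The following is only a sketch of what such a core-plugin might look
+like. The folder layout follows the usual core-plugins pattern; the
+property names are indicative assumptions, so consult the [Installation
+and Administrator Guide of the openBIS Server](#) for the authoritative
+list:
+
+    # servers/core-plugins/my-module/1/as/query-databases/query-db/plugin.properties
+    # (hypothetical example; property names are assumptions)
+    label = My query database
+    database-driver = org.postgresql.Driver
+    database-url = jdbc:postgresql://localhost/my_database
+    database-username = query_user
+    database-password = secret
+    # minimal role allowed to create queries on this database
+    creator-minimal-role = POWER_USER
+    # leave the space assignment unset for a space-independent database
+    # data-space = MY_SPACE
+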
+Query:
+
+- can be created/updated/deleted only by a user with the database's
+  minimal query creator role or stronger (if the database space != null,
+  then the user role has to be defined for that space or the user has
+  to be an instance admin)
+- can be seen by:
+    - private query : the user who created it or an instance admin
+    - public query : any user
+- can be executed by:
+    - database with space == null : users with at least the
+      PROJECT_OBSERVER role (results are filtered by the
+      experiment_key/sample_key/data_set_key column values, which
+      are expected to contain entity perm ids; WARNING: if no such
+      column is returned by a query, then ALL results are returned)
+    - database with space != null : users with at least the
+      SPACE_OBSERVER role in that space (all results are returned
+      without any filtering, as they all belong to the space the user
+      has access to)
+- can be updated/executed/deleted only by a user who can see the query
+- can contain additional parameters (e.g. ${my_parameter}); values of
+  such parameters can be set in the UI by the user right before the
+  execution of a query
+- can be GENERIC (accessible only from the "Queries" top menu) or
+  EXPERIMENT/SAMPLE/DATA_SET/MATERIAL specific (accessible from the
+  "Queries" top menu and from the Experiment/Sample/DataSet/Material
+  view respectively)
+- entity specific queries should contain the '${key}' parameter, which
+  will be replaced by the permId of the displayed experiment/sample or
+  by the code of the displayed dataset/material before the query
+  execution (MATERIAL queries also have a '${type}' parameter, which is
+  replaced with the type code of the material)
+- entity specific queries may be configured to appear only in the
+  views of entities of chosen types (e.g. only for samples of types
+  that match a given regexp)
+
+Arbitrary SQL:
+
+- running arbitrary SQL is treated as the creation of a query which
+  is simply not stored for future use, i.e. only a user with a
+  minimal query creator role or stronger can do it (if the database
+  space != null, then the user role has to be defined for that space or
+  the user has to be an instance admin)
+
+Setup
+-----
+
+To use the custom database queries, it is necessary to define query
+databases. See [Installation and Administrator Guide of the openBIS
+Server](#) for an explanation on how to do this.
+
+Running a Parametrized Query
+----------------------------
+
+1. Choose menu item **Queries -> Run Predefined Query**. The tab
+   *Predefined Query* opens.
+2. Choose a query using the query combo box. Queries specified for all
+   configured databases are selected transparently using the same combo
+   box, which displays only query names.
+3. If the query has no parameters, it will be executed immediately and
+   the result is shown in tabular form. Otherwise, text fields for each
+   parameter appear to the right of the query combo box.
+4. Enter some values into the parameter fields and click on the
+   **Execute** button. The query result will be shown as a table.
+
+Features of a query result:
+
+- The result can be browsed, exported, sorted, and filtered like most
+  tables in openBIS.
+- Values referring to permIDs of an experiment, sample, or data set
+  might be shown as hyperlinks. A click on such a link opens a new tab
+  with details.
+
+Running a SELECT statement
+--------------------------
+
+This feature is only for users with a *creator role*. It is useful for
+exploring the database with ad hoc queries.
+
+1. Choose menu item **Queries -> Run Custom SQL Query**. The tab
+   *Custom SQL Query* opens.
+2. Enter a SELECT statement in the text area, select the database, and
+   click on the **Execute** button. The result appears below in tabular
+   form.
+
+Defining and Editing Parametrized Queries
+-----------------------------------------
+
+This feature is only for users with a *creator role*.
+
+### Define a Query
+
+1. Choose menu item **Queries -> Browse Query Definitions**. The tab
+   *Query Definitions* opens. It shows all definitions to which the
+   user has access rights.
+2. Click on **Add Query Definition** to define a new parametrized
+   query. A large dialog pops up.
+3. Enter a name, a database, an optional description, and a SELECT
+   statement.
+4. Click on the button **Test Query Definition** to execute the query.
+   The result will be shown in the same dialog.
+5. Click on the button **Save** to save the definition. The dialog
+   disappears and the new definition appears in the table of query
+   definitions.
+
+#### Public flag
+
+A query definition can be public or private depending on whether the
+check box **public** is checked or not. A private query is visible only
+to its creator. Public queries are visible to everybody. The idea is
+that a power user first creates query definitions for their own
+purposes. If they find a query useful for other users, they set the
+public flag.
+
+#### Specifying Parameters
+
+A SQL query can have parameters which are defined later by the user
+running the query. A parameter is of the form `${<parameter name>}`.
+Example:
+
+    select * from my_table where code = ${my table code}
+
+The parameter name will appear in the text field when running the query.
+Optionally, you can provide key-value pairs which are "metadata" for the
+parameter and are separated from the name by '::'.
+These metadata keys are defined:
+
+| Key    | Meaning                                                              | Example                      |
+|--------|----------------------------------------------------------------------|------------------------------|
+| `type` | Data type of the parameter value (e.g. `integer`)                    | `${estimate::type=integer}`  |
+| `list` | Comma-separated list of values offered to the user running the query | `${estimate::list=1,3,7,12}` |
+
+It is possible to combine multiple keys like
+this: `${estimate::type=integer::list=1,3,7,12}`.
+
+**Why provide a data type?**
+
+Providing a data type with `type=...` is not mandatory. In a future
+version of the software we may add additional client-side validation
+based on this value, but in the current version we don't do that yet. If
+you do *not* provide a data type, openBIS will ask the database for the
+type of the particular query parameter. This works fine for most
+databases, but not for all. Oracle is a well-known example that cannot
+provide this information. So if your query source is an Oracle database
+and you do not provide a data type, you will get an error saying
+"Unsupported feature". To fix this, you have to provide the data type.
+
+#### Array Literals for PostgreSQL data sources
+
+For PostgreSQL, there exist neat array functions `ANY` and `ALL` (see the
+[PostgreSQL
+documentation](http://www.postgresql.org/docs/9.2/static/functions-comparisons.html)).
+In particular, `ANY` comes in handy in `WHERE` clauses to check whether
+a column has one of several values. The official form of providing an
+array literal as a string (which is what you have to do here) is a bit
+clumsy, as you have to write for the query
+`select * from data where code = ANY(${codes}::text[])`, and then the
+user running the query has to put the parameter value in curly braces
+like `{code1,code2,code3,...}`.
+
+The custom query engine has a simplification for this construct. You can
+just write `select * from data where code = ANY({${codes}})` for the
+query, and then the user running the query will be able to skip the
+curly brackets and write for the parameter value: `code1,code2,code3,...`.
+A user who doesn't know that this is an array will in particular get
+away with just providing a single value like `code1`.
+
+Note that the most obvious way of specifying a set relationship,
+`select * from data where code in (${codes})`, does *not* work, as
+custom queries are not using simple text concatenation but prepared
+statements to avoid the security problem known as "SQL injection".
+
+#### Hyperlinks
+
+In order to create hyperlinks in the result table, the column names in
+the SQL statement should be one of the following **magic** words:
+
+- `experiment_key`
+- `sample_key`
+- `data_set_key`
+
+They should denote a perm ID of the specified type. Example:
+
+    select id, perm_id as data_set_key from data_sets
+
+Be careful with this feature: the table is shown with the hyperlinks
+even if the value isn't a perm ID of the specified type.
+
+### Edit a Query
+
+1. Choose menu item **Queries -> Browse Query Definitions**. The tab
+   *Query Definitions* opens.
+2. Select a query and click on the button **Edit**. The same dialog as
+   for defining a query pops up.
+
+Entity Queries (Experiment, Sample, Material, Data Set)
+-------------------------------------------------------
+
+By default, all custom queries are `Generic`, which means that the user
+will be able to execute them from the standard Queries menu.
+
+Additionally, it is possible to create a query containing a special
+'magic' parameter, which will be automatically replaced by the entity
+identifier (perm id in the case of experiments and samples, code for
+data sets, and a pair (code, type) in the case of materials). Those
+entity specific queries will be visible only in entity details views
+(e.g. experiment details) in a special `section` called `Queries`.
+One can
+also limit the visibility of a query to specific entity types (e.g.
+experiments of type `EXP`).
+
+### How to create/edit entity custom queries
+
+Entity custom queries can be created and edited in the same way as
+`Generic` queries (**Queries -> Browse Query Definitions**), but the
+value of the **`Query Type`** field should be set to Experiment, Sample,
+Data Set or Material.
+
+**`Entity Type`** (e.g. Experiment Type) should be changed if one wants
+to limit the visibility of a query to a specific type (the default
+option, `(all)`, doesn't introduce such a restriction). The field
+accepts not only values selected from the list but also typed text
+containing a regular expression (e.g. Experiment Type `'EXP.*'` would
+mean that the query should be visible in the views of experiments whose
+type code starts with the `'EXP'` prefix).
+
+Furthermore, the SQL should contain the 'magic' parameter **'${key}'**
+(it will be replaced by the perm id (experiment, sample) or the code
+(data set, material)). In the case of a material custom query, an
+additional 'magic' parameter is required: **'${type}'** (it will be
+replaced by the material type code).
+
+### Examples
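+Two sketches follow; the table and column names are hypothetical, only
+the magic parameters are part of openBIS:
+
+    -- Sample query: ${key} is replaced by the perm id of the displayed sample
+    select * from measurements where sample_perm_id = ${key}
+
+    -- Material query: ${key} and ${type} are replaced by the material code and type code
+    select * from batches where material_code = ${key} and material_type = ${type}
+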
+
+**Legacy Syntax**
+
+Older versions of openBIS required string parameters to be put in ticks,
+like '${param}'. Current versions of openBIS don't need this anymore, so
+you can use ${param} without the ticks. However, the syntax with ticks
+is still accepted for backward compatibility.
\ No newline at end of file
diff --git a/docs/user-documentation/general-admin-users/img/110.png b/docs/user-documentation/general-admin-users/img/110.png
new file mode 100644
index 0000000000000000000000000000000000000000..6fb42f81d1fe5c0fb09e5ef5401037a856cd08d0
Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/110.png differ
diff --git a/docs/user-documentation/general-admin-users/img/113.png b/docs/user-documentation/general-admin-users/img/113.png
new file mode 100644
index 0000000000000000000000000000000000000000..53938d1e9fe470c32fc9f582c03a4a872c79ada7
Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/113.png differ
diff --git a/docs/user-documentation/general-admin-users/img/359.png b/docs/user-documentation/general-admin-users/img/359.png
new file mode 100644
index 0000000000000000000000000000000000000000..be410fde4cf93721d20efee36ef220ebc07bfd63
Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/359.png differ
diff --git a/docs/user-documentation/general-admin-users/img/378.png b/docs/user-documentation/general-admin-users/img/378.png
new file mode 100644
index 0000000000000000000000000000000000000000..b8e6a0e6c1364ff229c90ea9c93f1c77623847f4
--- /dev/null
+++ b/docs/user-documentation/general-admin-users/img/378.png
<a href="https://unlimited.ethz.ch/x/RQfcBg" rel="nofollow">Click here</a></strong></p> +</div></div> +</div></div></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column"><p><span style="color: rgb(255,255,255);">S</span></p></div></div></div></div></div> +</div> +</div> +<div class="columnLayout single" data-layout="single"> +<div class="cell normal" data-type="normal"> +<div class="innerCell"> +<h2 style="text-align: left;" id="WelcometoConfluenceofETHZurich-"><u><br/></u></h2><div class="sectionColumnWrapper conf-macro output-block" data-hasbody="true" data-macro-name="section"><div class="sectionMacro"><div class="sectionMacroRow"><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:400px;min-width:400px;max-width:400px;"><h2 style="text-align: left;" id="WelcometoConfluenceofETHZurich-FAQ&ManualsofITServices">FAQ & Manuals of IT Services<u><br /></u></h2><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:195px;min-width:195px;max-width:195px;"><div class="panel conf-macro output-block" data-hasbody="true" data-macro-name="panel" style="background-color: #d6eaf8;border-color: #0264ad;border-width: 1px;"><div class="panelContent" style="background-color: #d6eaf8;"> +<p style="text-align: center;"><a href="/display/itkb"><span class="confluence-embedded-file-wrapper confluence-embedded-manual-size"><img class="confluence-embedded-image confluence-thumbnail" draggable="false" width="120" src="/download/thumbnails/36258959/firststeps_en.png?version=1&modificationDate=1594226269359&api=v2" data-image-src="/download/attachments/36258959/firststeps_en.png?version=1&modificationDate=1594226269359&api=v2" data-unresolved-comment-count="0" data-linked-resource-id="36258960" data-linked-resource-version="1" data-linked-resource-type="attachment" data-linked-resource-default-alias="firststeps_en.png" data-base-url="https://unlimited.ethz.ch" data-linked-resource-content-type="image/png" data-linked-resource-container-id="36258959" data-linked-resource-container-version="32" alt="" /></span></a></p><h3 style="text-align: center;" id="WelcometoConfluenceofETHZurich-ITKnowledgeBase(EN)"><a href="/display/itkb">IT Knowledge Base<br />(EN)</a></h3> +</div></div></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:195px;min-width:195px;max-width:195px;"><div class="panel conf-macro output-block" data-hasbody="true" data-macro-name="panel" style="background-color: #d4efdf;border-color: #3c5a0f;border-width: 1px;"><div class="panelContent" style="background-color: #d4efdf;"> +<p style="text-align: center;"><a href="/display/itwdb"><span class="confluence-embedded-file-wrapper confluence-embedded-manual-size"><img class="confluence-embedded-image confluence-thumbnail" draggable="false" width="120" src="/download/thumbnails/36258959/firststeps.png?version=1&modificationDate=1594226269381&api=v2" data-image-src="/download/attachments/36258959/firststeps.png?version=1&modificationDate=1594226269381&api=v2" data-unresolved-comment-count="0" data-linked-resource-id="36258961" data-linked-resource-version="1" data-linked-resource-type="attachment" data-linked-resource-default-alias="firststeps.png" data-base-url="https://unlimited.ethz.ch" data-linked-resource-content-type="image/png" data-linked-resource-container-id="36258959" data-linked-resource-container-version="32" alt="" /></span></a></p><h3 
style="text-align: center;" id="WelcometoConfluenceofETHZurich-IT-Wissensdatenbank(DE)"><a href="/display/itwdb">IT-Wissensdatenbank<br />(DE)</a></h3> +</div></div></div></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:10px;min-width:10px;max-width:10px;"><p><br /></p></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:400px;min-width:400px;max-width:400px;"><h2 class="auto-cursor-target" id="WelcometoConfluenceofETHZurich-News&Updates">News & Updates</h2><div class="panel conf-macro output-block" data-hasbody="true" data-macro-name="panel" style="background-color: #ebedef;border-color: #abb2b9;border-width: 1px;"><div class="panelContent" style="background-color: #ebedef;"> +<div class="panel conf-macro output-block" data-hasbody="true" data-macro-name="panel" style="background-color: #ffffff;border-width: 1px;"><div class="panelContent" style="background-color: #ffffff;"> +<p> + +</p><div class="blog-post-list conf-macro output-block" data-hasbody="false" data-macro-name="blog-posts"> + <h4 class="sub-heading">Blog Posts</h4> + <ul> + <li class="blog-item"> + <span class="blog-title"> <span class="icon aui-icon content-type-blogpost" title="Blog">Blog:</span> <a href="/pages/viewpage.action?pageId=40829180">New start page</a> + created by</span> + <div class="blog-item-creator"> <a href=" /display/~mbu4ea " class="url fn">Buschor Mark (4ea)</a></div> + <div class="blog-item-date"> + Jul 09, 2020 + </div> + <div class="blog-item-space"><a href="/display/CON">Confluence</a></div> + </li> + </ul> +</div> +<p> </p> +</div></div><p class="auto-cursor-target" style="text-align: right;"><a href="https://unlimited.ethz.ch/display/SDE/2020/07/01/Confluence+Server+Update" rel="nofollow">Show More</a></p> +</div></div><br /></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column"><p><br /></p></div></div></div></div><br/></div> +</div> +</div> +<div class="columnLayout single" data-layout="single"> +<div class="cell normal" data-type="normal"> +<div class="innerCell"> +<div class="sectionColumnWrapper conf-macro output-block" data-hasbody="true" data-macro-name="section"><div class="sectionMacro"><div class="sectionMacroRow"><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:270px;min-width:270px;max-width:270px;"><h4 class="auto-cursor-target" id="WelcometoConfluenceofETHZurich-OfficialWebsiteETHZurich">Official Website ETH Zurich</h4><div class="panel conf-macro output-block" data-hasbody="true" data-macro-name="panel" style="background-color: #ffffff;border-color: #abb2b9;border-width: 1px;"><div class="panelContent" style="background-color: #ffffff;"> +<a class="external-link" href="https://ethz.ch/en.html" rel="nofollow"><span class="confluence-embedded-file-wrapper image-center-wrapper confluence-embedded-manual-size"><img class="confluence-embedded-image confluence-thumbnail image-center" draggable="false" width="160" src="/download/thumbnails/36258959/eth_logo_kurz_pos.jpg?version=1&modificationDate=1594226269522&api=v2" data-image-src="/download/attachments/36258959/eth_logo_kurz_pos.jpg?version=1&modificationDate=1594226269522&api=v2" data-unresolved-comment-count="0" data-linked-resource-id="36258964" data-linked-resource-version="1" data-linked-resource-type="attachment" data-linked-resource-default-alias="eth_logo_kurz_pos.jpg" 
data-base-url="https://unlimited.ethz.ch" data-linked-resource-content-type="image/jpeg" data-linked-resource-container-id="36258959" data-linked-resource-container-version="32" alt="" /></span></a> +</div></div></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:270px;min-width:270px;max-width:270px;"><h4 class="auto-cursor-target" id="WelcometoConfluenceofETHZurich-ConfluenceatETHZurich">Confluence at ETH Zurich</h4><div class="panel conf-macro output-block" data-hasbody="true" data-macro-name="panel" style="background-color: #ffffff;border-color: #abb2b9;border-width: 1px;"><div class="panelContent" style="background-color: #ffffff;"> +<p class="auto-cursor-target"><a class="external-link" href="https://ethz.ch/services/en/it-services/catalogue/web-application-hosting/wiki.html" rel="nofollow"><span class="confluence-embedded-file-wrapper image-center-wrapper confluence-embedded-manual-size"><img class="confluence-embedded-image confluence-thumbnail image-center" draggable="false" width="160" src="/download/thumbnails/36258959/Confluence@2x-blue%20Kopie.png?version=1&modificationDate=1594226269509&api=v2" data-image-src="/download/attachments/36258959/Confluence@2x-blue%20Kopie.png?version=1&modificationDate=1594226269509&api=v2" data-unresolved-comment-count="0" data-linked-resource-id="36258962" data-linked-resource-version="1" data-linked-resource-type="attachment" data-linked-resource-default-alias="Confluence@2x-blue Kopie.png" data-base-url="https://unlimited.ethz.ch" data-linked-resource-content-type="image/png" data-linked-resource-container-id="36258959" data-linked-resource-container-version="32" alt="" /></span></a></p> +</div></div></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:270px;min-width:270px;max-width:270px;"><h4 class="auto-cursor-target" id="WelcometoConfluenceofETHZurich-Technicalcontact">Technical contact</h4><div class="panel conf-macro output-block" data-hasbody="true" data-macro-name="panel" style="background-color: #ffffff;border-color: #abb2b9;border-width: 1px;"><div class="panelContent" style="background-color: #ffffff;"> +<a class="external-link" href="https://ethz.ch/services/en/it-services.html" rel="nofollow"><span class="confluence-embedded-file-wrapper image-center-wrapper confluence-embedded-manual-size"><img class="confluence-embedded-image confluence-thumbnail image-center" draggable="false" width="160" src="/download/thumbnails/36258959/ITS-Logo_EN%20Kopie.png?version=1&modificationDate=1594226269517&api=v2" data-image-src="/download/attachments/36258959/ITS-Logo_EN%20Kopie.png?version=1&modificationDate=1594226269517&api=v2" data-unresolved-comment-count="0" data-linked-resource-id="36258963" data-linked-resource-version="1" data-linked-resource-type="attachment" data-linked-resource-default-alias="ITS-Logo_EN Kopie.png" data-base-url="https://unlimited.ethz.ch" data-linked-resource-content-type="image/png" data-linked-resource-container-id="36258959" data-linked-resource-container-version="32" alt="" /></span></a> +</div></div></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column"><p><br /></p></div></div></div></div><br/><br/><br/><br/><br/></div> +</div> +</div> +</div> + + + + + </div> + + + + + + +<div id="labels-section" class="pageSection group"> + <div class="labels-section-content content-column" entityid="36258959" entitytype="page"> + <div class="labels-content"> + 
+ <ul class="label-list label-list-right "> + + <li class="aui-label " data-label-id="27984234"><a class="aui-label-split-main" href="/label/CON/confluence" rel="tag">confluence</a></li><li class="aui-label " data-label-id="41320449"><a class="aui-label-split-main" href="/label/CON/mainpage" rel="tag">mainpage</a></li><li class="aui-label " data-label-id="41320450"><a class="aui-label-split-main" href="/label/CON/landingpage" rel="tag">landingpage</a></li> + </ul> + + </div> +</div> +</div> + + + + + + + + + + + + + + + + + + + +<div id="comments-section" class="pageSection group"> + + + + + +</div> + + + + + + +</div> + + + + + + + + + + + + + + + + + +<div id="space-tools-web-items" class="hidden"> + <div data-label="Overview" data-href="/spaces/viewspacesummary.action?key=CON">Overview</div> + <div data-label="Content Tools" data-href="/pages/reorderpages.action?key=CON">Content Tools</div> + <div data-label="Apps" data-href="/spaces/scroll-viewport/config.action?key=CON#/list">Apps</div> + </div> + + + + + </main><!-- \#main --> + + + + + + + +<div id="footer" role="contentinfo"> + <section class="footer-body"> + + + + + <ul id="poweredby"> + <li class="noprint">Powered by <a href="https://www.atlassian.com/software/confluence" class="hover-footer-link" rel="nofollow">Atlassian Confluence</a> <span id='footer-build-information'>8.2.0</span></li> + <li class="print-only">Printed by Atlassian Confluence 8.2.0</li> + <li class="noprint"><a href="https://support.atlassian.com/confluence-server/" class="hover-footer-link" rel="nofollow">Report a bug</a></li> + <li class="noprint"><a href="https://www.atlassian.com/company" class="hover-footer-link" rel="nofollow">Atlassian News</a></li> + </ul> + + + + <div id="footer-logo"><a href="https://www.atlassian.com/" rel="nofollow">Atlassian</a></div> + + + + </section> +</div> + + +</div> + +</div><!-- \#full-height-container --> +</div><!-- \#page --> + + <span style="display:none;" id="confluence-server-performance">{"serverDuration": 190, "requestCorrelationId": "808bb53b99ee1bc0"}</span> +</body> +</html> + diff --git a/docs/user-documentation/general-admin-users/img/382.png b/docs/user-documentation/general-admin-users/img/382.png new file mode 100644 index 0000000000000000000000000000000000000000..b3b77c5239d08a4f1543c59eaaf49e1bc9e686d9 Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/382.png differ diff --git a/docs/user-documentation/general-admin-users/img/401.png b/docs/user-documentation/general-admin-users/img/401.png new file mode 100644 index 0000000000000000000000000000000000000000..009cf449f85f496969c0fa89ee470e035370580f --- /dev/null +++ b/docs/user-documentation/general-admin-users/img/401.png @@ -0,0 +1,859 @@ + + +<!DOCTYPE html> +<html lang="en-GB" > +<head> + <title>Welcome to Confluence of ETH Zurich - Confluence - Confluence</title> + + + + + + + + + + + <meta http-equiv="X-UA-Compatible" content="IE=EDGE,chrome=IE7"> +<meta charset="UTF-8"> +<meta id="confluence-context-path" name="confluence-context-path" content=""> +<meta id="confluence-base-url" name="confluence-base-url" content="https://unlimited.ethz.ch"> + + <meta id="atlassian-token" name="atlassian-token" content="2f665f788dd4d48becd59686fb87fb7075ed450b"> + + +<meta id="confluence-space-key" name="confluence-space-key" content="CON"> +<script type="text/javascript"> + var contextPath = ''; +</script> + + + + <meta name="confluence-request-time" content="1688136365797"> + + + + <style>.ia-fixed-sidebar, .ia-splitter-left {width: 
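[Editorial note: the defect above is easy to catch mechanically — every valid PNG begins with the fixed 8-byte signature 89 50 4E 47 0D 0A 1A 0A, whereas a saved HTML page begins with text such as "<!DOCTYPE html>". A minimal, hypothetical pre-commit sketch in Python; the script name and CLI below are illustrative and not part of this changeset:]

import sys
from pathlib import Path

PNG_MAGIC = b"\x89PNG\r\n\x1a\n"  # mandatory first 8 bytes of any valid PNG file

def is_real_png(path: Path) -> bool:
    """True if the file starts with the PNG signature (reads only 8 bytes)."""
    with path.open("rb") as f:
        return f.read(8) == PNG_MAGIC

if __name__ == "__main__":
    # e.g. python check_png.py docs/user-documentation/general-admin-users/img/*.png
    bad = [p for p in map(Path, sys.argv[1:]) if not is_real_png(p)]
    for p in bad:
        print(f"{p}: not a PNG (content is likely a saved HTML page)")
    sys.exit(1 if bad else 0)

[Run against the img/ directory before committing, such a check would have flagged 401.png immediately.]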
style="text-align: center;" id="WelcometoConfluenceofETHZurich-IT-Wissensdatenbank(DE)"><a href="/display/itwdb">IT-Wissensdatenbank<br />(DE)</a></h3> +</div></div></div></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:10px;min-width:10px;max-width:10px;"><p><br /></p></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:400px;min-width:400px;max-width:400px;"><h2 class="auto-cursor-target" id="WelcometoConfluenceofETHZurich-News&Updates">News & Updates</h2><div class="panel conf-macro output-block" data-hasbody="true" data-macro-name="panel" style="background-color: #ebedef;border-color: #abb2b9;border-width: 1px;"><div class="panelContent" style="background-color: #ebedef;"> +<div class="panel conf-macro output-block" data-hasbody="true" data-macro-name="panel" style="background-color: #ffffff;border-width: 1px;"><div class="panelContent" style="background-color: #ffffff;"> +<p> + +</p><div class="blog-post-list conf-macro output-block" data-hasbody="false" data-macro-name="blog-posts"> + <h4 class="sub-heading">Blog Posts</h4> + <ul> + <li class="blog-item"> + <span class="blog-title"> <span class="icon aui-icon content-type-blogpost" title="Blog">Blog:</span> <a href="/pages/viewpage.action?pageId=40829180">New start page</a> + created by</span> + <div class="blog-item-creator"> <a href=" /display/~mbu4ea " class="url fn">Buschor Mark (4ea)</a></div> + <div class="blog-item-date"> + Jul 09, 2020 + </div> + <div class="blog-item-space"><a href="/display/CON">Confluence</a></div> + </li> + </ul> +</div> +<p> </p> +</div></div><p class="auto-cursor-target" style="text-align: right;"><a href="https://unlimited.ethz.ch/display/SDE/2020/07/01/Confluence+Server+Update" rel="nofollow">Show More</a></p> +</div></div><br /></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column"><p><br /></p></div></div></div></div><br/></div> +</div> +</div> +<div class="columnLayout single" data-layout="single"> +<div class="cell normal" data-type="normal"> +<div class="innerCell"> +<div class="sectionColumnWrapper conf-macro output-block" data-hasbody="true" data-macro-name="section"><div class="sectionMacro"><div class="sectionMacroRow"><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:270px;min-width:270px;max-width:270px;"><h4 class="auto-cursor-target" id="WelcometoConfluenceofETHZurich-OfficialWebsiteETHZurich">Official Website ETH Zurich</h4><div class="panel conf-macro output-block" data-hasbody="true" data-macro-name="panel" style="background-color: #ffffff;border-color: #abb2b9;border-width: 1px;"><div class="panelContent" style="background-color: #ffffff;"> +<a class="external-link" href="https://ethz.ch/en.html" rel="nofollow"><span class="confluence-embedded-file-wrapper image-center-wrapper confluence-embedded-manual-size"><img class="confluence-embedded-image confluence-thumbnail image-center" draggable="false" width="160" src="/download/thumbnails/36258959/eth_logo_kurz_pos.jpg?version=1&modificationDate=1594226269522&api=v2" data-image-src="/download/attachments/36258959/eth_logo_kurz_pos.jpg?version=1&modificationDate=1594226269522&api=v2" data-unresolved-comment-count="0" data-linked-resource-id="36258964" data-linked-resource-version="1" data-linked-resource-type="attachment" data-linked-resource-default-alias="eth_logo_kurz_pos.jpg" 
data-base-url="https://unlimited.ethz.ch" data-linked-resource-content-type="image/jpeg" data-linked-resource-container-id="36258959" data-linked-resource-container-version="32" alt="" /></span></a> +</div></div></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:270px;min-width:270px;max-width:270px;"><h4 class="auto-cursor-target" id="WelcometoConfluenceofETHZurich-ConfluenceatETHZurich">Confluence at ETH Zurich</h4><div class="panel conf-macro output-block" data-hasbody="true" data-macro-name="panel" style="background-color: #ffffff;border-color: #abb2b9;border-width: 1px;"><div class="panelContent" style="background-color: #ffffff;"> +<p class="auto-cursor-target"><a class="external-link" href="https://ethz.ch/services/en/it-services/catalogue/web-application-hosting/wiki.html" rel="nofollow"><span class="confluence-embedded-file-wrapper image-center-wrapper confluence-embedded-manual-size"><img class="confluence-embedded-image confluence-thumbnail image-center" draggable="false" width="160" src="/download/thumbnails/36258959/Confluence@2x-blue%20Kopie.png?version=1&modificationDate=1594226269509&api=v2" data-image-src="/download/attachments/36258959/Confluence@2x-blue%20Kopie.png?version=1&modificationDate=1594226269509&api=v2" data-unresolved-comment-count="0" data-linked-resource-id="36258962" data-linked-resource-version="1" data-linked-resource-type="attachment" data-linked-resource-default-alias="Confluence@2x-blue Kopie.png" data-base-url="https://unlimited.ethz.ch" data-linked-resource-content-type="image/png" data-linked-resource-container-id="36258959" data-linked-resource-container-version="32" alt="" /></span></a></p> +</div></div></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column" style="width:270px;min-width:270px;max-width:270px;"><h4 class="auto-cursor-target" id="WelcometoConfluenceofETHZurich-Technicalcontact">Technical contact</h4><div class="panel conf-macro output-block" data-hasbody="true" data-macro-name="panel" style="background-color: #ffffff;border-color: #abb2b9;border-width: 1px;"><div class="panelContent" style="background-color: #ffffff;"> +<a class="external-link" href="https://ethz.ch/services/en/it-services.html" rel="nofollow"><span class="confluence-embedded-file-wrapper image-center-wrapper confluence-embedded-manual-size"><img class="confluence-embedded-image confluence-thumbnail image-center" draggable="false" width="160" src="/download/thumbnails/36258959/ITS-Logo_EN%20Kopie.png?version=1&modificationDate=1594226269517&api=v2" data-image-src="/download/attachments/36258959/ITS-Logo_EN%20Kopie.png?version=1&modificationDate=1594226269517&api=v2" data-unresolved-comment-count="0" data-linked-resource-id="36258963" data-linked-resource-version="1" data-linked-resource-type="attachment" data-linked-resource-default-alias="ITS-Logo_EN Kopie.png" data-base-url="https://unlimited.ethz.ch" data-linked-resource-content-type="image/png" data-linked-resource-container-id="36258959" data-linked-resource-container-version="32" alt="" /></span></a> +</div></div></div><div class="columnMacro conf-macro output-block" data-hasbody="true" data-macro-name="column"><p><br /></p></div></div></div></div><br/><br/><br/><br/><br/></div> +</div> +</div> +</div> + + + + + </div> + + + + + + +<div id="labels-section" class="pageSection group"> + <div class="labels-section-content content-column" entityid="36258959" entitytype="page"> + <div class="labels-content"> + 
diff --git a/docs/user-documentation/general-admin-users/img/434.png b/docs/user-documentation/general-admin-users/img/434.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-admin-users/img/502.png b/docs/user-documentation/general-admin-users/img/502.png new file mode 100644 index 0000000000000000000000000000000000000000..77318fce512435129b90920e71a1f6293106ba6f Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/502.png differ diff --git a/docs/user-documentation/general-admin-users/img/521.png b/docs/user-documentation/general-admin-users/img/521.png new file mode 100644 index 0000000000000000000000000000000000000000..d9c41aa39daf00db79c8a4a70b0fa21e95b16748 Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/521.png differ diff --git a/docs/user-documentation/general-admin-users/img/645.png b/docs/user-documentation/general-admin-users/img/645.png new file mode 100644 index 0000000000000000000000000000000000000000..37722ee9f008a2def8e4fb38feb4015c79e6f205 Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/645.png differ diff --git a/docs/user-documentation/general-admin-users/img/731.png b/docs/user-documentation/general-admin-users/img/731.png new file mode 100644 index 0000000000000000000000000000000000000000..a7b8447d7f85b74264e1a611fad70ddc006b607f
Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/731.png differ diff --git a/docs/user-documentation/general-admin-users/img/777.png b/docs/user-documentation/general-admin-users/img/777.png new file mode 100644 index 0000000000000000000000000000000000000000..3c88ab07d88277f6e1a6cc681f0b10bd3761a79a Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/777.png differ diff --git a/docs/user-documentation/general-admin-users/img/80.png b/docs/user-documentation/general-admin-users/img/80.png new file mode 100644 index 0000000000000000000000000000000000000000..d9c41aa39daf00db79c8a4a70b0fa21e95b16748 Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/80.png differ diff --git a/docs/user-documentation/general-admin-users/img/874.png b/docs/user-documentation/general-admin-users/img/874.png new file mode 100644 index 0000000000000000000000000000000000000000..d1525ddd2bb2de370b86cfeb05dcabd11c962c0c Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/874.png differ diff --git a/docs/user-documentation/general-admin-users/img/915.png b/docs/user-documentation/general-admin-users/img/915.png new file mode 100644 index 0000000000000000000000000000000000000000..6d3c263512b1f988f6a6ea9aa8706280952a0bf9 Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/915.png differ diff --git a/docs/user-documentation/general-admin-users/img/922.png b/docs/user-documentation/general-admin-users/img/922.png new file mode 100644 index 0000000000000000000000000000000000000000..ea776ed86e4bb6405cf31cdf3e0db448a46b0033 Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/922.png differ diff --git a/docs/user-documentation/general-admin-users/img/941.png b/docs/user-documentation/general-admin-users/img/941.png new file mode 100644 index 0000000000000000000000000000000000000000..45c663ab028273c79c60a252a2890b11026de88d Binary files /dev/null and b/docs/user-documentation/general-admin-users/img/941.png differ diff --git a/docs/user-documentation/general-admin-users/index.rst b/docs/user-documentation/general-admin-users/index.rst index 92676570b241a1583996ab86348a1ca269916751..18078af63347f2b9635cd8297ed1e86aff2aba8f 100644 --- a/docs/user-documentation/general-admin-users/index.rst +++ b/docs/user-documentation/general-admin-users/index.rst @@ -1,9 +1,10 @@ -##################### -General Admin Users -##################### - -.. toctree:: - :maxdepth: 4 - - Admins Documentation </user-documentation/general-admin-users/admins-documentation/index> - Configurable Scripts </user-documentation/general-admin-users/properties-handled-by-scripts/index> +##################### +General Admin Users +##################### + +.. 
toctree:: + :maxdepth: 4 + + Admins Documentation </user-documentation/general-admin-users/admins-documentation/index> + Properties Handled By Scripts <properties-handled-by-scripts> + Custom Database Queries <custom-database-queries> diff --git a/docs/user-documentation/general-admin-users/properties-handled-by-scripts/properties-handled-by-scripts.md b/docs/user-documentation/general-admin-users/properties-handled-by-scripts.md similarity index 99% rename from docs/user-documentation/general-admin-users/properties-handled-by-scripts/properties-handled-by-scripts.md rename to docs/user-documentation/general-admin-users/properties-handled-by-scripts.md index a8c9a480bbc119cbc152e3f83e516f99ef56cffb..72ed940a8a389c8f8083f533d8aa72622a217aea 100644 --- a/docs/user-documentation/general-admin-users/properties-handled-by-scripts/properties-handled-by-scripts.md +++ b/docs/user-documentation/general-admin-users/properties-handled-by-scripts.md @@ -77,7 +77,7 @@ following steps. section. (e.g. Admin->Types->Samples. Select the sample and click edit.) - +  @@ -107,10 +107,10 @@ To create a dynamic property: - Define a property type with an appropriate name and data type (`Admin->Plugins→Add Plugin`) - + - Choose `Dynamic Property Evaluator` from the Plugin type dropdown list in the upper left corner. - + - You may evaluate the script on a chosen entity in the Script Tester section. ### Creating scripts @@ -431,7 +431,7 @@ validation script for each type: - You find a property which is called 'Validation Script' (see screenshot below). Just select your defined Script and hit save. - + ### Creating and Deploying Java Validation Plugins @@ -518,7 +518,7 @@ To create a Managed Property: the script has defined the function `batchColumnNames` or `inputWidgets`. - + ### Creating scripts @@ -642,7 +642,7 @@ The picture below shows that in the detail view of CELL\_PLATE sample S1 there will be a tab titled *Fixed Table* containing a table defined by the script. The table has the same functionality as all other openBIS tables, like sorting, filtering, exporting etc. - + ###### Example 2 @@ -728,7 +728,7 @@ Let's assume that: The picture below shows that in the detail view of sample S1 there will be a tab titled CSV containing a table defined by the script. - + The managed property value will be visible as text in the left panel (*Sample Properties*) only if the user has enabled debugging mode in openBIS @@ -774,7 +774,7 @@ input for actions like add, edit and delete: The picture below shows the updated detail view of sample S1. For every action defined in the script there is a button in the bottom toolbar of the table. - + The screenshot was taken after a user clicked on the first table row and then clicked on the `Edit` button. This resulted in showing a dialog with input fields defined in the script. Every field has a default value set @@ -871,7 +871,7 @@ property `MANGED-TEXT` would create in the sample detail view - + ###### Example 7 @@ -912,14 +912,14 @@ Uploading the following file for such a sample type /test/sample-with-managed-property-2 sec 47.11 would lead to a detailed view as in the following screenshot: - + If the flag *Shown in Edit Views* is set and the flag *Show Raw Value in Forms* is not set, the registration form would have a field called 'Managed Text' with initially one section with input fields 'Unit' and 'Value': - + With the '+' and 'Add More' buttons additional sections can be created. Existing sections can be deleted with the '-' button.
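Behind such behaviour is a small server-side Jython plugin. Purely as an illustration, here is a hypothetical sketch built around the hook names referenced on this page (`batchColumnNames`, `updateFromBatchInput`); the `property` object is assumed to be provided by the openBIS managed-property script context, and exact signatures may differ between versions:

```python
# Hypothetical managed-property plugin sketch (server-side Jython).
# Hook names follow the functions referenced in this page; the `property`
# object is assumed to come from the openBIS script context.
def batchColumnNames():
    # columns offered in the batch upload file instead of the raw value
    return ['Unit', 'Value']

def updateFromBatchInput(bindings):
    # combine the two upload columns into the single stored property value
    property.setValue(bindings.get('Unit') + ' ' + bindings.get('Value'))
```

Returning the two column names makes batch files ask for 'Unit' and 'Value' instead of the raw stored value; the update hook then decides how those columns are merged on the server.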
The section fields @@ -938,7 +938,7 @@ is replaced by the function `inputWidgets` as in the following example: The field 'Managed Text' in the registration form will be as shown in the following screenshot: - + Both fields are mandatory and the first field is a combo box with the two elements 'cm' and 'mm'. diff --git a/docs/user-documentation/general-admin-users/properties-handled-by-scripts/index.rst b/docs/user-documentation/general-admin-users/properties-handled-by-scripts/index.rst deleted file mode 100644 index 6a2b497809e241d8e9c0fd5356e0cf27874f58a1..0000000000000000000000000000000000000000 --- a/docs/user-documentation/general-admin-users/properties-handled-by-scripts/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -Properties handled by scripts -============= - -.. toctree:: - :maxdepth: 4 - - properties-handled-by-scripts \ No newline at end of file diff --git a/docs/user-documentation/general-users/additional-functionalities.md b/docs/user-documentation/general-users/additional-functionalities.md index 47f8b06e6583f1b285c57943c36487829264d9c4..f505209aecd7beba3fc7601918e03062d558319a 100644 --- a/docs/user-documentation/general-users/additional-functionalities.md +++ b/docs/user-documentation/general-users/additional-functionalities.md @@ -43,7 +43,7 @@ Here we give an overview of the main functionalities of the tables.  -## Filters +### Filters Two filter options are available from the **Filters** button: **Filter Per Column** and **Global Filter**. The first allows you to filter on @@ -60,7 +60,7 @@ terms across the entire table using the **AND** or **OR** operator. >  -## Sorting +### Sorting It is possible to sort individual columns or multiple columns. For multi-column sorting, you should click on the column header and press @@ -73,7 +73,7 @@ each column, as shown below. >  -## Exports +### Exports Tables can be exported in different ways, using the export button shown below. @@ -147,7 +147,7 @@ below. > >  -## Columns +### Columns Users can select which properties to display in the table by clicking on the **Columns** button. It is also possible to show all properties or @@ -161,7 +161,7 @@ This information is stored in the database for each user.  -### **Spreadsheets** +#### **Spreadsheets** If a table contains *Objects* which have a spreadsheet field which is filled in, a spreadsheet icon is displayed in the table. Upon clicking @@ -175,7 +175,7 @@ on the icon, the content of the spreadsheet can be expanded.  > >  -### Text fields +#### Text fields If a table contains Objects which have long text fields, only the beginning of the text is shown and can be expanded. If the text contains @@ -188,7 +188,7 @@ the text becomes visible by clicking on the icon. > >  -## **Selection of entries in table** +### **Selection of entries in table** Single entries in a table can be selected using the checkbox in the row.
By clicking the checkbox in the table header, all entries of the table @@ -225,14 +225,6 @@ In *Object* tables inside *Experiments/Collections* there is an  - - - - - - - - Updated on April 26, 2023 ## Browse Entries by Type diff --git a/docs/user-documentation/general-users/barcodes.md b/docs/user-documentation/general-users/barcodes.md index d6e2b9812ab3d04cd15ea95e59bddaca70dfb0bd..436938f04e973434f762b5538e86f4e242440292 100644 --- a/docs/user-documentation/general-users/barcodes.md +++ b/docs/user-documentation/general-users/barcodes.md @@ -1,14 +1,82 @@ -Barcodes -==== - +# Barcodes -[](# "Print this article") +## Barcodes - +The barcode functionality must be enabled in openBIS by a *lab manager* +or *group admin*: [Enable +Barcodes](https://openbis.ch/index.php/docs/admin-documentation-openbis-19-06-4/enable-barcodes/). +### Barcodes for individual samples +When a sample is registered, a barcode is automatically generated by +openBIS. This is found in the **Identification info** section, as shown +below. -## Printers + + +This barcode can be printed and the label can be added to the vial +containing the sample. The option to print the barcode is under the +**More..** menu + + + +If a sample already has its own barcode, it is possible to scan this with +a scanner or the camera of a mobile device and assign it to the sample. +This can be done after registration of a sample, with the **Custom +Barcode Update** option under the **More..** drop down. + + + +The custom barcode will appear in the *Identification Info*. If a custom +barcode is registered, the print function shown above will print the +custom barcode, instead of the default one. + +### Generate batches of barcodes + +In some cases there is a need to generate several barcodes that can be +assigned later to samples registered in openBIS. + +To generate new barcodes, go to the **Barcodes Generator** in the main +menu under **Utilities**. + + + +Users can select: + +1. The type of barcode to generate: + 1. *Code 128* + 2. *QR Code* + 3. *Micro QR code* +2. The number of barcodes to generate +3. The layout: + 1. *Split*: one barcode per page + 2. *Continuous*: several barcodes in one page +4. The width of the barcode +5. The length of the barcode + +After selecting the desired parameters, click the **Generate Custom +Barcodes** button. + +To print the barcodes, use the **print icon** on the form, next to +**Generate Custom Barcodes**. These barcodes can be printed on labels to +be attached to vials. When the samples are registered in openBIS, these +barcodes can be scanned and assigned to the samples as explained above. + +### Scan barcodes from mobile devices + +It is also possible to scan barcodes and QR codes using the scan button +on top of the main menu, as shown below. In this way, you can scan a +barcode or QR code already associated with an entry and this will open +the entry page in openBIS. You can use a scanner or the camera of a +mobile device. The selection you make is saved. + + + +Updated on July 5, 2023 + +## Printer and Barcode Scanner Requirements + +### Printers There are several manufacturers of printers and different kinds of barcodes and paper to adapt to different use cases. Most manufacturers @@ -21,9 +89,7 @@ thus having as the single requirement that the printer driver used allows printing PDF documents using applications such as Adobe Acrobat Reader or Preview (Mac). - - -### Printer Configuration +#### Printer Configuration There are different types of printer drivers.
The two types we can define as generic are **PS** (PostScript) (recommended) and **PCL** @@ -41,20 +107,15 @@ layouts are supported: The printer paper size needs to be configured for each printer. It is possible to indicate the size of the barcode, so it can fit. - - -### Printer testing +#### Printer testing We provide two example documents that can be used to test the printer. - - - Split barcodes example PDF: [printer-test-code128-split-50-15](https://openbis.ch/wp-content/uploads/2021/08/printer-test-code128-split-50-15.pdf) - Continuous barcodes example PDF: [printer-test-code128-continuous-50-15](https://openbis.ch/wp-content/uploads/2021/08/printer-test-code128-continuous-50-15.pdf) - Please consider that these examples likely do not correspond to the particular paper size of the printer being evaluated and as such the @@ -62,47 +123,32 @@ barcodes may look squashed. In order to obtain optimal results, the paper size would need to be configured. However, for the test it is enough to verify that the printer can print those files. - - - - -### Printer Advice before purchasing +#### Printer Advice before purchasing Before purchasing a printer, we recommend checking with the manufacturer that the barcode printer provides a general driver and that it can print one of the documents provided as examples above. - - -### Tested Printers +#### Tested Printers - Zebra ZD420 - - - -## Scanners +### Scanners There are several manufacturers of barcode scanners. In most cases scanners act as a keyboard for the computer, so when the barcode scanner scans a barcode it will type whatever has been scanned. - - -### Scanner Configuration +#### Scanner Configuration The scanner keyboard layout should be the same as that of the computer used. If not, this could cause problems if there are any special characters. - - -### Scanner testing +#### Scanner testing Open a notepad and scan the barcodes provided in the examples below. The scanner should read them and type the correct output. - - - Barcode Code 128. [scanner-test-code128-50-15](https://openbis.ch/wp-content/uploads/2021/08/scanner-test-code128-50-15.pdf). This should give as output "20210720122856003-454071" without @@ -116,18 +162,13 @@ scanner should read them and type the correct output. This should give as output "20210720122856003-454071" without quotes. - - - -### Scanner Advice before purchasing +#### Scanner Advice before purchasing Before purchasing a scanner, ensure that the barcode scanner provides a keyboard driver and ask the manufacturer's support to scan the examples above. - - -### Tested Scanners +#### Tested Scanners - Honeywell 1902G-BF diff --git a/docs/user-documentation/general-users/data-upload.md b/docs/user-documentation/general-users/data-upload.md index 8809b54d753baf9bca6a01d224c009c4d78dfdce..1532b00aed0ec3c61448b17b494ec2ec1d775513 100644 --- a/docs/user-documentation/general-users/data-upload.md +++ b/docs/user-documentation/general-users/data-upload.md @@ -116,7 +116,7 @@ on the eln-lims-dropbox folder.  -## Dropbox with markerfile +### Dropbox with markerfile  @@ -149,7 +149,7 @@ The marker file should be named:  -### **How to create the Marker file in Windows** +#### **How to create the Marker file in Windows**  @@ -166,7 +166,7 @@ You can create the Marker file in Windows using a text editor such as  -### **How to create the Marker file on Mac** +#### **How to create the Marker file on Mac**  @@ -191,7 +191,7 @@ other text editor will also work. 
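Because the marker file's content is irrelevant (only its name matters), it can also be created from a script rather than a text editor. A minimal sketch, assuming the common openBIS convention that the marker is a hidden file named `.MARKER_is_finished_` followed by the name of the uploaded folder; the dropbox path and folder name are hypothetical placeholders:

```python
# Sketch: deposit a folder into a marker-file dropbox, then create the marker.
# The dropbox mount point and payload name are placeholders; the marker naming
# follows the convention described above and should be checked for your server.
import shutil
from pathlib import Path

dropbox = Path("/mnt/openbis/eln-lims-dropbox-marker")  # hypothetical mount
payload = Path("my-measurement-folder")                 # data to register

shutil.copytree(payload, dropbox / payload.name)             # copy data first
(dropbox / (".MARKER_is_finished_" + payload.name)).touch()  # marker last
```

Creating the marker only after the copy completes is the point of the scheme: openBIS does not start registering until the marker appears, so it never picks up a half-written folder.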
-## Dropbox monitor +### Dropbox monitor  @@ -223,7 +223,7 @@ the log with the error is shown.  -## Registration of metadata for datasets via dropbox +### Registration of metadata for datasets via dropbox  diff --git a/docs/user-documentation/general-users/img/Barcode-generator-1024x466.png b/docs/user-documentation/general-users/img/Barcode-generator-1024x466.png new file mode 100644 index 0000000000000000000000000000000000000000..a79c2dcd0bf93403e644c8f6b423eacdaae7f693 Binary files /dev/null and b/docs/user-documentation/general-users/img/Barcode-generator-1024x466.png differ diff --git a/docs/user-documentation/general-users/img/Barcode-generator-1024x466.png:Zone.Identifier b/docs/user-documentation/general-users/img/Barcode-generator-1024x466.png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/Default-barcode-print-768x361.png b/docs/user-documentation/general-users/img/Default-barcode-print-768x361.png new file mode 100644 index 0000000000000000000000000000000000000000..1a4a8a6bb69ae1b69ee5413627fcd38abe1d28eb Binary files /dev/null and b/docs/user-documentation/general-users/img/Default-barcode-print-768x361.png differ diff --git a/docs/user-documentation/general-users/img/Default-barcode-print-768x361.png:Zone.Identifier b/docs/user-documentation/general-users/img/Default-barcode-print-768x361.png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/Default-sample-barcode-1.png b/docs/user-documentation/general-users/img/Default-sample-barcode-1.png new file mode 100644 index 0000000000000000000000000000000000000000..24dd8f3fbde517e2844c7b25c4340aaa5c53c968 Binary files /dev/null and b/docs/user-documentation/general-users/img/Default-sample-barcode-1.png differ diff --git a/docs/user-documentation/general-users/img/Default-sample-barcode-1.png:Zone.Identifier b/docs/user-documentation/general-users/img/Default-sample-barcode-1.png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/ELN-new-exp-step-from-exp-step-1 (1).png b/docs/user-documentation/general-users/img/ELN-new-exp-step-from-exp-step-1 (1).png new file mode 100644 index 0000000000000000000000000000000000000000..209bbd19eda32eabf4e6e5077c503b486ec5a683 Binary files /dev/null and b/docs/user-documentation/general-users/img/ELN-new-exp-step-from-exp-step-1 (1).png differ diff --git a/docs/user-documentation/general-users/img/ELN-new-exp-step-from-exp-step-1 (1).png:Zone.Identifier b/docs/user-documentation/general-users/img/ELN-new-exp-step-from-exp-step-1 (1).png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/ELN-new-exp-step-from-experiment (1).png b/docs/user-documentation/general-users/img/ELN-new-exp-step-from-experiment (1).png new file mode 100644 index 0000000000000000000000000000000000000000..eca39cc96a2b414e5d2cc67e9c0d77bb8290ff1e Binary files /dev/null and b/docs/user-documentation/general-users/img/ELN-new-exp-step-from-experiment (1).png differ diff --git a/docs/user-documentation/general-users/img/ELN-new-exp-step-from-experiment (1).png:Zone.Identifier 
b/docs/user-documentation/general-users/img/ELN-new-exp-step-from-experiment (1).png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.00.58 (1).png b/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.00.58 (1).png new file mode 100644 index 0000000000000000000000000000000000000000..63bcd99226cafba339f11f5d5e42bbbba5766f93 Binary files /dev/null and b/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.00.58 (1).png differ diff --git a/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.00.58 (1).png:Zone.Identifier b/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.00.58 (1).png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.29.33-1024x186 (1).png b/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.29.33-1024x186 (1).png new file mode 100644 index 0000000000000000000000000000000000000000..a90ca07934b04b2fe8fcb7e184f4f2b452bc684a Binary files /dev/null and b/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.29.33-1024x186 (1).png differ diff --git a/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.29.33-1024x186 (1).png:Zone.Identifier b/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.29.33-1024x186 (1).png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.40.53 (1).png b/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.40.53 (1).png new file mode 100644 index 0000000000000000000000000000000000000000..db69c29174e997aae86bdf52955d99a41107ef2a Binary files /dev/null and b/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.40.53 (1).png differ diff --git a/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.40.53 (1).png:Zone.Identifier b/docs/user-documentation/general-users/img/Screenshot-2021-09-21-at-19.40.53 (1).png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/Storage-manager-1024x572.png b/docs/user-documentation/general-users/img/Storage-manager-1024x572.png new file mode 100644 index 0000000000000000000000000000000000000000..eb6b57ec83149245dce9daf86217be9e757b3662 Binary files /dev/null and b/docs/user-documentation/general-users/img/Storage-manager-1024x572.png differ diff --git a/docs/user-documentation/general-users/img/add-parent-via-barcode (1).png b/docs/user-documentation/general-users/img/add-parent-via-barcode (1).png new file mode 100644 index 0000000000000000000000000000000000000000..cfcad971114a8962269c7b6dfa7e45f292ff94a5 Binary files /dev/null and b/docs/user-documentation/general-users/img/add-parent-via-barcode (1).png differ diff --git a/docs/user-documentation/general-users/img/add-parent-via-barcode (1).png:Zone.Identifier b/docs/user-documentation/general-users/img/add-parent-via-barcode (1).png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/docs/user-documentation/general-users/img/add-parents-with-barcodes-1024x233.png b/docs/user-documentation/general-users/img/add-parents-with-barcodes-1024x233.png new file mode 100644 index 0000000000000000000000000000000000000000..97d59b53a5e29732a9ea5352f9a2489a56b576b1 Binary files /dev/null and b/docs/user-documentation/general-users/img/add-parents-with-barcodes-1024x233.png differ diff --git a/docs/user-documentation/general-users/img/add-parents-with-barcodes-1024x233.png:Zone.Identifier b/docs/user-documentation/general-users/img/add-parents-with-barcodes-1024x233.png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/adv-search-datasets-1024x349.png b/docs/user-documentation/general-users/img/adv-search-datasets-1024x349.png new file mode 100644 index 0000000000000000000000000000000000000000..a23266aaa1d371bb846e9fb06411a9cd921b8132 Binary files /dev/null and b/docs/user-documentation/general-users/img/adv-search-datasets-1024x349.png differ diff --git a/docs/user-documentation/general-users/img/adv-search-exp-step-1024x366.png b/docs/user-documentation/general-users/img/adv-search-exp-step-1024x366.png new file mode 100644 index 0000000000000000000000000000000000000000..2d3633c6d88d13023094ea54bec16f15528a2b69 Binary files /dev/null and b/docs/user-documentation/general-users/img/adv-search-exp-step-1024x366.png differ diff --git a/docs/user-documentation/general-users/img/adv-search-experiment-1024x307.png b/docs/user-documentation/general-users/img/adv-search-experiment-1024x307.png new file mode 100644 index 0000000000000000000000000000000000000000..14e3f5f6dc46c40d632f5579790670c566b72c61 Binary files /dev/null and b/docs/user-documentation/general-users/img/adv-search-experiment-1024x307.png differ diff --git a/docs/user-documentation/general-users/img/adv-search-experiments-property-1024x373.png b/docs/user-documentation/general-users/img/adv-search-experiments-property-1024x373.png new file mode 100644 index 0000000000000000000000000000000000000000..3c6a0a1fbcb47be1790bc44d6c1fa4fa8608bdd8 Binary files /dev/null and b/docs/user-documentation/general-users/img/adv-search-experiments-property-1024x373.png differ diff --git a/docs/user-documentation/general-users/img/adv-search-objects-1024x382.png b/docs/user-documentation/general-users/img/adv-search-objects-1024x382.png new file mode 100644 index 0000000000000000000000000000000000000000..9118e290f1216ac7a19c1c2f780b56bb1c69eb62 Binary files /dev/null and b/docs/user-documentation/general-users/img/adv-search-objects-1024x382.png differ diff --git a/docs/user-documentation/general-users/img/advanced-search-all-field-type.png b/docs/user-documentation/general-users/img/advanced-search-all-field-type.png new file mode 100644 index 0000000000000000000000000000000000000000..2e1f5aaf45a38e85a7228aacc15a07f228c0ece8 Binary files /dev/null and b/docs/user-documentation/general-users/img/advanced-search-all-field-type.png differ diff --git a/docs/user-documentation/general-users/img/advanced-search-criteria.png b/docs/user-documentation/general-users/img/advanced-search-criteria.png new file mode 100644 index 0000000000000000000000000000000000000000..d1fee7b1766001cfaadfe23d7ce2bcadc2754b0a Binary files /dev/null and b/docs/user-documentation/general-users/img/advanced-search-criteria.png differ diff --git a/docs/user-documentation/general-users/img/advanced-search-navigation-menu-1024x448.png 
b/docs/user-documentation/general-users/img/advanced-search-navigation-menu-1024x448.png new file mode 100644 index 0000000000000000000000000000000000000000..b194de5da9ba8a3b10b3e9374ec5428d30cb05c2 Binary files /dev/null and b/docs/user-documentation/general-users/img/advanced-search-navigation-menu-1024x448.png differ diff --git a/docs/user-documentation/general-users/img/children-generator-1024x477 (1).png b/docs/user-documentation/general-users/img/children-generator-1024x477 (1).png new file mode 100644 index 0000000000000000000000000000000000000000..d6ae75ef2dc77a3457cd43b3f39c40f82e0a12d9 Binary files /dev/null and b/docs/user-documentation/general-users/img/children-generator-1024x477 (1).png differ diff --git a/docs/user-documentation/general-users/img/children-generator-1024x477 (1).png:Zone.Identifier b/docs/user-documentation/general-users/img/children-generator-1024x477 (1).png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/custom-barcode-update-1.png b/docs/user-documentation/general-users/img/custom-barcode-update-1.png new file mode 100644 index 0000000000000000000000000000000000000000..5d1139a1a02e73384cbdae6bc218984af3d0c2a9 Binary files /dev/null and b/docs/user-documentation/general-users/img/custom-barcode-update-1.png differ diff --git a/docs/user-documentation/general-users/img/custom-barcode-update-1.png:Zone.Identifier b/docs/user-documentation/general-users/img/custom-barcode-update-1.png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/dataset-file-search-2-1024x292.png b/docs/user-documentation/general-users/img/dataset-file-search-2-1024x292.png new file mode 100644 index 0000000000000000000000000000000000000000..99c786d6d2a8e4c6d354b7b2597a48c90e9edd8f Binary files /dev/null and b/docs/user-documentation/general-users/img/dataset-file-search-2-1024x292.png differ diff --git a/docs/user-documentation/general-users/img/dataset-table-exp-step-1024x455.png b/docs/user-documentation/general-users/img/dataset-table-exp-step-1024x455.png new file mode 100644 index 0000000000000000000000000000000000000000..fd9cbf71b22b4380fd6c25980af02da5d86c67ce Binary files /dev/null and b/docs/user-documentation/general-users/img/dataset-table-exp-step-1024x455.png differ diff --git a/docs/user-documentation/general-users/img/exp-step-parent-added-after-search-1024x332 (1).png b/docs/user-documentation/general-users/img/exp-step-parent-added-after-search-1024x332 (1).png new file mode 100644 index 0000000000000000000000000000000000000000..f37817554772af232646906965ca775e214aa9c3 Binary files /dev/null and b/docs/user-documentation/general-users/img/exp-step-parent-added-after-search-1024x332 (1).png differ diff --git a/docs/user-documentation/general-users/img/exp-step-parent-added-after-search-1024x332 (1).png:Zone.Identifier b/docs/user-documentation/general-users/img/exp-step-parent-added-after-search-1024x332 (1).png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/exp-step-search-parent-1024x201 (1).png b/docs/user-documentation/general-users/img/exp-step-search-parent-1024x201 (1).png new file mode 100644 index 
0000000000000000000000000000000000000000..20b96a5f840e919356f353bff0f734bb56ecaf80 Binary files /dev/null and b/docs/user-documentation/general-users/img/exp-step-search-parent-1024x201 (1).png differ diff --git a/docs/user-documentation/general-users/img/exp-step-search-parent-1024x201 (1).png:Zone.Identifier b/docs/user-documentation/general-users/img/exp-step-search-parent-1024x201 (1).png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/general-search-1024x486.png b/docs/user-documentation/general-users/img/general-search-1024x486.png new file mode 100644 index 0000000000000000000000000000000000000000..b84b187e9de127ea47dcc2e171f62bd1aa5918d2 Binary files /dev/null and b/docs/user-documentation/general-users/img/general-search-1024x486.png differ diff --git a/docs/user-documentation/general-users/img/menu-storage-manager.png b/docs/user-documentation/general-users/img/menu-storage-manager.png new file mode 100644 index 0000000000000000000000000000000000000000..cd030cb8af053bcedb13bb976f016994ae48dbf1 Binary files /dev/null and b/docs/user-documentation/general-users/img/menu-storage-manager.png differ diff --git a/docs/user-documentation/general-users/img/picture-inserted-in-RTE-1024x697.png b/docs/user-documentation/general-users/img/picture-inserted-in-RTE-1024x697.png new file mode 100644 index 0000000000000000000000000000000000000000..04d9248907e0cec3736d41003758ff8d57d66e07 Binary files /dev/null and b/docs/user-documentation/general-users/img/picture-inserted-in-RTE-1024x697.png differ diff --git a/docs/user-documentation/general-users/img/picture-inserted-in-RTE-1024x697.png:Zone.Identifier b/docs/user-documentation/general-users/img/picture-inserted-in-RTE-1024x697.png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/proejct-more-dropdown.png b/docs/user-documentation/general-users/img/proejct-more-dropdown.png new file mode 100644 index 0000000000000000000000000000000000000000..6a1d8de9d5936d2bdfae9729af7a4eac1c2d4856 Binary files /dev/null and b/docs/user-documentation/general-users/img/proejct-more-dropdown.png differ diff --git a/docs/user-documentation/general-users/img/proejct-more-dropdown.png:Zone.Identifier b/docs/user-documentation/general-users/img/proejct-more-dropdown.png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/proejct-page-overview-1024x640.png b/docs/user-documentation/general-users/img/proejct-page-overview-1024x640.png new file mode 100644 index 0000000000000000000000000000000000000000..644334c89db612657639ac97b2db73b1543e1a98 Binary files /dev/null and b/docs/user-documentation/general-users/img/proejct-page-overview-1024x640.png differ diff --git a/docs/user-documentation/general-users/img/proejct-page-overview-1024x640.png:Zone.Identifier b/docs/user-documentation/general-users/img/proejct-page-overview-1024x640.png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/project-page-experiments-view-1024x488.png b/docs/user-documentation/general-users/img/project-page-experiments-view-1024x488.png new file mode 100644 index 
0000000000000000000000000000000000000000..3a67c7b9a03bb52b3c679a13284e34655b63c506 Binary files /dev/null and b/docs/user-documentation/general-users/img/project-page-experiments-view-1024x488.png differ diff --git a/docs/user-documentation/general-users/img/project-page-experiments-view-1024x488.png:Zone.Identifier b/docs/user-documentation/general-users/img/project-page-experiments-view-1024x488.png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/register-protocol-1024x237.png b/docs/user-documentation/general-users/img/register-protocol-1024x237.png new file mode 100644 index 0000000000000000000000000000000000000000..c9192a48a5e56b03874dccdd513addd00562d0f4 Binary files /dev/null and b/docs/user-documentation/general-users/img/register-protocol-1024x237.png differ diff --git a/docs/user-documentation/general-users/img/register-protocol-1024x237.png:Zone.Identifier b/docs/user-documentation/general-users/img/register-protocol-1024x237.png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/remove-parents-1024x313 (1).png b/docs/user-documentation/general-users/img/remove-parents-1024x313 (1).png new file mode 100644 index 0000000000000000000000000000000000000000..e2b0b3a16690231c2e55e671a9d548c3b0fa3daf Binary files /dev/null and b/docs/user-documentation/general-users/img/remove-parents-1024x313 (1).png differ diff --git a/docs/user-documentation/general-users/img/remove-parents-1024x313 (1).png:Zone.Identifier b/docs/user-documentation/general-users/img/remove-parents-1024x313 (1).png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/scan-barcode-navigation-menu-1-1024x241.png b/docs/user-documentation/general-users/img/scan-barcode-navigation-menu-1-1024x241.png new file mode 100644 index 0000000000000000000000000000000000000000..55cdd508fbe57d28db7107ee7e7db085829e0b1f Binary files /dev/null and b/docs/user-documentation/general-users/img/scan-barcode-navigation-menu-1-1024x241.png differ diff --git a/docs/user-documentation/general-users/img/scan-barcode-navigation-menu-1-1024x241.png:Zone.Identifier b/docs/user-documentation/general-users/img/scan-barcode-navigation-menu-1-1024x241.png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/search-and-paste-parents-1024x572 (1).png b/docs/user-documentation/general-users/img/search-and-paste-parents-1024x572 (1).png new file mode 100644 index 0000000000000000000000000000000000000000..aa634b3174adcaab694aa2678f8cc9b749783430 Binary files /dev/null and b/docs/user-documentation/general-users/img/search-and-paste-parents-1024x572 (1).png differ diff --git a/docs/user-documentation/general-users/img/search-and-paste-parents-1024x572 (1).png:Zone.Identifier b/docs/user-documentation/general-users/img/search-and-paste-parents-1024x572 (1).png:Zone.Identifier new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/user-documentation/general-users/img/search-in-collection-1024x378.png b/docs/user-documentation/general-users/img/search-in-collection-1024x378.png 
new file mode 100644 index 0000000000000000000000000000000000000000..9fe3f77e3578038a100c221855216df37c526210 Binary files /dev/null and b/docs/user-documentation/general-users/img/search-in-collection-1024x378.png differ diff --git a/docs/user-documentation/general-users/img/search-types-in-ELN-UI.png b/docs/user-documentation/general-users/img/search-types-in-ELN-UI.png new file mode 100644 index 0000000000000000000000000000000000000000..5fdb1fd536ae8080ea1df9c0e9e84ff2d45b189e Binary files /dev/null and b/docs/user-documentation/general-users/img/search-types-in-ELN-UI.png differ diff --git a/docs/user-documentation/general-users/inventory-of-materials-and-methods.md b/docs/user-documentation/general-users/inventory-of-materials-and-methods.md index c8de3535b032c942d1df7a6a4f9a163f3366d274..803df57a6f3e7fb3faaefa253d0019612c2164ac 100644 --- a/docs/user-documentation/general-users/inventory-of-materials-and-methods.md +++ b/docs/user-documentation/general-users/inventory-of-materials-and-methods.md @@ -3,10 +3,6 @@ Inventory Of Materials And Methods ## Customise Collection View -[](# "Print this article") - - - It is possible to customise the view of *Collections* in the ELN. The default *Collection* can have a **Form View** or a **List View**. @@ -16,10 +12,6 @@ Depending on this selection, the collection view will be different.  - - - - **Form View:** This shows the metadata of the *Collection* along with the table of objects. This view is useful when a user wants to see specific metadata for a *Collection*. @@ -58,41 +50,27 @@ Updated on April 25, 2023 ## Register single entries in a Collection -[](# "Print this article") - - - - +[](# "Print this article") In this example, we will see how to register one *Object* of type **Sample** in the **Raw Samples** *Collection.* The same procedure should be followed to register any other *Object* in other *Collections*. - - 1. Click on the **Raw Samples** *Collection* folder in the main menu. 2. Click the **+ New Sample ** in the main page 3. Fill in the form 4. **Save** - - Please note that the *Object type* shown in the **+New** button (in this case **Sample**), is what is defined as the *default object type* for the *Collection*. If this is missing in the *Collection,* the button will not be present. - -  - - To register a different object type in the Collection: - - 1. Select **New Object** from the **More** drop down menu (as shown below) 2. Select the relevant *Object type* from the list *(Sample,* in this @@ -100,8 +78,6 @@ To register a different object type in the Collection: 3. Fill in the form 4. **Save** - -  Updated on April 25, 2023 @@ -110,8 +86,6 @@ Updated on April 25, 2023 [](# "Print this article") - - It is possible to register several samples at once via file upload. Two methods are currently available: @@ -120,22 +94,15 @@ methods are currently available: 2. Batch registration via TSV template file (TSV Batch Register Objects) - -  -  In openBIS versions prior to 20.10.6, the XLS batch registration is not recommended for registering several hundred entries. The use of the TSV batch upload is recommended in those cases.
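Repetitive registrations can also be scripted against the API instead of going through forms or template files. A minimal pybis sketch; the server URL, credentials, collection path, and property code below are hypothetical and must match your instance:

```python
from pybis import Openbis

o = Openbis("https://openbis.example.org")  # hypothetical server URL
o.login("username", "password")

# Register one object in a collection; when the type is configured to
# auto-generate codes, openBIS assigns the code/identifier on save.
sample = o.new_sample(
    type="SAMPLE",                                # object type, as in the UI
    space="MATERIALS",                            # hypothetical space
    experiment="/MATERIALS/SAMPLES/RAW_SAMPLES",  # hypothetical collection
    props={"$name": "Raw sample 1"},              # property codes vary per type
)
sample.save()
```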
- - -## Batch registration via Excel template file - - +### Batch registration via Excel template file To register several entries of the same type with an Excel file: @@ -147,12 +114,8 @@ To register several entries of the same type with an Excel file: [SAMPLE-COLLECTION-REGISTRATION-SAMPLE-STORAGE\_POSITION-template](https://openbis.ch/wp-content/uploads/2022/02/SAMPLE-COLLECTION-REGISTRATION-SAMPLE-STORAGE_POSITION-template-2.xlsx)) 4. Upload the file. - - #### **Codes** - - In most cases, *Object* types have the option to auto-generate codes set to true in the admin UI. In this case, openBIS automatically generates codes and identifiers when *Objects* are registered. If that is not the @@ -162,14 +125,10 @@ however be manually added if codes should be provided by the user and not automatically generated by openBIS. If codes should be manually entered and are missing, openBIS will show the error message "*UserFailureException message: Code cannot be empty for a non auto -generated code.*"Â - - +generated code.*" #### **Controlled vocabularies** - - For Controlled Vocabularies fields, i.e. fields with a drop down menu, you can enter either the **code** or the **label** of the terms in the Excel file. @@ -179,16 +138,10 @@ Please note that codes are not case-sensitive, but labels are. Codes and labels of vocabulary terms can be seen under **Utilities -> Vocabulary Browser**. - - #### Assign parents - - 1. **Assign already existing parents** - - If the parents you want to assign to your Objects are already registered in openBIS, in the **Parents** column of the Excel file, you can assign the relationship, by providing the identifier of the parent (i.e. /SPACE @@ -200,18 +153,12 @@ the keyboard shortcuts **Alt** + **Enter.** Example file: [SAMPLE-COLLECTION-REGISTRATION-ANTIBODY-STORAGE\_POSITION-template](https://openbis.ch/wp-content/uploads/2022/02/SAMPLE-COLLECTION-REGISTRATION-ANTIBODY-STORAGE_POSITION-template-1.xlsx) - - **Note:** no other separators (e.g. "," or ";") should be used, otherwise an error will be thrown. - - **2. Register Objects and assign parents in the same batch registration process.** - - If you want to register a few *Objects* and at the same time establish a parent-child relationship between some of them, you can do so by using the **\\$** and **Parents** columns. In the example below we want to @@ -221,37 +168,23 @@ antibody 1 we need to enter numbers or letters preceded by the \\$ symbol (i.e. \\$1, or \\$parent1). In the **Parents** column of antibody 2, we need to use the same value used in the **\\$ column** for antibody 1. - -  - - #### Date format For date fields, the expected format is YYYY-MM-DD. - - ### Register storage positions and samples in the same XLS file - - A **sample** and its **storage** **position** can be registered together, as shown in the template provided above: - - - The info in the **\$** column of the **sample** spreadsheet should match the **Parents** column in **Storage Positions** spreadsheet. In the \$ column you can enter numbers or letters preceded by the \$ symbol (i.e. \$1, \$2 or \$parent1, \$parent2)**.** - - -## Batch registration via TSV template file - - +### Batch registration via TSV template file 1. Select **TSV Batch Register Objects** from the **More** drop-down menu @@ -261,9 +194,7 @@ together, as shown in the template provided above: ** ** -### **Rules to follow to fill in the template .tsv file** - - +#### **Rules to follow to fill in the template .tsv file** 1. **Identifiers**: 1. 
Identifiers are given by **/SPACE code/PROJECT code/OBJECT @@ -286,9 +217,7 @@ together, as shown in the template provided above: 5. **Date fields**. The expected syntax for dates is YYYY-MM-DD. -## Advantages of XLS batch registration vs the old batch registration - - +### Advantages of XLS batch registration vs the old batch registration 1. XLS batch registration uses labels instead of codes in the column headers in the template file. @@ -300,46 +229,30 @@ together, as shown in the template provided above: 4. Upload of samples and storage positions can now be performed using a single template file. - - The *old* batch register mode is being maintained for backward compatibility and will be phased out. - - - - Updated on April 25, 2023 ## Batch register entries in several Collections [](# "Print this article") - - It is possible to batch register *Objects* that belong to different *Collections*. This can be done from the **Object Browser** page, under **Utilities**. Two options are available: - - 1. **XLS Batch Register Objects**: batch registration via Excel template file. 2. **TSV Batch Register Objects**: batch registration via .tsv template file. - -  - - ### XLS Batch Register Objects - - This option for batch registration is available since openBIS version 20.10.3. It allows you to register *Objects* of different types to multiple *Collections*. @@ -349,8 +262,6 @@ available types.  - - You can then download the template that will allow you to register *Objects* of the selected types to single or multiple *Collections*. The *Space, Project, Collection* need to be entered in the file. The @@ -358,12 +269,8 @@ complete path for *Projects* and *Collections* need to be used, as shown in this example file: [SAMPLE-GENERAL-REGISTRATION-EXPERIMENTAL\_STEP-MASS\_MEASUREMENT-SAMPLE-template](https://openbis.ch/wp-content/uploads/2022/03/SAMPLE-GENERAL-REGISTRATION-EXPERIMENTAL_STEP-MASS_MEASUREMENT-SAMPLE-template.xlsx) - - ### TSV Batch Register Objects - - The batch registration via .tsv file allows you to batch register only one type of *Object* at a time. *Objects* however can be registered to several *Collections*. @@ -371,54 +278,34 @@ several *Collections*. This batch upload method is kept for backward compatibility, but it will be phased out. - -  - - In this case, if *Objects* are to be registered to multiple *Collections*, an **identifier** for the *Objects* needs to be provided, as shown below. This is not the case with the XLS batch registration, where identifiers can be automatically generated by openBIS. - -  - - Updated on April 25, 2023 ## Batch update entries in a Collection [](# "Print this article") - - It is possible to modify the values of one or more fields in several objects simultaneously via batch update. This can be done in two ways: - - 1. **XLS Batch Update Objects** 2. **TSV Batch Update Objects** - - - - ### XLS Batch Update Objects - - 1. Navigate to the relevant collection (e.g. **Raw Samples**). 2. In the Collection table, from the **Columns,** select **Identifier** and the field(s) you want to update (e.g. **Source**), as shown below - -  3\. If you have several entries you can filter the table @@ -428,23 +315,18 @@ objects simultaneously via batch update. This can be done in two ways: Selected Columns; All pages/Current page/Selected rows** (depending on what you want to export)**.** - -  5\. Modify the file you just exported and save it. 6\. Select **XLS Batch Update Objects** from the **More..** dropdown -  7\. Upload the file you saved before and click **Accept**. 
Your entries will be updated. - - **Note**: If a column is removed from the file or a cell in a column is left empty @@ -455,25 +337,16 @@ enter    into the corresponding cell in the XLS file. - ### TSV Batch Update Objects - - 1. Navigate to the relevant collection (e.g. **Raw Samples**). 2\. Select **TSV Batch Update Objects** from the **More…** dropdown. - -  - - 3\. Select the relevant *Object type*, e.g. **Sample** - -  4\. Download the available **template** @@ -489,8 +362,6 @@ table and paste them in the file. Identifiers have this format: 7\. Save the file and upload it via the **TSV Batch Update Objects** from the **More..** dropdown - - **Note**: If a column is removed from the file or a cell in a column is left empty @@ -506,25 +377,17 @@ Updated on April 25, 2023 [](# "Print this article") - - It is possible to batch update *Objects* that belong to different *Collections*. This can be done from the **Object Browser** page, under **Utilities**. Two options are available: - - 1. **XLS Batch Update Objects**: batch update via Excel template file. 2. **TSV Batch Update Objects**: batch update via .tsv template file. - -  - - ### XLS Batch Update Objects This option for batch update is available since openBIS version 20.10.3. @@ -536,8 +399,6 @@ types.  - - You can then download the template that will allow you to update *Objects* of the selected types to single or multiple *Collections*. The *Space, Project, Collection* need to be entered in the file. The @@ -547,14 +408,8 @@ are unique in openBIS, by providing them openBIS will know which *Objects* have to be updated. Example file: [SAMPLE-GENERAL-REGISTRATION-EXPERIMENTAL\_STEP-MASS\_MEASUREMENT-SAMPLE-template](https://openbis.ch/wp-content/uploads/2022/03/SAMPLE-GENERAL-REGISTRATION-EXPERIMENTAL_STEP-MASS_MEASUREMENT-SAMPLE-template-1.xlsx) - - - - ### TSV Batch Update Objects - - The batch update via .tsv file allows you to batch update only one type of *Object* at a time. However, it is possible to update *Objects* that belong to several *Collections*. @@ -562,51 +417,33 @@ belong to several *Collections*. This batch update method is kept for backward compatibility, but it will be phased out. - -  - - The *Space, Project, Collection* need to be entered in the file. The complete path for *Projects* and *Collections* need to be used. In addition, identifiers for the *Objects* need to be provided: identifiers are unique in openBIS; by providing them, openBIS will know which *Objects* have to be updated. - -  - - Updated on April 25, 2023 ## Copy entries [](# "Print this article") - - -  - To create a copy of an existing entry, select **Copy** from the **More..** drop down menu in the *Collection* page. - -  - -  When an entry is copied, the user has the option to **link parents**, **copy children into the Parents’ collection** and **copy the comments log**. All these options are disabled by default. - -  Updated on July 27, 2022 @@ -615,39 +452,23 @@ Updated on July 27, 2022 [](# "Print this article") - - You can move entries to a different *Collection* either from the *entry* form or from a *Collection* table. - - ### Move from entry form - - To move entries to a different *Collection*, select **Move** from the **More…** drop down menu in the entry form. - -  - - You have the option to move to an existing *Collection* or to create a new *Collection*. - -  - - ### Move from Collection Table - - It is also possible to move objects from *Collection* tables. 
You can select one or multiple entries from a table and click on the **Move** button. @@ -655,12 +476,27 @@ button. Also in this case you can move to an existing *Collection* or create a new one. - -  - +Updated on July 27, 2022 - +## Register Protocols in the Methods Inventory -Updated on July 27, 2022 +Protocols are standard operating procedures (SOPs) used in the lab. If such procedures are in place, they should be organised in folders in the Methods Inventory which, by default, is accessible by all lab members. + +openBIS provides a General Protocol Object type that can be used. If different specific metadata is needed for protocols, new Object types can be created by an Instance admin in the admin UI and the corresponding Collections can be created in the ELN UI. + +To register a new General Protocol in the General Protocols folder, follow these steps: + +1. Go to the General Protocols Collection in the Methods folder. +2. Click the **+ New General Protocol** button in the main page. +3. Fill in the relevant fields in the form or choose from available templates. +4. **Save** + + + +### LINKS TO SAMPLES, MATERIALS, OTHER PROTOCOLS + +When writing a protocol, it is possible to create links to samples, materials or other protocols stored in the Inventory. These are parent-child relationships in openBIS. + +Everything that is used in the protocol can be added as Parent of the protocol itself. This can be done as described for Experimental Steps: [Add parents and children to Experimental Steps](lab-notebook.md) diff --git a/docs/user-documentation/general-users/lab-notebook.md b/docs/user-documentation/general-users/lab-notebook.md index 45b2b293aa3e5f9b1d83d66ed073c3c6d504a09b..4bfcb913d9ffbf38ab5804ec4c4bbe61acf88e16 100644 --- a/docs/user-documentation/general-users/lab-notebook.md +++ b/docs/user-documentation/general-users/lab-notebook.md @@ -120,51 +120,33 @@ Updated on April 25, 2023 ## Add parents and children to Experimental Steps -[](# "Print this article") - - - In the default *Experimental Step* and in the *Entry*, there is a **Parents** section where it is possible to specify links to materials and methods from the *Inventory* or to any other *Object*, e.g. another *Experimental Step* or *Entry*. - - **Parents** are all samples/materials used in an experimental procedure, standard protocols from the inventory followed in the experimental procedure, and the equipment used. It is also possible to set one *Experimental Step/Entry* as parent of a second *Experimental Step/Entry,* to keep the connection between the two. - - The name of this section, and which parents should be shown in the form, is customisable by the *lab manager* or *group admin* as described in [Customise Parents and Children Sections in Object Forms](https://openbis.ch/index.php/docs/admin-documentation-openbis-19-06-4/customise-parents-and-children-sections-in-object-forms/) +### Adding a parent -## Adding a parent - - - - - - -### Adding a parent of a predefined type in the form +  - +#### Adding a parent of a predefined type in the form In the screenshot above, **General protocol** is predefined as parent type in the form. We have two options to add a parent of this predefined type: - - -#### **1. Search** - - +##### **1. Search** 1. 1. Click on the **Search** button. 2. Enter the **name** or **code** of the entry you want to add as @@ -173,59 +155,36 @@ type: The parent will be added only when you **save** the entity. - - - + - +#####  -#### **2. Paste** - - +##### **2. Paste** 1. 1. 
You may copy the identifier of an entry you want to add as parent from a file, from an advanced search, or from another ELN page. You can paste the identifier(s) in the **Paste** text field. - 2. click the **+Add** button - - - - - -  + 2. Click the **+Add** button + - -### Adding parent of any available type - - +#### Adding parent of any available type If you want to add a parent that is not specified in the *Experimental Step* form, you can use the **Search Any** or **Paste Any** options next to **Parents.** - - - - - + - - -#### 1. Search Any - - +##### 1. Search Any 1. Click **Search Any** 2. Select the *Object* type for which you want to add a parent 3. Search by **code** or **name** as explained above 4. Click the **+ Add** button - -#### 2. Paste Any - - +##### 2. Paste Any There are cases where you may want to add several parents of the same type or also of different types. In this case, we recommend using the @@ -234,18 +193,9 @@ the desired entries from the table and the **Copy Identifiers** button will become visible. You can copy the identifiers and paste them in the **Paste Any** field in the *Experimental Step* page, as shown below. - + - - - - - - - - - -### Adding parent via barcodes +#### Adding parent via barcodes If you want to add a parent that is registered in openBIS and has a barcode associated with it, you can scan the barcode: @@ -254,37 +204,29 @@ barcode associated with it by scanning the barcode: 1\. Click on the **barcode** icon in the Parents section - + 2\. A **Barcode Reader** window opens - + -3\. Scan the barcode of the entry you want to add as parent +3\. Scan the barcode/QR code of the entry you want to add as parent with +a scanner or with the camera of a mobile device 4\. Click on the **Add Objects** button -5\. **Close** - - - - - -## Removing a parent +5\. **Close** - +### Removing a parent To remove a parent, choose **Remove** from the **Operations** drop down in the parent table, as shown below.  - - + -## **Adding and Removing Children** - - +### **Adding and Removing Children** Children of *Experimental Steps* are usually derivative *Experimental Steps,* or products of the *Experimental Step.* As for the **Parents** @@ -293,13 +235,10 @@ manager* in the **ELN Settings** ([Customise Parents and Children Sections in Object Forms)](https://openbis.ch/index.php/docs/admin-documentation-openbis-19-06-4/customise-parents-and-children-sections-in-object-forms/). - - The procedure for adding and removing children is the same as explained for parents. - -### Children Generator +#### Children Generator The **Children Generator** creates a matrix of all the parents entered in the *Experimental Step*, as shown below. Combinations of parents @@ -308,9 +247,9 @@ needed to generate children can then be selected by the user. The type of the children to be generated needs to be specified. The children will then be automatically generated by openBIS upon registration of the *Experimental Step*. - + -## Parent-child relationships between entries in lab notebook +### Parent-child relationships between entries in lab notebook In the Lab Notebook section, if you create a new *Object* from an existing *Object*, independently of the type, this will be automatically @@ -319,29 +258,18 @@ Experimental Step (measurement 4) from an existing Experimental Step (measurement 3), this will be automatically set as child of measurement 3, as shown below. - - - - + If you do not wish to have this relationship established, you need to create the new Object starting from the Experiment level, as shown below. - - - + 
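+The same parent-child links can also be created programmatically with pybis.
+This is a minimal sketch, not part of the ELN UI workflow: the identifiers are
+placeholders, and `o` is assumed to be an authenticated `Openbis` session as in
+the registration sketch earlier in this documentation.
+
+    # Fetch the child entry and link an existing entry as its parent
+    # (identifiers are placeholders following the /SPACE/PROJECT/CODE pattern)
+    step = o.get_sample("/LAB/PROJECT1/MEASUREMENT_4")
+    step.add_parents("/LAB/PROJECT1/MEASUREMENT_3")
+    step.save()
+
+Removing a parent should work analogously via the corresponding
+`del_parents()` call before saving.
+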
-Updated on April 25, 2023 +Updated on July 5, 2023 ## How to use protocols in Experimental Steps -[](# "Print this article") - - - - - When adding protocols to an *Experimental Step*, two options are available: @@ -447,6 +375,14 @@ like to have default values for those parameters.  Updated on December 8, 2022 + +## Datasets tables + +Since openBIS version 20.10.7, a dataset table has been added to the Experiment/Collection and Object pages. + +This table shows the metadata of the datasets. The content of the datasets can be navigated through the main menu. + + ## Data Access @@ -688,37 +624,30 @@ Updated on April 25, 2023 ## Project Overview -[](# "Print this article") - - -All *Experiments* and *Experimental Ste*ps have a **Show in project -overview** checkbox. When selected, these *Experiments* and/or -*Experimental Steps* will be shown in the *Project* form, as shown -below. +In the Project page you can choose to see: - - -This allows users to have a better overview of a *Project* and highlight -the most important findings. - - +1. Default Experiments and Experimental Steps with the field *Show in + project overview = true***.** This is a way to mark the most + relevant Experiments and Experimental Steps and see them at a glance + on the project page (**Show Overview**). +2. All experiments belonging to the project (**Show + Experiments/Collections**). - +The two options are available from the *More..* dropdown on the Project +page. - + -If the *Experiments* and *Object* tables are not shown in the project -page, you need to select **Show Experiments** and **Show Objects** from -the More.. drop down. +Below you see an example of an overview in a Project page. - + - -menu. +Below you see an example of the visualisation of Experiments and +Collections in a Project page. - + -Updated on April 25, 2023 +Updated on July 5, 2023 ## Edit and Delete Projects, Experiments, Experimental Steps @@ -777,8 +706,6 @@ Available roles are: 3. **Admin**: can create, modify and delete entities in Space or Project - - The roles can be granted to: 1. **User**: the user needs to be already registered in openBIS. The @@ -786,22 +713,16 @@ The roles can be granted to: 2. **Group**: the name of a user group existing in openBIS needs to be entered. - - - -  - -  - +Updated on April 25, 2023 - +## Rich Text Editor - +### EMBED IMAGES IN TEXT FIELDS - +To embed an image in a text field with the Rich Text Editor (RTE) enabled, you can simply drag & drop a .png or .jpg file and resize the image by clicking on and dragging the corners. 
-Updated on April 25, 2023 + diff --git a/docs/user-documentation/general-users/managing-storage-of-samples.md b/docs/user-documentation/general-users/managing-storage-of-samples.md index adf51769069a7766a7a461781281d4fd8f0ed692..c33d88ca25c5e26805797b24c8252cbd59622901 100644 --- a/docs/user-documentation/general-users/managing-storage-of-samples.md +++ b/docs/user-documentation/general-users/managing-storage-of-samples.md @@ -148,7 +148,7 @@ Updated on April 25, 2023  -## Delete single storage positions +### Delete single storage positions  @@ -175,7 +175,7 @@ from the trashcan (see  -## Remove one of multiple positions in the same box +### Remove one of multiple positions in the same box  @@ -200,7 +200,7 @@ steps:  -## Delete multiple storage positions +### Delete multiple storage positions  @@ -238,3 +238,39 @@ should be removed from there to be permanently deleted (see  Updated on May 2, 2023 + +## Overview of lab storages + +The **Storage Manager**, under **Utilities**, provides an overview of +each single storage configured for the lab by the lab admin. + + + +> 1. Select the storage containing the samples to visualise from the +> **Storage** drop down menu. +> 2. Click on a box to view its content. +> 3. When hovering with the mouse over a sample inside a box, the info +> about the sample is shown. +> +>  + + + +### Change storage position of samples + +The **Storage Manager** can also be used to move samples from one +storage position to another, if the location of the sample is changed: + +> 1. Click on **Toggle Storage B** (see figure above). +> 2. Select the destination storage from the **Storage** drop down +> menu. +> 3. Drag and drop the box or sample to move from **Storage A** to the +> desired position in **Storage B**. Please note that the move +> operation for samples with multiple positions in the same box or +> in different boxes is not supported. +> 4. Changes are visualised at the bottom of the page. To save them, +> click **Save Changes** on top of the **Storage Manager** form. + +Updated on April 25, 2023 diff --git a/docs/user-documentation/general-users/search.md b/docs/user-documentation/general-users/search.md index ec971ff5a561b3a2213f3c21bda8b382c8ccfe8b..cfe0bb2c466f34b08a0652c78cf6d15acd022242 100644 --- a/docs/user-documentation/general-users/search.md +++ b/docs/user-documentation/general-users/search.md @@ -1,10 +1,191 @@ -Search -==== - +# Search -[](# "Print this article") +## Advanced search - +The **Advanced Search** can be accessed from the main menu, under +**Utilities**. Alternatively, the advanced search can also be used after +performing a global search (see +[Search](https://openbis.ch/index.php/docs/user-documentation-20-10-3/search/)), +to refine the obtained results. + + + +In the advanced search, users can combine several search criteria using +either the **AND** or **OR** operators. Users can choose to search for: + +1. **All (prefix match, faster)**: search for the first 3 letters of a + word. The search is performed across all fields of all entities + (Experiments/Collections, Objects, Datasets). +2. **All (full word match, faster)**: search for a complete word. The + search is performed across all fields of all entities + (Experiments/Collections, Objects, Datasets). +3. **All (partial match, slower)**: search for a partial word. The + search is performed across all fields of all entities + (Experiments/Collections, Objects, Datasets). +4. 
**Experiment/Collection**: search is performed across all fields of + all Experiments/Collections. +5. **Object**: search is performed across all fields of all Objects. +6. **Dataset**: search is performed across all fields of all Datasets. +7. **A specific Object type** (e.g. Antibody, Bacteria, Cell Line, in + the picture below): search is performed across all fields of the + selected Object type only. + + + +After selecting what to search for, the search can be further restricted +to specific fields, in the **Field Type** drop down menu. The available +fields for a search vary depending on what the search is performed on. + +### Search for: All + +This includes all 3 "All" options described above. + +Available **Field Types**: + +1. **All**: search across all fields of all entities. + + + +In this case, this is the only available option and it is not possible +to restrict the search. + +### Search for: Experiment/Collection + +Available **Field Types**: + +1. **All**: search across all fields of all Experiments/Collections +2. **Property**: can select a specific property to search on. This can + be selected in the **Field Name**. + + + +It is possible to exclude terms from the search by selecting the NOT +checkbox in the first column of the table. + +If **Property** is selected in the **Field Type**, a list of all +available properties is shown in the **Field Name** drop down. +According to the type of property selected, the comparator operator will +be different (e.g. for a date field it is possible to select an exact +date, or everything before a given date or everything after). It is +possible to search on more than one field by clicking on the **+** +button in the table and to build complex queries in this way. By selecting +the NOT checkbox in the table, certain fields can be excluded from the +search. + + + +### Search for: Object + +Available **Field Types**: + +1. **All**: search across all fields of all Objects +2. **Property**: can select one or more specific properties to search + on. These can be selected in the **Field Name** (see above) +3. **Experiment/Collection**: search for Objects in a given + Experiment/Collection +4. **Parent**: search for Objects that have the specified parents +5. **Children**: search for Objects that have the specified children + + + +Also in this case, fields can be excluded from the search by selecting +the NOT checkbox in the table. + + +### Search for: Dataset + +Available **Field Types**: + +1. **All**: search across all fields of all Datasets +2. **Property**: can select one or more specific properties to search + on. These can be selected in the **Field Name** (see above) +3. **Object**: search for Datasets in a given Object +4. **Experiment/Collection**: search for Datasets in a given + Experiment/Collection + + + +Also in this case, fields can be excluded from the search by selecting +the NOT checkbox in the table. + +### Search for: specific Object Type (e.g. Experimental Step) + +In this case, the available Field Types are the same as when searching +for an Object. + +Available **Field Types**: + +1. **All**: search across all fields of the specific Object type (e.g. + Experimental Step) +2. **Property**: can select one or more specific properties to search + on. These can be selected in the **Field Name** (see above) +3. **Experiment/Collection**: search for Objects of the selected type + in a given Experiment/Collection +4. **Parent**: search for Objects of the selected type that have the + specified parents +5. 
**Children**: search for Objects of the selected type that have the + specified children + + + +### Search Collection + +It is possible to launch an advanced search limited to Objects of one +Collection from a Collection page, by selecting **Search in Collection** +from the **More** drop down. This redirects to the Advanced Search page +where the Collection is already pre-defined. + + + +Updated on July 5, 2023 + +## Search + +Different types of searches are available from the main menu in openBIS: + +1. Global search +2. BLAST search +3. Data Set Files Search +4. Advanced Search + + + +### Global search + +This functionality, available from the main menu, performs a search +across all database fields. Results are presented in a table in the +**Advanced Search** page. The search can also be further refined (see +[Advanced +search](https://openbis.ch/index.php/docs/user-documentation-20-10-3/search/advanced-search/)). + + + +### BLAST search + +This performs a BLAST search over nucleotide sequences contained either +in the **Sequence** property of an Object type (e.g. Plasmid or Oligo) or +in Datasets of type **SEQ\_FILES**. Results are shown in a table, sorted +by E-value. + +### Data Set File search + +This search allows users to search across names of files stored in +openBIS Datasets. + +Please note that it is not possible to search the content of the files. + +In the example below, we search for files that contain "mass-loss" in +the name, and we find 1 dataset that contains a file called +mass-loss-data.csv. + +By clicking on the table row that shows the Dataset containing the +searched file, you will be redirected to the Dataset page. + + + +Updated on April 25, 2023 + +## Save and reuse searches It is possible to save and re-use searches created in the [Advanced search](https://openbis.ch/index.php/docs/user-documentation-20-10-3/search/advanced-search/). @@ -12,27 +193,19 @@ search](https://openbis.ch/index.php/docs/user-documentation-20-10-3/search/adva Searches can be used by anyone with *User* or *Observer* rights to a given Space. - - In the Advanced Search page, build your search criteria (see example below). To save the search, click the **Save** button and enter: - - 1. The **Name** of the search 2. The **Experiment/Collection** where the search should be stored - - Searches are stored in *Experiments/Collections*.  - Saved searches are available from the **load a saved search** drop down menu, at the top of the **Advanced Search** page.   
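+The searches described above can also be run programmatically with pybis. A
+minimal sketch, assuming an authenticated `Openbis` session and placeholder
+Space, type, and Collection names (the exact filter keywords supported by
+`get_samples()`/`get_datasets()` may vary with the pybis version):
+
+    from pybis import Openbis
+
+    o = Openbis("https://openbis.example.org")  # placeholder URL
+    o.login("username", "password")             # placeholder credentials
+
+    # Roughly corresponds to "Search for: Object" restricted to a Space and type
+    samples = o.get_samples(space="MATERIALS", type="SAMPLE")
+    print(samples.df.head())  # results as a pandas DataFrame
+
+    # Roughly corresponds to "Search for: Dataset" in a given Experiment/Collection
+    datasets = o.get_datasets(experiment="/MATERIALS/SAMPLES/RAW_SAMPLES")
+    print(datasets.df.head())
+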
- -Updated on July 28, 2022 +Updated on July 28, 2022 \ No newline at end of file diff --git a/docs/user-documentation/legacy-advance-features/img/145.png b/docs/user-documentation/legacy-advance-features/img/145.png new file mode 100644 index 0000000000000000000000000000000000000000..31365c9c3f1acc93527b311d7ecb0cd1c5d06261 Binary files /dev/null and b/docs/user-documentation/legacy-advance-features/img/145.png differ diff --git a/docs/user-documentation/legacy-advance-features/img/162.png b/docs/user-documentation/legacy-advance-features/img/162.png new file mode 100644 index 0000000000000000000000000000000000000000..f9f8f6146ebfe6635645c1d6ee6805881c60816f Binary files /dev/null and b/docs/user-documentation/legacy-advance-features/img/162.png differ diff --git a/docs/user-documentation/legacy-advance-features/img/186.png b/docs/user-documentation/legacy-advance-features/img/186.png new file mode 100644 index 0000000000000000000000000000000000000000..b2f9bd3dd15bdc623917af3bdcf5386cfd4c2538 Binary files /dev/null and b/docs/user-documentation/legacy-advance-features/img/186.png differ diff --git a/docs/user-documentation/legacy-advance-features/img/199.png b/docs/user-documentation/legacy-advance-features/img/199.png new file mode 100644 index 0000000000000000000000000000000000000000..66e977c4a505614f7dc20e68f93608f64d019a3e Binary files /dev/null and b/docs/user-documentation/legacy-advance-features/img/199.png differ diff --git a/docs/user-documentation/legacy-advance-features/img/204.png b/docs/user-documentation/legacy-advance-features/img/204.png new file mode 100644 index 0000000000000000000000000000000000000000..3541c7d10798d789c58142585f588902723026cf Binary files /dev/null and b/docs/user-documentation/legacy-advance-features/img/204.png differ diff --git a/docs/user-documentation/legacy-advance-features/img/280.png b/docs/user-documentation/legacy-advance-features/img/280.png new file mode 100644 index 0000000000000000000000000000000000000000..116ad881fd8db80a747328bdac44e03534b6e2c7 Binary files /dev/null and b/docs/user-documentation/legacy-advance-features/img/280.png differ diff --git a/docs/user-documentation/legacy-advance-features/img/286.png b/docs/user-documentation/legacy-advance-features/img/286.png new file mode 100644 index 0000000000000000000000000000000000000000..31c52c389a14f30a9eaf58eb1b800256cfff8ce5 Binary files /dev/null and b/docs/user-documentation/legacy-advance-features/img/286.png differ diff --git a/docs/user-documentation/legacy-advance-features/img/297.png b/docs/user-documentation/legacy-advance-features/img/297.png new file mode 100644 index 0000000000000000000000000000000000000000..5caf46a3b7cfab87d71c1146173e3e792709ec2e Binary files /dev/null and b/docs/user-documentation/legacy-advance-features/img/297.png differ diff --git a/docs/user-documentation/legacy-advance-features/img/303.png b/docs/user-documentation/legacy-advance-features/img/303.png new file mode 100644 index 0000000000000000000000000000000000000000..2946fb9e2ec482366d35c8895364da974407942b Binary files /dev/null and b/docs/user-documentation/legacy-advance-features/img/303.png differ diff --git a/docs/user-documentation/legacy-advance-features/img/311.png b/docs/user-documentation/legacy-advance-features/img/311.png new file mode 100644 index 0000000000000000000000000000000000000000..64ff1ac1b3c7969ababbeeec0995981d62435aac Binary files /dev/null and b/docs/user-documentation/legacy-advance-features/img/311.png differ diff --git 
a/docs/user-documentation/legacy-advance-features/index.rst b/docs/user-documentation/legacy-advance-features/index.rst index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..cfc98ff1814eb7852a5bac09378a58e3b68855b4 100644 --- a/docs/user-documentation/legacy-advance-features/index.rst +++ b/docs/user-documentation/legacy-advance-features/index.rst @@ -0,0 +1,7 @@ +Legacy Advance Features +======================= + +.. toctree:: + :maxdepth: 4 + + openbis-kinme-nodes diff --git a/docs/user-documentation/legacy-advance-features/openbis-kinme-nodes.md b/docs/user-documentation/legacy-advance-features/openbis-kinme-nodes.md new file mode 100644 index 0000000000000000000000000000000000000000..7961fa667c2b9ced9f480d17490de3d8d6fb5b07 --- /dev/null +++ b/docs/user-documentation/legacy-advance-features/openbis-kinme-nodes.md @@ -0,0 +1,401 @@ +openBIS KNIME Nodes +=================== + +Introduction +------------ + +[KNIME](http://www.knime.org) is a powerful workflow system. It allows +you to import data from various sources and process it in a workflow +graphically designed by the user. + +There are special openBIS KNIME nodes for importing/exporting data +from/to openBIS. KNIME version 2.7.2 or higher is required. + +Installation +------------ + +1. Start the KNIME application. +2. Click on the menu item 'Install New Software...' in the 'Help' menu. An + installation dialog pops up. +3. Click on the add button. A dialog titled 'Add Repository' pops up. +4. Enter a name like 'KNIME Community Nodes' and the URL + <http://update.knime.org/community-contributions/3.1> +5. Check the check box of 'openBIS Knime Nodes' in the section 'Community + Contributions - Bioinformatics & NGS' and click the next button + twice. +6. Accept the license agreements. +7. Click the finish button. +8. Ignore the security warning and restart the KNIME application. + +Usage +----- + +All openBIS KNIME nodes can be found in the Node Repository under Community +Nodes -> openBIS: + + + +Drag and drop a node into the project and double-click on the node. A +node setting dialog opens for entering parameters. + +Nodes +----- + +All nodes need + +- the URL of the openBIS server, like + ` https://sprint-openbis.ethz.ch/openbis `. +- user credentials + +When configuring a node in the node setting dialog, the user is asked for +these parameters in the section 'Connection Parameters': + + + +After pressing the *connect* button, a connection to the openBIS server +will be established. This is needed for editing additional node +parameters. For example, the combo boxes of the reader nodes have to be +populated. + +For a data set registration node, the credentials combo box is only +filled if all nodes of the upstream part of the workflow are +successfully configured. + + + +The OK button closes the node setting dialog. The connection parameters +and all other parameters will be stored and used when executing a +workflow. + +### Defining openBIS URLs + +Contrary to the previous version of openBIS KNIME nodes (Version 13.04.0 +and earlier), the URL field in the node setting dialog is no longer a +text field but a combo box with URLs. This list of predefined URLs is +initially empty. It has to be created on the following preference page: + + + +### Defining User Credentials for Authentication + +For security reasons, it is not recommended to specify user ID and +password directly for each openBIS node. Instead, named credentials +should be used. This has the advantage that user ID and password are +entered only once for a workflow with several openBIS nodes.
+ +Named credentials are defined for a particular workflow. They are called +workflow credentials and can be specified via the context menu of the +workflow: + + + +Each set of credentials has a name (which is used in the combo box), a +user ID (called 'Login') and a password: + + + +The credentials are saved with the workflow except for the passwords. The +user will be asked for the passwords after loading a workflow. + + + +If user ID and password are entered directly in the node setting dialog, +the KNIME master key on the preferences page **KNIME -> Master Key** +should be activated. Otherwise passwords will be stored unencrypted! + +### openBIS Query Reader + +This node allows you to run parametrized SQL queries on openBIS. The combo +box shows a list of available queries. After choosing one, additional +parameters have to be entered. + +### openBIS Report Reader + +This node allows you to get a report for a specified data set. The combo box +shows a list of available reports. After choosing a report, a data set +should be entered. The button with three dots opens a dialog with a +convenient way to choose a data set. + +### openBIS Data Set File Importer + +This node allows you to download a particular file from a specified data +set. Data set code, file path and a local folder for downloads have to +be specified. The output of the node is not a table but an object of +type `org.knime.core.data.uri.URIPortObject`. Other nodes with input +ports of this type can access the downloaded file. Such nodes exist in +GenericKnimeNodes of the Community Nodes (which are a part of openMS +KNIME Nodes). Also 'openBIS Data Set Registration (URI Port)' is such a +node. + +The absolute path of the downloaded file is also available as a flow +variable `absolute-file-path`. This allows connecting an openBIS Data Set +File Importer with a file reader which supports absolute file paths in +flow variables, like the CSV Reader node. The mechanism of connecting +both nodes via flow variable ports is explained in the next section, +where a CSV Writer node is connected with an openBIS Data Set +Registration node. + +This importer node also creates the following KNIME flow variables: +`openbis.DATA_SET`, `openbis.EXPERIMENT`, and +optionally `openbis.SAMPLE`. These variables contain data set code, +experiment identifier, and sample identifier, respectively. The flow +variable `openbis.SAMPLE` only appears if the data set is +directly linked to a sample. KNIME flow variables are available to other +nodes downstream. + + + +### openBIS Data Set Registration (Flow Variable Port) + +This node allows you to register a file as a data set. The path of the file +to be registered is the value of a flow variable specified in the node +settings dialog. In addition, the user has to specify owner type and data +set type. + +The owner identifier (which is either a data set code, an experiment +identifier, or a sample identifier depending on the chosen owner type) +can be chosen via a chooser dialog. If the owner field is empty, one of +the flow variables `openbis.DATA_SET`, `openbis.EXPERIMENT`, or +`openbis.SAMPLE` will be used. + +#### Usage + +This node is usually used in combination with a writer node which stores +data (e.g. a data table) in a file. Writer nodes are end nodes of a +workflow. But it is possible to append another node downstream by using +the flow variable port. Normally the flow variable ports are not +visible. To make them visible, choose the item '**Show Flow Variable Ports**' +of the context menu of the node.
Two red circles will appear at the upper +corners of the node symbol: + + +Add a node of type 'openBIS Data Set Registration (Flow Variable Port)' +and connect the upper right circle of the writer node with the input +node of the registration node. A click on 'Hide Flow Variable Ports' of +the context menu of the writer node hides the upper left circle: + + +Now you need to tell the registration node which flow variable has the path +to the file to be registered. This requires two steps: + +1. The configuration parameter of the writer has to be made available + as a flow variable. This can be done in the tab 'Flow Variables' of the + node settings dialog. It lists all configuration parameters. If a + name is specified in the text field of a certain parameter, its value + will be available as a flow variable of the specified name for the + downstream nodes. Here is an example for the CSV Writer: + + + This works for all writers. There is an easier way for the CSV Writer: + On the Settings tab there is a small button named '*v=?*'. Clicking on + this button opens a dialog where the flow variable for the file name + can directly be specified by using 'Create Variable': + + + + + + +2. In the registration node, the flow variable specified in the first + step has to be chosen as the file variable: + + + + + +### openBIS Data Set Registration (URI Port) + +This node allows you to register a file as a data set. The file to be +registered is the first one in the list of URIs of the port object of +type `org.knime.core.data.uri.URIPortObject`. The user has to specify +owner type and data set type in the node settings dialog. + +The owner identifier (which is either a data set code, an experiment +identifier, or a sample identifier depending on the chosen owner type) +can be chosen via a chooser dialog. If the owner field is empty, one of +the flow variables `openbis.DATA_SET`, `openbis.EXPERIMENT`, or +`openbis.SAMPLE` will be used. + +### openBIS Aggregation Service Report Reader + +This node allows you to get an [aggregation +service](/display/openBISDoc2010/Reporting+Plugins) report. Only +aggregation services where the service key starts with `knime-` can be +chosen by the user in the node settings dialog. After the service has +been chosen, the aggregation service will be invoked with the parameter +`_REQUEST_` set to `getParameterDescriptions`. The service has to return +a table where each row defines the name of the parameter and optionally +its type. This is used to create an appropriate form in the node +settings dialog. The values specified by the user will be used to invoke +the aggregation service when the node is executed. The result will be +available as a KNIME table. See also section [KNIME Aggregation Service +Specifications](#openBISKNIMENodes-KNIMEAggregationServiceSpecifications). + +### openBIS Aggregated Data File Importer + +This node allows you to invoke an [aggregation +service](/display/openBISDoc2010/Reporting+Plugins) which returns the name +of a file in the session workspace, which will be downloaded and made +available for nodes with input ports of type +`org.knime.core.data.uri.URIPortObject`. Such nodes exist in +GenericKnimeNodes of the Community Nodes. Also 'openBIS Data Set +Registration (URI Port)' is such a node. + +Only aggregation services where the service key starts +with `knime-file-` can be chosen by the user in the node settings +dialog. The communication protocol between this node and openBIS is the same as +for nodes of type 'openBIS Aggregation Service Report Reader'.
The only +difference is that the returned table has only one row with one cell +which contains the file name. + +KNIME Aggregation Service Specifications +---------------------------------------- + +Nodes of type 'openBIS Aggregation Service Report Reader' and 'openBIS +Aggregated Data File Importer' rely on [aggregation +services](/display/openBISDoc2010/Reporting+Plugins) which follow a +certain protocol. In order to distinguish these services from other +aggregation services, the service key (i.e. [core +plugins](/display/openBISDoc2010/Core+Plugins) ID) has to start +with `knime-`. The specifications of such services are the following: + +1. If there is a parameter `_REQUEST_` with + value `getParameterDescriptions`, descriptions of all parameters will + be returned in the form specified as follows: + - The table has the columns `name` and `type`. + - Each row has a non-empty unique value of column `name`. It + specifies the name of the parameter. It is also shown in the node + settings dialog. + - The type column contains either an empty string or `VARCHAR`, + `VOCABULARY`, `EXPERIMENT`, `SAMPLE`, or `DATA_SET`. The default + type is `VARCHAR`, which is represented in the node settings + dialog by a single-line text field. The types `EXPERIMENT`, + `SAMPLE`, and `DATA_SET` are also single-line text fields with an + additional button to open an appropriate chooser. + - The type `VOCABULARY` isn't useful without a list of terms in + the following form: `VOCABULARY:<term 1>, <term 2>, ...`. + Example: `VOCABULARY:Strong, Medium, Weak` +2. If there is no parameter `_REQUEST_` or its value + isn't `getParameterDescriptions`, the aggregation service can assume + that all parameters as defined by the parameters description are + present. Some of them might have empty strings as values. +3. An exception should be returned as a table with five columns where + the first column is `_EXCEPTION_`. If such a table is returned, an + exception with stack trace will be created and thrown in KNIME. It + will appear in the KNIME log. For each row, either the first cell isn't + empty or the four other cells are not empty. In the first case, the + value of the first column is of the form <exception + class>:<exception message>. If the first column is empty, + the row represents a stack trace entry where the other columns are + interpreted as class name, method name, file name, and line number. + +In order to simplify KNIME aggregation services, a Helper API in Java is +available: +[openbis-knime-server.jar](/download/attachments/53746033/openbis-knime-server.jar?version=1&modificationDate=1601541485341&api=v2). +It should be added to the openBIS installation in the +folder `<installation folder>/servers/datastore_server/ext-lib`. + +### KNIME Aggregation Service Helper API + +The helper API contains the two +classes `ch.systemsx.cisd.openbis.knime.server.AggregationCommand` +and `ch.systemsx.cisd.openbis.knime.server.AggregationFileCommand`, which +should be extended when writing an aggregation service for nodes of type +'openBIS Aggregation Service Report Reader' and 'openBIS Aggregated Data +File Importer', respectively. + +The subclasses should override the method `defineParameters()`. Its +argument is a `ParameterDescriptionsBuilder` which simplifies creation +of parameter descriptions. + +If `AggregationCommand`/`AggregationFileCommand` is subclassed, the +method `aggregate()/createFile()` should be overridden.
The +`aggregate()` method gets the original arguments, which are the +parameters binding map and the `ISimpleTableModelBuilderAdaptor`. The +`createFile()` method gets only the parameters binding map. It returns +the name of the file in the session workspace. + +The aggregation service should instantiate the subclass and +invoke `handleRequest()` with the parameters binding map and the table +model builder adaptor. + +The `ParameterDescriptionsBuilder` has the method `parameter()`. It +creates a `ParameterDescriptionBuilder` based on the specified parameter +name. The `ParameterDescriptionBuilder` has the +methods `text()`, `vocabulary()`, `experiment()`, `sample()`, +`dataSet()` which specify the parameter type. Only `vocabulary()` has an +argument: the string array of vocabulary terms. + +#### Example for an Aggregation Service Report Reader + + from ch.systemsx.cisd.openbis.knime.server import AggregationCommand + from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchCriteria + from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto import SearchSubCriteria + from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto.SearchCriteria import MatchClause + from ch.systemsx.cisd.openbis.generic.shared.api.v1.dto.SearchCriteria import MatchClauseAttribute + EXPERIMENT = 'Experiment' + DATA_SET_COLUMN = 'Data Set' + PATH_COLUMN = 'Path' + SIZE_COLUMN = 'Size' + # Recursively walk a data set's file hierarchy and add one table row per file + def scan(tableBuilder, dataSetCode, node): + if node.isDirectory(): + for child in node.childNodes: + scan(tableBuilder, dataSetCode, child) + else: + row = tableBuilder.addRow() + row.setCell(DATA_SET_COLUMN, dataSetCode) + row.setCell(PATH_COLUMN, node.relativePath) + row.setCell(SIZE_COLUMN, node.fileLength) + class MyAggregationCommand(AggregationCommand): + def defineParameters(self, builder): + builder.parameter(EXPERIMENT).experiment() + + def aggregate(self, parameters, tableBuilder): + experiment = searchService.getExperiment(parameters.get(EXPERIMENT)) + searchCriteria = SearchCriteria() + subCriteria = SearchCriteria() + subCriteria.addMatchClause(MatchClause.createAttributeMatch(MatchClauseAttribute.PERM_ID, experiment.permId)) + searchCriteria.addSubCriteria(SearchSubCriteria.createExperimentCriteria(subCriteria)) + dataSets = searchService.searchForDataSets(searchCriteria) + tableBuilder.addHeader(DATA_SET_COLUMN) + tableBuilder.addHeader(PATH_COLUMN) + tableBuilder.addHeader(SIZE_COLUMN) + for dataSet in dataSets: + dataSetCode = dataSet.dataSetCode + content = None  # ensure 'content' is defined for the finally block even if getContent() fails + try: + content = contentProvider.getContent(dataSetCode) + scan(tableBuilder, dataSetCode, content.rootNode) + finally: + if content != None: + content.close() + + def aggregate(parameters, tableBuilder): + MyAggregationCommand().handleRequest(parameters, tableBuilder) + +#### Example for an Aggregated Data File Importer + + + + import os.path + from java.util import Date + from ch.systemsx.cisd.openbis.knime.server import AggregationFileCommand + + class MyAggregationFileCommand(AggregationFileCommand): + def defineParameters(self, builder): + builder.parameter('Greeting Type').vocabulary(['Hi', 'Hello']) + builder.parameter('Name') + builder.parameter('Sample').sample() + + def createFile(self, parameters): + sessionWorkspace = sessionWorkspaceProvider.getSessionWorkspace() + filename = "report.txt" + output = open(os.path.join(sessionWorkspace.getAbsolutePath(), filename), "w") + name = parameters.get('Name') + sample = searchService.getSample(parameters.get('Sample')) + output.write(str(parameters.get('Greeting Type')) + " " + str(name) + "!\n\n" + 
Date().toString() + "\n") + output.write(sample.getSampleType()) + output.close() + return filename + + def aggregate(parameters, tableBuilder): + MyAggregationFileCommand().handleRequest(parameters, tableBuilder) \ No newline at end of file diff --git a/server-application-server/source/java/ch/systemsx/cisd/openbis/generic/server/task/UserManagementMaintenanceTask.java b/server-application-server/source/java/ch/systemsx/cisd/openbis/generic/server/task/UserManagementMaintenanceTask.java index 5f803c8535076b8f54349a45d0156f067fa28e3b..1d88eb5337335a626f2390c5ab9be78c46ee6fff 100644 --- a/server-application-server/source/java/ch/systemsx/cisd/openbis/generic/server/task/UserManagementMaintenanceTask.java +++ b/server-application-server/source/java/ch/systemsx/cisd/openbis/generic/server/task/UserManagementMaintenanceTask.java @@ -267,6 +267,7 @@ public class UserManagementMaintenanceTask extends AbstractGroupMaintenanceTask private UserManager createUserManager(UserManagerConfig config, Log4jSimpleLogger logger, UserManagerReport report) { UserManager userManager = createUserManager(logger, report); + userManager.setReuseHomeSpace(config.getReuseHomeSpace()); userManager.setGlobalSpaces(config.getGlobalSpaces()); userManager.setInstanceAdmins(config.getInstanceAdmins()); try diff --git a/server-application-server/source/java/ch/systemsx/cisd/openbis/generic/server/task/UserManager.java b/server-application-server/source/java/ch/systemsx/cisd/openbis/generic/server/task/UserManager.java index f99ebb49d70ad1f1ecff754de36cc720bd9042fd..9b7ee1a8a886d5af5a7de832ab94d0fa28c5e309 100644 --- a/server-application-server/source/java/ch/systemsx/cisd/openbis/generic/server/task/UserManager.java +++ b/server-application-server/source/java/ch/systemsx/cisd/openbis/generic/server/task/UserManager.java @@ -146,6 +146,8 @@ public class UserManager private boolean deactivateUnknownUsers; + private boolean reuseHomeSpace; + public UserManager(IAuthenticationService authenticationService, IApplicationServerInternalApi service, File shareIdsMappingFileOrNull, ISimpleLogger logger, UserManagerReport report) { @@ -1023,10 +1025,13 @@ public class UserManager private SpacePermId createUserSpace(Context context, String groupCode, String userId) { String userSpaceCode = createCommonSpaceCode(groupCode, userId.toUpperCase()); - int n = context.getCurrentState().getNumberOfSpacesStartingWith(userSpaceCode); - if (n > 0) + if(!reuseHomeSpace) { - userSpaceCode += "_" + (n + 1); + int n = context.getCurrentState().getNumberOfSpacesStartingWith(userSpaceCode); + if (n > 0) + { + userSpaceCode += "_" + (n + 1); + } } return createSpace(context, userSpaceCode); } @@ -1128,6 +1133,11 @@ public class UserManager context.getReport().assignRoleTo(groupId, role, spaceId); } + public void setReuseHomeSpace(boolean reuseHomeSpace) + { + this.reuseHomeSpace = reuseHomeSpace; + } + private static final class CurrentState { private Map<String, AuthorizationGroup> groupsByCode = new TreeMap<>(); diff --git a/server-application-server/source/java/ch/systemsx/cisd/openbis/generic/server/task/UserManagerConfig.java b/server-application-server/source/java/ch/systemsx/cisd/openbis/generic/server/task/UserManagerConfig.java index acfb0d05c2845b9e23d3099a4dbd5990b5dc2d42..fd26889514813c38570f33e9c58c581b7420eb61 100644 --- a/server-application-server/source/java/ch/systemsx/cisd/openbis/generic/server/task/UserManagerConfig.java +++ b/server-application-server/source/java/ch/systemsx/cisd/openbis/generic/server/task/UserManagerConfig.java 
@@ -24,6 +24,8 @@ import ch.ethz.sis.openbis.generic.asapi.v3.dto.roleassignment.Role; class UserManagerConfig { + private Boolean reuseHomeSpace = true; + private List<String> globalSpaces = new ArrayList<>(); private Map<Role, List<String>> commonSpaces = new HashMap<>(); @@ -96,4 +98,13 @@ class UserManagerConfig this.instanceAdmins = instanceAdmins; } + public boolean getReuseHomeSpace() + { + return reuseHomeSpace; + } + + public void setReuseHomeSpace(boolean reuseHomeSpace) + { + this.reuseHomeSpace = reuseHomeSpace; + } } \ No newline at end of file diff --git a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/DataGrid/ExperimentDataGridUtil.js b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/DataGrid/ExperimentDataGridUtil.js index 1fd3d1fb6289315192721b469840a2f2cc6b7bb6..4fdf4c34570d9df80c9449251010270e8e526a82 100644 --- a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/DataGrid/ExperimentDataGridUtil.js +++ b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/DataGrid/ExperimentDataGridUtil.js @@ -1,5 +1,5 @@ var ExperimentDataGridUtil = new function() { - this.getExperimentDataGrid = function(entities, rowClick, heightPercentage) { + this.getExperimentDataGrid = function(entities, rowClick, multiselectable, heightPercentage) { //Fill Columns model var columns = []; @@ -129,6 +129,7 @@ var ExperimentDataGridUtil = new function() { perm_id: entity.permId, type_perm_id: entity.experimentTypeCode }, + '$object' : entity, 'code' : entity.code, 'identifier' : entity.identifier, 'permId' : entity.permId, @@ -174,7 +175,7 @@ var ExperimentDataGridUtil = new function() { //Create and return a data grid controller var configKey = "EXPERIMENT_TABLE"; - var dataGridController = new DataGridController(null, columns, [], dynamicColumnsFunc, getDataList, rowClick, false, configKey, null, { + var dataGridController = new DataGridController(null, columns, [], dynamicColumnsFunc, getDataList, rowClick, false, configKey, multiselectable, { fileFormat: DataGridExportOptions.FILE_FORMAT.XLS, filePrefix: 'collections', fileContent: DataGridExportOptions.FILE_CONTENT.ENTITIES diff --git a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ExperimentTable/ExperimentTableController.js b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ExperimentTable/ExperimentTableController.js index 7fd40d151197adc8fd7d01c3c61cc1798e165ea6..a195ec06e61ad69db09ddd39f537a86350e99a4d 100644 --- a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ExperimentTable/ExperimentTableController.js +++ b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ExperimentTable/ExperimentTableController.js @@ -14,7 +14,7 @@ * limitations under the License. 
diff --git a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ExperimentTable/ExperimentTableController.js b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ExperimentTable/ExperimentTableController.js
index 7fd40d151197adc8fd7d01c3c61cc1798e165ea6..a195ec06e61ad69db09ddd39f537a86350e99a4d 100644
--- a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ExperimentTable/ExperimentTableController.js
+++ b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ExperimentTable/ExperimentTableController.js
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
-function ExperimentTableController(parentController, title, project, showInProjectOverview) {
+function ExperimentTableController(parentController, title, project, showInProjectOverview, extraOptions) {
     this._parentController = parentController;
     this._experimentTableModel = new ExperimentTableModel(title, project, showInProjectOverview);
     this._experimentTableView = new ExperimentTableView(this, this._experimentTableModel);
@@ -66,8 +66,9 @@ function ExperimentTableController(parentController, title, project, showInProje
             var rowClick = null;
             //Create and display table
-            this._dataGridController = ExperimentDataGridUtil.getExperimentDataGrid(experiments, rowClick, 50);
-            this._dataGridController.init(this._experimentTableView.getTableContainer());
+            var multiselectable = extraOptions != null;
+            this._dataGridController = ExperimentDataGridUtil.getExperimentDataGrid(experiments, rowClick, multiselectable, 50);
+            this._dataGridController.init(this._experimentTableView.getTableContainer(), extraOptions);
         }
 
     this.refresh = function()
diff --git a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ExperimentTable/ExperimentTableView.js b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ExperimentTable/ExperimentTableView.js
index 48099ce6710ab881caab64ec3b1b16d2749864b0..9c4688e86915dbbcc73f75b9ab89cc9f1299fa1a 100644
--- a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ExperimentTable/ExperimentTableView.js
+++ b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ExperimentTable/ExperimentTableView.js
@@ -30,10 +30,6 @@ function ExperimentTableView(experimentTableController, experimentTableModel) {
             $container.append($title);
         }
 
-        var toolbarModel = [];
-        toolbarModel.push({ component : this._showExperimentFromOverviewDropdown(), tooltip: null });
-
-        $container.append(FormUtil.getToolbar(toolbarModel));
         $container.append(this._tableContainer);
     }
 
@@ -47,29 +43,4 @@ function ExperimentTableView(experimentTableController, experimentTableModel) {
     this.getTypeSelector = function() {
         return this.typeSelector;
     }
-
-    this._showExperimentFromOverviewDropdown = function() {
-        var _this = this;
-        var expDropModel = [];
-        var kindName = ELNDictionary.getExperimentsDualName();
-        expDropModel = [{value : "OVERVIEW", label : "Show only overview " + kindName, selected : this._experimentTableModel.showInProjectOverview },
-                        {value : "ALL", label : "Show all " + kindName, selected : !this._experimentTableModel.showInProjectOverview }];
-
-
-        var $experimentDropdown = FormUtil.getDropdown(expDropModel, "Select what " + kindName + " to show");
-        $experimentDropdown.attr("id", "what-experiments-drop-down");
-
-        $experimentDropdown.change(function() {
-            switch($(this).val()){
-                case "OVERVIEW":
-                    _this._experimentTableModel.showInProjectOverview = true;
-                    break;
-                case "ALL":
-                    _this._experimentTableModel.showInProjectOverview = false;
-                    break;
-            }
-            _this._experimentTableController.init(_this._$container);
-        });
-        return $("<span>").append($experimentDropdown);
-    }
 }
\ No newline at end of file
diff --git a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ProjectForm/ProjectFormView.js b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ProjectForm/ProjectFormView.js
index 75e43e244871e96de6a127ccbcffe4ee3c70a6a0..16f3a918017502214c51a1c398222895908b9c0f 100644
--- a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ProjectForm/ProjectFormView.js
+++ b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ProjectForm/ProjectFormView.js
@@ -177,8 +177,8 @@ function ProjectFormView(projectFormController, projectFormModel) {
         }
 
         if (this._projectFormModel.mode !== FormMode.CREATE && !isInventoryProject) {
+            $formColumn.append(this._createOverviewSection(projectIdentifier, hideShowOptionsModel));
             $formColumn.append(this._createExperimentsSection(projectIdentifier, hideShowOptionsModel));
-            $formColumn.append(this._createSamplesSection(hideShowOptionsModel));
         }
 
         FormUtil.addOptionsToToolbar(toolbarModel, dropdownOptionsModel, hideShowOptionsModel, "PROJECT-VIEW");
@@ -284,7 +284,43 @@ function ProjectFormView(projectFormController, projectFormModel) {
         $description.hide();
         return $description;
     }
-
+
+    this._createOverviewSection = function(projectIdentifier, hideShowOptionsModel) {
+        var $overview = $("<div>", { id : "project-overview" });
+        $overview.append($("<legend>").append("Overview"));
+        var $overviewContainer = $("<div>");
+        $overview.append($overviewContainer);
+
+        var $experimentsOverview = $("<div>");
+        $overviewContainer.append($("<h4>").append(ELNDictionary.ExperimentsELN));
+        $overviewContainer.append($experimentsOverview);
+
+        var $samplesOverview = $("<div>");
+        var $header = $("<h4>").append(ELNDictionary.Samples);
+        $overviewContainer.append($header);
+        $overviewContainer.append($samplesOverview);
+
+        var experimentTableController = new ExperimentTableController(this._projectFormController, null, jQuery.extend(true, {}, this._projectFormModel.project), true);
+        experimentTableController.init($experimentsOverview);
+        var sampleTableController = new SampleTableController(this._projectFormController, null, null, this._projectFormModel.project.permId, true, null, 40);
+        var views = {
+            header : $header,
+            content : $samplesOverview
+        }
+        sampleTableController.init(views);
+
+        $overview.hide();
+        hideShowOptionsModel.push({
+            label : "Overview",
+            section : "#project-overview",
+            beforeShowingAction : function() {
+                experimentTableController.refresh();
+                sampleTableController.refresh();
+            }
+        });
+        return $overview;
+    }
+
     this._createExperimentsSection = function(projectIdentifier, hideShowOptionsModel) {
         var entityKindName = ELNDictionary.getExperimentsDualName();
         var $experiments = $("<div>", { id : "project-experiments" });
@@ -292,7 +328,24 @@
         $experiments.append($("<legend>").append(entityKindName));
         $experiments.append($experimentsContainer);
 
-        var experimentTableController = new ExperimentTableController(this._projectFormController, null, jQuery.extend(true, {}, this._projectFormModel.project), true);
+        var _this = this;
+        var extraOptions = [];
+        extraOptions.push({ name : "Delete", action : function(selected) {
+            if(selected != undefined && selected.length == 0){
+                Util.showUserError("Please select at least one " + ELNDictionary.experimentELN + " to delete!");
+            } else {
+                _this._deleteExperiments(selected.map(e => e.permId));
+            }
+        }});
+        extraOptions.push({ name : "Move", action : function(selected) {
+            if(selected != undefined && selected.length == 0){
+                Util.showUserError("Please select at least one " + ELNDictionary.experimentELN + " to move!");
+            } else {
+                _this._moveExperiments(selected.map(s => s.permId));
+            }
+        }});
+        var experimentTableController = new ExperimentTableController(this._projectFormController, null, jQuery.extend(true, {}, this._projectFormModel.project),
+            false, extraOptions);
         experimentTableController.init($experimentsContainer);
 
         $experiments.hide();
         hideShowOptionsModel.push({
@@ -304,34 +357,100 @@
         });
         return $experiments;
     }
-
-    this._createSamplesSection = function(hideShowOptionsModel) {
-        var entityKindName = "" + ELNDictionary.Samples + "";
-
-        var $samples = $("<div>", { id : "project-samples" });
-        var $experimentsContainer = $("<div>");
-        $samples.append($("<legend>").append(entityKindName));
-        var $samplesContainerHeader = $("<div>");
-        $samples.append($samplesContainerHeader);
-        var $samplesContainer = $("<div>");
-        $samples.append($samplesContainer);
-
-        var views = {
-            header : $samplesContainerHeader,
-            content : $samplesContainer
-        }
-        var sampleTableController = new SampleTableController(this._projectFormController, null, null, this._projectFormModel.project.permId, true, null, 40);
-        sampleTableController.init(views);
-        $samples.hide();
-        hideShowOptionsModel.push({
-            label : entityKindName,
-            section : "#project-samples",
-            beforeShowingAction : function() {
-                sampleTableController.refresh();
-            }
-        });
-        return $samples;
-    }
+
+    this._deleteExperiments = function(permIds) {
+        var _this = this;
+        var $component = $("<div>");
+        var warningText = "Also all " + ELNDictionary.samples + " and data sets of the selected " +
+                permIds.length + " " + ELNDictionary.getExperimentsDualName() + " will be deleted.";
+        var $warning = FormUtil.getFieldForLabelWithText(null, warningText);
+        $warning.css('color', FormUtil.warningColor);
+        $component.append($warning);
+        var modalView = new DeleteEntityController(function(reason) {
+            require(["as/dto/experiment/id/ExperimentPermId", "as/dto/experiment/delete/ExperimentDeletionOptions"],
+                function(ExperimentPermId, ExperimentDeletionOptions) {
+                    var experimentIds = permIds.map(permId => new ExperimentPermId(permId));
+                    var deletionOptions = new ExperimentDeletionOptions();
+                    deletionOptions.setReason(reason);
+                    mainController.openbisV3.deleteExperiments(experimentIds, deletionOptions).done(function() {
+                        Util.showSuccess("All " + permIds.length + " " + ELNDictionary.getExperimentsDualName() +
+                                " are moved to trashcan", function() {
+                            permIds.forEach(function(permId) {
+                                mainController.sideMenu.deleteNodeByEntityPermId("EXPERIMENT", permId, false);
+                            });
+                            mainController.refreshView();
+                        });
+                    }).fail(function(error) {
+                        Util.showFailedServerCallError(error);
+                        Util.unblockUI();
+                    });
+                })
+        }, true, null, $component);
+        modalView.init();
+    }
+
+    this._moveExperiments = function(permIds) {
+        var _this = this;
+        var $window = $('<form>', { 'action' : 'javascript:void(0);' });
+        var project = null;
+        $window.submit(function() {
+            Util.unblockUI();
+            require(["as/dto/experiment/id/ExperimentPermId", "as/dto/experiment/update/ExperimentUpdate"],
+                function(ExperimentPermId, ExperimentUpdate) {
+                    var projectIdentifier = project.getIdentifier();
+                    var updates = [];
+                    permIds.forEach(function(permId) {
+                        var update = new ExperimentUpdate();
+                        update.setExperimentId(new ExperimentPermId(permId));
+                        update.setProjectId(projectIdentifier);
+                        updates.push(update);
+                    });
+                    mainController.openbisV3.updateExperiments(updates).done(function() {
+                        Util.showSuccess("Moved successfully", function() {
+                            var projectPermId = project.getPermId().getPermId();
+                            mainController.sideMenu.refreshNodeParentByPermId("PROJECT", projectPermId, true);
+                            permIds.forEach(function(permId) {
+                                mainController.sideMenu.deleteNodeByEntityPermId("EXPERIMENT", permId, false);
+                            });
+                            mainController.refreshView();
+                        });
+                    }).fail(function(error) {
+                        Util.showFailedServerCallError(error);
+                        Util.unblockUI();
+                    });
+                });
+        });
+
+        $window.append($('<legend>').append("Moving " + permIds.length + " selected " +
+                ELNDictionary.getExperimentsDualName() + " to:"));
+        var $searchBox = $('<div>');
+        $window.append($searchBox);
+        var searchDropdown = new AdvancedEntitySearchDropdown(false, true, "search project to move to",
+            false, false, false, true, false);
+        var $btnAccept = $('<input>', { 'type': 'submit', 'class' : 'btn btn-primary', 'value' : 'Accept' });
+        var $btnCancel = $('<a>', { 'class' : 'btn btn-default' }).append('Cancel');
+        $btnCancel.click(function() {
+            Util.unblockUI();
+        });
+
+        $window.append('<br>').append($btnAccept).append(' ').append($btnCancel);
+        searchDropdown.onChange(function(selected) {
+            project = selected[0];
+        });
+
+        searchDropdown.init($searchBox);
+
+        var css = {
+            'text-align' : 'left',
+            'top' : '15%',
+            'width' : '70%',
+            'left' : '15%',
+            'right' : '20%',
+            'overflow' : 'hidden'
+        };
+        Util.blockUI($window, css);
+    }
 
     this._projectDeletionAction = function() {
         var _this = this;
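Editor note: _moveExperiments batches one ExperimentUpdate per selected permId into a single updateExperiments call. A rough Python sketch of that batching, with plain dicts standing in for the V3 DTOs:

```python
# One update per selected experiment, all sent in a single server call.
def build_move_updates(perm_ids, project_identifier):
    updates = []
    for perm_id in perm_ids:
        updates.append({
            "experimentId": perm_id,          # stands in for ExperimentPermId
            "projectId": project_identifier,  # stands in for the project id DTO
        })
    return updates

assert len(build_move_updates(["PERM1", "PERM2"], "/SPACE/PROJ")) == 2
```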
diff --git a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/SampleForm/SampleFormController.js b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/SampleForm/SampleFormController.js
index e2a3bdb0221927340c804046326a284d05d0f908..6d80b209bcdfb8eef185e03a821314c445fa4754 100644
--- a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/SampleForm/SampleFormController.js
+++ b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/SampleForm/SampleFormController.js
@@ -513,7 +513,16 @@ function SampleFormController(mainController, mode, sample, paginationInfo) {
             if (object.setSampleId) {
                 object.setSampleId(new SampleIdentifier(sampleIdentifier + "/" + parameters["sampleCode"]));
             }
-            object.setProperties(parameters["sampleProperties"]);
+            var sampleProperties = parameters["sampleProperties"];
+            var properties = {};
+            Object.keys(sampleProperties).forEach(function(key) {
+                var sampleProperty = sampleProperties[key];
+                if (sampleProperty == "") {
+                    sampleProperty = null;
+                }
+                properties[key] = sampleProperty;
+            });
+            object.setProperties(properties);
         }
         var createRelatedSampleCreation = function(definition) {
             var creation = new SampleCreation();
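Editor note: the property loop above normalizes empty strings to null so that openBIS clears those properties instead of storing empty values. The same normalization in plain Python:

```python
# Empty-string property values become None (null) before the update is sent.
def normalize_properties(sample_properties):
    properties = {}
    for key, value in sample_properties.items():
        properties[key] = None if value == "" else value
    return properties

assert normalize_properties({"$NAME": "", "NOTES": "ok"}) == {"$NAME": None, "NOTES": "ok"}
```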
diff --git a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/SampleTable/widgets/MoveSampleView.js b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/SampleTable/widgets/MoveSampleView.js
index b4e465ac6932b16691251142b60f8341a69cb5bb..7a02190b67d4e52d0214716ffeedaee895161552 100644
--- a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/SampleTable/widgets/MoveSampleView.js
+++ b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/SampleTable/widgets/MoveSampleView.js
@@ -99,7 +99,7 @@ function MoveSampleView(moveSampleController, moveSampleModel) {
             //Attach Fields
             $experimentSection.append(FormUtil.getFieldForComponentWithLabel($dropdown, "Future Project"))
                     .append(FormUtil.getFieldForComponentWithLabel($expTypeField, "Future " + ELNDictionary.getExperimentDualName() + " Type"))
-                    .append(FormUtil.getFieldForComponentWithLabel($expNameField, "Future " + ELNDictionary.getExperimentDualName() + " Name"));
+                    .append(FormUtil.getFieldForComponentWithLabel($expNameField, "Future " + ELNDictionary.getExperimentDualName() + " Code"));
         });
     }
diff --git a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ZenodoExport/ZenodoExportController.js b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ZenodoExport/ZenodoExportController.js
index 5edaaa22b50b35a8c7769a4d93ff209f2a998f68..0115fca2671e9ccae49fa81246981ee3dde5c14b 100644
--- a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ZenodoExport/ZenodoExportController.js
+++ b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ZenodoExport/ZenodoExportController.js
@@ -53,7 +53,7 @@ function ZenodoExportController(parentController) {
             Util.showInfo('Please enter a title.');
         } else if (!this.isValid(toExport)) {
             Util.showInfo('Not only spaces and the root should be selected. It will result in an empty export file.');
-        } else if (checkedGroups.length === 0) {
+        } else if (groupRows.length > 0 && checkedGroups.length === 0) {
             Util.showInfo('At least one group should be selected.');
         } else {
             Util.blockUI();
diff --git a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ZenodoExport/ZenodoExportView.js b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ZenodoExport/ZenodoExportView.js
index c489381a396ba3668a283fa8b06f1d1eb6eff636..b6ad3abc833af08376f0acbccb533939afd85508 100644
--- a/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ZenodoExport/ZenodoExportView.js
+++ b/ui-eln-lims/src/core-plugins/eln-lims/1/as/webapps/eln-lims/html/js/views/ZenodoExport/ZenodoExportView.js
@@ -49,14 +49,15 @@ function ZenodoExportView(exportController, exportModel) {
         $container.append($form);
 
         exportModel.tree = TreeUtil.getCompleteTree($tree);
-
-        var $formTitle = $('<h2>').append('Zenodo Export Builder');
-        $header.append($formTitle);
+        exportModel.tableModel = ExportUtil.getTableModel();
 
         this.paintTitleTextBox($container);
-        ExportUtil.paintGroupCheckboxes($container, "zenodo-groups");
+        if (exportModel.tableModel.getValues().length > 0) {
+            ExportUtil.paintGroupCheckboxes($container, "zenodo-groups");
+        }
 
-        exportModel.tableModel = ExportUtil.getTableModel();
+        var $formTitle = $('<h2>').append('Zenodo Export Builder');
+        $header.append($formTitle);
 
         var $exportButton = $('<input>', { 'type': 'submit', 'class': 'btn btn-primary', 'value': 'Export Selected',
             'onClick': '$("form[name=\'zenodoExportForm\']").submit()'});
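Editor note: the corrected Zenodo guard only demands a group selection when the instance defines groups at all; previously an instance without groups could never pass the check. As a small truth-table sketch:

```python
# Group selection is required only if group rows exist and none are checked.
def missing_group_selection(group_rows, checked_groups):
    return len(group_rows) > 0 and len(checked_groups) == 0

assert missing_group_selection([], []) is False         # no groups configured
assert missing_group_selection(["g1"], []) is True      # groups exist, none picked
assert missing_group_selection(["g1"], ["g1"]) is False
```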
diff --git a/ui-eln-lims/src/core-plugins/eln-lims/1/dss/drop-boxes/eln-lims-dropbox/eln-lims-dropbox.py b/ui-eln-lims/src/core-plugins/eln-lims/1/dss/drop-boxes/eln-lims-dropbox/eln-lims-dropbox.py
index 95f3762335386ab4446357123d92637bf0c111b6..129057a1740f1375b96666b234de8c991b7bf399 100644
--- a/ui-eln-lims/src/core-plugins/eln-lims/1/dss/drop-boxes/eln-lims-dropbox/eln-lims-dropbox.py
+++ b/ui-eln-lims/src/core-plugins/eln-lims/1/dss/drop-boxes/eln-lims-dropbox/eln-lims-dropbox.py
@@ -1,18 +1,16 @@
 import re
-import uuid
-
 from ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.fetchoptions import ExperimentFetchOptions
 from ch.ethz.sis.openbis.generic.asapi.v3.dto.experiment.id import ExperimentIdentifier
 from ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.fetchoptions import SampleFetchOptions
 from ch.ethz.sis.openbis.generic.asapi.v3.dto.sample.id import SampleIdentifier
 from ch.systemsx.cisd.common.mail import EMailAddress
-from ch.systemsx.cisd.openbis.generic.client.web.client.exception import UserFailureException
 from ch.systemsx.cisd.openbis.dss.generic.shared import ServiceProvider
+from ch.systemsx.cisd.openbis.generic.client.web.client.exception import UserFailureException
 from java.io import File
 from java.nio.file import Files, Paths, StandardCopyOption
 from java.util import List
-from org.json import JSONObject
 from org.apache.commons.io import FileUtils
+from org.json import JSONObject
 
 INVALID_FORMAT_ERROR_MESSAGE = "Invalid format for the folder name, should follow the pattern <ENTITY_KIND>+<SPACE_CODE>+<PROJECT_CODE>+[<EXPERIMENT_CODE>|<SAMPLE_CODE>]+<OPTIONAL_DATASET_TYPE>+<OPTIONAL_NAME>";
 ILLEGAL_CHARACTERS_IN_FILE_NAMES_ERROR_MESSAGE = "Directory or its content contain illegal characters: \"', ~, $, %\"";
@@ -25,174 +23,184 @@ EXPERIMENT_MISSING_ERROR_MESSAGE = "Experiment not found";
 NAME_PROPERTY_SET_IN_TWO_PLACES_ERROR_MESSAGE = "$NAME property specified twice, it should just be in either folder name or metadata.json"
 EMAIL_SUBJECT = "ELN LIMS Dropbox Error";
 ILLEGAL_FILES = ["desktop.ini", "IconCache.db", "thumbs.db"];
-ILLEGAL_FILES_ERROR_MESSAGE = "Directory or contains illegal files: " + str(ILLEGAL_FILES);
-HIDDEN_FILES_ERROR_MESSAGE = "Directory or contains hidden files: files starting with '.'";
+ILLEGAL_FILES_ERROR_MESSAGE = "Directory contains illegal files: " + str(ILLEGAL_FILES);
+HIDDEN_FILES_ERROR_MESSAGE = "Directory contains hidden files: files starting with '.'";
+
+errorMessages = []
 
 
 def process(transaction):
     incoming = transaction.getIncoming();
     folderName = incoming.getName();
-
-    if not folderName.startswith('.'):
-        datasetInfo = folderName.split("+");
-        entityKind = None;
-        sample = None;
-        experiment = None;
-        datasetType = None;
-        name = None;
-
-        # Parse entity Kind
-        if len(datasetInfo) >= 1:
-            entityKind = datasetInfo[0];
-        else:
-            raise UserFailureException(INVALID_FORMAT_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_ERROR_MESSAGE);
-
-        v3 = ServiceProvider.getV3ApplicationService();
-        sessionToken = transaction.getOpenBisServiceSessionToken();
-        projectSamplesEnabled = v3.getServerInformation(sessionToken)['project-samples-enabled'] == 'true'
-
-        # Parse entity Kind Format
-        if entityKind == "O":
-            if len(datasetInfo) >= 4 and projectSamplesEnabled:
-                sampleSpace = datasetInfo[1];
-                projectCode = datasetInfo[2];
-                sampleCode = datasetInfo[3];
-
-                emailAddress = getSampleRegistratorsEmail(transaction, sampleSpace, projectCode, sampleCode)
-                sample = transaction.getSample("/" + sampleSpace + "/" + projectCode + "/" + sampleCode);
-                if sample is None:
-                    reportIssue(transaction,
-                                INVALID_FORMAT_ERROR_MESSAGE + ":" + SAMPLE_MISSING_ERROR_MESSAGE,
-                                None);
-                if len(datasetInfo) >= 5:
-                    datasetType = datasetInfo[4];
-                if len(datasetInfo) >= 6:
-                    name = datasetInfo[5];
-                if len(datasetInfo) > 6:
-                    reportIssue(transaction,
-                                INVALID_FORMAT_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE,
-                                emailAddress)
-            elif len(datasetInfo) >= 3 and not projectSamplesEnabled:
-                sampleSpace = datasetInfo[1];
-                sampleCode = datasetInfo[2];
-
-                emailAddress = getSampleRegistratorsEmail(transaction, sampleSpace, None, sampleCode)
-                sample = transaction.getSample("/" + sampleSpace + "/" + sampleCode);
-                if sample is None:
-                    reportIssue(transaction,
-                                INVALID_FORMAT_ERROR_MESSAGE + ":" + SAMPLE_MISSING_ERROR_MESSAGE,
-                                None);
+    emailAddress = None
+
+    try:
+        if not folderName.startswith('.'):
+            datasetInfo = folderName.split("+");
+            entityKind = None;
+            sample = None;
+            experiment = None;
+            datasetType = None;
+            name = None;
+
+            # Parse entity Kind
+            if len(datasetInfo) >= 1:
+                entityKind = datasetInfo[0];
+            else:
+                raise UserFailureException(INVALID_FORMAT_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_ERROR_MESSAGE);
+
+            v3 = ServiceProvider.getV3ApplicationService();
+            sessionToken = transaction.getOpenBisServiceSessionToken();
+            projectSamplesEnabled = v3.getServerInformation(sessionToken)['project-samples-enabled'] == 'true'
+
+            # Parse entity Kind Format
+            if entityKind == "O":
+                if len(datasetInfo) >= 4 and projectSamplesEnabled:
+                    sampleSpace = datasetInfo[1];
+                    projectCode = datasetInfo[2];
+                    sampleCode = datasetInfo[3];
+
+                    emailAddress = getSampleRegistratorsEmail(transaction, sampleSpace, projectCode, sampleCode)
+                    sample = transaction.getSample("/" + sampleSpace + "/" + projectCode + "/" + sampleCode);
+                    if sample is None:
+                        reportIssue(INVALID_FORMAT_ERROR_MESSAGE + ":" + SAMPLE_MISSING_ERROR_MESSAGE)
+                        raise UserFailureException(INVALID_FORMAT_ERROR_MESSAGE + ":" + SAMPLE_MISSING_ERROR_MESSAGE)
+                    if len(datasetInfo) >= 5:
+                        datasetType = datasetInfo[4];
+                    if len(datasetInfo) >= 6:
+                        name = datasetInfo[5];
+                    if len(datasetInfo) > 6:
+                        reportIssue(INVALID_FORMAT_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE)
+                elif len(datasetInfo) >= 3 and not projectSamplesEnabled:
+                    sampleSpace = datasetInfo[1];
+                    sampleCode = datasetInfo[2];
+
+                    emailAddress = getSampleRegistratorsEmail(transaction, sampleSpace, None, sampleCode)
+                    sample = transaction.getSample("/" + sampleSpace + "/" + sampleCode);
+                    if sample is None:
+                        reportIssue(INVALID_FORMAT_ERROR_MESSAGE + ":" + SAMPLE_MISSING_ERROR_MESSAGE)
+                        raise UserFailureException(INVALID_FORMAT_ERROR_MESSAGE + ":" + SAMPLE_MISSING_ERROR_MESSAGE)
+                    if len(datasetInfo) >= 4:
+                        datasetType = datasetInfo[3];
+                    if len(datasetInfo) >= 5:
+                        name = datasetInfo[4];
+                    if len(datasetInfo) > 5:
+                        reportIssue(INVALID_FORMAT_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE)
+                else:
+                    raise UserFailureException(INVALID_FORMAT_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE);
+
+                hiddenFiles = getHiddenFiles(incoming)
+                if hiddenFiles:
+                    reportIssue(HIDDEN_FILES_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE + ":\n" + pathListToStr(hiddenFiles))
+
+                illegalFiles = getIllegalFiles(incoming)
+                if illegalFiles:
+                    reportIssue(ILLEGAL_FILES_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE + ":\n" + pathListToStr(illegalFiles))
+
+                filesWithIllegalCharacters = getFilesWithIllegalCharacters(incoming)
+                if filesWithIllegalCharacters:
+                    reportIssue(ILLEGAL_CHARACTERS_IN_FILE_NAMES_ERROR_MESSAGE + ":" +
+                                FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE + ":\n" + pathListToStr(filesWithIllegalCharacters))
+
+                readOnlyFiles = getReadOnlyFiles(incoming)
+                if readOnlyFiles:
+                    reportIssue(FOLDER_CONTAINS_NON_DELETABLE_FILES_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE + ":\n" + pathListToStr(readOnlyFiles));
+
             if entityKind == "E":
                 if len(datasetInfo) >= 4:
-                    datasetType = datasetInfo[3];
-                if len(datasetInfo) >= 5:
-                    name = datasetInfo[4];
-                if len(datasetInfo) > 5:
-                    reportIssue(transaction,
-                                INVALID_FORMAT_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE,
-                                emailAddress)
+                    experimentSpace = datasetInfo[1];
+                    projectCode = datasetInfo[2];
+                    experimentCode = datasetInfo[3];
+
+                    emailAddress = getExperimentRegistratorsEmail(transaction, experimentSpace, projectCode,
+                                                                  experimentCode);
+                    experiment = transaction.getExperiment("/" + experimentSpace + "/" + projectCode + "/" + experimentCode);
+                    if experiment is None:
+                        reportIssue(INVALID_FORMAT_ERROR_MESSAGE + ":" + EXPERIMENT_MISSING_ERROR_MESSAGE)
+                        raise UserFailureException(INVALID_FORMAT_ERROR_MESSAGE + ":" + EXPERIMENT_MISSING_ERROR_MESSAGE)
+                    if len(datasetInfo) >= 5:
+                        datasetType = datasetInfo[4];
+                    if len(datasetInfo) >= 6:
+                        name = datasetInfo[5];
+                    if len(datasetInfo) > 6:
+                        reportIssue(INVALID_FORMAT_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_EXPERIMENT_ERROR_MESSAGE);
+                else:
+                    raise UserFailureException(INVALID_FORMAT_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_EXPERIMENT_ERROR_MESSAGE);
+
+                hiddenFiles = getHiddenFiles(incoming)
+                if hiddenFiles:
+                    reportIssue(HIDDEN_FILES_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_EXPERIMENT_ERROR_MESSAGE + ":\n" + pathListToStr(hiddenFiles))
+
+                illegalFiles = getIllegalFiles(incoming)
+                if illegalFiles:
+                    reportIssue(ILLEGAL_FILES_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_EXPERIMENT_ERROR_MESSAGE + ":\n" + pathListToStr(illegalFiles))
+
+                filesWithIllegalCharacters = getFilesWithIllegalCharacters(incoming)
+                if filesWithIllegalCharacters:
+                    reportIssue(ILLEGAL_CHARACTERS_IN_FILE_NAMES_ERROR_MESSAGE + ":" +
+                                FAILED_TO_PARSE_EXPERIMENT_ERROR_MESSAGE + ":\n" + pathListToStr(filesWithIllegalCharacters))
+
+                readOnlyFiles = getReadOnlyFiles(incoming)
+                if readOnlyFiles:
+                    reportIssue(FOLDER_CONTAINS_NON_DELETABLE_FILES_ERROR_MESSAGE + ":" +
+                                FAILED_TO_PARSE_EXPERIMENT_ERROR_MESSAGE + ":\n" + pathListToStr(readOnlyFiles))
+
+            # Create dataset
+            dataSet = None;
+            if datasetType is not None:  # Set type if found
+                dataSet = transaction.createNewDataSet(datasetType);
             else:
-                raise UserFailureException(INVALID_FORMAT_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE);
-
-            if hasFolderHiddenFiles(incoming):
-                reportIssue(transaction, HIDDEN_FILES_ERROR_MESSAGE + ":"
-                            + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE, emailAddress);
-            if hasFolderIllegalFiles(incoming):
-                reportIssue(transaction, ILLEGAL_FILES_ERROR_MESSAGE + ":"
-                            + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE, emailAddress);
-            if hasFolderIllegalCharacters(incoming):
-                reportIssue(transaction, ILLEGAL_CHARACTERS_IN_FILE_NAMES_ERROR_MESSAGE + ":"
-                            + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE, emailAddress);
-            if hasFolderReadOnlyFiles(incoming):
-                reportIssue(transaction, FOLDER_CONTAINS_NON_DELETABLE_FILES_ERROR_MESSAGE + ":"
-                            + FAILED_TO_PARSE_SAMPLE_ERROR_MESSAGE, emailAddress);
-        if entityKind == "E":
-            if len(datasetInfo) >= 4:
-                experimentSpace = datasetInfo[1];
-                projectCode = datasetInfo[2];
-                experimentCode = datasetInfo[3];
-
-                emailAddress = getExperimentRegistratorsEmail(transaction, experimentSpace, projectCode,
-                                                              experimentCode);
-                experiment = transaction.getExperiment("/" + experimentSpace + "/" + projectCode + "/" + experimentCode);
-                if experiment is None:
-                    reportIssue(transaction,
-                                INVALID_FORMAT_ERROR_MESSAGE + ":" + EXPERIMENT_MISSING_ERROR_MESSAGE,
-                                None);
-                if len(datasetInfo) >= 5:
-                    datasetType = datasetInfo[4];
-                if len(datasetInfo) >= 6:
-                    name = datasetInfo[5];
-                if len(datasetInfo) > 6:
-                    reportIssue(transaction,
-                                INVALID_FORMAT_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_EXPERIMENT_ERROR_MESSAGE,
-                                emailAddress);
+                dataSet = transaction.createNewDataSet();
+
+            if name is not None:
+                dataSet.setPropertyValue("$NAME", name);  # Set name if found
+
+            # Set sample or experiment
+            if sample is not None:
+                dataSet.setSample(sample);
             else:
-                raise UserFailureException(INVALID_FORMAT_ERROR_MESSAGE + ":" + FAILED_TO_PARSE_EXPERIMENT_ERROR_MESSAGE);
-
-            if hasFolderHiddenFiles(incoming):
-                reportIssue(transaction, HIDDEN_FILES_ERROR_MESSAGE + ":"
-                            + FAILED_TO_PARSE_EXPERIMENT_ERROR_MESSAGE, emailAddress);
-            if hasFolderIllegalFiles(incoming):
-                reportIssue(transaction, ILLEGAL_FILES_ERROR_MESSAGE + ":"
-                            + FAILED_TO_PARSE_EXPERIMENT_ERROR_MESSAGE, emailAddress);
-            if hasFolderIllegalCharacters(incoming):
-                reportIssue(transaction, ILLEGAL_CHARACTERS_IN_FILE_NAMES_ERROR_MESSAGE + ":"
-                            + FAILED_TO_PARSE_EXPERIMENT_ERROR_MESSAGE, emailAddress);
-            if hasFolderReadOnlyFiles(incoming):
-                reportIssue(transaction, FOLDER_CONTAINS_NON_DELETABLE_FILES_ERROR_MESSAGE + ":"
-                            + FAILED_TO_PARSE_EXPERIMENT_ERROR_MESSAGE, emailAddress);
-
-        # Create dataset
-        dataSet = None;
-        if datasetType is not None:  # Set type if found
-            dataSet = transaction.createNewDataSet(datasetType);
-        else:
-            dataSet = transaction.createNewDataSet();
-
-        if name is not None:
-            dataSet.setPropertyValue("$NAME", name);  # Set name if found
-
-        # Set sample or experiment
-        if sample is not None:
-            dataSet.setSample(sample);
-        else:
-            dataSet.setExperiment(experiment);
-
-        # Move folder to dataset
-        filesInFolder = incoming.listFiles();
-
-        itemsInFolder = 0;
-        datasetItem = None;
-        for item in filesInFolder:
-            fileName = item.getName()
-            if fileName == "metadata.json":
-                root = JSONObject(FileUtils.readFileToString(item, "UTF-8"))
-                properties = root.get("properties")
-                for propertyKey in properties.keys():
-                    if propertyKey == "$NAME" and name is not None:
-                        raise UserFailureException(NAME_PROPERTY_SET_IN_TWO_PLACES_ERROR_MESSAGE)
-                    propertyValue = properties.get(propertyKey)
-                    if propertyValue is not None:
-                        propertyValueString = str(propertyValue)
-                        dataSet.setPropertyValue(propertyKey, propertyValueString)
+                dataSet.setExperiment(experiment);
+
+            # Move folder to dataset
+            filesInFolder = incoming.listFiles();
+
+            itemsInFolder = 0;
+            datasetItem = None;
+            for item in filesInFolder:
+                fileName = item.getName()
+                if fileName == "metadata.json":
+                    root = JSONObject(FileUtils.readFileToString(item, "UTF-8"))
+                    properties = root.get("properties")
+                    for propertyKey in properties.keys():
+                        if propertyKey == "$NAME" and name is not None:
+                            raise UserFailureException(NAME_PROPERTY_SET_IN_TWO_PLACES_ERROR_MESSAGE)
+                        propertyValue = properties.get(propertyKey)
+                        if propertyValue is not None:
+                            propertyValueString = str(propertyValue)
+                            dataSet.setPropertyValue(propertyKey, propertyValueString)
+                else:
+                    itemsInFolder = itemsInFolder + 1;
+                    datasetItem = item;
+
+            if itemsInFolder > 1:
+                tmpPath = incoming.getAbsolutePath() + "/default";
+                tmpDir = File(tmpPath);
+                tmpDir.mkdir();
+
+                try:
+                    for inputFile in filesInFolder:
+                        Files.move(inputFile.toPath(), Paths.get(tmpPath, inputFile.getName()),
+                                   StandardCopyOption.ATOMIC_MOVE);
+                    transaction.moveFile(tmpDir.getAbsolutePath(), dataSet);
+                finally:
+                    if tmpDir is not None:
+                        tmpDir.delete();
             else:
-                itemsInFolder = itemsInFolder + 1;
-                datasetItem = item;
-
-        if itemsInFolder > 1:
-            tmpPath = incoming.getAbsolutePath() + "/default";
-            tmpDir = File(tmpPath);
-            tmpDir.mkdir();
-
-            try:
-                for inputFile in filesInFolder:
-                    Files.move(inputFile.toPath(), Paths.get(tmpPath, inputFile.getName()),
-                               StandardCopyOption.ATOMIC_MOVE);
-                transaction.moveFile(tmpDir.getAbsolutePath(), dataSet);
-            finally:
-                if tmpDir is not None:
-                    tmpDir.delete();
-        else:
-            transaction.moveFile(datasetItem.getAbsolutePath(), dataSet);
+                transaction.moveFile(datasetItem.getAbsolutePath(), dataSet);
+    finally:
+        reportAllIssues(transaction, emailAddress)
+
+
+def pathListToStr(paths):
+    return "\n".join(paths)
 
 
 def getContactsEmailAddresses(transaction):
@@ -200,62 +208,69 @@
     return re.split("[,;]", emailString) if emailString is not None else []
 
 
-def reportIssue(transaction, errorMessage, emailAddress):
-    contacts = getContactsEmailAddresses(transaction);
-    allAddresses = [emailAddress] + contacts if emailAddress is not None else contacts;
-    sendMail(transaction, map(lambda address: EMailAddress(address), allAddresses), EMAIL_SUBJECT, errorMessage);
-    raise UserFailureException(errorMessage);
+def reportIssue(errorMessage):
+    errorMessages.append(errorMessage)
+
+
+def reportAllIssues(transaction, emailAddress):
+    if len(errorMessages) > 0:
+        contacts = getContactsEmailAddresses(transaction)
+        allAddresses = [emailAddress] + contacts if emailAddress is not None else contacts
+        joinedErrorMessages = "\n".join(errorMessages)
+        sendMail(transaction, map(lambda address: EMailAddress(address), allAddresses), EMAIL_SUBJECT, joinedErrorMessages);
+        raise UserFailureException(joinedErrorMessages)
 
 
-def hasFolderIllegalCharacters(incoming):
-    if bool(re.search(r"['~$%]", incoming.getName())):
-        return True;
+def getFilesWithIllegalCharacters(folder):
+    result = []
+    if bool(re.search(r"['~$%]", folder.getPath())):
+        result.append(folder.getPath())
 
-    files = incoming.listFiles()
+    files = folder.listFiles()
     if files is not None:
         for f in files:
-            if hasFolderIllegalCharacters(f):
-                return True;
+            result.extend(getFilesWithIllegalCharacters(f))
 
-    return False;
+    return result
 
 
-def hasFolderHiddenFiles(incoming):
-    if incoming.getName().startswith("."):
-        return True;
+def getHiddenFiles(folder):
+    result = []
+    if folder.getName().startswith("."):
+        result.append(folder.getPath())
 
-    files = incoming.listFiles()
+    files = folder.listFiles()
     if files is not None:
         for f in files:
-            if hasFolderHiddenFiles(f):
-                return True;
+            result.extend(getHiddenFiles(f))
 
-    return False;
+    return result
 
 
-def hasFolderIllegalFiles(incoming):
-    if incoming.getName() in ILLEGAL_FILES:
-        return True;
+def getIllegalFiles(folder):
+    result = []
+    if folder.getName() in ILLEGAL_FILES:
+        result.append(folder.getPath())
 
-    files = incoming.listFiles()
+    files = folder.listFiles()
    if files is not None:
         for f in files:
-            if hasFolderIllegalFiles(f):
-                return True;
+            result.extend(getIllegalFiles(f))
 
-    return False;
+    return result
 
 
-def hasFolderReadOnlyFiles(incoming):
-    if not incoming.renameTo(incoming):
-        return True;
+def getReadOnlyFiles(folder):
+    result = []
+    if not folder.renameTo(folder):
+        result.append(folder.getPath())
 
-    files = incoming.listFiles()
+    files = folder.listFiles()
     if files is not None:
         for f in files:
-            if hasFolderReadOnlyFiles(f):
-                return True;
+            result.extend(getReadOnlyFiles(f))
 
-    return False;
+    return result
 
 
 def sendMail(transaction, emailAddresses, subject, body):
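Editor note: taken together, the dropbox parses the folder-name convention and now collects validation findings instead of aborting on the first one. A self-contained Python sketch of both ideas (simplified and hedged: it assumes project samples are enabled, so sample and experiment folders both carry four leading segments, and a plain exception stands in for UserFailureException plus the notification mail):

```python
# Folder names follow
# <ENTITY_KIND>+<SPACE>+<PROJECT>+<EXPERIMENT|SAMPLE>[+<DATASET_TYPE>[+<NAME>]]
errors = []

def report_issue(message):
    # Collected, not raised: mirrors the new reportIssue/reportAllIssues split.
    errors.append(message)

def parse_folder_name(folder_name):
    parts = folder_name.split("+")
    if len(parts) < 4 or parts[0] not in ("O", "E"):
        report_issue("Invalid format for the folder name: " + folder_name)
        return None
    return {
        "entity_kind": parts[0],  # "O" = object/sample, "E" = experiment
        "space": parts[1],
        "project": parts[2],
        "code": parts[3],
        "dataset_type": parts[4] if len(parts) > 4 else None,
        "name": parts[5] if len(parts) > 5 else None,
    }

def report_all_issues():
    # One combined failure at the end, like reportAllIssues above
    # (which additionally mails the registrator and the configured contacts).
    if errors:
        raise ValueError("\n".join(errors))

parsed = parse_folder_name("E+MY_SPACE+MY_PROJECT+EXP1+RAW_DATA+run-42")
assert parsed["dataset_type"] == "RAW_DATA"
report_all_issues()  # nothing collected here, so this returns silently
```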