code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
for m in cls: if m.matches(desc): return True return False
def is_supported(cls, desc)
Determines if the given metric descriptor is supported. Args: desc (:class:`endpoints_management.gen.servicecontrol_v1_messages.MetricDescriptor`): the metric descriptor to test Return: `True` if desc is supported, otherwise `False`
5.64575
6.644083
0.849741
if a_dict is None: return for k, v in a_dict.items(): a_hash.update(b'\x00' + k.encode('utf-8') + b'\x00' + v.encode('utf-8'))
def add_dict_to_hash(a_hash, a_dict)
Adds `a_dict` to `a_hash` Args: a_hash (`Hash`): the secure hash, e.g. created by hashlib.md5 a_dict (dict[string, string]): the dictionary to add to the hash
2.278368
2.52834
0.901132
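A minimal standalone sketch of how the helper above might be used, assuming only the standard library; the NUL-byte separators keep key/value boundaries unambiguous before hashing:

import hashlib

def add_dict_to_hash(a_hash, a_dict):
    # Feed each key/value pair into the hash, NUL-separated, so that
    # ('ab', 'c') and ('a', 'bc') produce different digests.
    if a_dict is None:
        return
    for k, v in a_dict.items():
        a_hash.update(b'\x00' + k.encode('utf-8') + b'\x00' + v.encode('utf-8'))

h = hashlib.md5()
add_dict_to_hash(h, {'service': 'books', 'operation': 'list'})  # hypothetical labels
print(h.hexdigest())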
app.add_route(create_swagger_json_handler(app), json_route, methods=["GET"]) add_swagger_api_route(app, html_route, json_route)
def add_swagger(app, json_route, html_route)
a convenience method that adds both a swagger.json route and a page showing the HTML documentation
4.099982
4.181812
0.980432
static_root = get_swagger_static_root() swagger_body = generate_swagger_html( STATIC_ROOT, swagger_json_route ).encode("utf-8") async def swagger_ui(request): return HTTPResponse(body_bytes=swagger_body, content_type="text/html") bp = Blueprint('swagger') bp.static(STATIC_ROOT, static_root) app.add_route(swagger_ui, target_route, methods=["GET"]) app.blueprint(bp)
def add_swagger_api_route(app, target_route, swagger_json_route)
mount a swagger statics page. app: the sanic app object target_route: the path at which to mount the statics page. swagger_json_route: the path where the swagger json definition is expected to be.
3.645879
3.41691
1.06701
spec = get_swagger_spec(app) _add_blueprint_specs(app, spec) spec_dict = spec.swagger_definition(**kwargs) encoded_spec = json.dumps(spec_dict).encode("UTF-8") async def swagger(request): return HTTPResponse( body_bytes=encoded_spec, headers={ "Access-Control-Allow-Origin": "*" }, content_type="application/json", ) return swagger
def create_swagger_json_handler(app, **kwargs)
Create a handler that returns the swagger definition for an application. This method assumes the application is using the TransmuteUrlDispatcher as the router.
3.759901
3.644585
1.03164
# because of a problem in trim_join, we must try to give the fields in an order consistent with other models... # see #26515 at https://code.djangoproject.com/ticket/26515 return OrderedDict( (k, (v if isinstance(v, CompositePart) else LocalFieldValue(v))) for k, v in (to_fields.items() if isinstance(to_fields, dict) else zip(to_fields, to_fields)) )
def compute_to_fields(self, to_fields)
compute the to_fields parameter to make it uniformly a dict of CompositePart :param set[unicode]|dict[unicode, unicode] to_fields: the list/dict of fields to match :return: the well-formatted to_fields containing only subclasses of CompositePart :rtype: dict[str, CompositePart]
10.102043
8.845415
1.142065
lookup_class = for_remote.get_lookup("exact") return lookup_class(for_remote.get_col(alias), self.value)
def get_lookup(self, main_field, for_remote, alias)
create a fake field for the lookup capability :param CompositeForeignKey main_field: the local fk :param Field for_remote: the remote field to match :return:
8.188242
9.767142
0.838346
length = len(self.fqdn) if self.fqdn.endswith('.'): length -= 1 if length > 253: return False return bool(self.FQDN_REGEX.match(self.fqdn))
def is_valid(self)
True for a validated fully-qualified domain name (FQDN), in full compliance with RFC 1035, and the "preferred form" specified in RFC 3696 s. 2, whether relative or absolute. https://tools.ietf.org/html/rfc3696#section-2 https://tools.ietf.org/html/rfc1035 If and only if the FQDN ends with a dot (in place of the RFC 1035 trailing null byte), it may have a total length of 254 bytes; otherwise it must be no more than 253 bytes.
3.914577
3.029984
1.291946
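A self-contained sketch of the same length-plus-pattern check; the regular expression below is a simplified, hypothetical stand-in for the library's FQDN_REGEX:

import re

# Hypothetical, simplified pattern: dot-separated labels of 1-63
# alphanumeric/hyphen characters, optionally ending with a trailing dot.
FQDN_REGEX = re.compile(
    r'^((?![-])[-A-Z\d]{1,63}(?<!-)[.])*(?!-)[-A-Z\d]{1,63}(?<!-)[.]?$',
    re.IGNORECASE)

def is_valid_fqdn(fqdn):
    length = len(fqdn)
    if fqdn.endswith('.'):      # the trailing dot stands in for the root label
        length -= 1
    if length > 253:            # RFC 1035 total length limit
        return False
    return bool(FQDN_REGEX.match(fqdn))

print(is_valid_fqdn('example.com.'))   # True
print(is_valid_fqdn('a' * 300))        # False (too long)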
if not self.is_valid: raise ValueError('invalid FQDN `{0}`'.format(self.fqdn)) if self.is_valid_absolute: return self.fqdn return '{0}.'.format(self.fqdn)
def absolute(self)
The FQDN as a string in absolute form
4.755993
3.413847
1.393148
if not self.is_valid: raise ValueError('invalid FQDN `{0}`'.format(self.fqdn)) if self.is_valid_absolute: return self.fqdn[:-1] return self.fqdn
def relative(self)
The FQDN as a string in relative form
5.841258
4.035221
1.447568
try: return len(os.sched_getaffinity(0)) except AttributeError: pass import re try: with open('/proc/self/status') as f: status = f.read() m = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', status) if m: res = bin(int(m.group(1).replace(',', ''), 16)).count('1') if res > 0: return res except IOError: pass try: import multiprocessing return multiprocessing.cpu_count() except (ImportError, NotImplementedError): return 1
def _available_cpu_count()
Number of available virtual or physical CPUs on this system Adapted from http://stackoverflow.com/a/1006301/715090
1.944016
1.831239
1.061585
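A compact sketch of the same fallback idea using only the standard library; os.sched_getaffinity honours CPU affinity masks (taskset, cgroups) but is not available on every platform:

import os

def available_cpu_count():
    # Prefer the CPUs this process may actually run on; fall back to the
    # total CPU count, then to 1.
    try:
        return len(os.sched_getaffinity(0))   # not available on macOS/Windows
    except AttributeError:
        pass
    return os.cpu_count() or 1

print(available_cpu_count())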
if mode in ('r', 'w', 'a'): mode += 't' if mode not in ('rt', 'rb', 'wt', 'wb', 'at', 'ab'): raise ValueError("mode '{0}' not supported".format(mode)) if not _PY3: mode = mode[0] filename = fspath(filename) if not isinstance(filename, basestring): raise ValueError("the filename must be a string") if compresslevel not in range(1, 10): raise ValueError("compresslevel must be between 1 and 9") if filename == '-': return _open_stdin_or_out(mode) elif filename.endswith('.bz2'): return _open_bz2(filename, mode) elif filename.endswith('.xz'): return _open_xz(filename, mode) elif filename.endswith('.gz'): return _open_gz(filename, mode, compresslevel, threads) else: # Python 2.6 and 2.7 have io.open, which we could use to make the returned # object consistent with the one returned in Python 3, but reading a file # with io.open() is 100 times slower (!) on Python 2.6, and still about # three times slower on Python 2.7 (tested with "for _ in io.open(path): pass") return open(filename, mode)
def xopen(filename, mode='r', compresslevel=6, threads=None)
A replacement for the "open" function that can also open files that have been compressed with gzip, bzip2 or xz. If the filename is '-', standard output (mode 'w') or input (mode 'r') is returned. The file type is determined based on the filename: .gz is gzip, .bz2 is bzip2 and .xz is xz/lzma. When writing a gzip-compressed file, the following methods are tried in order to get the best speed 1) using a pigz (parallel gzip) subprocess; 2) using a gzip subprocess; 3) gzip.open. A single gzip subprocess can be faster than gzip.open because it runs in a separate process. Uncompressed files are opened with the regular open(). mode can be: 'rt', 'rb', 'at', 'ab', 'wt', or 'wb'. Also, the 't' can be omitted, so instead of 'rt', 'wt' and 'at', the abbreviations 'r', 'w' and 'a' can be used. In Python 2, the 't' and 'b' characters are ignored. Append mode ('a', 'at', 'ab') is unavailable with BZ2 compression and will raise an error. compresslevel is the gzip compression level. It is not used for bz2 and xz. threads is the number of threads for pigz. If None, then the pigz default is used.
3.229315
3.395078
0.951176
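A reduced, Python-3-only sketch of the extension-based dispatch that xopen performs, using only the standard library (no pigz subprocess, no '-' handling, no append-mode checks):

import gzip
import bz2
import lzma

def simple_xopen(filename, mode='rt', compresslevel=6):
    # Pick an opener based on the file extension; plain open() otherwise.
    if filename.endswith('.gz'):
        return gzip.open(filename, mode, compresslevel=compresslevel)
    if filename.endswith('.bz2'):
        return bz2.open(filename, mode)
    if filename.endswith('.xz'):
        return lzma.open(filename, mode)
    return open(filename, mode)

with simple_xopen('example.txt.gz', 'wt') as f:   # hypothetical file name
    f.write('hello\n')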
retcode = self.process.poll() if retcode is not None and retcode != 0: message = self._stderr.read().strip() raise IOError(message)
def _raise_if_error(self)
Raise IOError if process is not running anymore and the exit code is nonzero.
4.147077
2.944803
1.40827
''' Run MAB test with T trials. Parameters ---------- trials : int Number of trials to run. strategy : str Name of selected strategy. parameters : dict Parameters for selected strategy. Available strategies: - Epsilon-greedy ("eps_greedy") - Softmax ("softmax") - Upper confidence bound ("ucb") Returns ------- None ''' if trials < 1: raise Exception('MAB.run: Number of trials cannot be less than 1!') if not strategy: strategy = 'eps_greedy' else: if strategy not in self.strategies: raise Exception('MAB.run: Strategy name invalid. Choose from:' ' {}'.format(', '.join(self.strategies))) # Run strategy for n in range(trials): self._run(strategy, parameters)
def run(self, trials=100, strategy=None, parameters=None)
Run MAB test with T trials. Parameters ---------- trials : int Number of trials to run. strategy : str Name of selected strategy. parameters : dict Parameters for selected strategy. Available strategies: - Epsilon-greedy ("eps_greedy") - Softmax ("softmax") - Upper confidence bound ("ucb") Returns ------- None
3.681504
2.166682
1.699144
''' Run single trial of MAB strategy. Parameters ---------- strategy : function parameters : dict Returns ------- None ''' choice = self.run_strategy(strategy, parameters) self.choices.append(choice) payout = self.bandits.pull(choice) if payout is None: print('Trials exhausted. No more values for bandit', choice) return None else: self.wins[choice] += payout self.pulls[choice] += 1
def _run(self, strategy, parameters=None)
Run single trial of MAB strategy. Parameters ---------- strategy : function parameters : dict Returns ------- None
5.310049
3.750427
1.415852
''' Run the Bayesian Bandit algorithm which utilizes a beta distribution for exploration and exploitation. Parameters ---------- params : None For API consistency, this function can take a parameters argument, but it is ignored. Returns ------- int Index of chosen bandit ''' p_success_arms = [ np.random.beta(self.wins[i] + 1, self.pulls[i] - self.wins[i] + 1) for i in range(len(self.wins)) ] return np.array(p_success_arms).argmax()
def bayesian(self, params=None)
Run the Bayesian Bandit algorithm which utilizes a beta distribution for exploration and exploitation. Parameters ---------- params : None For API consistency, this function can take a parameters argument, but it is ignored. Returns ------- int Index of chosen bandit
5.190122
2.221212
2.336617
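The selection above is essentially Thompson sampling with a Beta(wins+1, losses+1) posterior per arm; a self-contained numpy sketch with hypothetical counts:

import numpy as np

wins = np.array([2.0, 5.0, 1.0])     # hypothetical per-arm payouts so far
pulls = np.array([10.0, 12.0, 4.0])  # hypothetical per-arm pull counts

# Draw one sample from each arm's Beta posterior and play the best draw.
samples = np.random.beta(wins + 1, pulls - wins + 1)
print(int(np.argmax(samples)))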
''' Run the epsilon-greedy strategy and update self.max_mean() Parameters ---------- Params : dict Epsilon Returns ------- int Index of chosen bandit ''' if params and type(params) == dict: eps = params.get('epsilon') else: eps = 0.1 r = np.random.rand() if r < eps: return np.random.choice(list(set(range(len(self.wins))) - {self.max_mean()})) else: return self.max_mean()
def eps_greedy(self, params)
Run the epsilon-greedy strategy and update self.max_mean() Parameters ---------- Params : dict Epsilon Returns ------- int Index of chosen bandit
4.489138
2.455094
1.828499
''' Run the softmax selection strategy. Parameters ---------- Params : dict Tau Returns ------- int Index of chosen bandit ''' default_tau = 0.1 if params and type(params) == dict: tau = params.get('tau') try: float(tau) except (TypeError, ValueError): print('slots: softmax: Setting tau to default') tau = default_tau else: tau = default_tau # Handle cold start. Not all bandits tested yet. if True in (self.pulls < 3): return np.random.choice(range(len(self.pulls))) else: payouts = self.wins / (self.pulls + 0.1) norm = sum(np.exp(payouts/tau)) ps = np.exp(payouts/tau)/norm # Randomly choose index based on CMF cmf = [sum(ps[:i+1]) for i in range(len(ps))] rand = np.random.rand() found = False found_i = None i = 0 while not found: if rand < cmf[i]: found_i = i found = True else: i += 1 return found_i
def softmax(self, params)
Run the softmax selection strategy. Parameters ---------- Params : dict Tau Returns ------- int Index of chosen bandit
4.804663
3.750647
1.281022
''' Run the upper confidence bound MAB selection strategy. This is the UCB1 algorithm described in https://homes.di.unimi.it/~cesabian/Pubblicazioni/ml-02.pdf Parameters ---------- params : None For API consistency, this function can take a parameters argument, but it is ignored. Returns ------- int Index of chosen bandit ''' # UCB = j_max(payout_j + sqrt(2ln(n_tot)/n_j)) # Handle cold start. Not all bandits tested yet. if True in (self.pulls < 3): return np.random.choice(range(len(self.pulls))) else: n_tot = sum(self.pulls) payouts = self.wins / (self.pulls + 0.1) ubcs = payouts + np.sqrt(2*np.log(n_tot)/self.pulls) return np.argmax(ubcs)
def ucb(self, params=None)
Run the upper confidence bound MAB selection strategy. This is the UCB1 algorithm described in https://homes.di.unimi.it/~cesabian/Pubblicazioni/ml-02.pdf Parameters ---------- params : None For API consistency, this function can take a parameters argument, but it is ignored. Returns ------- int Index of chosen bandit
6.795856
3.115088
2.181594
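A standalone numpy sketch of the UCB1 score used above, with hypothetical win/pull counts and the same +0.1 smoothing as the code:

import numpy as np

wins = np.array([2.0, 5.0, 1.0])     # hypothetical totals
pulls = np.array([10.0, 12.0, 4.0])

n_tot = pulls.sum()
payouts = wins / (pulls + 0.1)
ucb_scores = payouts + np.sqrt(2 * np.log(n_tot) / pulls)
print(int(np.argmax(ucb_scores)))    # index of the arm to pull next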
''' Return current 'best' choice of bandit. Returns ------- int Index of bandit ''' if len(self.choices) < 1: print('slots: No trials run so far.') return None else: return np.argmax(self.wins/(self.pulls+0.1))
def best(self)
Return current 'best' choice of bandit. Returns ------- int Index of bandit
9.856832
5.662709
1.740657
''' Calculate current estimate of average payout for each bandit. Returns ------- array of floats or None ''' if len(self.choices) < 1: print('slots: No trials run so far.') return None else: return self.wins/(self.pulls+0.1)
def est_payouts(self)
Calculate current estimate of average payout for each bandit. Returns ------- array of floats or None
13.691697
6.417498
2.133495
''' Calculate expected regret, where expected regret is maximum optimal reward - sum of collected rewards, i.e. expected regret = T*max_k(mean_k) - sum_(t=1-->T) (reward_t) Returns ------- float ''' return (sum(self.pulls)*np.max(np.nan_to_num(self.wins/self.pulls)) - sum(self.wins)) / sum(self.pulls)
def regret(self)
Calculate expected regret, where expected regret is maximum optimal reward - sum of collected rewards, i.e. expected regret = T*max_k(mean_k) - sum_(t=1-->T) (reward_t) Returns ------- float
9.120623
2.541302
3.588956
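A tiny worked example of the regret expression above; note that the code divides by the total number of pulls, so it reports regret per trial rather than the cumulative quantity written in the docstring formula:

import numpy as np

wins = np.array([2.0, 6.0])     # hypothetical per-arm payouts
pulls = np.array([10.0, 10.0])

best_mean = np.max(np.nan_to_num(wins / pulls))                 # 0.6
regret_per_trial = (pulls.sum() * best_mean - wins.sum()) / pulls.sum()
print(regret_per_trial)                                         # (20*0.6 - 8) / 20 = 0.2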
''' Determine if stopping criterion has been met. Returns ------- bool ''' if True in (self.pulls < 3): return False else: return self.criteria[self.criterion](self.stop_value)
def crit_met(self)
Determine if stopping criterion has been met. Returns ------- bool
11.107552
8.043323
1.380966
''' Determine if regret criterion has been met. Parameters ---------- threshold : float Returns ------- bool ''' if not threshold: return self.regret() <= self.stop_value elif self.regret() <= threshold: return True else: return False
def regret_met(self, threshold=None)
Determine if regret criterion has been met. Parameters ---------- threshold : float Returns ------- bool
4.314646
3.311225
1.303036
''' Update the bandits with the results of the previous live, online trial. Then run the selection algorithm. If the stopping criterion is met, return the best arm estimate. Otherwise return the next arm to try. Parameters ---------- bandit : int Bandit index payout : float Payout value strategy : string Name of update strategy parameters : dict Parameters for update strategy function Returns ------- dict Format: {'new_trial': boolean, 'choice': int, 'best': int} ''' if bandit is not None and payout is not None: self.update(bandit=bandit, payout=payout) else: raise Exception('slots.online_trial: bandit and/or payout value' ' missing.') if self.crit_met(): return {'new_trial': False, 'choice': self.best(), 'best': self.best()} else: return {'new_trial': True, 'choice': self.run_strategy(strategy, parameters), 'best': self.best()}
def online_trial(self, bandit=None, payout=None, strategy='eps_greedy', parameters=None)
Update the bandits with the results of the previous live, online trial. Then run the selection algorithm. If the stopping criterion is met, return the best arm estimate. Otherwise return the next arm to try. Parameters ---------- bandit : int Bandit index payout : float Payout value strategy : string Name of update strategy parameters : dict Parameters for update strategy function Returns ------- dict Format: {'new_trial': boolean, 'choice': int, 'best': int}
4.667426
1.781389
2.620105
''' Update bandit trials and payouts for given bandit. Parameters ---------- bandit : int Bandit index payout : float Returns ------- None ''' self.choices.append(bandit) self.pulls[bandit] += 1 self.wins[bandit] += payout self.bandits.payouts[bandit] += payout
def update(self, bandit, payout)
Update bandit trials and payouts for given bandit. Parameters ---------- bandit : int Bandit index payout : float Returns ------- None
3.940655
2.527649
1.55902
''' Return the payout from a single pull of the bandit i's arm. Parameters ---------- i : int Index of bandit. Returns ------- float or None ''' if self.live: if len(self.payouts[i]) > 0: return self.payouts[i].pop() else: return None else: if np.random.rand() < self.probs[i]: return self.payouts[i] else: return 0.0
def pull(self, i)
Return the payout from a single pull of the bandit i's arm. Parameters ---------- i : int Index of bandit. Returns ------- float or None
3.900807
2.261167
1.72513
_filter = [h.lower() for h in _filter] + [h.upper() for h in _filter] headers = [h for h in headers if not _filter or h in _filter] # Maximum header width header_widths = [len(h) for h in headers] for row in data: for idx in range(len(headers)): # If a row contains an element which is wider update maximum width if header_widths[idx] < len(str(row[idx])): header_widths[idx] = len(str(row[idx])) # Prepare the format string with the maximum widths formatted_output_parts = ['{{:<{0}}}'.format(hw) for hw in header_widths] formatted_output = ' '.join(formatted_output_parts) # Print the table with the headers capitalized click.echo(formatted_output.format(*[h.upper() for h in headers])) for row in data: click.echo(formatted_output.format(*row))
def click_table_printer(headers, _filter, data)
Generate space separated output for click commands.
3.133239
3.094038
1.01267
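The same maximum-column-width technique, as a minimal standalone sketch using print instead of click.echo, with made-up rows:

headers = ['name', 'status']
data = [['workflow.1', 'finished'], ['another-very-long-name', 'running']]

# Widen each column to its longest cell, then build one format string.
widths = [len(h) for h in headers]
for row in data:
    for idx, cell in enumerate(row):
        widths[idx] = max(widths[idx], len(str(cell)))
fmt = '   '.join('{{:<{0}}}'.format(w) for w in widths)

print(fmt.format(*[h.upper() for h in headers]))
for row in data:
    print(fmt.format(*row))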
md5_hash = md5() if not os.path.exists(directory): return -1 try: for subdir, dirs, files in os.walk(directory): for _file in files: file_path = os.path.join(subdir, _file) if file_list is not None and file_path not in file_list: continue try: _file_object = open(file_path, 'rb') except Exception: # The file cannot be opened for some reason. # We return -1 since we cannot ensure that a file that cannot # be read will not change from one execution to another. return -1 while 1: # Read file in little chunks buf = _file_object.read(4096) if not buf: break md5_hash.update(md5(buf).hexdigest().encode()) _file_object.close() except Exception: return -1 return md5_hash.hexdigest()
def calculate_hash_of_dir(directory, file_list=None)
Calculate hash of directory.
2.673346
2.631876
1.015757
if 'workflow_workspace' in job_spec: del job_spec['workflow_workspace'] job_md5_buffer = md5() job_md5_buffer.update(json.dumps(job_spec).encode('utf-8')) job_md5_buffer.update(json.dumps(workflow_json).encode('utf-8')) return job_md5_buffer.hexdigest()
def calculate_job_input_hash(job_spec, workflow_json)
Calculate md5 hash of job specification and workflow json.
2.237253
1.978902
1.130553
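A self-contained sketch of the same idea with hypothetical inputs: hash the JSON form of the job specification and workflow so identical inputs map to the same cache key (sort_keys is added here for key-order stability, whereas the original hashes the specs in their given order):

import json
from hashlib import md5

job_spec = {'docker_img': 'busybox', 'cmd': 'echo hello'}        # hypothetical spec
workflow_json = {'steps': [{'commands': ['echo hello']}]}        # hypothetical workflow

buf = md5()
buf.update(json.dumps(job_spec, sort_keys=True).encode('utf-8'))
buf.update(json.dumps(workflow_json, sort_keys=True).encode('utf-8'))
print(buf.hexdigest())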
access_times = {} for subdir, dirs, files in os.walk(workflow_workspace): for file in files: file_path = os.path.join(subdir, file) access_times[file_path] = os.stat(file_path).st_atime return access_times
def calculate_file_access_time(workflow_workspace)
Calculate access times of files in workspace.
1.778018
1.663781
1.068661
if component == 'reana-server': file = 'reana_server.json' elif component == 'reana-workflow-controller': file = 'reana_workflow_controller.json' elif component == 'reana-job-controller': file = 'reana_job_controller.json' if os.environ.get('REANA_SRCDIR'): reana_srcdir = os.environ.get('REANA_SRCDIR') else: reana_srcdir = os.path.join('..') try: reana_commons_specs_path = os.path.join( reana_srcdir, 'reana-commons', 'reana_commons', 'openapi_specifications') if os.path.exists(reana_commons_specs_path): if os.path.isfile(output_path): shutil.copy(output_path, os.path.join(reana_commons_specs_path, file)) # copy openapi specs file as well to docs shutil.copy(output_path, os.path.join('docs', 'openapi.json')) except Exception as e: click.echo('Something went wrong, could not copy openapi ' 'specifications to reana-commons \n{0}'.format(e))
def copy_openapi_specs(output_path, component)
Copy generated and validated openapi specs to reana-commons module.
2.429262
2.222906
1.092832
verb = '' if status.endswith('ing'): verb = 'is' elif status.endswith('ed'): verb = 'has been' else: raise ValueError('Unrecognised status {}'.format(status)) return verb
def get_workflow_status_change_verb(status)
Give the correct verb conjugation depending on status tense. :param status: String which represents the status the workflow changed to.
3.086799
3.134006
0.984937
progress_message = {} if total: progress_message['total'] = total if running: progress_message['running'] = running if finished: progress_message['finished'] = finished if failed: progress_message['failed'] = failed if cached: progress_message['cached'] = cached return progress_message
def build_progress_message(total=None, running=None, finished=None, failed=None, cached=None)
Build the progress message with correct formatting.
1.453489
1.481151
0.981324
caching_info_message = { "job_spec": job_spec, "job_id": job_id, "workflow_workspace": workflow_workspace, "workflow_json": workflow_json, "result_path": result_path } return caching_info_message
def build_caching_info_message(job_spec, job_id, workflow_workspace, workflow_json, result_path)
Build the caching info message with correct formatting.
1.507039
1.533674
0.982633
command = ['du', '-h'] if summarize: command.append('-s') else: command.append('-a') command.append(workspace) disk_usage_info = subprocess.check_output(command).decode().split() # create pairs of (size, filename) filesize_pairs = list(zip(disk_usage_info[::2], disk_usage_info[1::2])) filesizes = [] for filesize_pair in filesize_pairs: size, name = filesize_pair # trim workspace path in every file name filesizes.append({'name': name[len(workspace):], 'size': size}) return filesizes
def get_workspace_disk_usage(workspace, summarize=False)
Retrieve disk usage information of a workspace.
3.285104
3.233404
1.015989
name = CVMFS_REPOSITORIES[cvmfs_volume] rendered_template = dict(REANA_CVMFS_PVC_TEMPLATE) rendered_template['metadata']['name'] = 'csi-cvmfs-{}-pvc'.format(name) rendered_template['spec']['storageClassName'] = "csi-cvmfs-{}".format(name) return rendered_template
def render_cvmfs_pvc(cvmfs_volume)
Render REANA_CVMFS_PVC_TEMPLATE.
3.969122
2.959849
1.340988
name = CVMFS_REPOSITORIES[cvmfs_volume] rendered_template = dict(REANA_CVMFS_SC_TEMPLATE) rendered_template['metadata']['name'] = "csi-cvmfs-{}".format(name) rendered_template['parameters']['repository'] = cvmfs_volume return rendered_template
def render_cvmfs_sc(cvmfs_volume)
Render REANA_CVMFS_SC_TEMPLATE.
5.340061
3.671049
1.454642
from kubernetes.client.rest import ApiException from reana_commons.k8s.api_client import current_k8s_storagev1_api_client try: current_k8s_storagev1_api_client.\ create_storage_class( render_cvmfs_sc(cvmfs_volume) ) except ApiException as e: if e.status != 409: raise e
def create_cvmfs_storage_class(cvmfs_volume)
Create CVMFS storage class.
3.409583
3.257579
1.046662
from kubernetes.client.rest import ApiException from reana_commons.k8s.api_client import current_k8s_corev1_api_client try: current_k8s_corev1_api_client.\ create_namespaced_persistent_volume_claim( "default", render_cvmfs_pvc(cvmfs_volume) ) except ApiException as e: if e.status != 409: raise e
def create_cvmfs_persistent_volume_claim(cvmfs_volume)
Create CVMFS persistent volume claim.
2.62114
2.556205
1.025403
k8s_config.load_incluster_config() api_configuration = client.Configuration() api_configuration.verify_ssl = False if api == 'extensions/v1beta1': api_client = client.ExtensionsV1beta1Api() elif api == 'CoreV1': api_client = client.CoreV1Api() elif api == 'StorageV1': api_client = client.StorageV1Api() else: api_client = client.BatchV1Api() return api_client
def create_api_client(api='BatchV1')
Create Kubernetes API client using config. :param api: String which represents which Kubernetes API to spawn. By default BatchV1. :returns: Kubernetes python client object for a specific API i.e. BatchV1.
2.029786
2.213421
0.917035
logging.error('Error while publishing {}'.format( exception)) logging.info('Retry in %s seconds.', interval)
def __error_callback(self, exception, interval)
Execute when there is an error while sending a message. :param exception: Exception which has been thrown while trying to send the message. :param interval: Interval in which the message delivery will be retried.
9.37722
11.575883
0.810065
connection = self._connection.clone() publish = connection.ensure(self.producer, self.producer.publish, errback=self.__error_callback, max_retries=MQ_PRODUCER_MAX_RETRIES) publish(json.dumps(msg), exchange=self._exchange, routing_key=self._routing_key, declare=[self._queue]) logging.debug('Publisher: message sent: %s', msg)
def _publish(self, msg)
Publish a message to the queue, handling retries. :param msg: Object which represents the message to be sent to the queue. Note that this object should be serializable in the configured format (by default JSON).
5.302539
5.565883
0.952686
msg = { "workflow_uuid": workflow_uuid, "logs": logs, "status": status, "message": message } self._publish(msg)
def publish_workflow_status(self, workflow_uuid, status, logs='', message=None)
Publish workflow status using the configured queue. :param workflow_uuid: String which represents the workflow UUID. :param status: Integer which represents the status of the workflow; this is defined in the `reana-db` `Workflow` models. :param logs: String which represents the logs which the workflow has produced as output. :param message: Dictionary with additional information that can be attached, such as the overall progress of the workflow.
2.54109
3.468352
0.732651
msg = { "user": user_id, "workflow_id_or_name": workflow_id_or_name, "parameters": parameters } self._publish(msg)
def publish_workflow_submission(self, user_id, workflow_id_or_name, parameters)
Publish workflow submission parameters.
2.4599
2.479039
0.992279
parameters = parameters or {} if not specification: with open(workflow_file, 'r') as f: specification = json.loads(f.read()) expanded_specification = _expand_parameters(specification, parameters, original) validate(specification, serial_workflow_schema) return expanded_specification
def serial_load(workflow_file, specification, parameters=None, original=None)
Validate and return an expanded REANA Serial workflow specification. :param workflow_file: A specification file compliant with the REANA Serial workflow specification. :returns: A dictionary which represents the valid Serial workflow with all parameters expanded.
3.816782
4.028904
0.94735
expanded_specification = deepcopy(specification) try: for step_num, step in enumerate(expanded_specification['steps']): current_step = expanded_specification['steps'][step_num] for command_num, command in enumerate(step['commands']): current_step['commands'][command_num] = \ Template(command).substitute(parameters) # if call is done from client, original==True and original # specifications without applied parameters are returned. if original: return specification else: return expanded_specification except KeyError as e: raise ValidationError('Workflow parameter(s) could not ' 'be expanded. Please take a look ' 'at {params}'.format(params=str(e)))
def _expand_parameters(specification, parameters, original=None)
Expand parameters inside commands for Serial workflow specifications. :param specification: Full valid Serial workflow specification. :param parameters: Parameters to be expanded in a Serial specification. :param original: Flag which determines the type of specification to return. :returns: If the 'original' parameter is set, a copy of the specification without expanded parameters is returned. If 'original' is not set, a copy of the specification with expanded parameters (all $varname and ${varname} expanded with their values) is returned. An error is raised if the parameters cannot be expanded. :raises: jsonschema.ValidationError
5.679131
5.004319
1.134846
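The expansion relies on string.Template substitution; a minimal standalone sketch of that mechanism with made-up commands and parameters:

from string import Template

parameters = {'input': 'data.csv', 'outdir': 'results'}          # hypothetical values
commands = ['python analyse.py $input', 'cp output.png ${outdir}/plot.png']

# Each $name / ${name} placeholder is replaced from the parameters mapping;
# Template.substitute raises KeyError for any missing parameter.
expanded = [Template(cmd).substitute(parameters) for cmd in commands]
print(expanded)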
from reana_commons.config import REANA_READY_CONDITIONS for module_name, condition_list in REANA_READY_CONDITIONS.items(): for condition_name in condition_list: module = importlib.import_module(module_name) condition_func = getattr(module, condition_name) if not condition_func(): return False return True
def reana_ready()
Check if reana can start new workflows.
2.383212
2.211712
1.077542
try: node_info = current_k8s_corev1_api_client.list_node() for node in node_info.items: # check based on the predefined conditions about the # node status: MemoryPressure, OutOfDisk, KubeletReady # DiskPressure, PIDPressure, for condition in node.status.conditions: if not condition.status: return False except ApiException as e: log.error('Something went wrong while getting node information.') log.error(e) return False return True
def check_predefined_conditions()
Check k8s predefined conditions for the nodes.
5.433223
4.887198
1.111726
try: job_list = current_k8s_batchv1_api_client.\ list_job_for_all_namespaces() if len(job_list.items) > K8S_MAXIMUM_CONCURRENT_JOBS: return False except ApiException as e: log.error('Something went wrong while getting running job list.') log.error(e) return False return True
def check_running_job_count()
Check upper limit on running jobs.
3.710508
3.449993
1.075512
spec_file_path = os.path.join( pkg_resources. resource_filename( 'reana_commons', 'openapi_specifications'), spec_file) with open(spec_file_path) as f: json_spec = json.load(f) return json_spec
def _get_spec(self, spec_file)
Get json specification from package data.
3.905798
3.330459
1.172751
job_spec = { 'experiment': experiment, 'docker_img': image, 'cmd': cmd, 'prettified_cmd': prettified_cmd, 'env_vars': {}, 'workflow_workspace': workflow_workspace, 'job_name': job_name, 'cvmfs_mounts': cvmfs_mounts, 'workflow_uuid': workflow_uuid } response, http_response = self._client.jobs.create_job(job=job_spec).\ result() if http_response.status_code == 400: raise HTTPBadRequest('Bad request to create a job. Error: {}'. format(http_response.data)) elif http_response.status_code == 500: raise HTTPInternalServerError('Internal Server Error. Error: {}'. format(http_response.data)) return response
def submit(self, workflow_uuid='', experiment='', image='', cmd='', prettified_cmd='', workflow_workspace='', job_name='', cvmfs_mounts='false')
Submit a job to the RJC API. :param job_name: Name of the job. :param experiment: Experiment the job belongs to. :param image: Identifier of the Docker image which will run the job. :param cmd: String which represents the command to execute. It can be modified by the workflow engine, e.g. by prepending ``cd /some/dir/``. :param prettified_cmd: Original command submitted by the user. :param workflow_workspace: Path to the workspace of the workflow. :param cvmfs_mounts: String with CVMFS volumes to mount in job pods. :return: Returns a dict with the ``job_id``.
2.37599
2.586293
0.918686
response, http_response = self._client.jobs.get_job(job_id=job_id).\ result() if http_response.status_code == 404: raise HTTPNotFound('The given job ID was not found. Error: {}'. format(http_response.data)) return response
def check_status(self, job_id)
Check status of a job.
5.072085
4.818693
1.052585
response, http_response = self._client.jobs.get_logs(job_id=job_id).\ result() if http_response.status_code == 404: raise HTTPNotFound('The given job ID was not found. Error: {}'. format(http_response.data)) return http_response.text
def get_logs(self, job_id)
Get logs of a job.
5.041498
4.661887
1.081429
response, http_response = self._client.job_cache.check_if_cached( job_spec=json.dumps(job_spec), workflow_json=json.dumps(step), workflow_workspace=workflow_workspace).\ result() if http_response.status_code == 400: raise HTTPBadRequest('Bad request to check cache. Error: {}'. format(http_response.data)) elif http_response.status_code == 500: raise HTTPInternalServerError('Internal Server Error. Error: {}'. format(http_response.data)) return http_response
def check_if_cached(self, job_spec, step, workflow_workspace)
Check if job result is in cache.
3.289908
3.18617
1.032559
workflow_workspace_relative_to_owner = workflow_workspace if os.path.isabs(workflow_workspace): workflow_workspace_relative_to_owner = \ os.path.relpath(workflow_workspace, shared_volume_root) mount_path = os.path.join(shared_volume_root, workflow_workspace_relative_to_owner) volume_mount = { "name": REANA_SHARED_VOLUME_NAME, "mountPath": mount_path, "subPath": workflow_workspace_relative_to_owner} if REANA_STORAGE_BACKEND == "CEPHFS": volume = get_k8s_cephfs_volume() else: volume = get_k8s_hostpath_volume(shared_volume_root) return volume_mount, volume
def get_shared_volume(workflow_workspace, shared_volume_root)
Get shared CephFS/hostPath volume to a given job spec. :param workflow_workspace: Absolute path to the job's workflow workspace. :param shared_volume_root: Root path in the underlying storage backend. :returns: Tuple consisting of the Kubernetes volumeMount and the volume.
2.436779
2.345078
1.039104
domain = ffi.string(domain).decode() message = ffi.string(message).decode() logger = LOGGER.getChild(domain) if level not in LOG_LEVELS: return logger.log(LOG_LEVELS[level], message)
def _logging_callback(level, domain, message, data)
Callback that outputs libgphoto2's logging message via Python's standard logging facilities. :param level: libgphoto2 logging level :param domain: component the message originates from :param message: logging message :param data: Other data in the logging record (unused)
3.409043
4.388634
0.776789
local = import_attribute(local_path)(**local_options) remote = import_attribute(remote_path)(**remote_options) result = self.transfer(name, local, remote, **kwargs) if result is True: cache.set(cache_key, True) file_transferred.send(sender=self.__class__, name=name, local=local, remote=remote) elif result is False: args = [name, cache_key, local_path, remote_path, local_options, remote_options] self.retry(args=args, kwargs=kwargs) else: raise ValueError("Task '%s' did not return True/False but %s" % (self.__class__, result)) return result
def run(self, name, cache_key, local_path, remote_path, local_options, remote_options, **kwargs)
The main work horse of the transfer task. Calls the transfer method with the local and remote storage backends as given with the parameters. :param name: name of the file to transfer :type name: str :param local_path: local storage class to transfer from :type local_path: str :param local_options: options of the local storage class :type local_options: dict :param remote_path: remote storage class to transfer to :type remote_path: str :param remote_options: options of the remote storage class :type remote_options: dict :param cache_key: cache key to set after a successful transfer :type cache_key: str :rtype: task result
2.825216
2.73445
1.033194
try: remote.save(name, local.open(name)) return True except Exception as e: logger.error("Unable to save '%s' to remote storage. " "About to retry." % name) logger.exception(e) return False
def transfer(self, name, local, remote, **kwargs)
Transfers the file with the given name from the local to the remote storage backend. :param name: The name of the file to transfer :param local: The local storage backend instance :param remote: The remote storage backend instance :returns: `True` when the transfer succeeded, `False` if not. Retries the task when returning `False` :rtype: bool
4.493487
3.702554
1.213618
cstr = get_ctype("const char**", cfunc, *args) return backend.ffi.string(cstr).decode() if cstr else None
def get_string(cfunc, *args)
Call a C function and return its return value as a Python string. :param cfunc: C function to call :param args: Arguments to call function with :rtype: str
6.670272
10.480612
0.636439
val_p = backend.ffi.new(rtype) args = args + (val_p,) cfunc(*args) return val_p[0]
def get_ctype(rtype, cfunc, *args)
Call a C function that takes a pointer as its last argument and return the C object that it contains after the function has finished. :param rtype: C data type is filled by the function :param cfunc: C function to call :param args: Arguments to call function with :return: A pointer to the specified data type
5.278974
6.91423
0.763494
obj_p = backend.ffi.new("{0}**".format(typename)) backend.CONSTRUCTORS[typename](obj_p) return obj_p[0]
def new_gp_object(typename)
Create an indirect pointer to a GPhoto2 type, call its matching constructor function and return the pointer to it. :param typename: Name of the type to create. :return: A pointer to the specified data type.
6.696208
8.470531
0.79053
version_str = ffi.string(lib.gp_library_version(True)[0]).decode() return tuple(int(x) for x in version_str.split('.'))
def get_library_version()
Get the version number of the underlying gphoto2 library. :return: The version :rtype: tuple of (major, minor, patch) version numbers
4.987962
4.166403
1.197186
ctx = lib.gp_context_new() camlist_p = new_gp_object("CameraList") port_list_p = new_gp_object("GPPortInfoList") lib.gp_port_info_list_load(port_list_p) abilities_list_p = new_gp_object("CameraAbilitiesList") lib.gp_abilities_list_load(abilities_list_p, ctx) lib.gp_abilities_list_detect(abilities_list_p, port_list_p, camlist_p, ctx) out = [] for idx in range(lib.gp_list_count(camlist_p)): name = get_string(lib.gp_list_get_name, camlist_p, idx) value = get_string(lib.gp_list_get_value, camlist_p, idx) # Skip iteration if no matches matches = re.match(r"usb:(\d+),(\d+)", value) if not matches: continue bus_no, device_no = (int(x) for x in matches.groups()) abilities = ffi.new("CameraAbilities*") ability_idx = lib.gp_abilities_list_lookup_model( abilities_list_p, name.encode()) lib.gp_abilities_list_get_abilities(abilities_list_p, ability_idx, abilities) if abilities.device_type == lib.GP_DEVICE_STILL_CAMERA: out.append(Camera(bus_no, device_no, lazy=True, _abilities=abilities)) lib.gp_list_free(camlist_p) lib.gp_port_info_list_free(port_list_p) lib.gp_abilities_list_free(abilities_list_p) return out
def list_cameras()
List all attached USB cameras that are supported by libgphoto2. :return: All recognized cameras :rtype: list of :py:class:`Camera`
2.906361
2.924275
0.993874
ctx = lib.gp_context_new() abilities_list_p = new_gp_object("CameraAbilitiesList") lib.gp_abilities_list_load(abilities_list_p, ctx) abilities = ffi.new("CameraAbilities*") out = [] for idx in range(lib.gp_abilities_list_count(abilities_list_p)): lib.gp_abilities_list_get_abilities(abilities_list_p, idx, abilities) if abilities.device_type == lib.GP_DEVICE_STILL_CAMERA: libname = os.path.basename(ffi.string(abilities.library) .decode()) out.append((ffi.string(abilities.model).decode(), libname)) lib.gp_abilities_list_free(abilities_list_p) key_func = lambda entry: entry[1] # sorted() and groupby() pass a single (model, driver) tuple out = sorted(out, key=key_func) return {k: tuple(x[0] for x in v) for k, v in itertools.groupby(out, key_func)}
def supported_cameras()
List the names of all cameras supported by libgphoto2, grouped by the name of their driver.
3.954629
3.788265
1.043916
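The grouping step above is a plain sort-then-groupby on the driver name; a standalone sketch with dummy model/driver pairs (itertools.groupby requires the input to be sorted by the same key):

import itertools

models = [('Nikon D90', 'ptp2'), ('Canon EOS 5D', 'ptp2'), ('Some Cam', 'sierra')]

key = lambda entry: entry[1]                      # group by driver name
models = sorted(models, key=key)
grouped = {driver: tuple(name for name, _ in group)
           for driver, group in itertools.groupby(models, key)}
print(grouped)   # {'ptp2': ('Nikon D90', 'Canon EOS 5D'), 'sierra': ('Some Cam',)}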
self.camera._get_config()['actions']['movie'].set(False) self.videofile = self.camera._wait_for_event( event_type=lib.GP_EVENT_FILE_ADDED) if self._old_captarget != "Memory card": self.camera.config['settings']['capturetarget'].set( self._old_captarget)
def stop(self)
Stop the capture.
12.901277
11.476377
1.124159
if self.parent is None: return "/" else: return os.path.join(self.parent.path, self.name)
def path(self)
Absolute path to the directory on the camera's filesystem.
2.901298
2.950164
0.983436
return tuple(op for op in backend.DIR_OPS if self._dir_ops & op)
def supported_operations(self)
All directory operations supported by the camera.
22.585243
13.530467
1.669214
if self.name in ("", "/") and self.parent is None: return True else: return self in self.parent.directories
def exists(self)
Check whether the directory exists on the camera.
6.375971
5.82047
1.095439
filelist_p = new_gp_object("CameraList") lib.gp_camera_folder_list_files(self._cam._cam, self.path.encode(), filelist_p, self._cam._ctx) for idx in range(lib.gp_list_count(filelist_p)): fname = get_string(lib.gp_list_get_name, filelist_p, idx) yield File(name=fname, directory=self, camera=self._cam) lib.gp_list_free(filelist_p)
def files(self)
Get a generator that yields all files in the directory.
5.837691
4.987239
1.170525
dirlist_p = new_gp_object("CameraList") lib.gp_camera_folder_list_folders(self._cam._cam, self.path.encode(), dirlist_p, self._cam._ctx) for idx in range(lib.gp_list_count(dirlist_p)): name = os.path.join( self.path, get_string(lib.gp_list_get_name, dirlist_p, idx)) yield Directory(name=name, parent=self, camera=self._cam) lib.gp_list_free(dirlist_p)
def directories(self)
Get a generator that yields all subdirectories in the directory.
5.371506
4.737035
1.133938
lib.gp_camera_folder_make_dir( self._cam._cam, self.parent.path.encode(), self.name.encode(), self._cam._ctx)
def create(self)
Create the directory.
18.28491
14.743441
1.240206
lib.gp_camera_folder_remove_dir( self._cam._cam, self.parent.path.encode(), self.name.encode(), self._cam._ctx)
def remove(self)
Remove the directory.
17.567167
14.241954
1.23348
camerafile_p = ffi.new("CameraFile**") with open(local_path, 'rb') as fp: lib.gp_file_new_from_fd(camerafile_p, fp.fileno()) lib.gp_camera_folder_put_file( self._cam._cam, self.path.encode() + b"/", os.path.basename(local_path).encode(), backend.FILE_TYPES['normal'], camerafile_p[0], self._cam.ctx)
def upload(self, local_path)
Upload a file to the camera's permanent storage. :param local_path: Path to file to copy :type local_path: str/unicode
6.233212
5.986943
1.041134
return tuple(op for op in backend.FILE_OPS if self._operations & op)
def supported_operations(self)
All file operations supported by the camera.
24.649309
15.018284
1.641287
return ImageDimensions(self._info.file.width, self._info.file.height)
def dimensions(self)
Dimensions of the image. :rtype: :py:class:`ImageDimensions`
9.896931
9.008355
1.098639
can_read = self._info.file.permissions & lib.GP_FILE_PERM_READ can_write = self._info.file.permissions & lib.GP_FILE_PERM_DELETE return "{0}{1}".format("r" if can_read else "-", "w" if can_write else "-")
def permissions(self)
Permissions of the file. Can be "r-" (read-only), "-w" (write-only), "rw" (read-write) or "--" (no rights). :rtype: str
4.768314
4.18935
1.138199
camfile_p = ffi.new("CameraFile**") with open(target_path, 'wb') as fp: lib.gp_file_new_from_fd(camfile_p, fp.fileno()) lib.gp_camera_file_get( self._cam._cam, self.directory.path.encode(), self.name.encode(), backend.FILE_TYPES[ftype], camfile_p[0], self._cam._ctx)
def save(self, target_path, ftype='normal')
Save file content to a local file. :param target_path: Path to save remote file as. :type target_path: str/unicode :param ftype: Select 'view' on file. :type ftype: str
6.71229
7.327685
0.916018
camfile_p = ffi.new("CameraFile**") lib.gp_file_new(camfile_p) lib.gp_camera_file_get( self._cam._cam, self.directory.path.encode(), self.name.encode(), backend.FILE_TYPES[ftype], camfile_p[0], self._cam._ctx) data_p = ffi.new("char**") length_p = ffi.new("unsigned long*") lib.gp_file_get_data_and_size(camfile_p[0], data_p, length_p) byt = bytes(ffi.buffer(data_p[0], length_p[0])) # gphoto2 camera files MUST be freed. lib.gp_file_free(camfile_p[0]) # just to be safe. del data_p, length_p, camfile_p return byt
def get_data(self, ftype='normal')
Get file content as a bytestring. :param ftype: Select 'view' on file. :type ftype: str :return: File content :rtype: bytes
4.007058
3.91695
1.023005
self._check_type_supported(ftype) buf_p = ffi.new("char[{0}]".format(chunk_size)) size_p = ffi.new("uint64_t*") offset_p = ffi.new("uint64_t*") for chunk_idx in range(int(math.ceil(self.size/chunk_size))): size_p[0] = chunk_size lib.gp_camera_file_read( self._cam._cam, self.directory.path.encode(), self.name.encode(), backend.FILE_TYPES[ftype], offset_p[0], buf_p, size_p, self._cam._ctx) yield ffi.buffer(buf_p, size_p[0])[:]
def iter_data(self, chunk_size=2**16, ftype='normal')
Get an iterator that yields chunks of the file content. :param chunk_size: Size of yielded chunks in bytes :type chunk_size: int :param ftype: Select 'view' on file. :type ftype: str :return: Iterator
3.791127
3.998209
0.948206
lib.gp_camera_file_delete(self._cam._cam, self.directory.path.encode(), self.name.encode(), self._cam._ctx)
def remove(self)
Remove file from device.
20.002542
13.82229
1.447122
if self.readonly: raise ValueError("Option is read-only.") val_p = None if self.type == 'selection': if value not in self.choices: raise ValueError("Invalid choice (valid: {0})".format( repr(self.choices))) val_p = ffi.new("const char[]", value.encode()) elif self.type == 'text': if not isinstance(value, basestring): raise ValueError("Value must be a string.") val_p = ffi.new("char**") val_p[0] = ffi.new("char[]", value.encode()) elif self.type == 'range': if value < self.range.min or value > self.range.max: raise ValueError("Value exceeds valid range ({0}-{1}." .format(self.range.min, self.range.max)) if value % self.range.step: raise ValueError("Value can only be changed in steps of {0}." .format(self.range.step)) val_p = ffi.new("float*") val_p[0] = value elif self.type == 'toggle': if not isinstance(value, bool): raise ValueError("Value must be bool.") val_p = ffi.new("int*") val_p[0] = int(value) elif self.type == 'date': val_p = ffi.new("int*") val_p[0] = value lib.gp_widget_set_value(self._widget, val_p) lib.gp_camera_set_config(self._cam._cam, self._root, self._cam._ctx) self.value = value
def set(self, value)
Update value of the option. Only possible for options with :py:attr:`readonly` set to `False`. If :py:attr:`type` is `selection`, the value must be one of the :py:attr:`choices`. If :py:attr:`type` is `range`, the value must be in the range described by :py:attr:`range`. :param value: Value to set
2.491481
2.468846
1.009168
return tuple(op for op in backend.CAM_OPS if self._abilities.operations & op)
def supported_operations(self)
All operations supported by the camera.
46.464912
31.784203
1.461887
return UsbInformation(self._abilities.usb_vendor, self._abilities.usb_product, self._abilities.usb_class, self._abilities.usb_subclass, self._abilities.usb_protocol)
def usb_info(self)
The camera's USB information.
3.615816
3.54412
1.02023
config = self._get_config() return {section: {itm.name: itm for itm in config[section].values() if not itm.readonly} for section in config if 'settings' in section or section == 'other'}
def config(self)
Writeable configuration parameters. :rtype: dict
6.956846
7.94473
0.875655
config = self._get_config() is_hex = lambda name: (len(name) == 4 and all(c in string.hexdigits for c in name)) out = SimpleNamespace() for sect in config: for itm in config[sect].values(): if (itm.readonly or sect == 'status') and not is_hex(itm.name): setattr(out, itm.name, itm.value) return out
def status(self)
Status information (read-only). :rtype: :py:class:`SimpleNamespace`
4.461304
3.962267
1.125947
info_p = ffi.new("CameraStorageInformation**") num_info_p = ffi.new("int*") lib.gp_camera_get_storageinfo(self._cam, info_p, num_info_p, self._ctx) infos = [] for idx in range(num_info_p[0]): out = SimpleNamespace() struc = (info_p[0] + idx) fields = struc.fields if lib.GP_STORAGEINFO_BASE & fields: out.directory = next( (d for d in self.list_all_directories() if d.path == ffi.string(struc.basedir).decode()), None) if lib.GP_STORAGEINFO_LABEL & fields: out.label = ffi.string(struc.label).decode() if lib.GP_STORAGEINFO_DESCRIPTION & fields: out.description = ffi.string(struc.description).decode() if lib.GP_STORAGEINFO_STORAGETYPE & fields: stype = struc.type if lib.GP_STORAGEINFO_ST_FIXED_ROM & stype: out.type = 'fixed_rom' elif lib.GP_STORAGEINFO_ST_REMOVABLE_ROM & stype: out.type = 'removable_rom' elif lib.GP_STORAGEINFO_ST_FIXED_RAM & stype: out.type = 'fixed_ram' elif lib.GP_STORAGEINFO_ST_REMOVABLE_RAM & stype: out.type = 'removable_ram' else: out.type = 'unknown' if lib.GP_STORAGEINFO_ACCESS & fields: if lib.GP_STORAGEINFO_AC_READWRITE & struc.access: out.access = 'read-write' elif lib.GP_STORAGEINFO_AC_READONLY & struc.access: out.access = 'read-only' elif lib.GP_STORAGEINFO_AC_READONLY_WITH_DELETE & struc.access: out.access = 'read-delete' if lib.GP_STORAGEINFO_MAXCAPACITY & fields: out.capacity = int(struc.capacitykbytes) if lib.GP_STORAGEINFO_FREESPACEKBYTES & fields: out.free_space = int(struc.freekbytes) if lib.GP_STORAGEINFO_FREESPACEIMAGES & fields: out.remaining_images = int(struc.freeimages) infos.append(out) return infos
def storage_info(self)
Information about the camera's storage.
2.269226
2.23633
1.01471
def list_files_recursively(directory): f_gen = itertools.chain( directory.files, *tuple(list_files_recursively(d) for d in directory.directories)) for f in f_gen: yield f return list_files_recursively(self.filesystem)
def list_all_files(self)
Utility method that yields all files on the device's file systems.
4.120021
4.008354
1.027859
def list_dirs_recursively(directory): if directory == self.filesystem: yield directory d_gen = itertools.chain( directory.directories, *tuple(list_dirs_recursively(d) for d in directory.directories)) for d in d_gen: yield d return list_dirs_recursively(self.filesystem)
def list_all_directories(self)
Utility method that yields all directories on the device's file systems.
4.260936
4.162818
1.02357
target = self.config['settings']['capturetarget'] if to_camera_storage and target.value != "Memory card": target.set("Memory card") elif not to_camera_storage and target.value != "Internal RAM": target.set("Internal RAM") lib.gp_camera_trigger_capture(self._cam, self._ctx) fobj = self._wait_for_event(event_type=lib.GP_EVENT_FILE_ADDED) if to_camera_storage: self._logger.info("File written to storage at {0}.".format(fobj)) return fobj else: data = fobj.get_data() try: fobj.remove() except errors.CameraIOError: # That probably means the file is already gone from RAM, # so nothing to worry about. pass return data
def capture(self, to_camera_storage=False)
Capture an image. Some cameras (mostly Canon and Nikon) support capturing to internal RAM. On these devices, you have to specify `to_camera_storage` if you want to save the images to the memory card. On devices that do not support saving to RAM, the only difference is that the file is automatically downloaded and deleted when set to `False`. :param to_camera_storage: Save image to the camera's internal storage :type to_camera_storage: bool :return: A :py:class:`File` if `to_camera_storage` was `True`, otherwise the captured image as a bytestring. :rtype: :py:class:`File` or bytes
5.492496
5.05091
1.087427
with self.capture_video_context() as ctx: time.sleep(length) return ctx.videofile
def capture_video(self, length)
Capture a video. This always writes to the memory card, since internal RAM is likely to run out of space very quickly. Currently this only works with Nikon cameras. :param length: Length of the video to capture in seconds. :type length: int :return: Video file :rtype: :py:class:`File`
7.533465
8.582221
0.877799
lib.gp_camera_capture_preview(self._cam, self.__camfile_p[0], self._ctx) lib.gp_file_get_data_and_size(self.__camfile_p[0], self.__data_p, self.__length_p) return ffi.buffer(self.__data_p[0], self.__length_p[0])[:]
def get_preview(self)
Get a preview from the camera's viewport. This will usually be a JPEG image with the dimensions depending on the camera. You will need to call the exit() method manually after you are done capturing a live preview. :return: The preview image as a bytestring :rtype: bytes
5.11538
5.677378
0.901011
cache_result = cache.get(self.get_cache_key(name)) if cache_result: return self.remote elif cache_result is None and self.remote.exists(name): cache.set(self.get_cache_key(name), True) return self.remote else: return self.local
def get_storage(self, name)
Returns the storage backend instance responsible for the file with the given name (either local or remote). This method is used in most of the storage API methods. :param name: file name :type name: str :rtype: :class:`~django:django.core.files.storage.Storage`
3.190326
2.973802
1.072811
return self.get_storage(name).open(name, mode)
def open(self, name, mode='rb')
Retrieves the specified file from storage. :param name: file name :type name: str :param mode: mode to open the file with :type mode: str :rtype: :class:`~django:django.core.files.File`
6.542381
6.837836
0.956791
cache_key = self.get_cache_key(name) cache.set(cache_key, False) # Use a name that is available on both the local and remote storage # systems and save locally. name = self.get_available_name(name) try: name = self.local.save(name, content, max_length=max_length) except TypeError: # Django < 1.10 name = self.local.save(name, content) # Pass on the cache key to prevent duplicate cache key creation, # we save the result in the storage to be able to test for it if not self.delayed: self.result = self.transfer(name, cache_key=cache_key) return name
def save(self, name, content, max_length=None)
Saves the given content with the given name using the local storage. If the :attr:`~queued_storage.backends.QueuedStorage.delayed` attribute is ``True`` this will automatically call the :meth:`~queued_storage.backends.QueuedStorage.transfer` method queuing the transfer from local to remote storage. :param name: file name :type name: str :param content: content of the file specified by name :type content: :class:`~django:django.core.files.File` :rtype: str
5.419681
4.777901
1.134323
if cache_key is None: cache_key = self.get_cache_key(name) return self.task.delay(name, cache_key, self.local_path, self.remote_path, self.local_options, self.remote_options)
def transfer(self, name, cache_key=None)
Transfers the file with the given name to the remote storage backend by queuing the task. :param name: file name :type name: str :param cache_key: the cache key to set after a successful task run :type cache_key: str :rtype: task result
3.108034
3.402579
0.913435
local_available_name = self.local.get_available_name(name) remote_available_name = self.remote.get_available_name(name) if remote_available_name > local_available_name: return remote_available_name return local_available_name
def get_available_name(self, name)
Returns a filename that's free on both the local and remote storage systems, and available for new content to be written to. :param name: file name :type name: str :rtype: str
2.299707
2.215042
1.038223
index_analysis = None recommendation = None namespace = parsed_query['ns'] indexStatus = "unknown" index_cache_entry = self._ensure_index_cache(db_uri, db_name, collection_name) query_analysis = self._generate_query_analysis(parsed_query, db_name, collection_name) if ((query_analysis['analyzedFields'] != []) and query_analysis['supported']): index_analysis = self._generate_index_analysis(query_analysis, index_cache_entry['indexes']) indexStatus = index_analysis['indexStatus'] if index_analysis['indexStatus'] != 'full': recommendation = self._generate_recommendation(query_analysis, db_name, collection_name) # a temporary fix to suppress faulty parsing of $regexes. # if the recommendation cannot be re-parsed into yaml, we assume # it is invalid. if not validate_yaml(recommendation['index']): recommendation = None query_analysis['supported'] = False # QUERY REPORT return OrderedDict({ 'queryMask': parsed_query['queryMask'], 'indexStatus': indexStatus, 'parsed': parsed_query, 'namespace': namespace, 'queryAnalysis': query_analysis, 'indexAnalysis': index_analysis, 'recommendation': recommendation })
def generate_query_report(self, db_uri, parsed_query, db_name, collection_name)
Generates a comprehensive report on the raw query
4.717965
4.758686
0.991443
if not self._check_indexes or db_uri is None: return {'indexes': None} if db_name not in self.get_cache(): self._internal_map[db_name] = {} if collection_name not in self._internal_map[db_name]: indexes = [] try: if self._index_cache_connection is None: self._index_cache_connection = pymongo.MongoClient(db_uri, document_class=OrderedDict, read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED) db = self._index_cache_connection[db_name] indexes = db[collection_name].index_information() except: warning = 'Warning: unable to connect to ' + db_uri + "\n" internal_map_entry = {'indexes': indexes} self.get_cache()[db_name][collection_name] = internal_map_entry return self.get_cache()[db_name][collection_name]
def _ensure_index_cache(self, db_uri, db_name, collection_name)
Adds a collection's index entries to the cache if not present
2.592593
2.524121
1.027127
analyzed_fields = [] field_count = 0 supported = True sort_fields = [] query_mask = None if 'command' in parsed_query and parsed_query['command'] not in SUPPORTED_COMMANDS: supported = False else: #if 'orderby' in parsed_query: sort_component = parsed_query['orderby'] if 'orderby' in parsed_query else [] sort_seq = 0 for key in sort_component: sort_field = {'fieldName': key, 'fieldType': SORT_TYPE, 'seq': sort_seq} sort_fields.append(key) analyzed_fields.append(sort_field) field_count += 1 sort_seq += 1 query_component = parsed_query['query'] if 'query' in parsed_query else {} for key in query_component: if key not in sort_fields: field_type = UNSUPPORTED_TYPE if ((key not in UNSUPPORTED_QUERY_OPERATORS) and (key not in COMPOSITE_QUERY_OPERATORS)): try: if query_component[key] == {}: raise nested_field_list = query_component[key].keys() except: field_type = EQUIV_TYPE else: for nested_field in nested_field_list: if ((nested_field in RANGE_QUERY_OPERATORS) and (nested_field not in UNSUPPORTED_QUERY_OPERATORS)): field_type = RANGE_TYPE else: supported = False field_type = UNSUPPORTED_TYPE break if field_type is UNSUPPORTED_TYPE: supported = False analyzed_field = {'fieldName': key, 'fieldType': field_type} analyzed_fields.append(analyzed_field) field_count += 1 query_mask = parsed_query['queryMask'] # QUERY ANALYSIS return OrderedDict({ 'analyzedFields': analyzed_fields, 'fieldCount': field_count, 'supported': supported, 'queryMask': query_mask })
def _generate_query_analysis(self, parsed_query, db_name, collection_name)
Translates a raw query object into a Dex query analysis
2.694835
2.697436
0.999036