code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0):
    """Image generator for FashionMNIST.

    Args:
        tmp_dir: path to temporary storage directory.
        training: a Boolean; if true, use the train set, otherwise the test set.
        how_many: how many images and labels to generate.
        start_from: from which image to start.

    Returns:
        An instance of image_generator that produces FashionMNIST images.
    """
    _get_fashion_mnist(tmp_dir)
    if training:
        data_name = _MNIST_TRAIN_DATA_FILENAME
        labels_name = _MNIST_TRAIN_LABELS_FILENAME
    else:
        data_name = _MNIST_TEST_DATA_FILENAME
        labels_name = _MNIST_TEST_LABELS_FILENAME
    data_path = _FASHION_MNIST_LOCAL_FILE_PREFIX + data_name
    labels_path = _FASHION_MNIST_LOCAL_FILE_PREFIX + labels_name
    return mnist_common_generator(
        tmp_dir, training, how_many, data_path, labels_path, start_from)
Image generator for FashionMNIST. Args: tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many images and labels to generate. start_from: from which image to start. Returns: An instance of image_generator that produces FashionMNIST images.
codesearchnet
def cost(self, t_node, branch_length, multiplicity=2.0):
    """Return the cost associated with a branch starting at ``t_node``.

    ``t_node`` is time before present; the branch goes back in time.

    Args:
        t_node: time of the node.
        branch_length: branch length; determines when this branch merges
            with its sister.
        multiplicity: 2 if the merger is binary, higher for a polytomy.
    """
    t_merger = t_node + branch_length
    rate_integral = (self.integral_merger_rate(t_merger)
                     - self.integral_merger_rate(t_node))
    merger_term = (np.log(self.total_merger_rate(t_merger))
                   * (multiplicity - 1.0) / multiplicity)
    return rate_integral - merger_term
returns the cost associated with a branch starting at t_node t_node is time before present, the branch goes back in time Args: - t_node: time of the node - branch_length: branch length, determines when this branch merges with sister - multiplicity: 2 if merger is binary, higher if this is a polytomy
juraj-google-style
def _get_max_page(dom):
    """Try to guess how many pages are in the book listing.

    Args:
        dom (obj): HTMLElement container of the page with the book list.

    Returns:
        int: Number of pages for the given category (1 when unknown).
    """
    div = dom.find("div", {"class": "razeniKnihListovani"})
    if not div:
        return 1
    links = div[0].find("a")
    # Materialize eagerly: in Python 3, filter()/map() return lazy iterator
    # objects which are ALWAYS truthy, so the original `if not max_page`
    # fallback could never trigger and max() could raise on an empty iterator.
    page_numbers = [
        int(href.split("pageindex=")[-1])
        for href in (
            x.params["href"]
            for x in links
            if "href" in x.params and "pageindex=" in x.params["href"]
        )
        if href.split("pageindex=")[-1].isdigit()
    ]
    if not page_numbers:
        return 1
    return max(page_numbers)
Try to guess how much pages are in book listing. Args: dom (obj): HTMLElement container of the page with book list. Returns: int: Number of pages for given category.
juraj-google-style
def get_gates(self, x):
    """Return the bucket id of the given tensor.

    Args:
        x (tf.Tensor): float32 of shape [length, depth].

    Returns:
        tf.Tensor: One-hot vector int64 of shape [heads, length, nb_buckets]
            containing the id of the bucket.
    """
    # Gate computation must not contribute to gradients.
    projected = tf.stop_gradient(x)
    projected = tf.matmul(projected, self.t_vectors)
    signs = tf.sign(projected)
    scores = (tf.matmul(signs, self.t_group, transpose_b=True)
              / self.nb_hyperplanes)
    bucket_ids = tf.argmax(scores, axis=(- 1))
    return tf.one_hot(bucket_ids, self.nb_buckets)
Return the bucket id of the given tensor. Args: x (tf.Tensor): float32 of shape [length, depth] Returns: tf.Tensor: One-hot vector int64 of shape [heads, length, nb_buckets] containing the id of the bucket
codesearchnet
def check_channel(fcn):
    """Decorator that ensures a valid channel is passed in.

    Args:
        fcn (function): Function that has a ChannelResource as its second
            argument.

    Returns:
        (function): Wraps the given function with one that checks for a
            valid channel.
    """
    def wrapper(*args, **kwargs):
        channel = args[1]
        if not isinstance(channel, ChannelResource):
            raise RuntimeError('resource must be an instance of intern.resource.boss.ChannelResource.')
        if not channel.cutout_ready:
            raise PartialChannelResourceError(
                'ChannelResource not fully initialized. Use intern.remote.BossRemote.get_channel({}, {}, {})'.format(
                    channel.name, channel.coll_name, channel.exp_name))
        return fcn(*args, **kwargs)
    return wrapper
Decorator that ensures a valid channel passed in. Args: fcn (function): Function that has a ChannelResource as its second argument. Returns: (function): Wraps given function with one that checks for a valid channel.
juraj-google-style
def __init__(self, cell):
    """Creates a new SamplerCell.

    Args:
        cell: A C pointer of TFE_MonitoringSamplerCell.
    """
    # Only stores the handle; no copying or ownership transfer happens here.
    self._cell = cell
Creates a new SamplerCell. Args: cell: A c pointer of TFE_MonitoringSamplerCell.
github-repos
def nic_v1(msg, NICs):
    """Calculate NIC (navigation integrity category) for ADS-B version 1.

    Args:
        msg (string): 28 bytes hexadecimal message string.
        NICs (int or string): NIC supplement.

    Returns:
        int or string: Horizontal Radius of Containment.
        int or string: Vertical Protection Limit.
    """
    tc = typecode(msg)
    if tc < 5 or tc > 22:
        raise RuntimeError(('%s: Not a surface position message (5<TC<8), airborne position message (8<TC<19), or airborne position with GNSS height (20<TC<22)' % msg))
    NIC = uncertainty.TC_NICv1_lookup[tc]
    # Some typecodes map to several NIC values keyed by the supplement bit.
    if isinstance(NIC, dict):
        NIC = NIC[NICs]
    try:
        entry = uncertainty.NICv1[NIC][NICs]
        Rc, VPL = entry['Rc'], entry['VPL']
    except KeyError:
        Rc, VPL = uncertainty.NA, uncertainty.NA
    return (Rc, VPL)
Calculate NIC, navigation integrity category, for ADS-B version 1 Args: msg (string): 28 bytes hexadecimal message string NICs (int or string): NIC supplement Returns: int or string: Horizontal Radius of Containment int or string: Vertical Protection Limit
codesearchnet
def create_impression_event(self, experiment, variation_id, user_id, attributes):
    """Create impression Event to be sent to the logging endpoint.

    Args:
        experiment: Experiment for which impression needs to be recorded.
        variation_id: ID for variation which would be presented to user.
        user_id: ID for user.
        attributes: Dict representing user attributes and values which need
            to be recorded.

    Returns:
        Event object encapsulating the impression event.
    """
    params = self._get_common_params(user_id, attributes)
    snapshot = self._get_required_params_for_impression(experiment, variation_id)
    # Impressions are appended to the first (and only) user's snapshots list.
    params[self.EventParams.USERS][0][self.EventParams.SNAPSHOTS].append(snapshot)
    return Event(self.EVENTS_URL, params,
                 http_verb=self.HTTP_VERB,
                 headers=self.HTTP_HEADERS)
Create impression Event to be sent to the logging endpoint. Args: experiment: Experiment for which impression needs to be recorded. variation_id: ID for variation which would be presented to user. user_id: ID for user. attributes: Dict representing user attributes and values which need to be recorded. Returns: Event object encapsulating the impression event.
codesearchnet
# NOTE(review): this block appears corrupted by extraction -- the `url`
# assignment's string literal is truncated ("url = 'https:"), swallowing the
# rest of the URL up to `auth = Auth()`. The intended URL cannot be
# reconstructed from what is visible here; restore from upstream before
# relying on this code.
def is_ordered(cat_id): url = 'https: auth = Auth() r = _req_with_retries(auth.gbdx_connection, url) if (r is not None): return (r.status_code == 200) return False
Checks to see if a CatalogID has been ordered or not. Args: cat_id (str): The catalog ID from the platform catalog. Returns: ordered (bool): Whether or not the image has been ordered
codesearchnet
def _respond(self, channel, text):
    """Respond to a message on the current socket.

    Args:
        channel (:py:class:`str`): The channel to send to.
        text (:py:class:`str`): The message text to send.
    """
    payload = self._format_message(channel, text)
    if payload is None:
        # Nothing to send; _format_message declined to build a message.
        return
    logger.info('Sending message: %r', truncate(payload, max_len=50))
    self.socket.send_str(payload)
Respond to a message on the current socket. Args: channel (:py:class:`str`): The channel to send to. text (:py:class:`str`): The message text to send.
codesearchnet
def if_callable_call_with_formatted_string(callback, formattable_string, *args):
    """If ``callback`` is callable, format the string with ``args`` and call it.

    Otherwise, do nothing.

    Args:
        callback (function): May or may not be callable.
        formattable_string (str): A string with '{}'s inserted.
        *args: A variable amount of arguments for the string formatting.
            Must correspond to the amount of '{}'s in ``formattable_string``.

    Raises:
        ValueError: If there are more insertion points than ``args``.
    """
    try:
        formatted_string = formattable_string.format(*args)
    except IndexError:
        # Fixed typo in the original message ("metween" -> "between").
        raise ValueError("Mismatch between amount of insertion points in the formattable string\n"
                         "and the amount of args given.")
    if callable(callback):
        callback(formatted_string)
If the callback is callable, format the string with the args and make a call. Otherwise, do nothing. Args: callback (function): May or may not be callable. formattable_string (str): A string with '{}'s inserted. *args: A variable amount of arguments for the string formatting. Must correspond to the amount of '{}'s in 'formattable_string'. Raises: ValueError
juraj-google-style
def __init__(self, resolver_context):
    """Initializes a file-like object.

    Args:
        resolver_context (Context): resolver context.
    """
    super(TSKFile, self).__init__(resolver_context)
    # Internal state; populated lazily when the file is opened.
    self._current_offset = 0
    self._size = 0
    self._file_system = None
    self._tsk_attribute = None
    self._tsk_file = None
Initializes a file-like object. Args: resolver_context (Context): resolver context.
juraj-google-style
async def cancel(self, task: asyncio.Task, wait_for: bool=True) -> Any:
    """Cancels and waits for an `asyncio.Task` to finish.

    Removes it from the collection of managed tasks.

    Args:
        task (asyncio.Task): The to-be-cancelled task. It is not required
            that the task was created with `TaskScheduler.create_task()`.
        wait_for (bool, optional): Whether to wait for the task to finish
            execution. If falsey, this function returns immediately after
            cancelling the task.

    Returns:
        Any: The return value of `task`. None if `wait_for` is falsey.
    """
    if task is None:
        return
    task.cancel()
    # The task may not be managed by this scheduler; ignore a missing entry.
    with suppress(KeyError):
        self._tasks.remove(task)
    if not wait_for:
        return None
    # Awaiting a cancelled task normally raises CancelledError; swallow
    # any exception and fall through to an implicit None, as the original did.
    with suppress(Exception):
        return await task
Cancels and waits for an `asyncio.Task` to finish. Removes it from the collection of managed tasks. Args: task (asyncio.Task): The to be cancelled task. It is not required that the task was created with `TaskScheduler.create_task()`. wait_for (bool, optional): Whether to wait for the task to finish execution. If falsey, this function returns immediately after cancelling the task. Returns: Any: The return value of `task`. None if `wait_for` is falsey.
codesearchnet
def on(self, day, strict=False):
    """Iterates (in chronological order) over all events occurring on `day`.

    Args:
        day (Arrow object)
        strict (bool): if True, events are returned only if they are
            strictly *included* in `day`.
    """
    start, stop = day.floor('day').span('day')
    if strict:
        return self.included(start, stop)
    return self.overlapping(start, stop)
Iterates (in chronological order) over all events that occurs on `day` Args: day (Arrow object) strict (bool): if True events will be returned only if they are\ strictly *included* in `day`.
juraj-google-style
def execute_forever(method, interval_s):
    """Executes a method forever at the specified interval.

    Args:
        method: The callable to execute.
        interval_s: The number of seconds to wait after each invocation
            finishes before starting the next one.

    Returns:
        An Interval object controlling the repeated execution.
    """
    scheduler = Interval(method)
    scheduler.start(interval_s)
    return scheduler
Executes a method forever at the specified interval. Args: method: The callable to execute. interval_s: The number of seconds to start the execution after each method finishes. Returns: An Interval object.
juraj-google-style
def get_experiment_from_id(self, experiment_id):
    """Get experiment for the provided experiment ID.

    Args:
        experiment_id: Experiment ID for which experiment is to be determined.

    Returns:
        Experiment corresponding to the provided experiment ID, or None
        when the ID is not present in the datafile.
    """
    experiment = self.experiment_id_map.get(experiment_id)
    if not experiment:
        self.logger.error(('Experiment ID "%s" is not in datafile.' % experiment_id))
        self.error_handler.handle_error(
            exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
        return None
    return experiment
Get experiment for the provided experiment ID. Args: experiment_id: Experiment ID for which experiment is to be determined. Returns: Experiment corresponding to the provided experiment ID.
codesearchnet
def create_elb_dns(self, regionspecific=False):
    """Create DNS entries in route53.

    Args:
        regionspecific (bool): The DNS entry should have the region on it.

    Returns:
        str: Auto-generated DNS name for the Elastic Load Balancer.
    """
    dns_records = self.generated.dns()
    dns_elb = dns_records['elb_region'] if regionspecific else dns_records['elb']
    dns_elb_aws = find_elb(name=self.app_name, env=self.env, region=self.region)
    zone_ids = get_dns_zone_ids(env=self.env, facing=self.elb_subnet)
    self.log.info('Updating Application URL: %s', dns_elb)
    dns_kwargs = {
        'dns_name': dns_elb,
        'dns_name_aws': dns_elb_aws,
        'dns_ttl': self.dns_ttl,
    }
    # One record update per hosted zone the ELB is visible in.
    for zone_id in zone_ids:
        self.log.debug('zone_id: %s', zone_id)
        update_dns_zone_record(self.env, zone_id, **dns_kwargs)
    return dns_elb
Create dns entries in route53. Args: regionspecific (bool): The DNS entry should have region on it Returns: str: Auto-generated DNS name for the Elastic Load Balancer.
juraj-google-style
# NOTE(review): this block appears corrupted by extraction -- the progress
# line `sys.stderr.write('\r[{0}] {1}%'.format((' ...` has a truncated format
# argument list (the string literal and closing parens are cut off before
# `sys.stderr.flush()`), which makes the inner `_callback` unparseable.
# Restore from upstream before relying on this code. Also note the dubious
# `total is 0` identity test (should be `== 0`) and the unused
# `num_result_dir` parameter -- both worth confirming against upstream.
def _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=None): def _callback(downloaded, total): '\n            Call function for upload.\n\n            `downloaded`: File size already downloaded (int)\n\n            `total`: Total file size to be downloaded (int)\n            ' if ((total is 0) or (downloaded == total)): return progress = ((downloaded * 100) / total) sys.stderr.write('\r[{0}] {1}%'.format((' sys.stderr.flush() m = _URI_RE.match(s3_path) bucket_name = m.group(1) bucket = boto_conn.get_bucket(bucket_name) retries = 6 if (s3_path.endswith('/') is False): key_name = m.group(2) key_instance = bucket.get_key(key_name) while ((key_instance is None) and (retries > 0)): retries = (retries - 1) log.info(('Results file is not available on s3. Retry: ' + str((6 - retries)))) time.sleep(10) key_instance = bucket.get_key(key_name) if (key_instance is None): raise Exception('Results file not available on s3 yet. This can be because of s3 eventual consistency issues.') log.info(('Downloading file from %s' % s3_path)) if (delim is None): try: key_instance.get_contents_to_file(fp) except boto.exception.S3ResponseError as e: if (e.status == 403): log.warn('Access denied while fetching the s3 object. Retrying without specifying the version....') key_instance.open() fp.write(key_instance.read()) key_instance.close() else: raise else: _read_iteratively(key_instance, fp, delim=delim) else: key_prefix = m.group(2) bucket_paths = bucket.list(key_prefix) for one_path in bucket_paths: name = one_path.name if name.endswith('$folder$'): continue log.info(('Downloading file from %s' % name)) if (delim is None): one_path.get_contents_to_file(fp) else: _read_iteratively(one_path, fp, delim=delim)
Downloads the contents of all objects in s3_path into fp Args: `boto_conn`: S3 connection object `s3_path`: S3 path to be downloaded `fp`: The file object where data is to be downloaded
codesearchnet
def CallNtpdate(logger):
    """Sync the system clock using ntpdate.

    Args:
        logger: logger object, used to write to SysLog and serial port.
    """
    # `service ... status` returns 0 when ntpd is running.
    ntpd_inactive = subprocess.call(['service', 'ntpd', 'status'])
    ntpd_was_running = not ntpd_inactive
    try:
        # ntpdate refuses to run while ntpd holds the port, so stop it first.
        if ntpd_was_running:
            subprocess.check_call(['service', 'ntpd', 'stop'])
        subprocess.check_call(
            'ntpdate `awk \'$1=="server" {print $2}\' /etc/ntp.conf`',
            shell=True)
        if ntpd_was_running:
            subprocess.check_call(['service', 'ntpd', 'start'])
    except subprocess.CalledProcessError:
        logger.warning('Failed to sync system time with ntp server.')
    else:
        logger.info('Synced system time with ntp server.')
Sync clock using ntpdate. Args: logger: logger object, used to write to SysLog and serial port.
codesearchnet
def ion_equals(a, b, timestamps_instants_only=False):
    """Tests two objects for equivalence under the Ion data model.

    Args:
        a (object): The first operand.
        b (object): The second operand.
        timestamps_instants_only (Optional[bool]): False if timestamp objects
            (datetime and its subclasses) should be compared according to the
            Ion data model (instant, precision, and offset must all be equal);
            True if they should be considered equivalent when they simply
            represent the same instant.
    """
    comparer = (_ion_equals_timestamps_instants if timestamps_instants_only
                else _ion_equals_timestamps_data_model)
    return comparer(a, b)
Tests two objects for equivalence under the Ion data model. There are three important cases: * When neither operand specifies its `ion_type` or `annotations`, this method will only return True when the values of both operands are equivalent under the Ion data model. * When only one of the operands specifies its `ion_type` and `annotations`, this method will only return True when that operand has no annotations and has a value equivalent to the other operand under the Ion data model. * When both operands specify `ion_type` and `annotations`, this method will only return True when the ion_type and annotations of both are the same and their values are equivalent under the Ion data model. Note that the order of the operands does not matter. Args: a (object): The first operand. b (object): The second operand. timestamps_instants_only (Optional[bool]): False if timestamp objects (datetime and its subclasses) should be compared according to the Ion data model (where the instant, precision, and offset must be equal); True if these objects should be considered equivalent if they simply represent the same instant.
codesearchnet
def generate_full_symmops(symmops, tol):
    """Close the supplied symmetry operations under composition.

    Permutes through all combinations of the initially supplied operations
    to arrive at the complete set mapping a single atom to all equivalent
    atoms in the point group. Assumes the initial set already uniquely
    identifies all operations.

    Args:
        symmops ([SymmOp]): Initial set of symmetry operations.
        tol (float): Tolerance for comparing affine matrices.

    Returns:
        Full set of symmetry operations.
    """
    identity = np.eye(4)
    generators = [op.affine_matrix for op in symmops
                  if not np.allclose(op.affine_matrix, identity)]
    if not generators:
        return symmops
    # `full` grows while being iterated, so newly discovered products are
    # themselves composed with every generator until closure is reached.
    full = list(generators)
    for g in full:
        for s in generators:
            candidate = np.dot(g, s)
            close = np.abs(full - candidate) < tol
            if not np.any(np.all(np.all(close, axis=2), axis=1)):
                full.append(candidate)
    close = np.abs(full - identity) < tol
    if not np.any(np.all(np.all(close, axis=2), axis=1)):
        full.append(identity)
    return [SymmOp(op) for op in full]
Recursive algorithm to permute through all possible combinations of the initially supplied symmetry operations to arrive at a complete set of operations mapping a single atom to all other equivalent atoms in the point group. This assumes that the initial number already uniquely identifies all operations. Args: symmops ([SymmOp]): Initial set of symmetry operations. tol (float): Tolerance for comparing affine matrices when testing whether an operation is already present. Returns: Full set of symmetry operations.
codesearchnet
# NOTE(review): this block appears corrupted by extraction -- the second
# HTTPRateLimitError message string is split and truncated across the row
# boundary ("...failed for {0}. " / "Rate limit exceeded, ..."), leaving an
# unterminated string literal. Restore from upstream before relying on this
# code. Also note the bare `except:` at the end, which converts every
# remaining failure (including KeyboardInterrupt) into HTTPLookupError.
def get_http_json(self, url=None, retry_count=3, rate_limit_timeout=120, headers=None): if (headers is None): headers = {'Accept': 'application/rdap+json'} try: log.debug('HTTP query for {0} at {1}'.format(self.address_str, url)) conn = Request(url, headers=headers) data = self.opener.open(conn, timeout=self.timeout) try: d = json.loads(data.readall().decode('utf-8', 'ignore')) except AttributeError: d = json.loads(data.read().decode('utf-8', 'ignore')) try: for tmp in d['notices']: if (tmp['title'] == 'Rate Limit Notice'): log.debug('RDAP query rate limit exceeded.') if (retry_count > 0): log.debug('Waiting {0} seconds...'.format(str(rate_limit_timeout))) sleep(rate_limit_timeout) return self.get_http_json(url=url, retry_count=(retry_count - 1), rate_limit_timeout=rate_limit_timeout, headers=headers) else: raise HTTPRateLimitError('HTTP lookup failed for {0}. Rate limit exceeded, wait and try again (possibly a temporary block).'.format(url)) except (KeyError, IndexError): pass return d except HTTPError as e: if (e.code == 429): log.debug('HTTP query rate limit exceeded.') if (retry_count > 0): log.debug('Waiting {0} seconds...'.format(str(rate_limit_timeout))) sleep(rate_limit_timeout) return self.get_http_json(url=url, retry_count=(retry_count - 1), rate_limit_timeout=rate_limit_timeout, headers=headers) else: raise HTTPRateLimitError('HTTP lookup failed for {0}. 
Rate limit exceeded, wait and try again (possibly a temporary block).'.format(url)) else: raise HTTPLookupError('HTTP lookup failed for {0} with error code {1}.'.format(url, str(e.code))) except (URLError, socket.timeout, socket.error) as e: log.debug('HTTP query socket error: {0}'.format(e)) if (retry_count > 0): log.debug('HTTP query retrying (count: {0})'.format(str(retry_count))) return self.get_http_json(url=url, retry_count=(retry_count - 1), rate_limit_timeout=rate_limit_timeout, headers=headers) else: raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url)) except (HTTPLookupError, HTTPRateLimitError) as e: raise e except: raise HTTPLookupError('HTTP lookup failed for {0}.'.format(url))
The function for retrieving a json result via HTTP. Args: url (:obj:`str`): The URL to retrieve (required). retry_count (:obj:`int`): The number of times to retry in case socket errors, timeouts, connection resets, etc. are encountered. Defaults to 3. rate_limit_timeout (:obj:`int`): The number of seconds to wait before retrying when a rate limit notice is returned via rdap+json or HTTP error 429. Defaults to 120. headers (:obj:`dict`): The HTTP headers. The Accept header defaults to 'application/rdap+json'. Returns: dict: The data in json format. Raises: HTTPLookupError: The HTTP lookup failed. HTTPRateLimitError: The HTTP request rate limited and retries were exhausted.
codesearchnet
def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)):
    """2D convolution with separable filters.

    Args:
        x: input tensor.
        depthwise_kernel: convolution kernel for the depthwise convolution.
        pointwise_kernel: kernel for the 1x1 convolution.
        strides: strides tuple (length 2).
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
        dilation_rate: tuple of integers, dilation rates for the separable
            convolution.

    Returns:
        Output tensor.

    Raises:
        ValueError: if `data_format` is neither `channels_last` nor
            `channels_first`.
        ValueError: if `strides` is not a tuple of 2 integers.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format: ' + str(data_format))
    if len(strides) != 2:
        raise ValueError('`strides` must be a tuple of 2 integers.')
    x, tf_data_format = _preprocess_conv2d_input(x, data_format)
    padding = _preprocess_padding(padding)
    if not isinstance(strides, tuple):
        strides = tuple(strides)
    # Pad the strides to rank 4 at the positions dictated by the TF layout.
    if tf_data_format == 'NHWC':
        full_strides = (1,) + strides + (1,)
    else:
        full_strides = (1, 1) + strides
    x = nn.separable_conv2d(x, depthwise_kernel, pointwise_kernel,
                            strides=full_strides,
                            padding=padding,
                            rate=dilation_rate,
                            data_format=tf_data_format)
    # If TF forced NHWC but the caller asked for channels_first, swap back.
    if data_format == 'channels_first' and tf_data_format == 'NHWC':
        x = array_ops.transpose(x, (0, 3, 1, 2))
    return x
2D convolution with separable filters. Args: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. pointwise_kernel: kernel for the 1x1 convolution. strides: strides tuple (length 2). padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. dilation_rate: tuple of integers, dilation rates for the separable convolution. Returns: Output tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. ValueError: if `strides` is not a tuple of 2 integers.
github-repos
# NOTE(review): this block appears corrupted by extraction -- the SystemError
# message string is split and truncated across the row boundary
# ("...binary packages. " / "If this is ' 'not true, ..."), leaving the
# literal malformed. Restore from upstream before relying on this code.
def __init__(self, *compressed_files, **kwargs): self._files = [] self._prefixes = defaultdict(lambda: set([''])) self._extract = kwargs.get('extract', False) self._supersede = kwargs.get('supersede', False) self._match_version = kwargs.get('_match_version', True) self._local_warned = False for f in compressed_files: if isinstance(f, zipfile.ZipFile): bin_package = any(n.endswith('.so') or n.endswith('.pxd') or n.endswith('.dylib') for n in f.namelist()) need_extract = True elif isinstance(f, tarfile.TarFile): bin_package = any(m.name.endswith('.so') or m.name.endswith('.pxd') or m.name.endswith('.dylib') for m in f.getmembers()) need_extract = True elif isinstance(f, dict): bin_package = any(name.endswith('.so') or name.endswith('.pxd') or name.endswith('.dylib') for name in iterkeys(f)) need_extract = False elif isinstance(f, list): bin_package = any(name.endswith('.so') or name.endswith('.pxd') or name.endswith('.dylib') for name in f) need_extract = False else: raise TypeError('Compressed file can only be zipfile.ZipFile or tarfile.TarFile') if bin_package: if not ALLOW_BINARY: raise SystemError('Cannot load binary package. It is quite possible that you are using an old ' 'MaxCompute service which does not support binary packages. 
If this is ' 'not true, please set `odps.isolation.session.enable` to True or ask your ' 'project owner to change project-level configuration.') if need_extract: f = self._extract_archive(f) prefixes = set(['']) dir_prefixes = set() if isinstance(f, zipfile.ZipFile): for name in f.namelist(): name = name if name.endswith('/') else (name.rsplit('/', 1)[0] + '/') if name in prefixes: continue try: f.getinfo(name + '__init__.py') except KeyError: prefixes.add(name) elif isinstance(f, tarfile.TarFile): for member in f.getmembers(): name = member.name if member.isdir() else member.name.rsplit('/', 1)[0] if name in prefixes: continue try: f.getmember(name + '/__init__.py') except KeyError: prefixes.add(name + '/') elif isinstance(f, (list, dict)): if ALLOW_BINARY: bin_package = True rendered_names = set() for name in f: name = name.replace(os.sep, '/') rendered_names.add(name) for name in rendered_names: name = name if name.endswith('/') else (name.rsplit('/', 1)[0] + '/') if name in prefixes or '/tests/' in name: continue if name + '__init__.py' not in rendered_names: prefixes.add(name) dir_prefixes.add(name) else: if '/' in name.rstrip('/'): ppath = name.rstrip('/').rsplit('/', 1)[0] else: ppath = '' prefixes.add(ppath) dir_prefixes.add(ppath) if bin_package: path_patch = [] for p in sorted(dir_prefixes): if p in sys.path: continue parent_exist = False for pp in path_patch: if p[:len(pp)] == pp: parent_exist = True break if parent_exist: continue path_patch.append(p) if self._supersede: sys.path = path_patch + sys.path else: sys.path = sys.path + path_patch else: self._files.append(f) if prefixes: self._prefixes[id(f)] = sorted(prefixes)
Constructor. Args: compressed_files: zipfile.ZipFile or tarfile.TarFile instances (or dict/list of member names) to load packages from.
juraj-google-style
def all_function(function: _evaluation.AllFunction, operand_result: Optional[_sql_data_types.IdentifierSelect], params_result: Collection[_sql_data_types.StandardSqlExpression]) -> _sql_data_types.Select:
    """Generates Spark SQL representing the FHIRPath all() function.

    Returns true if criteria evaluates to true for every item in its operand.
    Takes one param (`criteria`) in addition to its operand. If the operand
    is not provided, returns True.

    Args:
        function: The FHIRPath AST `AllFunction` node.
        operand_result: The expression which is being evaluated.
        params_result: The parameter passed in to the function.

    Returns:
        A compiled Spark SQL expression.
    """
    sql_alias = 'all_'
    sql_data_type = _sql_data_types.Boolean
    if not operand_result or not params_result:
        # all() over an empty/absent operand is vacuously TRUE.
        return _sql_data_types.Select(
            select_part=_sql_data_types.RawExpression(
                'TRUE',
                _sql_alias=sql_alias,
                _sql_data_type=_sql_data_types.Boolean),
            from_part=None,
            sql_dialect=_sql_data_types.SqlDialect.SPARK)
    criteria = list(params_result)[0]
    # A collection-typed parent contributes its FROM/WHERE context; a scalar
    # parent is wrapped as a subquery instead.
    if _fhir_path_data_types.is_collection(function.parent_node.return_type):
        context_sql = operand_result.from_part
        where_part = operand_result.where_part
    else:
        context_sql = str(operand_result.to_subquery())
        where_part = None
    criteria_sql = _sql_data_types.RawExpression(
        criteria.as_operand(),
        _sql_alias=sql_alias,
        _sql_data_type=sql_data_type).to_subquery()
    # NULL criteria rows must count as failures, hence IFNULL(..., FALSE).
    internal_if_null_call = _sql_data_types.FunctionCall(
        'IFNULL', [criteria_sql, 'FALSE'],
        _sql_alias=sql_alias, _sql_data_type=sql_data_type)
    logical_and_call = _sql_data_types.FunctionCall(
        'BOOL_AND', (internal_if_null_call,),
        _sql_alias=sql_alias, _sql_data_type=sql_data_type)
    return _sql_data_types.Select(
        select_part=_sql_data_types.FunctionCall(
            'IFNULL', [logical_and_call, 'TRUE'],
            _sql_alias=sql_alias, _sql_data_type=sql_data_type),
        from_part=context_sql,
        where_part=where_part,
        sql_dialect=_sql_data_types.SqlDialect.SPARK)
Generates Spark SQL representing the FHIRPath all() function. Returns true if criteria evaluates to true for every item in its operand. This function takes one param (`criteria`) in addition to its operand. If operand is not provided, it returns True. Args: function: The FHIRPath AST `AllFunction` node operand_result: The expression which is being evaluated params_result: The parameter passed in to function Returns: A compiled Spark SQL expression.
github-repos
def __init__(self, settings, room_id, pause=1):
    """Initialize.

    Args:
        settings (dict): Settings used to create a :class:`Connection` instance.
        room_id (int): Room ID.

    Kwargs:
        pause (int): Pause in seconds between requests.
    """
    Process.__init__(self)
    self._room_id = room_id
    self._pause = pause
    self._connection = Connection.create_from_settings(settings)
    # Populated later by the consumer of this process.
    self._callback = None
    self._queue = None
    self._last_message_id = None
Initialize. Args: settings (dict): Settings used to create a :class:`Connection` instance room_id (int): Room ID Kwargs: pause (int): Pause in seconds between requests
juraj-google-style
def WriteBytes(self, value, unhex=True):
    """Write a `bytes` value to the underlying stream.

    Args:
        value (bytes): array of bytes to write to the stream.
        unhex (bool): (Default) True. Set to unhexlify the stream. Use when
            the bytes are not raw bytes; i.e. b'aabb'.

    Returns:
        int: the number of bytes written.
    """
    data = value
    if unhex:
        try:
            data = binascii.unhexlify(data)
        except binascii.Error:
            # Not a valid hex string; write the value through unchanged.
            pass
    return self.stream.write(data)
Write a `bytes` type to the stream. Args: value (bytes): array of bytes to write to the stream. unhex (bool): (Default) True. Set to unhexlify the stream. Use when the bytes are not raw bytes; i.e. b'aabb' Returns: int: the number of bytes written.
juraj-google-style
class _Validate(beam.PTransform):
    """Validates each element of a PCollection against a json schema.

    Args:
        schema: A json schema against which to validate each element.
        error_handling: Whether and how to handle errors during iteration.
            If this is not set, invalid elements will fail the pipeline,
            otherwise invalid elements will be passed to the specified error
            output along with information about how the schema was
            invalidated.
    """

    def __init__(self, schema: dict[str, Any], error_handling: Optional[Mapping[str, Any]]=None):
        self._schema = schema
        self._exception_handling_args = exception_handling_args(error_handling)

    @maybe_with_exception_handling
    def expand(self, pcoll):
        validator = json_utils.row_validator(
            schema_from_element_type(pcoll.element_type), self._schema)

        def apply_validator(element):
            # Raises on invalid elements; valid ones pass through unchanged.
            validator(element)
            return element

        return pcoll | beam.Map(apply_validator)

    def with_exception_handling(self, **kwargs):
        self._exception_handling_args = kwargs
        return self
Validates each element of a PCollection against a json schema. Args: schema: A json schema against which to validate each element. error_handling: Whether and how to handle errors during iteration. If this is not set, invalid elements will fail the pipeline, otherwise invalid elements will be passed to the specified error output along with information about how the schema was invalidated.
github-repos
def __init__(self, indent=True, relation_sort=original_order):
    """Initialize a new codec.

    Args:
        indent: if True, adaptively indent; if False or None, don't indent;
            if a non-negative integer, indent that many spaces per nesting
            level.
        relation_sort: when encoding, sort the relations on each node
            according to this function; by default, the original order is
            maintained.
    """
    self.indent = indent
    self.relation_sort = relation_sort
Initialize a new codec. Args: indent: if True, adaptively indent; if False or None, don't indent; if a non-negative integer, indent that many spaces per nesting level relation_sort: when encoding, sort the relations on each node according to this function; by default, the original order is maintained
juraj-google-style
def get_cuda_version_all():
    """Retrieves all additional CUDA versions available (other than default).

    For retrieving the default CUDA version, use the `get_cuda_version`
    function. stderr is silenced by default; setting FLAGS.debug mode will
    not enable it. Remove `2> /dev/null` from `cmds_linux['cuda_ver_dflt']`
    to enable stderr.

    Returns:
        List of all CUDA versions found (except the default version),
        e.g. ['10.1', '10.2'].
    """
    key = 'cuda_ver_all'
    out, err = run_shell_cmd(cmds_all[PLATFORM.lower()][key])
    lines = [item for item in out.split(b'\n') if item not in ['\n', '']]
    all_vers = []
    for item in lines:
        ver_re = re.search('.*/cuda(\\-[\\d]+\\.[\\d]+)?', item.decode('utf-8'))
        # Guard against non-matching lines: the original dereferenced
        # `ver_re.group(1)` without checking the match for None, which
        # raises AttributeError on unexpected command output.
        if ver_re and ver_re.group(1):
            all_vers.append(ver_re.group(1).strip('-'))
    if err and FLAGS.debug:
        print('Error in detecting CUDA version:\n %s' % str(err))
    return all_vers
Retrieves all additional CUDA versions available (other than default). For retrieving default CUDA version, use `get_cuda_version` function. stderr is silenced by default. Setting FLAGS.debug mode will not enable it. Remove `2> /dev/null` command from `cmds_linux['cuda_ver_dflt']` to enable stderr. Returns: List of all CUDA versions found (except default version). e.g. ['10.1', '10.2']
github-repos
def disambiguate_query(self, query, language=None, entities=None):
    """Call the disambiguation service to disambiguate a search query.

    Args:
        query (str): Query to be disambiguated.
        language (str): language of the query (if known).
        entities (list): list of entities or mentions supplied by the user.

    Returns:
        dict, int: API response and API status.
    """
    body = {
        'shortText': query,
        'entities': [],
        'onlyNER': 'false',
        'customisation': 'generic',
    }
    if language:
        body['language'] = {'lang': language}
    if entities:
        body['entities'] = entities
    files = {'query': str(body)}
    logger.debug('About to submit the following query {}'.format(body))
    res, status = self.post(self.disambiguate_service, files=files,
                            headers={'Accept': 'application/json'})
    if status != 200:
        logger.debug('Disambiguation failed.')
        return (None, status)
    return (self.decode(res), status)
Call the disambiguation service in order to disambiguate a search query. Args: query (str): Query to be disambiguated. language (str): language of query (if known) entities (list): list of entities or mentions to be supplied by the user. Returns: dict, int: API response and API status.
codesearchnet
def set_mlag_id(self, name, value=None, default=False, disable=False):
    """Configures the interface mlag value for the specified interface.

    Args:
        name (str): The interface to configure. Valid values for the name
            arg include Port-Channel*.
        value (str): The mlag identifier to configure on the interface.
        default (bool): Configures the interface mlag value using the
            default keyword.
        disable (bool): Negates the interface mlag value using the no
            keyword.

    Returns:
        bool: Returns True if the commands complete successfully.
    """
    command = self.command_builder('mlag', value=value, default=default,
                                   disable=disable)
    return self.configure_interface(name, command)
Configures the interface mlag value for the specified interface Args: name (str): The interface to configure. Valid values for the name arg include Port-Channel* value (str): The mlag identifier to cofigure on the interface default (bool): Configures the interface mlag value using the default keyword disable (bool): Negates the interface mlag value using the no keyword Returns: bool: Returns True if the commands complete successfully
codesearchnet
def sort_segment_points(Aps, Bps):
    """Merge two line segments' points into one continuous path.

    Args:
        Aps: Array of tracktotrip.Point.
        Bps: Array of tracktotrip.Point.

    Returns:
        Array with points ordered.
    """
    result = [Aps[0]]
    b_start = 0  # index of the first not-yet-consumed point of Bps
    for i in range(len(Aps) - 1):
        gap = distance_tt_point(Aps[i], Aps[i + 1])
        for m in range(b_start, len(Bps)):
            # Insert a B point when it is closer to Aps[i] than the next A
            # point and lies in the forward direction of the A segment.
            if gap > distance_tt_point(Aps[i], Bps[m]):
                direction = dot(
                    normalize(line(Aps[i].gen2arr(), Aps[i + 1].gen2arr())),
                    normalize(Bps[m].gen2arr()))
                if direction > 0:
                    b_start = m + 1
                    result.append(Bps[m])
                    break
        result.append(Aps[i + 1])
    # Append any remaining B points after the last consumed one.
    for m in range(b_start, len(Bps)):
        result.append(Bps[m])
    return result
Takes two line segments and sorts all their points, so that they form a continuous path Args: Aps: Array of tracktotrip.Point Bps: Array of tracktotrip.Point Returns: Array with points ordered
juraj-google-style
def __getitem__(self, key):
    """Gets the value at key (or current context), or sets a default value.

    Args:
        key: May be `None` or a `Graph` object. When `None`, the key is
            set to the current context.

    Returns:
        Either the cached or the default value.
    """
    if key is None:
        key = self._key()
    cached = self._get_recursive(key)
    if cached is not None:
        return cached
    # Cache miss: build, store, and return the default.
    fresh = self.default_factory()
    self[key] = fresh
    return fresh
Gets the value at key (or current context), or sets default value. Args: key: May be `None` or `Graph`object. When `None`, the key is set to the current context. Returns: Either the cached or default value.
github-repos
def delete_folder(self, folder_id, recursive=True):
    """Delete an existing folder.

    Args:
        folder_id (int): ID of the folder to delete.
        recursive (bool): Delete all subfolders if True.

    Returns:
        dict. Response from Box.

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    # `unicode` does not exist in Python 3; `str` yields the same
    # 'true'/'false' query value on both major versions.
    return self.__request('DELETE',
                          ('folders/%s' % (folder_id,)),
                          querystring={'recursive': str(recursive).lower()})
Delete an existing folder Args: folder_id (int): ID of the folder to delete. recursive (bool): Delete all subfolder if True. Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
codesearchnet
def create(self, name, network):
    """Create a new Account object and add it to this Accounts collection.

    Args:
        name (str): Account name.
        network (str): Type of cryptocurrency. Can be one of 'bitcoin',
            'bitcoin_testnet', 'litecoin', 'dogecoin'.

    Returns:
        The new round.Account.
    """
    if network not in SUPPORTED_NETWORKS:
        raise ValueError('Network not valid!')
    account = self.wrap(self.resource.create(dict(name=name, network=network)))
    self.add(account)
    return account
Create a new Account object and add it to this Accounts collection. Args: name (str): Account name network (str): Type of cryptocurrency. Can be one of, 'bitcoin', ' bitcoin_testnet', 'litecoin', 'dogecoin'. Returns: The new round.Account
juraj-google-style
def recipe_email_to_bigquery(config, auth_read, email_from, email_to, subject, link, attachment, dataset, table, schema, header, is_incremental_load):
    """Import an emailed CM report, DV360 report, csv, or excel into BigQuery.

    Args:
        auth_read (authentication) - Credentials used for reading data.
        email_from (string) - Must match from field.
        email_to (string) - Must match to field.
        subject (string) - Regular expression to match subject.
        link (string) - Regular expression to match email.
        attachment (string) - Regular expression to match attachment.
        dataset (string) - Existing dataset in BigQuery.
        table (string) - Name of table to be written to.
        schema (json) - Schema provided in JSON list format or empty list.
        header (boolean) - Does the csv contain a header row.
        is_incremental_load (boolean) - Append report data to table based on
            date column, de-duplicates.
    """
    read_spec = {
        'from': email_from,
        'to': email_to,
        'subject': subject,
        'link': link,
        'attachment': attachment,
    }
    write_spec = {
        'bigquery': {
            'dataset': dataset,
            'table': table,
            'schema': schema,
            'header': header,
            'is_incremental_load': is_incremental_load,
        }
    }
    email(config, {'auth': auth_read, 'read': read_spec, 'write': write_spec})
Import emailed CM report, Dv360 report, csv, or excel into a BigQuery table. Args: auth_read (authentication) - Credentials used for reading data. email_from (string) - Must match from field. email_to (string) - Must match to field. subject (string) - Regular expression to match subject. link (string) - Regular expression to match email. attachment (string) - Regular expression to match atttachment. dataset (string) - Existing dataset in BigQuery. table (string) - Name of table to be written to. schema (json) - Schema provided in JSON list format or empty list. header (boolean) - Does the csv contain a header row. is_incremental_load (boolean) - Append report data to table based on date column, de-duplicates.
github-repos
def bessel_j0(x, name=None):
    """Computes the Bessel j0 function of `x` element-wise.

    Args:
        x: A `Tensor` or `SparseTensor`. Must be one of the following types:
            `half`, `float32`, `float64`.
        name: A name for the operation (optional).

    Returns:
        A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
    """
    # Thin wrapper: scope the generated op under a consistent name.
    with ops.name_scope(name, 'bessel_j0', [x]):
        return gen_special_math_ops.bessel_j0(x)
Computes the Bessel j0 function of `x` element-wise. Modified Bessel function of order 0. >>> tf.math.special.bessel_j0([0.5, 1., 2., 4.]).numpy() array([ 0.93846981, 0.76519769, 0.22389078, -0.39714981], dtype=float32) Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.j0 @end_compatibility
github-repos
def __auth_descriptor(self, api_info): if api_info.auth is None: return None auth_descriptor = {} if api_info.auth.allow_cookie_auth is not None: auth_descriptor['allowCookieAuth'] = api_info.auth.allow_cookie_auth if api_info.auth.blocked_regions: auth_descriptor['blockedRegions'] = api_info.auth.blocked_regions return auth_descriptor
Builds an auth descriptor from API info. Args: api_info: An _ApiInfo object. Returns: A dictionary with 'allowCookieAuth' and/or 'blockedRegions' keys.
juraj-google-style
def parse(self, ping_message):
    """Parse ping command output.

    Args:
        ping_message (str or :py:class:`~pingparsing.PingResult`):
            ``ping`` command output.

    Returns:
        :py:class:`~pingparsing.PingStats`: Parsed result.
    """
    try:
        # Accept PingResult-like objects by unwrapping their stdout attribute.
        if typepy.is_not_null_string(ping_message.stdout):
            ping_message = ping_message.stdout
    except AttributeError:
        pass
    logger.debug("parsing ping result: {}".format(ping_message))
    self.__parser = NullPingParser()
    if typepy.is_null_string(ping_message):
        # Nothing to parse: return empty stats.
        logger.debug("ping_message is empty")
        self.__stats = PingStats()
        return self.__stats
    ping_lines = _to_unicode(ping_message).splitlines()
    parser_class_list = (
        LinuxPingParser,
        WindowsPingParser,
        MacOsPingParser,
        AlpineLinuxPingParser,
    )
    # Try each platform-specific parser in turn; the first one that accepts
    # the output wins.
    for parser_class in parser_class_list:
        self.__parser = parser_class()
        try:
            self.__stats = self.__parser.parse(ping_lines)
            return self.__stats
        except ParseError as e:
            # A missing header means "wrong platform" -- try the next parser.
            # Any other parse error is a real failure and is re-raised.
            if e.reason != ParseErrorReason.HEADER_NOT_FOUND:
                raise e
        except pp.ParseException:
            pass
    self.__parser = NullPingParser()
    return self.__stats
Parse ping command output. Args: ping_message (str or :py:class:`~pingparsing.PingResult`): ``ping`` command output. Returns: :py:class:`~pingparsing.PingStats`: Parsed result.
juraj-google-style
def getSlicesForText(self, retina_name, body, get_fingerprint=None, start_index=0, max_results=10):
    """Get a list of slices of the text.

    Args:
        retina_name (str): The retina name (required).
        body (str): The text to be evaluated (required).
        get_fingerprint (bool): Configure if the fingerprint should be
            returned as part of the results (optional).
        start_index (int): The start-index for pagination (optional).
        max_results (int): Max results per page (optional).

    Returns:
        Array[Text]
    """
    queryParams = {
        'retina_name': retina_name,
        'start_index': start_index,
        'max_results': max_results,
        'get_fingerprint': get_fingerprint,
    }
    headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
    response = self.apiClient._callAPI('/text/slices', 'POST', queryParams, body, headerParams)
    return [text.Text(**r) for r in response.json()]
Get a list of slices of the text Args: retina_name, str: The retina name (required) body, str: The text to be evaluated (required) get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional) start_index, int: The start-index for pagination (optional) (optional) max_results, int: Max results per page (optional) (optional) Returns: Array[Text]
juraj-google-style
def get_policy(observations, hparams, action_space):
    """Get a policy network.

    Args:
        observations: observations.
        hparams: parameters.
        action_space: action space; must be a gym.spaces.Discrete.

    Returns:
        Tuple (action logits, value).

    Raises:
        ValueError: if action_space is not discrete.
    """
    if not isinstance(action_space, gym.spaces.Discrete):
        raise ValueError("Expecting discrete action space.")
    obs_shape = common_layers.shape_list(observations)
    (frame_height, frame_width) = obs_shape[2:4]
    if hparams.policy_problem_name == "dummy_policy_problem_ttt":
        tf.logging.info("Using DummyPolicyProblemTTT for the policy.")
        policy_problem = tic_tac_toe_env.DummyPolicyProblemTTT()
    else:
        tf.logging.info("Using DummyPolicyProblem for the policy.")
        policy_problem = DummyPolicyProblem(action_space, frame_height, frame_width)
    trainer_lib.add_problem_hparams(hparams, policy_problem)
    hparams.force_full_predict = True
    model = registry.model(hparams.policy_network)(
        hparams, tf.estimator.ModeKeys.TRAIN
    )
    # Older hparams sets may not define video_num_target_frames.
    try:
        num_target_frames = hparams.video_num_target_frames
    except AttributeError:
        num_target_frames = 1
    # Build zero-filled placeholder features; only "inputs" carries real data.
    features = {
        "inputs": observations,
        "input_action": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),
        "input_reward": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32),
        "targets": tf.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]),
        "target_action": tf.zeros(
            obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),
        "target_reward": tf.zeros(
            obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32),
        "target_policy": tf.zeros(
            obs_shape[:1] + [num_target_frames] + [action_space.n]),
        "target_value": tf.zeros(
            obs_shape[:1] + [num_target_frames])
    }
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        t2t_model.create_dummy_vars()
        (targets, _) = model(features)
    # Return logits/value for the first target frame only.
    return (targets["target_policy"][:, 0, :], targets["target_value"][:, 0])
Get a policy network. Args: observations: observations hparams: parameters action_space: action space Returns: Tuple (action logits, value).
juraj-google-style
def dumps(o, preserve=False):
    """Stringifies input dict as toml.

    Args:
        o: Object to dump into toml.
        preserve: Boolean parameter. If true, preserve inline tables.

    Returns:
        String containing the toml corresponding to dict.
    """
    retval = ""
    # Emit top-level key/value pairs first; nested tables come back in
    # `sections` and are rendered level by level below.
    addtoretval, sections = _dump_sections(o, "")
    retval += addtoretval
    while sections != {}:
        newsections = {}
        for section in sections:
            addtoretval, addtosections = _dump_sections(sections[section], section, preserve)
            # Write a [section] header when the section has direct values or
            # is completely empty.
            if addtoretval or (not addtoretval and not addtosections):
                if retval and retval[-2:] != "\n\n":
                    retval += "\n"
                retval += "[" + section + "]\n"
                if addtoretval:
                    retval += addtoretval
            # Queue nested tables under their dotted names for the next pass.
            for s in addtosections:
                newsections[section + "." + s] = addtosections[s]
        sections = newsections
    return retval
Stringifies input dict as toml Args: o: Object to dump into toml preserve: Boolean parameter. If true, preserve inline tables. Returns: String containing the toml corresponding to dict
juraj-google-style
def run(self, data_loaders, workflow, max_epochs, **kwargs):
    """Start running.

    Args:
        data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
            and validation.
        workflow (list[tuple]): A list of (phase, epochs) to specify the
            running order and epochs. E.g, [('train', 2), ('val', 1)] means
            running 2 epochs for training and 1 epoch for validation,
            iteratively.
        max_epochs (int): Total training epochs.
    """
    assert isinstance(data_loaders, list)
    assert mmcv.is_list_of(workflow, tuple)
    # One data loader per workflow phase.
    assert (len(data_loaders) == len(workflow))
    self._max_epochs = max_epochs
    work_dir = (self.work_dir if (self.work_dir is not None) else 'NONE')
    self.logger.info('Start running, host: %s, work_dir: %s', get_host_info(), work_dir)
    self.logger.info('workflow: %s, max: %d epochs', workflow, max_epochs)
    self.call_hook('before_run')
    while (self.epoch < max_epochs):
        for (i, flow) in enumerate(workflow):
            (mode, epochs) = flow
            # A phase may be given by name (a method on the runner) or as a
            # callable.
            if isinstance(mode, str):
                if (not hasattr(self, mode)):
                    raise ValueError('runner has no method named "{}" to run an epoch'.format(mode))
                epoch_runner = getattr(self, mode)
            elif callable(mode):
                epoch_runner = mode
            else:
                raise TypeError('mode in workflow must be a str or callable function, not {}'.format(type(mode)))
            for _ in range(epochs):
                # Stop as soon as the training budget is exhausted.
                if ((mode == 'train') and (self.epoch >= max_epochs)):
                    return
                epoch_runner(data_loaders[i], **kwargs)
    time.sleep(1)
    self.call_hook('after_run')
Start running. Args: data_loaders (list[:obj:`DataLoader`]): Dataloaders for training and validation. workflow (list[tuple]): A list of (phase, epochs) to specify the running order and epochs. E.g, [('train', 2), ('val', 1)] means running 2 epochs for training and 1 epoch for validation, iteratively. max_epochs (int): Total training epochs.
codesearchnet
def FromFile(cls, inpath):
    """Load a CommandFile from a path.

    Args:
        inpath (str): The path to the file to load.

    Returns:
        CommandFile: The decoded CommandFile object.
    """
    with open(inpath, "r") as infile:
        return cls.FromString(infile.read())
Load a CommandFile from a path. Args: inpath (str): The path to the file to load Returns: CommandFile: The decoded CommandFile object.
juraj-google-style
class PatchTSMixerForPretraining(PatchTSMixerPreTrainedModel):
    """`PatchTSMixer` for mask pretraining.

    Args:
        config (`PatchTSMixerConfig`):
            Configuration.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__(config)
        # Backbone with input masking enabled, plus a reconstruction head.
        self.model = PatchTSMixerModel(config, mask_input=True)
        self.head = PatchTSMixerPretrainHead(config=config)
        self.masked_loss = config.masked_loss
        self.use_return_dict = config.use_return_dict
        if config.post_init:
            self.post_init()

    @auto_docstring
    def forward(self, past_values: torch.Tensor, observed_mask: Optional[torch.Tensor]=None, output_hidden_states: Optional[bool]=False, return_loss: bool=True, return_dict: Optional[bool]=None) -> PatchTSMixerForPreTrainingOutput:
        return_dict = return_dict if return_dict is not None else self.use_return_dict
        # With masked loss, keep per-element losses so they can be re-weighted
        # by the mask below; otherwise reduce to a plain mean.
        if self.masked_loss is True:
            loss = torch.nn.MSELoss(reduction='none')
        else:
            loss = torch.nn.MSELoss(reduction='mean')
        model_output = self.model(past_values, observed_mask=observed_mask, output_hidden_states=output_hidden_states, return_dict=return_dict)
        if isinstance(model_output, tuple):
            model_output = PatchTSMixerModelOutput(*model_output)
        # Reconstruct the (masked) patch input from the backbone features.
        x_hat = self.head(model_output.last_hidden_state)
        if return_loss is True:
            loss_val = loss(x_hat, model_output.patch_input)
        else:
            loss_val = None
        # Average the loss over masked positions only; the epsilon guards
        # against an all-zero mask.
        if self.masked_loss is True and loss_val is not None:
            loss_val = (loss_val.mean(dim=-1) * model_output.mask).sum() / (model_output.mask.sum() + 1e-10)
        if not return_dict:
            return tuple((v for v in [loss_val, x_hat, model_output.last_hidden_state, model_output.hidden_states]))
        return PatchTSMixerForPreTrainingOutput(loss=loss_val, prediction_outputs=x_hat, last_hidden_state=model_output.last_hidden_state, hidden_states=model_output.hidden_states)
`PatchTSMixer` for mask pretraining. Args: config (`PatchTSMixerConfig`): Configuration. Returns: `None`.
github-repos
def ExportNEP2(self, passphrase):
    """Export the encrypted private key in NEP-2 format.

    Args:
        passphrase (str): The password to encrypt the private key with,
            as unicode string.

    Returns:
        str: The NEP-2 encrypted private key.

    Raises:
        ValueError: if the passphrase is shorter than 2 characters.
    """
    if (len(passphrase) < 2):
        raise ValueError('Passphrase must have a minimum of 2 characters')
    # First 4 bytes of the double-SHA256 of the address serve as the salt.
    address_hash_tmp = hashlib.sha256(self.GetAddress().encode('utf-8')).digest()
    address_hash_tmp2 = hashlib.sha256(address_hash_tmp).digest()
    address_hash = address_hash_tmp2[:4]
    # Derive the encryption key from the NFC-normalized passphrase via scrypt.
    pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8')
    derived = scrypt.hash(pwd_normalized, address_hash, N=SCRYPT_ITERATIONS, r=SCRYPT_BLOCKSIZE, p=SCRYPT_PARALLEL_FACTOR, buflen=SCRYPT_KEY_LEN_BYTES)
    derived1 = derived[:32]
    derived2 = derived[32:]
    # XOR the private key with the first half of the derived key, then
    # encrypt with AES-ECB keyed by the second half.
    xor_ed = xor_bytes(bytes(self.PrivateKey), derived1)
    cipher = AES.new(derived2, AES.MODE_ECB)
    encrypted = cipher.encrypt(xor_ed)
    # NEP-2 layout: header + flag + address hash + ciphertext,
    # Base58Check-encoded.
    assembled = bytearray()
    assembled.extend(NEP_HEADER)
    assembled.extend(NEP_FLAG)
    assembled.extend(address_hash)
    assembled.extend(encrypted)
    encrypted_key_nep2 = base58.b58encode_check(bytes(assembled))
    return encrypted_key_nep2.decode('utf-8')
Export the encrypted private key in NEP-2 format. Args: passphrase (str): The password to encrypt the private key with, as unicode string Returns: str: The NEP-2 encrypted private key
codesearchnet
def open(self, **params):
    """Open telnet connection.

    Args:
        params (dict): must contain two parameters: "ip" - ip address or
            hostname and "port" - port number.

    Example:
        params = {'port': 23, 'ip': 'localhost'}
    """
    logger.info('opening telnet')
    self.port = params['port']
    self.ip = params['ip']
    # NOTE(review): tn is reset here; _init() presumably establishes the
    # telnet session from ip/port -- confirm against _init's implementation.
    self.tn = None
    self._init()
Open telnet connection Args: params (dict), must contain two parameters "ip" - ip address or hostname and "port" - port number Example: params = {'port': 23, 'ip': 'localhost'}
juraj-google-style
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # Bidirectional streaming RPC for streaming video annotation.
    self.StreamingAnnotateVideo = channel.stream_stream(
        "/google.cloud.videointelligence.v1p3beta1.StreamingVideoIntelligenceService/StreamingAnnotateVideo",
        request_serializer=google_dot_cloud_dot_videointelligence__v1p3beta1_dot_proto_dot_video__intelligence__pb2.StreamingAnnotateVideoRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_videointelligence__v1p3beta1_dot_proto_dot_video__intelligence__pb2.StreamingAnnotateVideoResponse.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def get_site_dos(self, site):
    """Get the total Dos for a site (all orbitals).

    Args:
        site: Site in Structure associated with CompleteDos.

    Returns:
        Dos containing summed orbital densities for site.
    """
    # Sum the per-orbital densities for this site into a single density.
    site_dos = functools.reduce(add_densities, self.pdos[site].values())
    return Dos(self.efermi, self.energies, site_dos)
Get the total Dos for a site (all orbitals). Args: site: Site in Structure associated with CompleteDos. Returns: Dos containing summed orbital densities for site.
juraj-google-style
def _serialize_normalized_array(array, fmt='png', quality=70):
    """Given a normalized array, returns byte representation of image encoding.

    Args:
        array: NumPy array of dtype uint8 and range 0 to 255.
        fmt: string describing desired file format, defaults to 'png'.
        quality: specifies compression quality from 0 to 100 for lossy formats.

    Returns:
        encoded image data as bytes.
    """
    # Guard against inputs that were not normalized to an unsigned-int range.
    dtype = array.dtype
    assert np.issubdtype(dtype, np.unsignedinteger)
    assert (np.max(array) <= np.iinfo(dtype).max)
    # Expects a multi-channel last dimension.
    assert (array.shape[(- 1)] > 1)
    image = PIL.Image.fromarray(array)
    # Encode in memory rather than to a file on disk.
    image_bytes = BytesIO()
    image.save(image_bytes, fmt, quality=quality)
    image_data = image_bytes.getvalue()
    return image_data
Given a normalized array, returns byte representation of image encoding. Args: array: NumPy array of dtype uint8 and range 0 to 255 fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer
codesearchnet
def _Load(dec: '_Dec[_DecT]', filename: Path, compress: bool=False, open_function=open) -> _DecT:
    """Loads a serialized file.

    Args:
        dec: The msgspec.Decoder to use.
        filename: The file to read.
        compress: if True, the file will be opened using gzip.
        open_function: The function to open the file with.

    Returns:
        The decoded object.

    Raises:
        LoadPickleError: if there is an OSError, gzip error, or msgspec error.
    """
    try:
        with open_function(filename, 'rb') as fi:
            if compress:
                # Transparently decompress gzip payloads before decoding.
                with gzip.GzipFile(fileobj=fi) as zfi:
                    data = zfi.read()
            else:
                data = fi.read()
        return dec.decode(data)
    except (OSError, gzip.BadGzipFile, msgspec.DecodeError, msgspec.ValidationError) as e:
        # Normalize all failure modes into a single domain error, chaining
        # the original cause.
        raise LoadPickleError(filename) from e
Loads a serialized file. Args: dec: The msgspec.Decoder to use. filename: The file to read. compress: if True, the file will be opened using gzip. open_function: The function to open the file with. Returns: The decoded object. Raises: LoadPickleError, if there is an OSError, gzip error, or msgspec error.
github-repos
def __setattr__(self, key, value): if key in self.__dict__ or '_' + key in self.__dict__: object.__setattr__(self, key, value) else: self.set(key, value)
A shortcut for the 'set' method. Args: key (str): The name of the attribute to set. value (str): The value to assign to 'key'.
juraj-google-style
def mangle_scope_tree(root, toplevel):
    """Walk over a scope tree and mangle symbol names.

    Args:
        toplevel: Defines if global scope should be mangled or not.
    """
    def visit(scope):
        # The global scope (no enclosing scope) is only mangled when
        # `toplevel` is set; nested scopes are always mangled.
        if scope.get_enclosing_scope() is not None or toplevel:
            for symbol in scope.symbols:
                new_name = scope.get_next_mangled_name()
                scope.mangled[symbol] = new_name
                scope.rev_mangled[new_name] = symbol
        for child in scope.children:
            visit(child)

    visit(root)
Walk over a scope tree and mangle symbol names. Args: toplevel: Defines if global scope should be mangled or not.
juraj-google-style
def f2format(filename):
    """Wrapper performing in-place conversion of a single file.

    Args:
        filename (str): file to be converted.
    """
    print('Now converting %r...' % filename)
    # Encoding may be overridden through the F2FORMAT_ENCODING env variable.
    encoding = os.getenv('F2FORMAT_ENCODING', LOCALE_ENCODING)
    # lineno maps a 1-based line number to the offset of that line's start
    # within the concatenated file content.
    lineno = dict()
    content = list()
    with open(filename, 'r', encoding=encoding) as file:
        lineno[1] = 0
        for lnum, line in enumerate(file, start=1):
            content.append(line)
            lineno[lnum+1] = lineno[lnum] + len(line)
    string = ''.join(content)
    text = convert(string, lineno)
    # Rewrite the file in place with the converted source.
    with open(filename, 'w', encoding=encoding) as file:
        file.write(text)
Wrapper that performs the conversion of a single file. Args: - filename -- str, file to be converted
juraj-google-style
def matches_filters(self, node):
    """Returns whether the given node matches all filters.

    Args:
        node (Element): The node to evaluate.

    Returns:
        bool: Whether the given node matches.
    """
    visible = self.visible
    if self.options["text"]:
        # Build a regex from the requested text: use it directly when it is
        # already a regex, anchor it exactly when exact_text is True,
        # otherwise convert the plain string to a regex.
        if isregex(self.options["text"]):
            regex = self.options["text"]
        elif self.exact_text is True:
            regex = re.compile(r"\A{}\Z".format(re.escape(self.options["text"])))
        else:
            regex = toregex(self.options["text"])
        # Which text is compared depends on the visibility mode.
        text = normalize_text(
            node.all_text if visible == "all" else node.visible_text)
        if not regex.search(text):
            return False
    if isinstance(self.exact_text, (bytes_, str_)):
        # exact_text given as a string is matched fully anchored.
        regex = re.compile(r"\A{}\Z".format(re.escape(self.exact_text)))
        text = normalize_text(
            node.all_text if visible == "all" else node.visible_text)
        if not regex.search(text):
            return False
    if visible == "visible":
        if not node.visible:
            return False
    elif visible == "hidden":
        if node.visible:
            return False
    # Apply registered node filters: explicitly-given options first, then
    # defaults for filters that define one.
    for name, node_filter in iter(self._node_filters.items()):
        if name in self.filter_options:
            if not node_filter.matches(node, self.filter_options[name]):
                return False
        elif node_filter.has_default:
            if not node_filter.matches(node, node_filter.default):
                return False
    # Finally, apply the user-supplied predicate, if any.
    if self.options["filter"] and not self.options["filter"](node):
        return False
    return True
Returns whether the given node matches all filters. Args: node (Element): The node to evaluate. Returns: bool: Whether the given node matches.
juraj-google-style
def find_clusters(struct, connected_matrix):
    """Finds bonded clusters of atoms in the structure with periodic boundary
    conditions.

    If there are atoms that are not bonded to anything, returns [0, 1, 0]
    (for faster computation time).

    Author: "Gowoon Cheon"
    Email: "gcheon@stanford.edu"

    Args:
        struct (Structure): Input structure.
        connected_matrix: Must be made from the same structure with
            find_connected_atoms() function. Not modified by this call.

    Returns:
        max_cluster: the size of the largest cluster in the crystal structure
        min_cluster: the size of the smallest cluster in the crystal structure
        clusters: list of bonded clusters found here; clusters are formatted
            as sets of indices of atoms
    """
    n_atoms = len(struct.species)
    if n_atoms == 0:
        return [0, 0, 0]
    # An all-zero column means an atom bonded to nothing: bail out early.
    if 0 in np.sum(connected_matrix, axis=0):
        return [0, 1, 0]
    # Add the identity on a *copy* so every atom is connected to itself.
    # (The original implementation did `connected_matrix += np.eye(...)`,
    # mutating the caller's array in place.)
    adjacency = connected_matrix + np.eye(len(connected_matrix))
    clusters = []
    cluster_sizes = []
    visited = [False] * n_atoms
    for start in range(n_atoms):
        if visited[start]:
            continue
        # Iterative traversal avoids hitting the recursion limit that the
        # previous recursive implementation could trigger on large clusters.
        cluster = set()
        stack = [start]
        visited[start] = True
        while stack:
            atom = stack.pop()
            cluster.add(atom)
            for neighbor in np.where(adjacency[atom] != 0)[0]:
                if not visited[neighbor]:
                    visited[neighbor] = True
                    stack.append(int(neighbor))
        clusters.append(cluster)
        cluster_sizes.append(len(cluster))
    return [max(cluster_sizes), min(cluster_sizes), clusters]
Finds bonded clusters of atoms in the structure with periodic boundary conditions. If there are atoms that are not bonded to anything, returns [0,1,0]. (For faster computation time) Author: "Gowoon Cheon" Email: "gcheon@stanford.edu" Args: struct (Structure): Input structure connected_matrix: Must be made from the same structure with find_connected_atoms() function. Returns: max_cluster: the size of the largest cluster in the crystal structure min_cluster: the size of the smallest cluster in the crystal structure clusters: list of bonded clusters found here, clusters are formatted as sets of indices of atoms
codesearchnet
def parse_qcmetrics(metrics: dict) -> dict:
    """Parse MIP qc metrics file.

    Args:
        metrics (dict): raw YAML input from MIP qc metrics file

    Returns:
        dict: parsed data
    """
    data = {
        'versions': {
            'freebayes': metrics['program']['freebayes']['version'],
            'gatk': metrics['program']['gatk']['version'],
            'manta': metrics['program'].get('manta', {}).get('version'),
            'bcftools': metrics['program']['bcftools']['version'],
            'vep': metrics['program']['varianteffectpredictor']['version'],
        },
        'samples': [],
    }
    # plink sexcheck data may be a single "sample:sex" string or a list of
    # such strings; normalize both shapes into a sample_id -> sex mapping.
    plink_samples = {}
    plink_sexcheck = metrics['program'].get('plink_sexcheck', {}).get('sample_sexcheck')
    if isinstance(plink_sexcheck, str):
        sample_id, sex_number = plink_sexcheck.strip().split(':', 1)
        plink_samples[sample_id] = PED_SEX_MAP.get(int(sex_number))
    elif isinstance(plink_sexcheck, list):
        for sample_raw in plink_sexcheck:
            sample_id, sex_number = sample_raw.split(':', 1)
            plink_samples[sample_id] = PED_SEX_MAP.get(int(sex_number))
    for sample_id, sample_metrics in metrics['sample'].items():
        # Aggregate read counts across all per-lane bamstats entries.
        # NOTE(review): key[:-1] drops the key's last character before
        # matching '.lane' -- confirm the expected key naming convention.
        bam_stats = [values['bamstats'] for key, values in sample_metrics.items()
                     if key[:-1].endswith('.lane')]
        total_reads = sum(int(bam_stat['raw_total_sequences']) for bam_stat in bam_stats)
        total_mapped = sum(int(bam_stat['reads_mapped']) for bam_stat in bam_stats)
        # The merged-lanes entry holds the picard/chanjo metrics.
        main_key = [key for key in sample_metrics.keys() if '_lanes_' in key][0]
        hs_metrics = sample_metrics[main_key]['collecthsmetrics']['header']['data']
        multiple_inst_metrics = sample_metrics[main_key]['collectmultiplemetricsinsertsize']['header']['data']
        multiple_metrics = sample_metrics[main_key]['collectmultiplemetrics']['header']['pair']
        sample_data = {
            'at_dropout': hs_metrics['AT_DROPOUT'],
            'completeness_target': {
                10: hs_metrics['PCT_TARGET_BASES_10X'],
                20: hs_metrics['PCT_TARGET_BASES_20X'],
                50: hs_metrics['PCT_TARGET_BASES_50X'],
                100: hs_metrics['PCT_TARGET_BASES_100X'],
            },
            'duplicates': float(sample_metrics[main_key]['markduplicates']['fraction_duplicates']),
            'gc_dropout': hs_metrics['GC_DROPOUT'],
            'id': sample_id,
            'median_insert_size': multiple_inst_metrics['MEDIAN_INSERT_SIZE'],
            'mapped': total_mapped / total_reads,
            'plink_sex': plink_samples.get(sample_id),
            'predicted_sex': sample_metrics[main_key]['chanjo_sexcheck']['gender'],
            'reads': total_reads,
            'insert_size_standard_deviation': float(multiple_inst_metrics['STANDARD_DEVIATION']),
            'strand_balance': float(multiple_metrics['STRAND_BALANCE']),
            'target_coverage': float(hs_metrics['MEAN_TARGET_COVERAGE']),
        }
        data['samples'].append(sample_data)
    return data
Parse MIP qc metrics file. Args: metrics (dict): raw YAML input from MIP qc metrics file Returns: dict: parsed data
juraj-google-style
def _ParsePathSpecification(self, knowledge_base, searcher, file_system, path_specification, path_separator):
    """Parses a file system for a preprocessing attribute.

    Args:
        knowledge_base (KnowledgeBase): to fill with preprocessing information.
        searcher (dfvfs.FileSystemSearcher): file system searcher to
            preprocess the file system.
        file_system (dfvfs.FileSystem): file system to be preprocessed.
        path_specification (dfvfs.PathSpec): path specification that contains
            the artifact value data.
        path_separator (str): path segment separator.

    Raises:
        PreProcessFail: if the preprocessing fails.
    """
    try:
        file_entry = searcher.GetFileEntryByPathSpec(path_specification)
    except IOError as exception:
        # Re-map the path to the caller's separator so the error message
        # shows the path as originally specified.
        relative_path = searcher.GetRelativePath(path_specification)
        if (path_separator != file_system.PATH_SEPARATOR):
            relative_path_segments = file_system.SplitPath(relative_path)
            relative_path = '{0:s}{1:s}'.format(path_separator, path_separator.join(relative_path_segments))
        raise errors.PreProcessFail('Unable to retrieve file entry: {0:s} with error: {1!s}'.format(relative_path, exception))
    if file_entry:
        self._ParseFileEntry(knowledge_base, file_entry)
Parses a file system for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. searcher (dfvfs.FileSystemSearcher): file system searcher to preprocess the file system. file_system (dfvfs.FileSystem): file system to be preprocessed. path_specification (dfvfs.PathSpec): path specification that contains the artifact value data. path_separator (str): path segment separator. Raises: PreProcessFail: if the preprocessing fails.
codesearchnet
def remove_bucket_list_item(self, id, collection, item):
    """Removes an item from the bucket list.

    Args:
        id: the CRITs object id of the TLO. Accepts either an ObjectId or
            anything ObjectId() can coerce (e.g. a hex string).
        collection: The db collection. See main class documentation.
        item: the bucket list item to remove.

    Returns:
        The mongodb result.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # ObjectId subclasses without needlessly re-wrapping them.
    if not isinstance(id, ObjectId):
        id = ObjectId(id)
    obj = getattr(self.db, collection)
    result = obj.update(
        {'_id': id},
        {'$pull': {'bucket_list': item}}
    )
    return result
Removes an item from the bucket list Args: id: the CRITs object id of the TLO collection: The db collection. See main class documentation. item: the bucket list item to remove Returns: The mongodb result
juraj-google-style
def _GetFileSystemCacheIdentifier(self, path_spec): string_parts = [] string_parts.append(getattr(path_spec.parent, 'comparable', '')) string_parts.append('type: {0:s}'.format(path_spec.type_indicator)) return ''.join(string_parts)
Determines the file system cache identifier for the path specification. Args: path_spec (PathSpec): path specification. Returns: str: identifier of the VFS object.
codesearchnet
def regularize_cost_from_collection(name='regularize_cost'):
    """Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.

    If in replicated mode, will only regularize variables created within the
    current tower.

    Args:
        name (str): the name of the returned tensor

    Returns:
        tf.Tensor: a scalar, the total regularization cost.
    """
    ctx = get_current_tower_context()
    # Regularization only applies during training.
    if not ctx.is_training:
        return tf.constant(0, dtype=tf.float32, name='empty_' + name)
    # In replicated mode only pick up the losses belonging to this tower.
    if ctx.has_own_variables:
        losses = ctx.get_collection_in_tower(tfv1.GraphKeys.REGULARIZATION_LOSSES)
    else:
        losses = tfv1.get_collection(tfv1.GraphKeys.REGULARIZATION_LOSSES)
    if len(losses) > 0:
        logger.info("regularize_cost_from_collection() found {} regularizers "
                    "in REGULARIZATION_LOSSES collection.".format(len(losses)))

        def maploss(l):
            # Cast every loss to float32 before summing.
            assert l.dtype.is_floating, l
            if l.dtype != tf.float32:
                l = tf.cast(l, tf.float32)
            return l

        losses = [maploss(l) for l in losses]
        reg_loss = tf.add_n(losses, name=name)
        return reg_loss
    else:
        return tf.constant(0, dtype=tf.float32, name='empty_' + name)
Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``. If in replicated mode, will only regularize variables created within the current tower. Args: name (str): the name of the returned tensor Returns: tf.Tensor: a scalar, the total regularization cost.
juraj-google-style
def _BuildMessageFromTypeName(type_name, descriptor_pool):
    """Returns a protobuf message instance.

    Args:
        type_name: Fully-qualified protobuf message type name string.
        descriptor_pool: DescriptorPool instance.

    Returns:
        A Message instance of type matching type_name, or None if a
        Descriptor wasn't found matching type_name.
    """
    # NOTE(review): imported locally rather than at module level --
    # presumably to avoid an import cycle; confirm before moving it.
    from google.protobuf import symbol_database
    database = symbol_database.Default()
    try:
        message_descriptor = descriptor_pool.FindMessageTypeByName(type_name)
    except KeyError:
        # Unknown type name.
        return None
    message_type = database.GetPrototype(message_descriptor)
    return message_type()
Returns a protobuf message instance. Args: type_name: Fully-qualified protobuf message type name string. descriptor_pool: DescriptorPool instance. Returns: A Message instance of type matching type_name, or None if the a Descriptor wasn't found matching type_name.
codesearchnet
def _select_mgmt_networks(self, conf): nets = conf['nets'] mgmts = sorted( [ name for name, net in nets.iteritems() if net.get('management') is True ] ) if len(mgmts) == 0: mgmt_name = sorted((nets.keys()))[0] LOGGER.debug( 'No management network configured, selecting network %s', mgmt_name ) nets[mgmt_name]['management'] = True mgmts.append(mgmt_name) for mgmt_name in mgmts: if nets[mgmt_name].get('dns_domain_name', None) is None: nets[mgmt_name]['dns_domain_name'] = 'lago.local' return mgmts
Select management networks. If no management network is found, it will mark the first network found by sorted the network lists. Also adding default DNS domain, if none is set. Args: conf(spec): spec
juraj-google-style
def _as_document(self, partition):
    """Converts given partition to the document indexed by FTS backend.

    Args:
        partition (orm.Partition): partition to convert.

    Returns:
        dict with structure matches to BasePartitionIndex._schema.
    """
    # One space-joined line per column: id, vid, name, altname, description.
    schema = ' '.join(
        u'{} {} {} {} {}'.format(
            c.id, c.vid, c.name, c.altname, c.description)
        for c in partition.table.columns)
    # Collect (truncated) unique values from the partition statistics.
    values = ''
    for stat in partition.stats:
        if stat.uvalues :
            values += ' '.join(e[:200] for e in stat.uvalues) + '\n'

    def resum(g):
        # Summarize a gvid; fall back to the raw value when parsing fails.
        try:
            return str(GVid.parse(g).summarize())
        except KeyError:
            return g
        except ValueError:
            logger.debug("Failed to parse gvid '{}' from partition '{}' grain coverage"
                         .format(g, partition.identity.vname))
            return g

    keywords = (
        ' '.join(partition.space_coverage) + ' ' +
        ' '.join([resum(g) for g in partition.grain_coverage if resum(g)]) + ' ' +
        ' '.join(str(x) for x in partition.time_coverage)
    )
    # NOTE(review): this format string has 6 placeholders but is passed 8
    # arguments; str.format silently ignores the extras, so
    # time_description and geo_description never reach the document.
    # Confirm whether that is intended.
    doc_field = u('{} {} {} {} {} {}').format(
        values,
        schema,
        ' '.join([
            u('{}').format(partition.identity.vid),
            u('{}').format(partition.identity.id_),
            u('{}').format(partition.identity.name),
            u('{}').format(partition.identity.vname)]),
        partition.display.title,
        partition.display.description,
        partition.display.sub_description,
        partition.display.time_description,
        partition.display.geo_description
    )
    document = dict(
        vid=u('{}').format(partition.identity.vid),
        dataset_vid=u('{}').format(partition.identity.as_dataset().vid),
        title=u('{}').format(partition.table.description),
        keywords=u('{}').format(keywords),
        doc=doc_field)
    return document
Converts given partition to the document indexed by FTS backend. Args: partition (orm.Partition): partition to convert. Returns: dict with structure matches to BasePartitionIndex._schema.
juraj-google-style
def _build(self, inputs):
    """Connects the LayerNorm module into the graph.

    Args:
        inputs: a Tensor of dimensionality >= 2.

    Returns:
        normalized: layer normalized outputs with same shape as inputs.

    Raises:
        base.NotSupportedError: If `inputs` has less than 2 dimensions.
    """
    if self._axis is None:
        # Default: normalize over all non-batch dimensions.
        axis = list(range(1, inputs.shape.ndims))
    else:
        axis = self._axis
    original_dtype = inputs.dtype
    # Compute statistics in float32 for low-precision inputs; the result is
    # cast back at the end.
    if original_dtype in [tf.float16, tf.bfloat16]:
        inputs = tf.cast(inputs, tf.float32)
    if inputs.get_shape().ndims < 2:
        raise base.NotSupportedError(
            "Layer normalization expects inputs of at least rank 2."
            " Got inputs of rank {}.".format(inputs.get_shape().ndims))
    # Scale/offset parameters are shaped like the last dimension.
    params_shape = inputs.get_shape()[-1:]
    if self._scale:
        if self.GAMMA not in self._initializers:
            self._initializers[self.GAMMA] = create_gamma_initializer()
        self._gamma = tf.get_variable(
            self.GAMMA,
            shape=params_shape,
            dtype=inputs.dtype,
            initializer=self._initializers[self.GAMMA],
            partitioner=self._partitioners.get(self.GAMMA),
            regularizer=self._regularizers.get(self.GAMMA))
    else:
        self._gamma = None
    if self._offset:
        if self.BETA not in self._initializers:
            self._initializers[self.BETA] = create_beta_initializer()
        self._beta = tf.get_variable(
            self.BETA,
            shape=params_shape,
            dtype=inputs.dtype,
            initializer=self._initializers[self.BETA],
            partitioner=self._partitioners.get(self.BETA),
            regularizer=self._regularizers.get(self.BETA))
    else:
        self._beta = None
    mean, var = tf.nn.moments(inputs, axis, keep_dims=True)
    normalized = tf.nn.batch_normalization(inputs, mean, var, self._beta,
                                           self._gamma, self._eps)
    # Restore the caller's dtype.
    if original_dtype in [tf.float16, tf.bfloat16]:
        normalized = tf.cast(normalized, dtype=original_dtype)
    return normalized
Connects the LayerNorm module into the graph. Args: inputs: a Tensor of dimensionality >= 2. Returns: normalized: layer normalized outputs with same shape as inputs. Raises: base.NotSupportedError: If `inputs` has less than 2 dimensions.
juraj-google-style
def _variable_on_cpu(name, shape, initializer):
    """Helper to create a Variable stored on CPU memory.

    Args:
        name: name of the variable.
        shape: list of ints.
        initializer: initializer for Variable.

    Returns:
        Variable Tensor.
    """
    # Pin the variable to the CPU regardless of the surrounding device scope.
    with tf.device('/cpu:0'):
        dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
        var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
    return var
Helper to create a Variable stored on CPU memory. Args: name: name of the variable shape: list of ints initializer: initializer for Variable Returns: Variable Tensor
juraj-google-style
def exists_evaluator(self, index):
    """Evaluate the given exists match condition for the user attributes.

    Args:
        index: Index of the condition to be evaluated.

    Returns:
        Boolean: True if the user attributes have a non-null value for the
        given condition, otherwise False.
    """
    # The attribute name is the first element of the condition tuple.
    attr_name = self.condition_data[index][0]
    attr_value = self.attributes.get(attr_name)
    return attr_value is not None
Evaluate the given exists match condition for the user attributes. Args: index: Index of the condition to be evaluated. Returns: Boolean: True if the user attributes have a non-null value for the given condition, otherwise False.
juraj-google-style
def _schema_from_json_file_object(self, file_obj):
    """Helper function for schema_from_json that takes a file object
    describing a table schema.

    Returns:
        List of schema field objects.
    """
    json_data = json.load(file_obj)
    # Each JSON entry is the API representation of one schema field.
    return [SchemaField.from_api_repr(field) for field in json_data]
Helper function for schema_from_json that takes a file object that describes a table schema. Returns: List of schema field objects.
codesearchnet
def _check_wiremap_validity(self, wire_map, keymap, valmap):
    """Check that the wiremap is consistent.

    Check that the wiremap refers to valid wires and that
    those wires have consistent types.

    Args:
        wire_map (dict): map from (register,idx) in keymap to
            (register,idx) in valmap
        keymap (dict): a map whose keys are wire_map keys
        valmap (dict): a map whose keys are wire_map values

    Raises:
        DAGCircuitError: if wire_map not valid
    """
    for (k, v) in wire_map.items():
        # Human-readable wire names for error messages.
        kname = ('%s[%d]' % (k[0].name, k[1]))
        vname = ('%s[%d]' % (v[0].name, v[1]))
        if (k not in keymap):
            raise DAGCircuitError(('invalid wire mapping key %s' % kname))
        if (v not in valmap):
            raise DAGCircuitError(('invalid wire mapping value %s' % vname))
        # Mapped wires must be of the same Python type.
        if (type(k) is not type(v)):
            raise DAGCircuitError(('inconsistent wire_map at (%s,%s)' % (kname, vname)))
Check that the wiremap is consistent. Check that the wiremap refers to valid wires and that those wires have consistent types. Args: wire_map (dict): map from (register,idx) in keymap to (register,idx) in valmap keymap (dict): a map whose keys are wire_map keys valmap (dict): a map whose keys are wire_map values Raises: DAGCircuitError: if wire_map not valid
codesearchnet
def _random_flip(image, flip_index, random_func, scope_name):
    """Randomly (50% chance) flip an image along axis `flip_index`.

    Args:
        image: 4-D Tensor of shape `[batch, height, width, channels]` or
            3-D Tensor of shape `[height, width, channels]`.
        flip_index: Dimension along which to flip the image. Vertical is 0,
            Horizontal is 1.
        random_func: partial function for calling either stateful or
            stateless random ops with `seed` parameter specified.
        scope_name: Name of the scope in which the ops are added.

    Returns:
        A tensor of the same type and shape as `image`.

    Raises:
        ValueError: if the shape of `image` not supported.
    """
    with ops.name_scope(None, scope_name, [image]) as scope:
        image = ops.convert_to_tensor(image, name='image')
        image = _AssertAtLeast3DImage(image)
        shape = image.get_shape()

        def f_rank3():
            # Single image: draw one uniform sample and flip iff it is < 0.5.
            uniform_random = random_func(shape=[], minval=0, maxval=1.0)
            mirror_cond = math_ops.less(uniform_random, 0.5)
            result = tf_cond.cond(mirror_cond, lambda: array_ops.reverse(image, [flip_index]), lambda: image, name=scope)
            return fix_image_flip_shape(image, result)

        def f_rank4():
            # Batch of images: draw one 0/1 sample per image and blend the
            # flipped and original tensors arithmetically (note the +1 on
            # flip_index to skip the batch dimension).
            batch_size = array_ops.shape(image)[0]
            uniform_random = random_func(shape=[batch_size], minval=0, maxval=1.0)
            flips = math_ops.round(array_ops.reshape(uniform_random, [batch_size, 1, 1, 1]))
            flips = math_ops.cast(flips, image.dtype)
            flipped_input = array_ops.reverse(image, [flip_index + 1])
            return flips * flipped_input + (1 - flips) * image
        if shape.ndims is None:
            # Rank unknown at graph-construction time: decide at runtime.
            rank = array_ops.rank(image)
            return tf_cond.cond(math_ops.equal(rank, 3), f_rank3, f_rank4)
        if shape.ndims == 3:
            return f_rank3()
        elif shape.ndims == 4:
            return f_rank4()
        else:
            raise ValueError("'image' (shape %s) must have either 3 or 4 dimensions." % shape)
Randomly (50% chance) flip an image along axis `flip_index`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. flip_index: Dimension along which to flip the image. Vertical is 0, Horizontal is 1. random_func: partial function for calling either stateful or stateless random ops with `seed` parameter specified. scope_name: Name of the scope in which the ops are added. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported.
github-repos
def refine_rotation(self):
    """Helper method for refining rotation matrix by ensuring that second
    and third rows are perpendicular to the first. Gets new y vector from an
    orthogonal projection of x onto y and the new z vector from a cross
    product of the new x and y.

    Returns:
        new rotation matrix
    """
    (new_x, y) = (get_uvec(self[0]), get_uvec(self[1]))
    # Remove the component of y that lies along the new x direction.
    new_y = (y - (np.dot(new_x, y) * new_x))
    # z is perpendicular to both x and y by construction.
    new_z = np.cross(new_x, new_y)
    return SquareTensor([new_x, new_y, new_z])
Helper method for refining rotation matrix by ensuring that second and third rows are perpendicular to the first. Gets new y vector from an orthogonal projection of x onto y and the new z vector from a cross product of the new x and y Args: tol to test for rotation Returns: new rotation matrix
codesearchnet
def read_neb(self, reverse=True, terminate_on_match=True):
    """Reads NEB data. This only works with OUTCARs from both normal
    VASP NEB calculations or from the CI NEB method implemented by
    Henkelman et al.

    Args:
        reverse (bool): Read files in reverse. Defaults to True here since
            we usually want only the final value.
        terminate_on_match (bool): Whether to terminate when there is at
            least one match in each key in pattern. Defaults to True here
            since we usually want only the final value.

    Renders accessible:
        tangent_force - Final tangent force.
        energy - Final energy.
        These can be accessed under Outcar.data[key].
    """
    # energy(sigma->0) and the tangential force lines emitted by VASP /
    # CI-NEB runs.
    patterns = {'energy': 'energy\\(sigma->0\\)\\s+=\\s+([\\d\\-\\.]+)', 'tangent_force': '(NEB: projections on to tangent \\(spring, REAL\\)\\s+\\S+|tangential force \\(eV/A\\))\\s+([\\d\\-\\.]+)'}
    self.read_pattern(patterns, reverse=reverse, terminate_on_match=terminate_on_match, postprocess=str)
    # Keep a single float per key (the first match found, which with the
    # defaults corresponds to the final value in the file).
    self.data['energy'] = float(self.data['energy'][0][0])
    if self.data.get('tangent_force'):
        self.data['tangent_force'] = float(self.data['tangent_force'][0][1])
Reads NEB data. This only works with OUTCARs from both normal VASP NEB calculations or from the CI NEB method implemented by Henkelman et al. Args: reverse (bool): Read files in reverse. Defaults to false. Useful for large files, esp OUTCARs, especially when used with terminate_on_match. Defaults to True here since we usually want only the final value. terminate_on_match (bool): Whether to terminate when there is at least one match in each key in pattern. Defaults to True here since we usually want only the final value. Renders accessible: tangent_force - Final tangent force. energy - Final energy. These can be accessed under Outcar.data[key]
codesearchnet
def __predicate_object_map__(self, map_iri):
    """Collect the rr:predicateObjectMap entries for one TripleMap.

    For each predicate-object map attached to ``map_iri`` this builds a
    SimpleNamespace holding the predicate plus the object-map properties
    (constant, template, parentTriplesMap, reference, datatype, query,
    json_query, json_key, delimiters) read from the RML graph.

    Args:
        map_iri: rdflib.URIRef, the TripleMap IRI.

    Returns:
        list: SimpleNamespace objects, one per predicate-object map that
        has an rr:objectMap.
    """
    pred_obj_maps = []
    for pred_obj_map_bnode in self.rml.objects(
        subject=map_iri,
        predicate=NS_MGR.rr.predicateObjectMap.rdflib):
        pred_obj_map = SimpleNamespace()
        pred_obj_map.predicate = self.rml.value(
            subject=pred_obj_map_bnode,
            predicate=NS_MGR.rr.predicate.rdflib)
        obj_map_bnode = self.rml.value(
            subject=pred_obj_map_bnode,
            predicate=NS_MGR.rr.objectMap.rdflib)
        # A predicate-object map without an object map carries nothing we
        # can materialize; skip it.
        if obj_map_bnode is None:
            continue
        pred_obj_map.constant = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rr.constant.rdflib)
        pred_obj_map.template = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rr.template.rdflib)
        pred_obj_map.parentTriplesMap = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rr.parentTriplesMap.rdflib)
        # Track parent TripleMaps so referencing maps can be resolved later.
        if pred_obj_map.parentTriplesMap is not None:
            self.parents.add(str(pred_obj_map.parentTriplesMap))
        pred_obj_map.reference = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rr.reference.rdflib)
        pred_obj_map.datatype = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rr.datatype.rdflib)
        pred_obj_map.query = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rml.query.rdflib)
        pred_obj_map.json_query = self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rml.reference.rdflib)
        # Fall back to the logical source's json_key when the object map
        # does not define rml:key itself.
        json_key = None
        if hasattr(self.triple_maps[str(map_iri)].logicalSource, 'json_key'):
            json_key = self.triple_maps[str(map_iri)].logicalSource.json_key
        pred_obj_map.json_key = pick(self.rml.value(
            subject=obj_map_bnode,
            predicate=NS_MGR.rml.key.rdflib),
            json_key)
        pred_obj_map.delimiters = []
        # Any json_query switches the whole map into JSON-query mode.
        if pred_obj_map.json_query:
            self.use_json_qry = True
        for obj in self.rml.objects(subject=obj_map_bnode,
            predicate=NS_MGR.kds.delimiter.rdflib):
            pred_obj_map.delimiters.append(obj)
        pred_obj_maps.append(pred_obj_map)
    return pred_obj_maps
Iterates through rr:predicateObjectMaps for this TripleMap creating a SimpleNamespace for each triple map and assigning the constant, template, parentTripleMap, reference as properties. Args: ----- map_iri: rdflib.URIRef, TripleMap IRI Returns: -------- list: List of predicate_object Namespace objects
juraj-google-style
def copy_file_content(self, file_id, source_file):
    """Copy the content of one stored file into another.

    Args:
        file_id (str): UUID of the destination file whose content is
            overwritten.
        source_file (str): UUID of the file whose content is copied.

    Returns:
        None

    Raises:
        StorageArgumentException: either UUID is malformed.
        StorageForbiddenException: server returned 403.
        StorageNotFoundException: server returned 404.
        StorageException: any other 400-600 response.
    """
    # Validate both UUIDs up front, before any network traffic.
    for label, uuid in (('file_id', file_id), ('source_file', source_file)):
        if not is_valid_uuid(uuid):
            raise StorageArgumentException(
                'Invalid UUID for {0}: {1}'.format(label, uuid))
    request = self._authenticated_request
    request = request.to_endpoint('file/{}/content/'.format(file_id))
    # X-Copy-From tells the service which file's bytes to copy in.
    request = request.with_headers({'X-Copy-From': source_file})
    request.put()
Copy file content from source file to target file. Args: file_id (str): The UUID of the file whose content is written. source_file (str): The UUID of the file whose content is copied. Returns: None Raises: StorageArgumentException: Invalid arguments StorageForbiddenException: Server response code 403 StorageNotFoundException: Server response code 404 StorageException: other 400-600 error codes
juraj-google-style
def cluster_spec(self):
    """Return the union of the ClusterSpecs from all underlying resolvers.

    If every resolver exposes a job's workers as a list, the lists are
    concatenated in resolver order.  If *any* resolver exposes the job as
    a dict, all task collections for that job are treated as dicts (lists
    are keyed by index) and merged; conflicting keys raise.

    Returns:
        A ClusterSpec with host information merged from all resolvers.

    Raises:
        KeyError: if two resolvers define the same task key for a job
            that is merged as a dict.
    """
    merged_cluster = {}

    # First pass: decide, per job, whether the merged tasks are stored as
    # a list or a dict.
    for cluster_resolver in self._cluster_resolvers:
        cluster_spec = cluster_resolver.cluster_spec()
        cluster_dict = cluster_spec.as_dict()
        for job_name, tasks in cluster_dict.items():
            if job_name in merged_cluster:
                # Once any resolver exposes a dict we must keep a dict;
                # never downgrade back to a list.  (The previous code
                # reset the job to a list here, which both lost the dict
                # decision and broke single-resolver list jobs.)
                if isinstance(tasks, dict):
                    merged_cluster[job_name] = {}
            else:
                # First sighting of this job: take whichever type is
                # present.
                if isinstance(tasks, list):
                    merged_cluster[job_name] = []
                else:
                    merged_cluster[job_name] = {}

    # Second pass: merge the task definitions into the chosen containers.
    for cluster_resolver in self._cluster_resolvers:
        cluster_spec = cluster_resolver.cluster_spec()
        cluster_dict = cluster_spec.as_dict()
        for job_name, tasks in cluster_dict.items():
            if isinstance(merged_cluster[job_name], list):
                # All resolvers expose lists for this job: concatenate.
                merged_cluster[job_name].extend(tasks)
            else:
                if isinstance(tasks, list):
                    # Treat a list as a dict keyed by task index.
                    task_dict = dict(zip(range(0, len(tasks)), tasks))
                else:
                    task_dict = tasks.copy()
                task_keys = set(task_dict)
                merged_keys = set(merged_cluster[job_name].keys())
                intersected_keys = task_keys.intersection(merged_keys)
                if intersected_keys:
                    raise KeyError('Duplicate keys detected when merging two ClusterSpecs: %s' % repr(intersected_keys))
                merged_cluster[job_name].update(task_dict)
    return ClusterSpec(merged_cluster)
Returns a union of all the ClusterSpecs from the ClusterResolvers. Returns: A ClusterSpec containing host information merged from all the underlying ClusterResolvers. Raises: KeyError: If there are conflicting keys detected when merging two or more dictionaries, this exception is raised. Note: If there are multiple ClusterResolvers exposing ClusterSpecs with the same job name, we will merge the list/dict of workers. If *all* underlying ClusterSpecs expose the set of workers as lists, we will concatenate the lists of workers, starting with the list of workers from the first ClusterResolver passed into the constructor. If *any* of the ClusterSpecs expose the set of workers as a dict, we will treat all the sets of workers as dicts (even if they are returned as lists) and will only merge them into a dict if there is no conflicting keys. If there is a conflicting key, we will raise a `KeyError`.
github-repos
def ported_string(raw_data, encoding='utf-8', errors='ignore'):
    """Normalize raw input to text: ``str`` on Python 3, ``unicode`` on Python 2.

    Args:
        raw_data: Python 2 str, Python 3 bytes, or already-decoded text.
        encoding: name of the encoding to try first.
        errors: treatment of characters invalid in the input encoding
            (passed to the decoder).

    Returns:
        Stripped text; falsy input yields an empty text string.
    """
    if not raw_data:
        return six.text_type()
    # Already text: nothing to decode, just strip.
    if isinstance(raw_data, six.text_type):
        return raw_data.strip()
    if six.PY2:
        try:
            return six.text_type(raw_data, encoding, errors).strip()
        except LookupError:
            # Unknown codec name: fall back to UTF-8.
            return six.text_type(raw_data, "utf-8", errors).strip()
    if six.PY3:
        try:
            # NOTE(review): unlike the PY2 branch, this first decode does
            # not pass `errors`, so strict decoding is attempted before
            # the fallback kicks in — confirm the asymmetry is intended.
            return six.text_type(raw_data, encoding).strip()
        except (LookupError, UnicodeDecodeError):
            # Unknown codec or undecodable bytes: retry as UTF-8 with the
            # requested error policy.
            return six.text_type(raw_data, "utf-8", errors).strip()
Give as input raw data and output a str in Python 3 and unicode in Python 2. Args: raw_data: Python 2 str, Python 3 bytes or str to porting encoding: string giving the name of an encoding errors: his specifies the treatment of characters which are invalid in the input encoding Returns: str (Python 3) or unicode (Python 2)
juraj-google-style
def variables(self):
    """Return a tuple of variables owned by this module and its submodules.

    Note: this uses reflection over the instance and its submodules, so
    consider caching the result if it is queried often and not expected
    to change.

    Returns:
        A tuple of variables: the current module's (sorted by attribute
        name) followed by those of all submodules, breadth first.
    """
    # _flatten walks attributes (and submodules) keeping entries that
    # match the predicate; composite tensors are expanded into their
    # component variables.
    return tuple(self._flatten(predicate=_is_variable, expand_composites=True))
Sequence of variables owned by this module and its submodules. Note: this method uses reflection to find variables on the current instance and submodules. For performance reasons you may wish to cache the result of calling this method if you don't expect the return value to change. Returns: A sequence of variables for the current module (sorted by attribute name) followed by variables from all submodules recursively (breadth first).
github-repos
def pkg_version_list(self, pkg_id):
    """Return the installed version strings for a package id.

    Args:
        pkg_id (str): Package id of the software/component.

    Returns:
        list: installed version numbers, oldest first; empty when the
        package id is unknown.
    """
    pkg_data = self.__reg_software.get(pkg_id, None)
    if not pkg_data:
        # Unknown package id: nothing installed.
        return []
    if isinstance(pkg_data, list):
        # Some registry entries already store a plain list of versions.
        return pkg_data
    versions = list(pkg_data.get('version').keys())
    versions.sort(key=cmp_to_key(self.__oldest_to_latest_version))
    return versions
Returns information on a package. Args: pkg_id (str): Package Id of the software/component. Returns: list: List of version numbers installed.
codesearchnet
def _PrintDictAsTable(self, src_dict): key_list = list(src_dict.keys()) key_list.sort() print('|', end='') for key in key_list: print(' {0:s} |'.format(key), end='') print('') print('|', end='') for key in key_list: print(' :---: |', end='') print('') print('|', end='') for key in key_list: print(' {0!s} |'.format(src_dict[key]), end='') print('\n')
Prints a table of artifact definitions. Args: src_dict (dict[str, ArtifactDefinition]): artifact definitions by name.
codesearchnet
def compare(self, reference_model):
    """Compare the opened weights file against a reference model.

    Walks both weight trees in both directions, printing every object or
    weight that is missing from one side and every shape mismatch, then a
    summary line.

    Args:
        reference_model: Model instance to compare the saved file to.

    Returns:
        dict with keys ``'status'`` ('success' or 'error'),
        ``'error_count'`` and ``'match_count'``.
    """
    self.console.print('Running comparison')
    # Build a nested {name: spec-or-subdict} view of the reference model.
    ref_spec = {}
    get_weight_spec_of_saveable(reference_model, ref_spec)

    def _compare(target, ref_spec, inner_path, target_name, ref_name, error_count, match_count, checked_paths):
        # Recursively diff `target` against `ref_spec`, accumulating
        # counts; `checked_paths` prevents re-reporting a path when the
        # comparison runs in the reverse direction.
        base_inner_path = inner_path
        for ref_key, ref_val in ref_spec.items():
            inner_path = base_inner_path + '/' + ref_key
            if inner_path in checked_paths:
                continue
            if ref_key not in target:
                error_count += 1
                checked_paths.add(inner_path)
                if isinstance(ref_val, dict):
                    self.console.print(f'[color(160)]...Object [bold]{inner_path}[/] present in {ref_name}, missing from {target_name}[/]')
                    self.console.print(f' In {ref_name}, {inner_path} contains the following keys: {list(ref_val.keys())}')
                else:
                    self.console.print(f'[color(160)]...Weight [bold]{inner_path}[/] present in {ref_name}, missing from {target_name}[/]')
            elif isinstance(ref_val, dict):
                # NOTE(review): the recursion is seeded with the current
                # running counts and the returned totals are then added
                # back onto those same counts — this looks like it double
                # counts prior errors/matches; confirm against expected
                # summary numbers.
                _error_count, _match_count = _compare(target[ref_key], ref_spec[ref_key], inner_path, target_name, ref_name, error_count=error_count, match_count=match_count, checked_paths=checked_paths)
                error_count += _error_count
                match_count += _match_count
            elif target[ref_key].shape != ref_val.shape:
                error_count += 1
                checked_paths.add(inner_path)
                self.console.print(f'[color(160)]...Weight shape mismatch for [bold]{inner_path}[/][/]\n In {ref_name}: shape={ref_val.shape}\n In {target_name}: shape={target[ref_key].shape}')
            else:
                match_count += 1
        return (error_count, match_count)
    checked_paths = set()
    # Forward pass: reference -> saved file (finds missing/mismatched in file).
    error_count, match_count = _compare(self.weights_dict, ref_spec, inner_path='', target_name='saved file', ref_name='reference model', error_count=0, match_count=0, checked_paths=checked_paths)
    # Reverse pass: saved file -> reference (finds extras in the file);
    # matches are intentionally discarded to avoid double counting them.
    _error_count, _ = _compare(ref_spec, self.weights_dict, inner_path='', target_name='reference model', ref_name='saved file', error_count=0, match_count=0, checked_paths=checked_paths)
    error_count += _error_count
    self.console.print('─────────────────────')
    if error_count == 0:
        status = 'success'
        self.console.print('[color(28)][bold]Comparison successful:[/] saved file is compatible with the reference model[/]')
        if match_count == 1:
            plural = ''
        else:
            plural = 's'
        self.console.print(f' Found {match_count} matching weight{plural}')
    else:
        status = 'error'
        if error_count == 1:
            plural = ''
        else:
            plural = 's'
        self.console.print(f'[color(160)][bold]Found {error_count} error{plural}:[/] saved file is not compatible with the reference model[/]')
    return {'status': status, 'error_count': error_count, 'match_count': match_count}
Compares the opened file to a reference model. This method will list all mismatches between the currently opened file and the provided reference model. Args: reference_model: Model instance to compare to. Returns: Dict with the following keys: `'status'`, `'error_count'`, `'match_count'`. Status can be `'success'` or `'error'`. `'error_count'` is the number of mismatches found. `'match_count'` is the number of matching weights found.
github-repos
def sg_sugar_func(func):
    r"""Decorate `func` so it can be used as a chainable "sugar" function.

    Args:
        func: function to decorate; called as ``func(tensor, opt)``.

    Returns:
        A wrapped function whose output tensor carries the provenance
        needed to re-apply the op later via ``sg_reuse``.
    """
    @wraps(func)
    def wrapper(tensor, **kwargs):
        # Invoke the real op with kwargs packed into an sg_opt.
        out = func(tensor, tf.sg_opt(kwargs))
        # Record provenance on the output tensor: the op, its arguments
        # merged with the current context, and the input tensor.
        out._sugar = tf.sg_opt(func=func, arg=(tf.sg_opt(kwargs) + sg_get_context()), prev=tensor)
        # Bind sg_reuse as an instance method of the output tensor.
        out.sg_reuse = types.MethodType(sg_reuse, out)
        return out
    return wrapper
r""" Decorates a function `func` so that it can be a sugar function. Sugar function can be used in a chainable manner. Args: func: function to decorate Returns: A sugar function.
codesearchnet
def list_group_members(self, name):
    """Return the member names of a group.

    Args:
        name (string): Name of the group to query.

    Returns:
        (list[string]): Names of the group's members.

    Raises:
        requests.HTTPError on failure.
    """
    service = self.project_service
    # Refresh the service's auth token before the query.
    service.set_auth(self._token_project)
    return service.list_group_members(name)
Get the members of a group. Args: name (string): Name of group to query. Returns: (list[string]): List of member names. Raises: requests.HTTPError on failure.
codesearchnet
def map_exp_ids(self, exp, positions=False):
    """Map feature ids in an explanation to words (optionally with positions).

    Args:
        exp: list of (id, weight) tuples.
        positions: when True, append the word's character positions to the
            word, e.g. ('bad_3-6-12', 1).

    Returns:
        list of (word, weight) tuples, or (word_positions, weight) when
        `positions` is set.
    """
    mapped = []
    for word_id, weight in exp:
        word = self.indexed_string.word(word_id)
        if positions:
            pos = '-'.join(map(str, self.indexed_string.string_position(word_id)))
            word = '%s_%s' % (word, pos)
        mapped.append((word, weight))
    return mapped
Maps ids to words or word-position strings. Args: exp: list of tuples [(id, weight), (id,weight)] positions: if True, also return word positions Returns: list of tuples (word, weight), or (word_positions, weight) if examples: ('bad', 1) or ('bad_3-6-12', 1)
codesearchnet
def __init__(self, all_models=None):
    """Create a new repository wrapper for a model.

    Args:
        all_models: optional repository of models to adopt; when omitted
            (or falsy) a fresh empty ModelRepository is created.
    """
    self.local_models = ModelRepository()
    self.all_models = all_models if all_models else ModelRepository()
create a new repo for a model Args: all_models: models to be added to this new repository.
juraj-google-style
def decode_base64_dict(data):
    """Decode a base64-encoded array payload into a NumPy array.

    Args:
        data (dict): encoded array data with keys ``__ndarray__`` (base64
            bytes), ``dtype`` and ``shape``, as produced by
            ``encode_base64_dict``.

    Returns:
        np.ndarray
    """
    raw = base64.b64decode(data['__ndarray__'])
    # frombuffer gives a read-only view over the decoded bytes; copy so
    # the caller gets a normal writable array.
    values = np.copy(np.frombuffer(raw, dtype=data['dtype']))
    shape = data['shape']
    if len(shape) > 1:
        values = values.reshape(shape)
    return values
Decode a base64 encoded array into a NumPy array. Args: data (dict) : encoded array data to decode Data should have the format encoded by :func:`encode_base64_dict`. Returns: np.ndarray
codesearchnet
def abs_vert_pos(self, amount):
    """Set the absolute vertical print position from the top margin.

    Sends ESC ( V with the position split into low/high bytes.

    Args:
        amount: distance from the top margin, 0 to 32767 inclusive.

    Returns:
        None

    Raises:
        RuntimeError: the position is outside the valid range.
    """
    # Validate before building the command bytes.  The documented range
    # is 0..32767 inclusive (the old check wrongly rejected 0 and 32767).
    if not 0 <= amount <= 32767:
        raise RuntimeError('Invalid vertical position in function absVertPos')
    # divmod gives integer quotient/remainder; the old `amount / 256`
    # produced a float on Python 3 and made chr() raise for every input.
    high, low = divmod(amount, 256)
    self.send(chr(27) + '(' + 'V' + chr(2) + chr(0) + chr(low) + chr(high))
Specify vertical print position from the top margin position. Args: amount: The distance from the top margin you'd like, from 0 to 32767 Returns: None Raises: RuntimeError: Invalid vertical position.
juraj-google-style
def migrate_indexes(aggregate_indexes=None, forensic_indexes=None):
    """Update Elasticsearch index mappings to the current schema version.

    For each existing aggregate index whose ``published_policy.fo`` field
    is mapped as ``long``, a new versioned index is created with a
    text/keyword mapping, documents are reindexed into it, and the old
    index is deleted.

    Args:
        aggregate_indexes (list): aggregate index names to migrate.
        forensic_indexes (list): forensic index names (no migration is
            currently implemented for these).
    """
    version = 2
    if aggregate_indexes is None:
        aggregate_indexes = []
    if forensic_indexes is None:
        forensic_indexes = []
    for aggregate_index_name in aggregate_indexes:
        if not Index(aggregate_index_name).exists():
            continue
        aggregate_index = Index(aggregate_index_name)
        doc = "doc"
        fo_field = "published_policy.fo"
        fo = "fo"
        # get_field_mapping returns {index_name: {"mappings": ...}}; the
        # index name key is unknown here, so take the first (only) entry.
        fo_mapping = aggregate_index.get_field_mapping(fields=[fo_field])
        fo_mapping = fo_mapping[list(fo_mapping.keys())[0]]["mappings"]
        if doc not in fo_mapping:
            continue
        fo_mapping = fo_mapping[doc][fo_field]["mapping"][fo]
        fo_type = fo_mapping["type"]
        # Only migrate indexes still using the old numeric mapping.
        if fo_type == "long":
            new_index_name = "{0}-v{1}".format(aggregate_index_name, version)
            body = {"properties": {"published_policy.fo": {
                "type": "text",
                "fields": {
                    "keyword": {
                        "type": "keyword",
                        "ignore_above": 256
                    }
                }
            }
            }
            }
            Index(new_index_name).create()
            Index(new_index_name).put_mapping(doc_type=doc, body=body)
            # Copy all documents into the new index, then drop the old one.
            reindex(connections.get_connection(), aggregate_index_name,
                    new_index_name)
            Index(aggregate_index_name).delete()
    for forensic_index in forensic_indexes:
        # No forensic-index migrations defined yet.
        pass
Updates index mappings Args: aggregate_indexes (list): A list of aggregate index names forensic_indexes (list): A list of forensic index names
juraj-google-style
def _update(self, baseNumber, magnification):
    """Recompute ``self.value`` from the base number and magnification.

    Args:
        baseNumber: self.baseNumber, the base interval value.
        magnification: self.magnification, the scaling factor.
    """
    # The interval is the scaled base number, truncated to an integer.
    step = int(baseNumber * magnification)
    self.value = [IntegerSingle(step)]
update self.value with basenumber and time interval Args: baseNumber (str): self.baseNumber magnification (str): self.magnification
juraj-google-style
def is_datafile_valid(datafile):
    """Determine whether the given datafile is valid.

    Args:
        datafile: JSON string representing the project.

    Returns:
        Boolean: True if the datafile parses as JSON and conforms to the
        project schema, False otherwise.
    """
    try:
        datafile_json = json.loads(datafile)
    except (ValueError, TypeError):
        # Not parseable JSON, or not a str/bytes at all.  (The previous
        # bare `except:` also swallowed KeyboardInterrupt/SystemExit.)
        return False
    try:
        jsonschema.Draft4Validator(constants.JSON_SCHEMA).validate(datafile_json)
    except (jsonschema.exceptions.ValidationError,
            jsonschema.exceptions.SchemaError):
        return False
    return True
Given a datafile determine if it is valid or not. Args: datafile: JSON string representing the project. Returns: Boolean depending upon whether datafile is valid or not.
codesearchnet
class QuantoConfig(QuantizationConfigMixin):
    """Configuration for models loaded and quantized with `quanto`.

    Args:
        weights (`str`, *optional*, defaults to `"int8"`): target dtype for
            the weights after quantization; one of "float8", "int8",
            "int4", "int2".
        activations (`str`, *optional*): target dtype for the activations
            after quantization; one of None, "int8", "float8".
        modules_to_not_convert (`list`, *optional*): modules to leave in
            their original precision (e.g. Whisper encoder, Llava encoder,
            Mixtral gate layers).
    """

    def __init__(self, weights='int8', activations=None, modules_to_not_convert: Optional[List]=None, **kwargs):
        self.quant_method = QuantizationMethod.QUANTO
        self.weights = weights
        self.activations = activations
        self.modules_to_not_convert = modules_to_not_convert
        self.post_init()

    def post_init(self):
        """Validate that `weights` and `activations` hold supported values."""
        accepted_weights = ['float8', 'int8', 'int4', 'int2']
        accepted_activations = [None, 'int8', 'float8']
        if self.weights not in accepted_weights:
            raise ValueError(f'Only support weights in {accepted_weights} but found {self.weights}')
        if self.activations not in accepted_activations:
            # Bug fix: this message previously said "weights" while
            # reporting an invalid *activations* value.
            raise ValueError(f'Only support activations in {accepted_activations} but found {self.activations}')
This is a wrapper class about all possible attributes and features that you can play with a model that has been loaded using `quanto`. Args: weights (`str`, *optional*, defaults to `"int8"`): The target dtype for the weights after quantization. Supported values are ("float8","int8","int4","int2") activations (`str`, *optional*): The target dtype for the activations after quantization. Supported values are (None,"int8","float8") modules_to_not_convert (`list`, *optional*, default to `None`): The list of modules to not quantize, useful for quantizing models that explicitly require to have some modules left in their original precision (e.g. Whisper encoder, Llava encoder, Mixtral gate layers).
github-repos
def update(self, data, offset, is_last, buffer_index=0):
    """Append a block of frames to the buffer at ``buffer_index``.

    Args:
        data (np.ndarray): the frames to append.
        offset (int): index of the first frame in `data` within the
            sequence; must follow on from the frames already buffered.
        is_last (bool): whether this is the final block of the sequence.
        buffer_index (int): which buffer to update (< self.num_buffers).

    Raises:
        ValueError: the buffer index is out of range, or `offset` leaves
            a gap after the frames already buffered.
    """
    if buffer_index >= self.num_buffers:
        raise ValueError(
            'Expected buffer index < {} but got index {}.'.format(
                self.num_buffers, buffer_index))
    existing = self.buffers[buffer_index]
    if existing is not None and existing.shape[0] > 0:
        # Frames must arrive contiguously: the new block has to start
        # exactly where the buffered frames end.
        expected_next_frame = self.current_frame + existing.shape[0]
        if expected_next_frame != offset:
            raise ValueError(
                'There are missing frames. Last frame in buffer is {}. '
                'The passed frames start at {}.'.format(
                    expected_next_frame, offset))
        self.buffers[buffer_index] = np.vstack([existing, data])
    else:
        # First block for this buffer.
        self.buffers[buffer_index] = data
    self.buffers_full[buffer_index] = is_last
Update the buffer at the given index. Args: data (np.ndarray): The frames. offset (int): The index of the first frame in `data` within the sequence. is_last (bool): Whether this is the last block of frames in the sequence. buffer_index (int): The index of the buffer to update (< self.num_buffers).
codesearchnet
def load_file(filename, file_type='json', klazz=YapconfError, open_kwargs=None, load_kwargs=None):
    """Load a configuration file of the given type into a dictionary.

    Args:
        filename (str): the file to load.
        file_type (str, optional): 'json' or 'yaml' (see
            ``yapconf.FILE_TYPES``). Defaults to 'json'.
        klazz (optional): exception class raised when the loaded value is
            not a dictionary. Defaults to ``YapconfError``.
        open_kwargs (dict, optional): keyword arguments for ``open``.
        load_kwargs (dict, optional): keyword arguments for the JSON load.

    Raises:
        klazz: the file loaded successfully but did not contain a dict.

    Returns:
        dict: the parsed file contents.
    """
    _check_file_type(file_type, klazz)
    # Falsy values (None or {}) fall back to the defaults, matching the
    # previous `or` semantics.
    open_kwargs = open_kwargs or {'encoding': 'utf-8'}
    load_kwargs = load_kwargs or {}
    normalized_type = str(file_type).lower()
    with open(filename, **open_kwargs) as conf_file:
        if normalized_type == 'json':
            data = json.load(conf_file, **load_kwargs)
        elif normalized_type == 'yaml':
            data = yaml.safe_load(conf_file.read())
        else:
            raise NotImplementedError(
                'Someone forgot to implement how to load a %s file_type.' % file_type)
    if not isinstance(data, dict):
        raise klazz(
            'Successfully loaded %s, but the result was not a dictionary.' % filename)
    return data
Load a file with the given file type. Args: filename (str): The filename to load. file_type (str, optional): Defaults to 'json'. The file type for the given filename. Supported types are ``yapconf.FILE_TYPES``` klazz (optional): The custom exception to raise if something goes wrong. open_kwargs (dict, optional): Keyword arguments for the open call. load_kwargs (dict, optional): Keyword arguments for the load call. Raises: klazz: If no klazz was passed in, this will be the ``YapconfError`` Returns: dict: The dictionary from the file.
codesearchnet
def MakePmfFromItems(t, name=''):
    """Build a normalized Pmf from (value, probability) pairs.

    Args:
        t: sequence of value-probability pairs.
        name: string name for this PMF.

    Returns:
        Pmf object with probabilities normalized to sum to 1.
    """
    result = Pmf(dict(t), name)
    result.Normalize()
    return result
Makes a PMF from a sequence of value-probability pairs Args: t: sequence of value-probability pairs name: string name for this PMF Returns: Pmf object
codesearchnet
def _get_section(name, source):
    """Extract the named section (e.g. "Usage") from a usage string.

    Args:
        name: name of the section to extract.
        source: the usage string to parse.

    Returns:
        The requested section only; multiple occurrences are merged into
        one via ``_merge_section``. None when the section is absent.
    """
    # Match a line containing the section name plus any following lines
    # indented with a space or tab (the section body).
    pattern = re.compile(
        '^([^\n]*{name}[^\n]*\n?(?:[ \t].*?(?:\n|$))*)'.format(name=name),
        re.IGNORECASE | re.MULTILINE)
    merged = None
    for section in pattern.findall(source):
        merged = _merge_section(merged, section.strip())
    return merged
Extract the named section from the source. Args: name: The name of the section to extract (e.g. "Usage"). source: The usage string to parse. Returns: A string containing only the requested section. If the section appears multiple times, each instance will be merged into a single section.
juraj-google-style
def sum(self, vars_list: List[str]) -> 'TensorFluent':
    """Return the TensorFluent for the sum aggregation function.

    Args:
        vars_list: the list of variables to aggregate over.

    Returns:
        A TensorFluent wrapping the sum aggregation.
    """
    # Boolean fluents are promoted to float so tf.reduce_sum is defined.
    fluent = self
    if fluent.dtype == tf.bool:
        fluent = fluent.cast(tf.float32)
    return self._aggregation_op(tf.reduce_sum, fluent, vars_list)
Returns the TensorFluent for the sum aggregation function. Args: vars_list: The list of variables to be aggregated over. Returns: A TensorFluent wrapping the sum aggregation function.
juraj-google-style
def find_element_by_class(self, class_, update=False) -> Elements:
    """Find an element by its class.

    Args:
        class_: the class of the element to find.
        update: set True if the interface has changed since the last
            lookup.

    Returns:
        The element, if found.

    Raises:
        NoSuchElementException: the element wasn't found.

    Usage:
        element = driver.find_element_by_class('foo')
    """
    # Thin wrapper over the generic finder with the CLASS locator.
    return self.find_element(by=By.CLASS, value=class_, update=update)
Finds an element by class. Args: class_: The class of the element to be found. update: If the interface has changed, this option should be True. Returns: The element if it was found. Raises: NoSuchElementException - If the element wasn't found. Usage: element = driver.find_element_by_class('foo')
juraj-google-style
def maps_json():
    """Serve the configured map sources as a JSON object.

    Bridges the web interface and the map source collection: every
    attribute relevant for OpenLayers is converted to JSON.

    Returns:
        Response: all map sources as a JSON object keyed by source id.
    """
    sources = {}
    for source_id, source in app.config['mapsources'].items():
        layers = []
        for layer in source.layers:
            layers.append({'min_zoom': layer.min_zoom,
                           'max_zoom': layer.max_zoom,
                           # OpenLayers expects bare {x}/{y}/{z}
                           # placeholders, so strip the '$' markers.
                           'tile_url': layer.tile_url.replace('$', '')})
        sources[source_id] = {'id': source.id,
                              'name': source.name,
                              'folder': source.folder,
                              'min_zoom': source.min_zoom,
                              'max_zoom': source.max_zoom,
                              'layers': layers}
    return jsonify(sources)
Generates a json object which serves as bridge between the web interface and the map source collection. All attributes relevant for openlayers are converted into JSON and served through this route. Returns: Response: All map sources as JSON object.
codesearchnet
def add_edge(self, a, b):
    """Add an undirected edge between vertices `a` and `b`.

    Either vertex is created (with an empty adjacency set) if it does
    not already exist.

    Args:
        a (hash): one vertex of the edge.
        b (hash): the other vertex of the edge.
    """
    # setdefault creates the adjacency set on first sight of a vertex.
    self.adjacency_lists.setdefault(a, set()).add(b)
    self.adjacency_lists.setdefault(b, set()).add(a)
Used to add edges to the graph. 'a' and 'b' are vertexes and if 'a' or 'b' doesn't exisit then the vertex is created Args: a (hash): is one vertex of the edge b (hash): is another vertext of the edge
codesearchnet
def parse_compounds(compound_info, case_id, variant_type):
    """Build the list of compound objects for a variant.

    Arguments:
        compound_info (str): raw compound annotation string, formatted as
            comma-separated ``case_id:comp1|comp2|...`` entries where each
            compound is ``name>score``.
        case_id (str): unique family id; only matching entries are kept.
        variant_type (str): 'research' or 'clinical'.

    Returns:
        compounds (list(dict)): compound dicts with 'variant', 'score'
        and 'display_name' keys.
    """
    compounds = []
    if not compound_info:
        return compounds
    for family_info in compound_info.split(','):
        parts = family_info.split(':')
        # Skip entries that belong to other families.
        if parts[0] != case_id:
            continue
        for compound in parts[1].split('|'):
            fields = compound.split('>')
            compound_name = fields[0]
            entry = {}
            entry['variant'] = generate_md5_key(
                compound_name.split('_') + [variant_type, case_id])
            try:
                score = float(fields[1])
            except (TypeError, IndexError):
                # Missing or malformed score defaults to 0.0.
                score = 0.0
            entry['score'] = score
            entry['display_name'] = compound_name
            compounds.append(entry)
    return compounds
Get a list with compounds objects for this variant. Arguments: compound_info(str): A Variant dictionary case_id (str): unique family id variant_type(str): 'research' or 'clinical' Returns: compounds(list(dict)): A list of compounds
juraj-google-style