code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def find_layer_idx(model, layer_name):
    """Look up the index of the layer named `layer_name` in `model`.

    Args:
        model: The `keras.models.Model` instance.
        layer_name: The name of the layer to look up.

    Returns:
        The integer layer index.

    Raises:
        ValueError: If no layer with the given name exists in the model.
    """
    for idx, layer in enumerate(model.layers):
        if layer.name == layer_name:
            return idx
    raise ValueError("No layer with name '{}' within the model".format(layer_name))
Looks up the layer index corresponding to `layer_name` from `model`. Args: model: The `keras.models.Model` instance. layer_name: The name of the layer to lookup. Returns: The layer index if found. Raises an exception otherwise.
codesearchnet
def _build(self, inputs):
    """Assembles the module network and adds it to the graph.

    The internal computation graph is assembled according to the set of
    constraints provided at construction time.

    Args:
        inputs: Tensor containing a batch of transformation parameters.

    Returns:
        A batch of warped grids.

    Raises:
        Error: If the input tensor size is not consistent with the
            constraints passed at construction time.
    """
    input_shape = tf.shape(inputs)
    input_dtype = inputs.dtype.as_numpy_dtype
    batch_size = tf.expand_dims(input_shape[0], 0)
    number_of_params = inputs.get_shape()[1]
    if number_of_params != self._constraints.num_free_params:
        raise base.Error('Input size is not consistent with constraint definition: {} parameters expected, {} provided.'.format(self._constraints.num_free_params, number_of_params))
    # _psi holds three entries per output dimension (indexed below at i,
    # num_output_dimensions + i and 2 * num_output_dimensions + i), hence
    # the division by 3.  The original line was truncated/unbalanced here.
    num_output_dimensions = len(self._psi) // 3

    def get_input_slice(start, size):
        """Extracts a subset of columns from the input 2D Tensor."""
        return basic.SliceByDim([1], [start], [size])(inputs)

    warped_grid = []
    var_index_offset = 0
    number_of_points = np.prod(self._output_shape)
    for i in range(num_output_dimensions):  # range(): py3 (was xrange)
        if self._psi[i] is not None:
            # Learnable warp: multiply active input columns by the grid.
            grid_coord = self._psi[i].astype(input_dtype)
            num_active_vars = self._psi[i].shape[0]
            active_vars = get_input_slice(var_index_offset, num_active_vars)
            warped_coord = tf.matmul(active_vars, grid_coord)
            warped_coord = tf.expand_dims(warped_coord, 1)
            var_index_offset += num_active_vars
            offset = self._psi[num_output_dimensions + i]
            if offset is not None:
                offset = offset.astype(input_dtype)
                tiling_params = tf.concat([batch_size, tf.constant(1, shape=(1,)), tf.ones_like(offset.shape)], 0)
                offset = offset.reshape((1, 1) + offset.shape)
                warped_coord += tf.tile(offset, tiling_params)
        else:
            # Fixed coordinate: tile the constant offset over the batch.
            warped_coord = self._psi[num_output_dimensions + i].astype(input_dtype)
            tiling_params = tf.concat([batch_size, tf.constant(1, shape=(1,)), tf.ones_like(warped_coord.shape)], 0)
            warped_coord = warped_coord.reshape((1, 1) + warped_coord.shape)
            warped_coord = tf.tile(warped_coord, tiling_params)
        warped_coord += self._psi[i + 2 * num_output_dimensions]
        warped_coord.set_shape([None, 1, number_of_points])
        warped_grid.append(warped_coord)
    grid_shape = self._output_shape + (1,)
    warped_grid = [basic.BatchReshape(grid_shape)(grid) for grid in warped_grid]
    return tf.concat(warped_grid, len(grid_shape))
Assembles the module network and adds it to the graph. The internal computation graph is assembled according to the set of constraints provided at construction time. Args: inputs: Tensor containing a batch of transformation parameters. Returns: A batch of warped grids. Raises: Error: If the input tensor size is not consistent with the constraints passed at construction time.
codesearchnet
def _is_framework_filename(filename):
    """Return whether `filename` is part of the TensorFlow framework.

    A file is framework-internal if it matches none of the external
    patterns and either matches a framework filename pattern or starts
    with a framework path prefix.

    Args:
        filename: A filename string.

    Returns:
        bool: True if the filename is considered internal to the framework.
    """
    if any(pattern.search(filename) for pattern in _EXTERNAL_FILENAME_PATTERNS):
        return False
    if any(pattern.search(filename) for pattern in _FRAMEWORK_FILENAME_PATTERNS):
        return True
    return any(filename.startswith(prefix) for prefix in _FRAMEWORK_PATH_PREFIXES)
Returns whether a filename should be considered a part of the framework. A file is part of the framework if it does not match a pattern in _EXTERNAL_FILENAME_PATTERNS and it either matches a pattern in _FRAMEWORK_FILENAME_PATTERNS or starts with a _FRAMEWORK_PATH_PREFIXES prefix. Args: filename: A filename string. Returns: Whether the filename should be considered to be internal to the TensorFlow framework for the purposes of reporting errors.
github-repos
def get(self, *args, **kwargs):
    """Get the cached item for this combination of args and kwargs.

    Args:
        *args: any arguments.
        **kwargs: any keyword arguments.

    Returns:
        The cached object, or None if no unexpired item is found (so there
        is no point storing None in the cache).
    """
    if not self.enabled:
        return None
    cache_key = self.make_key(args, kwargs)
    with self._cache_lock:
        if cache_key in self._cache:
            expirytime, item = self._cache[cache_key]
            if expirytime < time():
                # Expired: drop the stale entry while we hold the lock.
                del self._cache[cache_key]
                return None
            return item
    return None
Get an item from the cache for this combination of args and kwargs. Args: *args: any arguments. **kwargs: any keyword arguments. Returns: object: The object which has been found in the cache, or `None` if no unexpired item is found. This means that there is no point storing an item in the cache if it is `None`.
juraj-google-style
def universal_transformer_highway(layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):
    """Universal Transformer step wrapped in a highway connection.

    The state is transformed by self-attention plus a transition function,
    and the new state mixes the old and transformed states via
    carry/transform gates.

    Args:
        layer_inputs: tuple of (state, inputs, memory).
        step: number of steps taken so far.
        hparams: model hyper-parameters.
        ffn_unit: feed-forward unit.
        attention_unit: multi-head attention unit.
        pad_remover: masks out padding in convolutional layers (efficiency).

    Returns:
        Tuple of (new_state, inputs, memory).
    """
    state, inputs, memory = layer_inputs
    new_state = step_preprocess(state, step, hparams)
    for i in range(hparams.num_inrecurrence_layers):
        with tf.variable_scope('rec_layer_%d' % i):
            new_state = ffn_unit(attention_unit(new_state))
    transformed_state = new_state
    # Gate inputs are selected by flags: 's'tate / 't'ransformed / 'i'nputs.
    gate_inputs = []
    if 's' in hparams.gates_inputs:
        gate_inputs.append(state)
    if 't' in hparams.gates_inputs:
        gate_inputs.append(transformed_state)
    if 'i' in hparams.gates_inputs:
        gate_inputs.append(inputs)
    gate_ffn_layer = hparams.gate_ffn_layer
    transform_gate = _ffn_layer_multi_inputs(
        gate_inputs, hparams, ffn_layer_type=gate_ffn_layer, name='transform',
        bias_initializer=tf.constant_initializer(hparams.transform_bias_init),
        activation=tf.sigmoid, pad_remover=pad_remover,
        preprocess=True, postprocess=True)
    if hparams.couple_carry_transform_gates:
        # Coupled gates: carry = 1 - transform.
        carry_gate = tf.subtract(1.0, transform_gate, name='carry')
    else:
        carry_gate = _ffn_layer_multi_inputs(
            gate_inputs, hparams, ffn_layer_type=gate_ffn_layer, name='carry',
            bias_initializer=tf.constant_initializer(-hparams.transform_bias_init),
            activation=tf.sigmoid, pad_remover=pad_remover,
            preprocess=True, postprocess=True)
    new_state = (state * carry_gate) + (transformed_state * transform_gate)
    tf.contrib.summary.scalar('highway_transform_gate_layer', tf.reduce_mean(transform_gate))
    tf.contrib.summary.scalar('highway_carry_gate_layer', tf.reduce_mean(carry_gate))
    return (new_state, inputs, memory)
Universal Transformer with highway connection. It transforms the state using a block containing self-attention and transition function and wraps the whole block with a highway connection. (the new state is a combination of the state and the transformed-state based on carry/transform gates.) Interesting observation: Controlling the carry/transform gate with the original inputs works usually better (i.e. hparams.gates_inputs="i") Args: layer_inputs: - state: state - inputs: the original embedded inputs (= inputs to the first step) step: indicates number of steps taken so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit pad_remover: to mask out padding in convolutional layers (efficiency). Returns: layer_output: new_state: new state inputs: the original embedded inputs (= inputs to the first step)
codesearchnet
def DeregisterHelper(cls, analyzer_helper):
    """Deregisters a format analyzer helper.

    Args:
        analyzer_helper (AnalyzerHelper): analyzer helper.

    Raises:
        KeyError: if no analyzer helper is registered for the helper's
            type indicator.
    """
    type_indicator = analyzer_helper.type_indicator
    if type_indicator not in cls._analyzer_helpers:
        raise KeyError(
            'Analyzer helper object not set for type indicator: {0:s}.'.format(
                type_indicator))
    # Flush using the registered helper, which may differ from the argument.
    registered_helper = cls._analyzer_helpers[type_indicator]
    cls._FlushCache(registered_helper.format_categories)
    del cls._analyzer_helpers[type_indicator]
Deregisters a format analyzer helper. Args: analyzer_helper (AnalyzerHelper): analyzer helper. Raises: KeyError: if analyzer helper object is not set for the corresponding type indicator.
juraj-google-style
def get_server_ipaddress(self, trust):
    """Return the IP address of the sender.

    Walks the Received headers and returns the first public IP found in a
    header containing the trusted relay string.

    Args:
        trust (string): string that identifies our mail server.

    Returns:
        str with the IP address, or None implicitly when nothing matches.
    """
    log.debug('Trust string is {!r}'.format(trust))
    if not trust.strip():
        return
    received = self.message.get_all('received', [])
    for header in received:
        header = ported_string(header)
        if trust not in header:
            continue
        log.debug('Trust string {!r} is in {!r}'.format(trust, header))
        # Only look at the part before 'by', which names the sending host.
        check = REGXIP.findall(header[0:header.find('by')])
        if not check:
            continue
        try:
            ip_str = six.text_type(check[-1])
            log.debug('Found sender IP {!r} in {!r}'.format(ip_str, header))
            ip = ipaddress.ip_address(ip_str)
        except ValueError:
            return
        else:
            if not ip.is_private:
                log.debug('IP {!r} not private'.format(ip_str))
                return ip_str
Return the ip address of sender Overview: Extract a reliable sender IP address heuristically for each message. Although the message format dictates a chain of relaying IP addresses in each message, a malicious relay can easily alter that. Therefore we cannot simply take the first IP in the chain. Instead, our method is as follows. First we trust the sender IP reported by our mail server in the Received headers, and if the previous relay IP address is on our trust list (e.g. other well-known mail services), we continue to follow the previous Received line, till we reach the first unrecognized IP address in the email header. From article Characterizing Botnets from Email Spam Records: Li Zhuang, J. D. Tygar In our case we trust only our mail server with the trust string. Args: trust (string): String that identify our mail server Returns: string with the ip address
codesearchnet
def risk_score(self, domains):
    """Performs Umbrella risk score analysis on the input domains.

    Args:
        domains: an enumerable of domains.

    Returns:
        An enumerable of associated domain risk scores.
    """
    return self._multi_get('opendns-risk_score', u'domains/risk-score/{0}', domains)
Performs Umbrella risk score analysis on the input domains Args: domains: an enumerable of domains Returns: An enumerable of associated domain risk scores
juraj-google-style
def load_actor_class(self, driver_id, function_descriptor):
    """Load the actor class, caching it per function id.

    Args:
        driver_id: Driver ID of the actor.
        function_descriptor: Function descriptor of the actor constructor.

    Returns:
        The actor class.
    """
    function_id = function_descriptor.function_id
    actor_class = self._loaded_actor_classes.get(function_id, None)
    if actor_class is None:
        # Cache miss: load from local code or from the GCS.
        if self._worker.load_code_from_local:
            driver_id = ray.DriverID.nil()
            actor_class = self._load_actor_from_local(driver_id, function_descriptor)
        else:
            actor_class = self._load_actor_class_from_gcs(driver_id, function_descriptor)
        self._loaded_actor_classes[function_id] = actor_class
        # Register an executor for every method of the actor class.
        module_name = function_descriptor.module_name
        actor_class_name = function_descriptor.class_name
        actor_methods = inspect.getmembers(actor_class, predicate=is_function_or_method)
        for actor_method_name, actor_method in actor_methods:
            method_descriptor = FunctionDescriptor(module_name, actor_method_name, actor_class_name)
            method_id = method_descriptor.function_id
            executor = self._make_actor_method_executor(
                actor_method_name, actor_method, actor_imported=True)
            self._function_execution_info[driver_id][method_id] = FunctionExecutionInfo(
                function=executor, function_name=actor_method_name, max_calls=0)
            self._num_task_executions[driver_id][method_id] = 0
        self._num_task_executions[driver_id][function_id] = 0
    return actor_class
Load the actor class. Args: driver_id: Driver ID of the actor. function_descriptor: Function descriptor of the actor constructor. Returns: The actor class.
juraj-google-style
def _format_field_value(self, field_name) -> str:
    """Formats a field's value for usage in SQL.

    Arguments:
        field_name: The name of the field to format the value of.

    Returns:
        The field's value formatted for usage in SQL.
    """
    normalized_name = self._normalize_field_name(field_name)
    field = self._get_model_field(normalized_name)
    raw_value = getattr(self.query.objs[0], field.attname)
    return SQLInsertCompiler.prepare_value(self, field, raw_value)
Formats a field's value for usage in SQL. Arguments: field_name: The name of the field to format the value of. Returns: The field's value formatted for usage in SQL.
juraj-google-style
def to_representation(self, instance):
    """Return the updated program data dictionary.

    A deep copy of `instance` is annotated with enrollment URLs for the
    program, its courses and their course runs; the input is not mutated.

    Arguments:
        instance (dict): The program data.

    Returns:
        dict: The updated program data.
    """
    program = copy.deepcopy(instance)
    catalog = self.context['enterprise_customer_catalog']
    program['enrollment_url'] = catalog.get_program_enrollment_url(program['uuid'])
    for course in program['courses']:
        course['enrollment_url'] = catalog.get_course_enrollment_url(course['key'])
        for run in course['course_runs']:
            run['enrollment_url'] = catalog.get_course_run_enrollment_url(run['key'])
    return program
Return the updated program data dictionary. Arguments: instance (dict): The program data. Returns: dict: The updated program data.
juraj-google-style
def bind(self, devices_to_bind):
    """List the devices to subscribe to for data.

    Must be called at least once before subscribing; subscribe will listen
    to devices bound here.

    Args:
        devices_to_bind (list): devices to listen to,
            e.g. bind(["test100", "testDemo"]).

    Returns:
        dict with 'status' ('success'/'failure') and 'response' keys.
    """
    if self.entity_api_key == '':
        return {'status': 'failure', 'response': 'No API key found in request'}
    url = self.base_url + 'api/0.1.0/subscribe/bind'
    headers = {'apikey': self.entity_api_key}
    data = {'exchange': 'amq.topic', 'keys': devices_to_bind, 'queue': self.entity_id}
    with self.no_ssl_verification():
        r = requests.post(url, json=data, headers=headers)
    response = dict()
    body = str(r.content.decode('utf-8'))
    if 'No API key' in body:
        response['status'] = 'failure'
        r = json.loads(r.content.decode('utf-8'))['message']
    elif 'bind queue ok' in body:
        response['status'] = 'success'
        r = r.content.decode('utf-8')
    else:
        response['status'] = 'failure'
        r = r.content.decode('utf-8')
    response['response'] = str(r)
    return response
This function allows an entity to list the devices to subscribe for data. This function must be called at least once, before doing a subscribe. Subscribe function will listen to devices that are bound here. Args: devices_to_bind (list): an array of devices to listen to. Example bind(["test100","testDemo"])
codesearchnet
def add_data(self, data): if not self._data: self._data = {} self._data.update(data)
Add POST data. Args: data (dict): key => value dictionary
juraj-google-style
def tunnel(container, local_port, remote_port=None, gateway_port=None):
    """Set up an SSH tunnel into the container, using the host as a gateway.

    Args:
        container: Container name or ID.
        local_port: Local port.
        remote_port: Port on the Docker container (defaults to local_port).
        gateway_port: Port on the gateway host (defaults to remote_port).
    """
    if remote_port is None:
        remote_port = local_port
    if gateway_port is None:
        gateway_port = remote_port
    remote_host = get_ip(container)
    # NOTE(review): the original command template was lost in extraction
    # (`command = % {...}`).  Reconstructed as a background ssh local
    # forward through the gateway host; confirm against project history.
    command = (
        'ssh -i %(key_filename)s -f -N '
        '-L %(local_port)s:localhost:%(gateway_port)s '
        '%(gateway_user)s@%(gateway_host)s '
        'ssh -N -L %(gateway_port)s:localhost:%(remote_port)s %(remote_host)s'
    ) % {
        'key_filename': env.key_filename,
        'local_port': local_port,
        'gateway_port': gateway_port,
        'gateway_user': env.user,
        'gateway_host': env.host,
        'remote_port': remote_port,
        'remote_host': remote_host,
    }
    # The original stripped embedded newlines from the (multi-line) template.
    command = command.replace('\n', '')
    local(command)
Set up an SSH tunnel into the container, using the host as a gateway host. Args: * container: Container name or ID * local_port: Local port * remote_port=None: Port on the Docker container (defaults to local_port) * gateway_port=None: Port on the gateway host (defaults to remote_port)
juraj-google-style
def value(self):
    """Return sub type and sub value as binary data.

    Returns:
        :class:`~pyof.foundation.basic_types.BinaryData`: the packed
        sub_type byte followed by the packed sub_value.
    """
    packed = UBInt8(self.sub_type).pack() + self.sub_value.pack()
    return BinaryData(packed)
Return sub type and sub value as binary data. Returns: :class:`~pyof.foundation.basic_types.BinaryData`: BinaryData calculated.
codesearchnet
def cluster_nodes(self, tol=0.2):
    """Cluster Voronoi nodes that are too close together using a tolerance.

    Args:
        tol (float): A distance tolerance. PBC is taken into account.
    """
    lattice = self.structure.lattice
    vfcoords = [v.frac_coords for v in self.vnodes]
    dist_matrix = np.array(lattice.get_all_distances(vfcoords, vfcoords))
    # Symmetrize and zero the diagonal so squareform() accepts the matrix.
    dist_matrix = (dist_matrix + dist_matrix.T) / 2
    for i in range(len(dist_matrix)):
        dist_matrix[i, i] = 0
    condensed_m = squareform(dist_matrix)
    z = linkage(condensed_m)
    cn = fcluster(z, tol, criterion="distance")
    merged_vnodes = []
    for n in set(cn):
        poly_indices = set()
        frac_coords = []
        for i, j in enumerate(np.where(cn == n)[0]):
            poly_indices.update(self.vnodes[j].polyhedron_indices)
            if i == 0:
                frac_coords.append(self.vnodes[j].frac_coords)
            else:
                # Use the periodic image closest to the first member.
                fcoords = self.vnodes[j].frac_coords
                d, image = lattice.get_distance_and_image(frac_coords[0], fcoords)
                frac_coords.append(fcoords + image)
        merged_vnodes.append(
            VoronoiPolyhedron(lattice, np.average(frac_coords, axis=0),
                              poly_indices, self.coords))
    self.vnodes = merged_vnodes
    logger.debug("%d vertices after combination." % len(self.vnodes))
Cluster nodes that are too close together using a tol. Args: tol (float): A distance tolerance. PBC is taken into account.
juraj-google-style
def _GetFieldPathElementIndex(api_error, field): field_path_elements = api_error['fieldPathElements'] if field_path_elements: found_index = [field_path_element['index'] for field_path_element in field_path_elements if field_path_element['field'] == field] if found_index: return found_index return None
Retrieve the index of a given field in the api_error's fieldPathElements. Args: api_error: a dict containing a partialFailureError returned from the AdWords API. field: a str field for which this determines the index in the api_error's fieldPathElements. Returns: An int index of the field path element, or None if the specified field can't be found in the api_error.
juraj-google-style
def _GetRecordValue(self, record, value_entry):
    """Retrieves a specific value from the record.

    Args:
        record (pyesedb.record): ESE record.
        value_entry (int): value entry.

    Returns:
        object: value.

    Raises:
        ValueError: if the value type is not supported.
    """
    column_type = record.get_column_type(value_entry)
    long_value = None
    if record.is_long_value(value_entry):
        long_value = record.get_value_data_as_long_value(value_entry)
    if record.is_multi_value(value_entry):
        raise ValueError('Multi value support not implemented yet.')
    if column_type == pyesedb.column_types.NULL:
        return None
    if column_type == pyesedb.column_types.BOOLEAN:
        raise ValueError('Boolean value support not implemented yet.')
    if column_type in self.INTEGER_COLUMN_TYPES:
        if long_value:
            raise ValueError('Long integer value not supported.')
        return record.get_value_data_as_integer(value_entry)
    if column_type in self.FLOATING_POINT_COLUMN_TYPES:
        if long_value:
            raise ValueError('Long floating point value not supported.')
        return record.get_value_data_as_floating_point(value_entry)
    if column_type in self.STRING_COLUMN_TYPES:
        if long_value:
            return long_value.get_data_as_string()
        return record.get_value_data_as_string(value_entry)
    if column_type == pyesedb.column_types.GUID:
        raise ValueError('GUID value support not implemented yet.')
    # Fallback: return raw value data for any other column type.
    if long_value:
        return long_value.get_data()
    return record.get_value_data(value_entry)
Retrieves a specific value from the record. Args: record (pyesedb.record): ESE record. value_entry (int): value entry. Returns: object: value. Raises: ValueError: if the value is not supported.
juraj-google-style
def _emit_op(self, nodestats: step_stats_pb2.NodeExecStats, pid: int, is_gputrace: bool) -> None:
    """Generates a Chrome Trace event to show Op execution.

    Args:
        nodestats: The 'step_stats_pb2.NodeExecStats' proto recording op
            execution.
        pid: The pid assigned for the device where this op ran.
        is_gputrace: If True then this op came from the GPUTracer.
    """
    node_name = nodestats.node_name
    start = nodestats.all_start_micros
    duration = nodestats.all_end_rel_micros
    tid = nodestats.thread_id
    inputs = []
    if is_gputrace:
        node_name, op = self._parse_kernel_label(nodestats.timeline_label, node_name)
    elif node_name == 'RecvTensor':
        op = 'RecvTensor'
    else:
        _, op, inputs = self._parse_op_label(nodestats.timeline_label)
    args = {'name': node_name, 'op': op}
    if build_info.build_info['is_rocm_build']:
        # ROCm builds embed the kernel name before '@@' in the label.
        args['kernel'] = nodestats.timeline_label.split('@@')[0]
    for index, input_name in enumerate(inputs):
        args['input%d' % index] = input_name
    self._chrome_trace.emit_region(start, duration, pid, tid, 'Op', op, args)
Generates a Chrome Trace event to show Op execution. Args: nodestats: The 'step_stats_pb2.NodeExecStats' proto recording op execution. pid: The pid assigned for the device where this op ran. is_gputrace: If True then this op came from the GPUTracer.
github-repos
def coordination_leader(cluster_spec):
    """Return the task name of the coordination service leader.

    Args:
        cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object
            specifying the cluster configurations.

    Returns:
        a string indicating the task name of the coordination service
        leader, or '' for an empty cluster.
    """
    spec = normalize_cluster_spec(cluster_spec)
    if not spec.as_dict():
        return ''
    # Leader preference order: ps, then chief, then worker.
    if 'ps' in spec.jobs:
        return '/job:ps/replica:0/task:0'
    if 'chief' in spec.jobs:
        return '/job:chief/replica:0/task:0'
    assert 'worker' in spec.jobs
    return '/job:worker/replica:0/task:0'
Return the task name of the coordination service leader. Args: cluster_spec: a dict, `ClusterDef` or `ClusterSpec` object specifying the cluster configurations. Returns: a string indicating the task name of the coordination service leader.
github-repos
def _init_vocab_from_list(self, vocab_list):
    """Initialize tokens from a list of tokens.

    Reserved tokens appearing in `vocab_list` are filtered out.  The set
    of tokens in `vocab_list` should be unique.

    Args:
        vocab_list: A list of tokens.
    """
    non_reserved = (token for token in vocab_list if token not in RESERVED_TOKENS)
    self._init_vocab(non_reserved)
Initialize tokens from a list of tokens. It is ok if reserved tokens appear in the vocab list. They will be removed. The set of tokens in vocab_list should be unique. Args: vocab_list: A list of tokens.
juraj-google-style
def switch_to_window(self, window_name):
    """Switch focus to the given window.

    Support: Web(WebView)

    Args:
        window_name(str): The window to change focus to.
    """
    self._execute(Command.SWITCH_TO_WINDOW, {'name': window_name})
Switch to the given window. Support: Web(WebView) Args: window_name(str): The window to change focus to. Returns: WebDriver Object.
juraj-google-style
def delete_ldap_group_link(self, cn, provider=None, **kwargs):
    """Delete an LDAP group link.

    Args:
        cn (str): CN of the LDAP group.
        provider (str): LDAP provider for the LDAP group.
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabDeleteError: If the server cannot perform the request.
    """
    segments = ['/groups/%s/ldap_group_links' % self.get_id()]
    if provider is not None:
        segments.append('%s' % provider)
    segments.append('%s' % cn)
    self.manager.gitlab.http_delete('/'.join(segments))
Delete an LDAP group link. Args: cn (str): CN of the LDAP group provider (str): LDAP provider for the LDAP group **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabDeleteError: If the server cannot perform the request
juraj-google-style
def detect_unused_return_values(self, f):
    """Return the nodes where the return value of a call is unused.

    Args:
        f (Function)

    Returns:
        list(Node)
    """
    values_returned = []
    nodes_origin = {}
    for node in f.nodes:
        for ir in node.irs:
            if isinstance(ir, HighLevelCall):
                # Track call results written to non-state lvalues.
                if ir.lvalue and not isinstance(ir.lvalue, StateVariable):
                    values_returned.append(ir.lvalue)
                    nodes_origin[ir.lvalue] = ir
            # Any later read of a tracked value means it is used.
            for read in ir.read:
                if read in values_returned:
                    values_returned.remove(read)
    return [nodes_origin[value].node for value in values_returned]
Return the nodes where the return value of a call is unused Args: f (Function) Returns: list(Node)
juraj-google-style
def load_parent_implems(self, parent_implems):
    """Import previously defined implementations.

    Args:
        parent_implems (ImplementationList): List of implementations
            defined in a parent class.
    """
    for trname, attr, implem in parent_implems.get_custom_implementations():
        self.implementations[trname] = implem.copy()
        self.transitions_at[trname] = attr
        self.custom_implems.add(trname)
Import previously defined implementations. Args: parent_implems (ImplementationList): List of implementations defined in a parent class.
codesearchnet
def encode(data, scheme=None, size=None):
    """Encodes `data` in a DataMatrix image.

    For now bpp is the libdmtx default, which is 24.

    Args:
        data: bytes instance.
        scheme: encoding scheme - one of `ENCODING_SCHEME_NAMES`, or `None`.
            If `None`, defaults to 'Ascii'.
        size: image dimensions - one of `ENCODING_SIZE_NAMES`, or `None`.
            If `None`, defaults to 'ShapeAuto'.

    Returns:
        Encoded: with properties `(width, height, bpp, pixels)`.  Usable as
        Image.frombytes('RGB', (width, height), pixels).

    Raises:
        PyLibDMTXError: on invalid scheme/size or encoding failure.
    """
    size = size if size else 'ShapeAuto'
    size_name = '{0}{1}'.format(ENCODING_SIZE_PREFIX, size)
    if not hasattr(DmtxSymbolSize, size_name):
        raise PyLibDMTXError('Invalid size [{0}]: should be one of {1}'.format(size, ENCODING_SIZE_NAMES))
    size = getattr(DmtxSymbolSize, size_name)
    scheme = scheme if scheme else 'Ascii'
    scheme_name = '{0}{1}'.format(ENCODING_SCHEME_PREFIX, scheme.capitalize())
    if not hasattr(DmtxScheme, scheme_name):
        raise PyLibDMTXError('Invalid scheme [{0}]: should be one of {1}'.format(scheme, ENCODING_SCHEME_NAMES))
    scheme = getattr(DmtxScheme, scheme_name)
    with _encoder() as encoder:
        dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropScheme, scheme)
        dmtxEncodeSetProp(encoder, DmtxProperty.DmtxPropSizeRequest, size)
        if dmtxEncodeDataMatrix(encoder, len(data), cast(data, c_ubyte_p)) == 0:
            raise PyLibDMTXError('Could not encode data, possibly because the image is not large enough to contain the data')
        w, h, bpp = map(
            partial(dmtxImageGetProp, encoder[0].image),
            (DmtxProperty.DmtxPropWidth, DmtxProperty.DmtxPropHeight,
             DmtxProperty.DmtxPropBitsPerPixel))
        # bpp is bits per pixel, so the buffer size in bytes is
        # width * height * bpp / 8.  The original line was truncated here.
        size = (w * h * bpp) // 8
        pixels = cast(encoder[0].image[0].pxl,
                      ctypes.POINTER(ctypes.c_ubyte * size))
        return Encoded(width=w, height=h, bpp=bpp,
                       pixels=ctypes.string_at(pixels, size))
Encodes `data` in a DataMatrix image. For now bpp is the libdmtx default which is 24 Args: data: bytes instance scheme: encoding scheme - one of `ENCODING_SCHEME_NAMES`, or `None`. If `None`, defaults to 'Ascii'. size: image dimensions - one of `ENCODING_SIZE_NAMES`, or `None`. If `None`, defaults to 'ShapeAuto'. Returns: Encoded: with properties `(width, height, bpp, pixels)`. You can use that result to build a PIL image: Image.frombytes('RGB', (width, height), pixels)
codesearchnet
def voronoi(points, buffer_percent=100):
    """Surround each input xy point with a unique Voronoi polygon.

    Arguments:
        points: A list of xy or xyz point tuples to triangulate.
        buffer_percent (optional): How much bigger than the original bbox
            of the input points to set the bbox of fake buffer points,
            to account for missing values around the edges (default 100).

    Returns:
        A list of 2-tuples of (original input point or None for buffer
        corners, corresponding Voronoi polygon point list).
    """
    # De-duplicate points on their xy position, keeping first occurrence.
    seen = set()
    uniqpoints = [p for p in points
                  if (str(p[:2]) not in seen) and (not seen.add(str(p[:2])))]
    classpoints = [_Point(*point[:2]) for point in uniqpoints]
    # Add four fake corner points around the midpoint so edge cells close.
    xs, ys = list(zip(*uniqpoints))[:2]
    pointswidth = max(xs) - min(xs)
    pointsheight = max(ys) - min(ys)
    xbuff = (pointswidth / 100.0) * buffer_percent
    ybuff = (pointsheight / 100.0) * buffer_percent
    midx = sum(xs) / float(len(xs))
    midy = sum(ys) / float(len(ys))
    bufferbox = [(midx - xbuff, midy), (midx + xbuff, midy),
                 (midx, midy + ybuff), (midx, midy - ybuff)]
    classpoints.extend(_Point(*corner) for corner in bufferbox)
    vertices, edges, poly_dict = tesselator.computeVoronoiDiagram(classpoints)
    polygons = list()
    for sitepoint, polyedges in list(poly_dict.items()):
        polyedges = [edge[1:] for edge in polyedges]
        # Chain the unordered edges into a single ring of vertex indices.
        poly = list()
        firststart, firstend = polyedges.pop(0)
        poly.append(firstend)
        while polyedges:
            curend = poly[-1]
            for i, other in enumerate(polyedges):
                otherstart, otherend = other
                if otherstart == curend:
                    poly.append(otherend)
                    polyedges.pop(i)
                    break
                elif otherend == curend:
                    poly.append(otherstart)
                    polyedges.pop(i)
                    break
        # Buffer corners are past the end of uniqpoints, so map them to None.
        try:
            sitepoint = uniqpoints[sitepoint]
        except IndexError:
            sitepoint = None
        poly = [vertices[vi] for vi in poly if vi != -1]
        polygons.append((sitepoint, poly))
    return polygons
Surrounds each point in an input list of xy tuples with a unique Voronoi polygon. Arguments: - **points**: A list of xy or xyz point tuples to triangulate. - **buffer_percent** (optional): Controls how much bigger than the original bbox of the input points to set the bbox of fake points, used to account for lacking values around the edges (default is 100 percent). Returns: - Returns a list of 2-tuples, with the first item in each tuple being the original input point (or None for each corner of the bounding box buffer), and the second item being the point's corressponding Voronoi polygon.
codesearchnet
def repair(self, volume_id_or_uri, timeout=-1):
    """Remove extra presentations from a volume on the storage system.

    Args:
        volume_id_or_uri: Either the volume id or the volume uri.
        timeout: Timeout in seconds. Waits for task completion by default;
            the timeout does not abort the operation in OneView, it only
            stops waiting for its completion.

    Returns:
        dict: Storage volume.
    """
    payload = {
        "type": "ExtraManagedStorageVolumePaths",
        "resourceUri": self._client.build_uri(volume_id_or_uri),
    }
    return self._client.create(
        payload,
        uri=self.URI + '/repair',
        timeout=timeout,
        custom_headers={'Accept-Language': 'en_US'})
Removes extra presentations from a specified volume on the storage system. Args: volume_id_or_uri: Can be either the volume id or the volume uri. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: dict: Storage volume.
juraj-google-style
def generate_ast(path):
    """Generate an Abstract Syntax Tree using the ast module.

    Args:
        path(str): The path to the file, e.g. example/foo/bar.py.

    Raises:
        IOError: if `path` is not a file.
        SyntaxError: if parsing fails even after python 2-to-3 conversion.
    """
    if not os.path.isfile(path):
        raise IOError('Input needs to be a file. Path: ' + path)
    with open(path, 'r') as f:
        try:
            tree = ast.parse(f.read())
            return PytTransformer().visit(tree)
        except SyntaxError:
            global recursive
            if not recursive:
                # One-shot fallback: convert python 2 code and retry.
                _convert_to_3(path)
                recursive = True
                return generate_ast(path)
            raise SyntaxError('The ast module can not parse the file and the python 2 to 3 conversion also failed.')
Generate an Abstract Syntax Tree using the ast module. Args: path(str): The path to the file e.g. example/foo/bar.py
codesearchnet
def _should_fetch_reason(self) -> Tuple[(bool, str)]: is_redirect = False if self._strong_redirects: try: is_redirect = self._web_client_session.redirect_tracker.is_redirect() except AttributeError: pass return self._fetch_rule.check_subsequent_web_request(self._item_session, is_redirect=is_redirect)
Return info about whether the URL should be fetched. Returns: tuple: A two item tuple: 1. bool: If True, the URL should be fetched. 2. str: A short reason string explaining the verdict.
codesearchnet
def db_en010(self, value=None):
    """Corresponds to IDD Field `db_en010`.

    Mean coincident dry-bulb temperature to enthalpy corresponding to
    1.0% annual cumulative frequency of occurrence.

    Args:
        value (float): value for IDD Field `db_en010` (unit: C).
            If None, the field is treated as a missing value and is not
            checked against the specification.

    Raises:
        ValueError: if `value` cannot be converted to float.
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `db_en010`'.format(value))
    self._db_en010 = value
Corresponds to IDD Field `db_en010` mean coincident dry-bulb temperature to Enthalpy corresponding to 1.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `db_en010` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def iter_processed_text(self, file, encoding=None, base_url=None):
    """Return the file text and processed absolute links.

    Args:
        file: A file object containing the document.
        encoding (str): The encoding of the document.
        base_url (str): The URL at which the document is located.

    Returns:
        iterator: Each item is a tuple of (text, whether the text is a link).
    """
    for text, is_link in self.iter_text(file, encoding):
        if not (is_link and base_url):
            yield (text, is_link)
            continue
        new_link = urljoin_safe(base_url, text, allow_fragments=False)
        # NOTE(review): when the join fails this yields (None, False),
        # matching the original behavior -- confirm consumers expect it.
        if new_link:
            yield (new_link, is_link)
        else:
            yield (new_link, False)
Return the file text and processed absolute links. Args: file: A file object containing the document. encoding (str): The encoding of the document. base_url (str): The URL at which the document is located. Returns: iterator: Each item is a tuple: 1. str: The text 2. bool: Whether the text a link
juraj-google-style
def _load_plugins(namespace, instantiate=True):
    """Load all the plugins for the given namespace.

    Args:
        namespace(str): Namespace string, as in the setuptools entry_points.
        instantiate(bool): If true, will instantiate the plugins too.

    Returns:
        dict of str, object: the loaded plugins keyed by extension name.
    """
    mgr = ExtensionManager(
        namespace=namespace,
        on_load_failure_callback=(
            lambda _, ep, err: LOGGER.warning('Could not load plugin {}: {}'.format(ep.name, err))))
    if instantiate:
        # Instantiate plugin classes unless already a Plugin instance.
        return {ext.name: (ext.plugin if isinstance(ext.plugin, Plugin) else ext.plugin())
                for ext in mgr}
    return {ext.name: ext.plugin for ext in mgr}
Loads all the plugins for the given namespace Args: namespace(str): Namespace string, as in the setuptools entry_points instantiate(bool): If true, will instantiate the plugins too Returns: dict of str, object: Returns the list of loaded plugins
codesearchnet
def master_key_from_seed(seed):
    """Generates a master key from a provided seed.

    Args:
        seed (bytes or str): a string of bytes or a hex string.

    Returns:
        HDPrivateKey: the master private key.

    Raises:
        ValueError: if the seed yields an invalid key.
    """
    S = get_bytes(seed)
    # BIP-32: I = HMAC-SHA512(key="Bitcoin seed", data=S); split into
    # the key material (left 32 bytes) and chain code (right 32 bytes).
    I = hmac.new(b'Bitcoin seed', S, hashlib.sha512).digest()
    Il, Ir = I[:32], I[32:]
    parse_Il = int.from_bytes(Il, 'big')
    if parse_Il == 0 or parse_Il >= bitcoin_curve.n:
        raise ValueError('Bad seed, resulting in invalid key!')
    return HDPrivateKey(key=parse_Il, chain_code=Ir, index=0, depth=0)
Generates a master key from a provided seed. Args: seed (bytes or str): a string of bytes or a hex string Returns: HDPrivateKey: the master private key.
codesearchnet
def _as_document(self, identifier):
    """Convert an identifier to the document indexed by the FTS backend.

    Args:
        identifier (dict): identifier to convert; contains at least
            'identifier', 'type' and 'name' keys.

    Returns:
        dict with a structure matching BaseIdentifierIndex._schema.
    """
    return {key: u('{}').format(identifier[key])
            for key in ('identifier', 'type', 'name')}
Converts given identifier to the document indexed by FTS backend. Args: identifier (dict): identifier to convert. Dict contains at least 'identifier', 'type' and 'name' keys. Returns: dict with structure matches to BaseIdentifierIndex._schema.
codesearchnet
def InitPathInfos(self, client_id, path_infos):
    """Initializes a collection of path info records for a client.

    Unlike `WritePathInfo`, this clears stat and hash histories of the
    paths first; intended only for data migration scripts.

    Args:
        client_id: A client identifier for which the paths are initialized.
        path_infos: A list of `rdf_objects.PathInfo` objects to write.
    """
    self.ClearPathHistory(client_id, path_infos)
    self.WritePathInfos(client_id, path_infos)
Initializes a collection of path info records for a client. Unlike `WritePathInfo`, this method clears stat and hash histories of paths associated with path info records. This method is intended to be used only in the data migration scripts. Args: client_id: A client identifier for which the paths are to be initialized. path_infos: A list of `rdf_objects.PathInfo` objects to write.
codesearchnet
async def delCronJob(self, iden):
    """Delete a cron job.

    Args:
        iden (bytes): The iden of the cron job to be deleted.

    Raises:
        s_exc.NoSuchIden: if no cron job exists with that iden.
    """
    cron = self.cell.agenda.appts.get(iden)
    if cron is None:
        raise s_exc.NoSuchIden()
    # Check permissions against the owning user before deleting.
    self._trig_auth_check(cron.useriden)
    await self.cell.agenda.delete(iden)
Delete a cron job Args: iden (bytes): The iden of the cron job to be deleted
codesearchnet
def orient_undirected_graph(self, data, graph):
    """Run GIES on an undirected graph.

    Args:
        data (pandas.DataFrame): DataFrame containing the data.
        graph (networkx.Graph): Skeleton of the graph to orient.

    Returns:
        networkx.DiGraph: Solution given by the GIES algorithm.
    """
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    self.arguments['{SCORE}'] = self.scores[self.score]
    # fixedGaps marks the pairs absent from the skeleton (1 - adjacency).
    skeleton = DataFrame(nx.adj_matrix(graph, weight=None).todense())
    fixed_gaps = DataFrame(1 - skeleton.values)
    results = self._run_gies(data, fixedGaps=fixed_gaps, verbose=self.verbose)
    relabeling = {idx: name for idx, name in enumerate(data.columns)}
    return nx.relabel_nodes(nx.DiGraph(results), relabeling)
Run GIES on an undirected graph. Args: data (pandas.DataFrame): DataFrame containing the data graph (networkx.Graph): Skeleton of the graph to orient Returns: networkx.DiGraph: Solution given by the GIES algorithm.
codesearchnet
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # One stub per RPC method of processor.Processor.
    self.Ping = channel.unary_unary(
        '/processor.Processor/Ping',
        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        response_deserializer=processor__pb2.Status.FromString,
    )
    self.Process = channel.unary_unary(
        '/processor.Processor/Process',
        request_serializer=message__pb2.Message.SerializeToString,
        response_deserializer=message__pb2.Message.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def update_video(video_data):
    """Update a Video object in the database by its edx_video_id.

    Args:
        video_data (dict): video fields including 'edx_video_id'.

    Returns:
        The edx_video_id of the successfully updated video.

    Raises:
        ValVideoNotFoundError: if the video cannot be retrieved.
        ValCannotUpdateError: if the video cannot be updated.
    """
    edx_video_id = video_data.get('edx_video_id')
    try:
        video = _get_video(edx_video_id)
    except Video.DoesNotExist:
        raise ValVideoNotFoundError(
            u'Video not found when trying to update video with edx_video_id: {0}'.format(edx_video_id))
    serializer = VideoSerializer(video, data=video_data)
    if not serializer.is_valid():
        raise ValCannotUpdateError(serializer.errors)
    serializer.save()
    return edx_video_id
Called on to update Video objects in the database update_video is used to update Video objects by the given edx_video_id in the video_data. Args: video_data (dict): { url: api url to the video edx_video_id: ID of the video duration: Length of video in seconds client_video_id: client ID of video encoded_video: a list of EncodedVideo dicts url: url of the video file_size: size of the video in bytes profile: ID of the profile courses: Courses associated with this video } Raises: Raises ValVideoNotFoundError if the video cannot be retrieved. Raises ValCannotUpdateError if the video cannot be updated. Returns the successfully updated Video object
codesearchnet
def line_on_device(device: 'cirq.google.XmonDevice',
                   length: int,
                   method: LinePlacementStrategy = None) -> GridQubitLineTuple:
    """Searches for a linear sequence of qubits on a device.

    Args:
        device: Google Xmon device instance.
        length: Desired number of qubits making up the line.
        method: Line placement method. Defaults to a fresh
            ``greedy.GreedySequenceSearchStrategy``. A ``None`` sentinel is
            used instead of instantiating the strategy in the signature, so
            a new strategy object is created on every call rather than one
            shared instance being built once at import time
            (mutable-default-argument pitfall).

    Returns:
        Line sequence search results.
    """
    if method is None:
        method = greedy.GreedySequenceSearchStrategy()
    return method.place_line(device, length)
Searches for linear sequence of qubits on device. Args: device: Google Xmon device instance. length: Desired number of qubits making up the line. method: Line placement method. Defaults to cirq.greedy.GreedySequenceSearchMethod. Returns: Line sequences search results.
codesearchnet
def update(self, value: int, force_update: bool=False, comment: Optional[str]=None):
    """Update the progress bar to `value`.

    Args:
        value (int): The value to use. Must be between 0 and `total`.
        force_update (bool, optional): Force a redraw even if the throttling
            heuristic would skip it. Defaults to False.
        comment (str, optional): A comment to show next to the bar.
    """
    self.value = value
    if comment is not None:
        self.comment = comment
    if self.last_value is None:
        # First call: initialize timing state and draw immediately.
        self.start_time = self.last_time = time.time()
        self.start_value = self.last_value = value
        self.elapsed_time = self.predicted_remaining = None
        # During warmup, the first few calls always redraw so the ETA
        # estimate stabilizes quickly.
        self.first_calls = self.warmup
        self.wait_for = 1
        self.update_bar(value)
    elif value <= self.last_value and (not force_update):
        # No forward progress and no forced redraw: skip.
        return
    elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for, self.total):
        if self.first_calls > 0:
            self.first_calls -= 1
        current_time = time.time()
        self.elapsed_time = current_time - self.start_time
        # Average speed since the first observed value; undefined until we
        # have made at least one step of progress.
        if value > self.start_value:
            self.average_time_per_item = self.elapsed_time / (value - self.start_value)
        else:
            self.average_time_per_item = None
        if value >= self.total:
            # Clamp and finish; optionally remove the bar from display.
            value = self.total
            self.predicted_remaining = None
            if not self.leave:
                self.close()
        elif self.average_time_per_item is not None:
            self.predicted_remaining = self.average_time_per_item * (self.total - value)
        self.update_bar(value)
        self.last_value = value
        self.last_time = current_time
        # Recompute how many items to wait before the next redraw so that
        # redraws happen roughly every `update_every` seconds.
        if self.average_time_per_item is None or self.average_time_per_item == 0:
            self.wait_for = 1
        else:
            self.wait_for = max(int(self.update_every / self.average_time_per_item), 1)
The main method to update the progress bar to `value`. Args: value (`int`): The value to use. Must be between 0 and `total`. force_update (`bool`, *optional*, defaults to `False`): Whether or not to force and update of the internal state and display (by default, the bar will wait for `value` to reach the value it predicted corresponds to a time of more than the `update_every` attribute since the last update to avoid adding boilerplate). comment (`str`, *optional*): A comment to add on the left of the progress bar.
github-repos
def entry_dict_from_list(all_slab_entries): entry_dict = {} for entry in all_slab_entries: hkl = tuple(entry.miller_index) if hkl not in entry_dict.keys(): entry_dict[hkl] = {} if entry.clean_entry: clean = entry.clean_entry else: clean = entry if clean not in entry_dict[hkl].keys(): entry_dict[hkl][clean] = [] if entry.adsorbates: entry_dict[hkl][clean].append(entry) return entry_dict
Converts a list of SlabEntry to an appropriate dictionary. It is assumed that if there is no adsorbate, then it is a clean SlabEntry and that adsorbed SlabEntry has the clean_entry parameter set. Args: all_slab_entries (list): List of SlabEntry objects Returns: (dict): Dictionary of SlabEntry with the Miller index as the main key to a dictionary with a clean SlabEntry as the key to a list of adsorbed SlabEntry.
juraj-google-style
def calc_timestep_statistic(self, statistic, time):
    """Calculate a statistic over the masked values of one timestep.

    Args:
        statistic: name of the statistic: 'mean', 'max', 'min', 'std',
            'ptp', 'median', 'percentile_<p>', or '<stat>_dt' for a
            backward difference of <stat> between consecutive timesteps.
        time: timestep being investigated (must appear in self.times).

    Returns:
        Value of the statistic, or np.nan for an unrecognized name.
    """
    # Index of the requested time, and flat indices selected by the mask.
    ti = np.where(self.times == time)[0][0]
    ma = np.where(self.masks[ti].ravel() == 1)
    if statistic in ['mean', 'max', 'min', 'std', 'ptp']:
        # These names map directly onto ndarray methods.
        stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)()
    elif statistic == 'median':
        stat_val = np.median(self.timesteps[ti].ravel()[ma])
    elif 'percentile' in statistic:
        # e.g. "percentile_90" -> 90th percentile of the masked values.
        per = int(statistic.split("_")[1])
        stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per)
    elif 'dt' in statistic:
        # "<stat>_dt": difference of <stat> vs. the previous timestep;
        # defined as 0 at the first timestep.
        stat_name = statistic[:-3]
        if ti == 0:
            stat_val = 0
        else:
            stat_val = self.calc_timestep_statistic(stat_name, time) -\
                self.calc_timestep_statistic(stat_name, time - 1)
    else:
        stat_val = np.nan
    return stat_val
Calculate statistics from the primary attribute of the StObject. Args: statistic: statistic being calculated time: Timestep being investigated Returns: Value of the statistic
juraj-google-style
def remove_trunk_group(self, intf, value):
    """Removes a specified trunk group from an interface.

    Args:
        intf (str): The interface name to remove the trunk group from.
        value (str): The trunk group value.

    Returns:
        True if the operation was successfully applied, otherwise False.
    """
    command = 'no switchport trunk group {}'.format(value)
    return self.configure_interface(intf, command)
Removes a specified trunk group to the interface Args: intf (str): The interface name to remove the trunk group from value (str): The trunk group value Returns: True if the operation as successfully applied otherwise false
juraj-google-style
def RegisterHasher(cls, hasher_class):
    """Registers a hasher class, keyed by its lower-cased NAME.

    Args:
        hasher_class (type): class object of the hasher.

    Raises:
        KeyError: if a hasher class is already registered under the name.
    """
    hasher_name = hasher_class.NAME.lower()
    if hasher_name in cls._hasher_classes:
        raise KeyError(
            'hasher class already set for name: {0:s}.'.format(hasher_class.NAME))
    cls._hasher_classes[hasher_name] = hasher_class
Registers a hasher class. The hasher classes are identified based on their lower case name. Args: hasher_class (type): class object of the hasher. Raises: KeyError: if hasher class is already set for the corresponding name.
juraj-google-style
def __init__(self, project, throttle_rampup=True, hint_num_workers=_Mutate._DEFAULT_HINT_NUM_WORKERS):
    """Initialize the `DeleteFromDatastore` transform.

    Args:
        project (str): The ID of the project from which entities are deleted.
        throttle_rampup (bool): Whether to enforce a gradual ramp-up.
        hint_num_workers (int): A hint for the expected number of workers,
            used to estimate appropriate limits during ramp-up throttling.
    """
    # Delegate to the shared _Mutate machinery with a delete-specific fn.
    mutate_fn = DeleteFromDatastore._DatastoreDeleteFn(project)
    super().__init__(mutate_fn, throttle_rampup, hint_num_workers)
Initialize the `DeleteFromDatastore` transform. Args: project: (:class:`str`) The ID of the project from which the entities will be deleted. throttle_rampup: Whether to enforce a gradual ramp-up. hint_num_workers: A hint for the expected number of workers, used to estimate appropriate limits during ramp-up throttling.
github-repos
def requestedFormat(request, acceptedFormat):
    """Return the response format requested by the client.

    The client may specify the format via the `format` query field or,
    failing that, the `Accept` header (in that priority order).

    Args:
        request: the incoming HTTP request.
        acceptedFormat: list containing all the accepted formats.

    Returns:
        string: the user-requested mime-type (if supported).

    Raises:
        ValueError: if the user requests an unsupported mime-type.
    """
    if 'format' not in request.args:
        # Fall back to content negotiation on the Accept header.
        return request.accept_mimetypes.best_match(acceptedFormat)
    requested = request.args.get('format')
    if requested not in acceptedFormat:
        raise ValueError('requested format not supported: ' + requested)
    return requested
Return the response format requested by the client

Client could specify the requested format using:
(options are processed in this order)
- `format` field in the http request
- `Accept` header in the http request
Example:
    requestedFormat(request, ['text/html','application/json'])
Args:
    acceptedFormat: list containing all the accepted formats
Returns:
    string: the user-requested mime-type (if supported)
Raises:
    ValueError: if the user requests a mime-type that is not supported
codesearchnet
def create(self, python=None, system_site=False, always_copy=False):
    """Create a new virtual environment.

    Args:
        python (str): The name or path of a python interpreter to use while
            creating the virtual environment.
        system_site (bool): Whether or not to use the system site packages
            within the virtual environment. Default is False.
        always_copy (bool): Whether or not to force copying instead of
            symlinking in the virtual environment. Default is False.
    """
    # Assemble the command-line flags, then join with single spaces so the
    # resulting command matches `virtualenv [flags] <path>`.
    parts = ['virtualenv']
    if python:
        parts.append('--python={0}'.format(python))
    if system_site:
        parts.append('--system-site-packages')
    if always_copy:
        parts.append('--always-copy')
    parts.append(self.path)
    self._execute(' '.join(parts))
Create a new virtual environment.

Args:
    python (str): The name or path of a python interpreter to use while
        creating the virtual environment.
    system_site (bool): Whether or not to use the system site packages
        within the virtual environment. Default is False.
    always_copy (bool): Whether or not to force copying instead of
        symlinking in the virtual environment. Default is False.
codesearchnet
def update_metric_by_name(self, metric_name, metric_type, description=None, custom_properties=None, tags=None, **kwargs):
    """Create or update a metric object.

    Args:
        metric_name (string): name of the metric.
        metric_type (string): metric type; one of 'gauge', 'counter',
            'cumulative_counter'.
        description (optional[string]): a description.
        custom_properties (optional[dict]): custom properties.
        tags (optional[list of strings]): tags associated with the metric.

    Returns:
        The decoded JSON body of the API response.
    """
    payload = {
        'type': metric_type.upper(),
        'description': description or '',
        'customProperties': custom_properties or {},
        'tags': tags or [],
    }
    url = self._u(self._METRIC_ENDPOINT_SUFFIX, str(metric_name))
    resp = self._put(url, data=payload, **kwargs)
    resp.raise_for_status()
    return resp.json()
Create or update a metric object

Args:
    metric_name (string): name of metric
    metric_type (string): metric type, must be one of 'gauge', 'counter',
        'cumulative_counter'
    description (optional[string]): a description
    custom_properties (optional[dict]): dictionary of custom properties
    tags (optional[list of strings]): list of tags associated with metric
codesearchnet
def _print_extension(self, extension: message.Message) -> None:
    """Pushes the Extension into the JSON text generator.

    If the JSON format is ANALYTIC, only the extension's url is printed;
    otherwise the full extension message is printed.

    Args:
        extension: The Extension to print.

    Raises:
        ValueError: if `extension` is not a FHIR Extension (or profile of one).
    """
    if not fhir_types.is_type_or_profile_of_extension(extension):
        raise ValueError(f'Message of type: {extension.DESCRIPTOR.full_name} is not a FHIR Extension.')
    if self.json_format == _FhirJsonFormat.ANALYTIC:
        # Analytic (BigQuery-style) output collapses an extension to its url.
        self.generator.push(f'"{cast(Any, extension).url.value}"')
    else:
        self._print_message(extension)
Pushes the Extension into the JSON text generator. If the _FhirJsonFormat is set to ANALYTIC, this method only prints the url. Args: extension: The Extension to print.
github-repos
def oauth_required(self, method):
    """Decorator that starts the OAuth 2.0 dance.

    Starts the OAuth dance for the logged in user if they haven't already
    granted access for this application.

    Args:
        method: callable, to be decorated method of a webapp.RequestHandler
            instance.
    """
    def check_oauth(request_handler, *args, **kwargs):
        # Surface any error captured during decorator setup instead of
        # proceeding with a broken flow.
        if self._in_error:
            self._display_error_message(request_handler)
            return
        user = users.get_current_user()
        if (not user):
            # No logged-in user: bounce through the App Engine login page.
            request_handler.redirect(users.create_login_url(request_handler.request.uri))
            return
        self._create_flow(request_handler)
        # Encode the request path + user into `state` so the callback can
        # verify and restore it (CSRF protection).
        self.flow.params['state'] = _build_state_value(request_handler, user)
        self.credentials = self._storage_class(self._credentials_class, None, self._credentials_property_name, user=user).get()
        if (not self.has_credentials()):
            return request_handler.redirect(self.authorize_url())
        try:
            resp = method(request_handler, *args, **kwargs)
        except client.AccessTokenRefreshError:
            # Stored credentials went stale: restart the authorization flow.
            return request_handler.redirect(self.authorize_url())
        finally:
            # Never leave per-request credentials on the shared decorator.
            self.credentials = None
        return resp
    return check_oauth
Decorator that starts the OAuth 2.0 dance. Starts the OAuth dance for the logged in user if they haven't already granted access for this application. Args: method: callable, to be decorated method of a webapp.RequestHandler instance.
codesearchnet
def two_point_effective_mass(cartesian_k_points, eigenvalues):
    """Calculate the effective mass from eigenvalues at two k-points.

    Reimplemented from Aron Walsh's original effective mass Fortran code.

    Args:
        cartesian_k_points (np.array): 2D array of the two k-points in
            (reciprocal) Cartesian coordinates.
        eigenvalues (np.array): eigenvalues at each k-point.

    Returns:
        float: The effective mass.
    """
    assert cartesian_k_points.shape[0] == 2
    assert eigenvalues.size == 2
    # |dk|: distance between the two k-points.
    dk = cartesian_k_points[1] - cartesian_k_points[0]
    dk_magnitude = np.sqrt(np.dot(dk, dk))
    # Finite-difference curvature denominator, converted eV -> Hartree.
    delta_e = ((eigenvalues[1] - eigenvalues[0]) * ev_to_hartree) * 2.0
    return (dk_magnitude * dk_magnitude) / delta_e
Calculate the effective mass given eigenvalues at two k-points. Reimplemented from Aron Walsh's original effective mass Fortran code. Args: cartesian_k_points (np.array): 2D numpy array containing the k-points in (reciprocal) Cartesian coordinates. eigenvalues (np.array): numpy array containing the eigenvalues at each k-point. Returns: (float): The effective mass
codesearchnet
def is_function_pipelined(self, tf_function, *args):
    """Returns whether `tf_function` is flagged for embedding pipelining.

    Looks for a `while` op in the traced function, then searches the loop
    body for any op carrying the pipelining attribute; its presence means
    the function will be pipelined during compilation.

    Args:
        tf_function: a tf.function.
        *args: the arguments used to trace `tf_function`.

    Returns:
        bool: True if a forward/backward pipelining attribute was found.
    """
    attr_name = tpu_embedding_v3._PIPELINE_ATTRIBUTE
    func_graph = tf_function.get_concrete_function(*args).graph
    # Locate the training while-loop in the concrete function's graph.
    while_op = None
    for op in func_graph.get_operations():
        if op.name == 'while':
            while_op = op
            break
    self.assertIsNotNone(while_op, 'while op not found')
    body_name = while_op.get_attr('body').name
    # Resolve the loop-body function; fall back to scanning the graph's
    # registered functions on older APIs that lack get_concrete_function.
    while_body_func = None
    try:
        while_body_func = func_graph.get_concrete_function(body_name)
    except AttributeError as exc:
        for func in while_op.graph._functions.values():
            if func.name.decode() == body_name:
                while_body_func = func
                break
        if while_body_func is None:
            raise ValueError('body not found') from exc
    while_body_graph = while_body_func.graph
    # Find the first op in the body that carries the pipelining attribute.
    attr_value = None
    for op in while_body_graph.get_operations():
        try:
            attr = op.get_attr(attr_name)
            logging.info('Op "%s" has pipelining attr: %s : %s', op.name, attr_name, attr)
            attr_value = attr.decode('utf-8')
            break
        except ValueError:
            # Op does not carry the attribute; keep scanning.
            pass
    has_pipelining_attr = attr_value in [tpu_embedding_v3._PIPELINE_MODE_FORWARD, tpu_embedding_v3._PIPELINE_MODE_BACKWARD]
    return has_pipelining_attr
Returns whether the tf_function is flagged for embedding pipelining.

Args:
    tf_function: a tf.function.
    *args: the arguments to the tf_function.

Returns:
    Whether the tf_function is (will be) pipelined.

This helper looks for a while loop in the provided function. It then looks
for any op that has the pipelining attribute (e.g.,
XlaSparseDenseMatmulWithCsrInput). The presence of the attribute indicates
that the function is to be pipelined during compilation.

Example usage:
    with summary_ops_v2.record_if(False):
        is_pipelined = self.is_function_pipelined(tpu_test_fn, tpu_iter)
        self.assertTrue(is_pipelined)
    with summary_ops_v2.record_if(True):
        is_pipelined = self.is_function_pipelined(tpu_test_fn, tpu_iter)
        self.assertFalse(is_pipelined)
github-repos
def _pyval_field_major_to_node_major(keys, values, depth): assert keys if depth == 0: return dict(zip(keys, values)) nvals = len(values[0]) assert all((nvals == len(values[i]) for i in range(1, len(values)))) return [_pyval_field_major_to_node_major(keys, value_slice, depth - 1) for value_slice in zip(*values)]
Regroup each field (k, v) from dict-of-list to list-of-dict. Given a "field-major" encoding of the StructuredTensor (which maps each key to a single nested list containing the values for all structs), return a corresponding "node-major" encoding, consisting of a nested list of dicts. Args: keys: The field names (list of string). Must not be empty. values: The field values (list of python values). Must have the same length as `keys`. depth: The list depth at which dictionaries should be created. Returns: A nested list of dict, with depth `depth`.
github-repos
def __init__(self, problems, schedule, **kwargs):
    """Creates a MultiProblem object.

    Args:
        problems: A list of problem.Problem objects.
        schedule: A schedule tuple; see encode_schedule for details.
        **kwargs: Keyword arguments forwarded to problem.Problem.__init__.
    """
    super(MultiProblemV2, self).__init__(**kwargs)
    self.problems = problems
    self.schedule = schedule
Creates a MultiProblem object. Args: problems: A list of problem.Problem objects. schedule: A schedule tuple, see encode_schedule for details. **kwargs: Keywords for problem.Problem.__init__.
juraj-google-style
def videos(self, **kwargs):
    """Get the videos added to a TV season (trailers, teasers, etc.).

    Args:
        language: (optional) ISO 639 code.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    url_path = self._get_series_id_season_number_path('videos')
    api_response = self._GET(url_path, kwargs)
    # Mirror the response fields onto this object for attribute access.
    self._set_attrs_to_values(api_response)
    return api_response
Get the videos that have been added to a TV season (trailers, teasers, etc...).

Args:
    language: (optional) ISO 639 code.

Returns:
    A dict representation of the JSON returned from the API.
juraj-google-style
def init_cache(self, batch_size, max_length):
    """Initializes the cache for fast auto-regressive decoding.

    Args:
        batch_size (int): batch size of the initialized cache.
        max_length (int): maximum decoding length; defines the sequence
            length of the initialized cache.

    Returns:
        The `cache` collection from a dummy initialization of the module.
    """
    # Dummy all-ones inputs: only shapes/dtypes matter for cache creation.
    input_ids = jnp.ones((batch_size, max_length))
    attention_mask = jnp.ones_like(input_ids)
    position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
    # init_cache=True makes the module allocate its autoregressive cache.
    init_variables = self.module.init(jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True)
    return init_variables['cache']
Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache.
github-repos
def get_if_deleted(self, addresses):
    """Check a batch of addresses for deletion.

    Args:
        addresses (list of str): The addresses to check.

    Returns:
        list: For each address, the address if it has been deleted,
        otherwise None.
    """
    # Hold the lock across the whole batch so results are consistent.
    with self._lock:
        return [self._get_if_deleted(address) for address in addresses]
Returns a list of addresses that have been deleted, or None if it hasn't been deleted. Args: addresses (list of str): The addresses to check if deleted. Returns: (list of str): The addresses, if deleted, or None.
codesearchnet
def _pad_image(self, images: 'torch.tensor', size_divisibility: int=32) -> 'torch.tensor':
    """Pads an image (or batch) so width and height are divisible by
    `size_divisibility`.

    Args:
        images (torch.tensor): Image(s) to pad, channels-first.
        size_divisibility (int, optional, defaults to 32): The width and
            height will be padded (bottom/right) to multiples of this.

    Returns:
        torch.tensor: The padded image(s).
    """
    height, width = get_image_size(images, channel_dim=ChannelDimension.FIRST)
    # -x % d is 0 when x is already a multiple of d, else d - (x % d).
    pad_bottom = -height % size_divisibility
    pad_right = -width % size_divisibility
    if pad_bottom or pad_right:
        # F.pad padding order: (left, top, right, bottom).
        images = F.pad(images, (0, 0, pad_right, pad_bottom))
    return images
Pads an image or batched images constantly so that width and height are divisible by size_divisibility Args: image (`torch,tensor`): Image to pad. size_divisibility (`int`, *optional*, defaults to 32): The width and height of the image will be padded to be divisible by this number.
github-repos
def __init__(self, parent):
    """Build the chat panel: a channel-ID entry, a message entry, and a
    Send button that dispatches the current message.

    Args:
        parent: parent tkinter widget this frame is placed in.
    """
    super(ChatFrame, self).__init__(parent, padding=8, text="Chat")
    # Backing variables for the two entry widgets.
    self.channel = tk.StringVar()
    self.message = tk.StringVar()
    # Channel ID row: label over entry, stretching horizontally.
    self.channel_frame = ttk.Frame(self)
    self.channel_frame.grid(column=0, row=0, sticky="W E")
    self.channel_label = ttk.Label(self.channel_frame, text="Channel ID:")
    self.channel_label.grid(column=0, row=0, sticky="W E")
    self.channel_box = ttk.Entry(self.channel_frame, textvariable=self.channel)
    self.channel_box.grid(column=0, row=1, sticky="W E")
    self.channel_frame.columnconfigure(0, weight=1)
    # Message row: label over entry, stretching horizontally.
    self.message_frame = ttk.Frame(self)
    self.message_frame.grid(column=0, row=1, pady=8, sticky="W E")
    self.message_label = ttk.Label(self.message_frame, text="Message:")
    self.message_label.grid(column=0, row=0, sticky="W E")
    self.message_box = ttk.Entry(self.message_frame, textvariable=self.message)
    self.message_box.grid(column=0, row=1, sticky="W E")
    self.message_frame.columnconfigure(0, weight=1)
    # Send button invokes add_current_message (defined on this class).
    self.send_button = ttk.Button(self, command=lambda: self.add_current_message(), text="Send")
    self.send_button.grid(column=0, row=2, sticky="W")
    self.columnconfigure(0, weight=1)
Send messages from the bot Args: parent:
juraj-google-style
def get_process_exit_code(self, task_type, task_id):
    """Returns the subprocess exit code for the given task.

    Args:
        task_type: The task type.
        task_id: The task id.

    Returns:
        The subprocess exit code; None if the subprocess has not exited yet.

    Raises:
        KeyError: If no subprocess is registered for `task_type`/`task_id`.
    """
    with self._process_lock:
        process = self._processes[task_type, task_id]
        if not process:
            return None
        return process.exitcode
Returns the subprocess exit code given the task type and task id. Args: task_type: The task type. task_id: The task id. Returns: The subprocess exit code; `None` if the subprocess has not exited yet. Raises: KeyError: If the corresponding subprocess is not found with `task_type` and `task_id`.
github-repos
def normalize_tuple(value, n, name):
    """Transforms a single integer or iterable of integers into a tuple.

    Args:
        value: The value to validate and convert; an int or any iterable
            of ints.
        n: The size of the tuple to be returned.
        name: The name of the argument being validated (e.g. "strides");
            used only to format error messages.

    Returns:
        A tuple of n integers.

    Raises:
        ValueError: If anything other than an int or an iterable of n ints
            was passed.
    """
    # A lone int is broadcast to all n positions.
    if isinstance(value, int):
        return (value,) * n
    error_msg = f'Argument `{name}` must be a tuple of {str(n)} integers. Received: {str(value)}'
    try:
        value_tuple = tuple(value)
    except TypeError:
        raise ValueError(error_msg)
    if len(value_tuple) != n:
        raise ValueError(error_msg)
    # Every element must be convertible to int.
    for single_value in value_tuple:
        try:
            int(single_value)
        except (ValueError, TypeError):
            raise ValueError(error_msg + f' including element {str(single_value)} of type {str(type(single_value))}')
    return value_tuple
Transforms a single integer or iterable of integers into an integer tuple. Args: value: The value to validate and convert. Could an int, or any iterable of ints. n: The size of the tuple to be returned. name: The name of the argument being validated, e.g. "strides" or "kernel_size". This is only used to format error messages. Returns: A tuple of n integers. Raises: ValueError: If something else than an int/long or iterable thereof was passed.
github-repos
def __init__(self, examples):
    """Constructs the WitConfigBuilder object.

    Args:
        examples: A list of tf.Example or tf.SequenceExample proto objects
            to display in WIT. If no model to infer these examples with is
            specified later, WIT displays the examples for exploration
            without running inference.
    """
    self.config = {}
    self.set_examples(examples)
    # Defaults: classification task with an empty label vocabulary.
    self.set_model_type('classification')
    self.set_label_vocab([])
Constructs the WitConfigBuilder object.

Args:
    examples: A list of tf.Example or tf.SequenceExample proto objects.
        These are the examples that will be displayed in WIT. If no model
        to infer these examples with is specified through the methods on
        this class, then WIT will display the examples for exploration,
        but no model inference will be performed by the tool.
juraj-google-style
def apply_scissor(self, new_band_gap):
    """Apply a scissor operator (shift of the CBM) to fit the given band gap.

    For a metal, the highest band crossing the Fermi level is located and
    everything from that band upward is shifted by `new_band_gap`. This
    will not work all the time for metals! For an insulator, all bands at
    or above the CBM energy are shifted so the gap equals `new_band_gap`.

    Args:
        new_band_gap: the band gap the scissored band structure should have.

    Returns:
        A LobsterBandStructureSymmLine object with the scissor shift applied.
    """
    if self.is_metal():
        # Find the highest-index band that crosses the Fermi level (has
        # eigenvalues both below and above efermi) for either spin.
        max_index = -1000
        for i in range(self.nb_bands):
            below = False
            above = False
            for j in range(len(self.kpoints)):
                if self.bands[Spin.up][i][j] < self.efermi:
                    below = True
                if self.bands[Spin.up][i][j] > self.efermi:
                    above = True
            if above and below:
                if i > max_index:
                    max_index = i
            if self.is_spin_polarized:
                below = False
                above = False
                for j in range(len(self.kpoints)):
                    if self.bands[Spin.down][i][j] < self.efermi:
                        below = True
                    if self.bands[Spin.down][i][j] > self.efermi:
                        above = True
                if above and below:
                    if i > max_index:
                        max_index = i
        old_dict = self.as_dict()
        shift = new_band_gap
        # Shift every band from max_index upward by the requested gap.
        for spin in old_dict['bands']:
            for k in range(len(old_dict['bands'][spin])):
                for v in range(len(old_dict['bands'][spin][k])):
                    if k >= max_index:
                        old_dict['bands'][spin][k][v] = \
                            old_dict['bands'][spin][k][v] + shift
    else:
        # Insulator: shift so that the resulting gap equals new_band_gap.
        shift = new_band_gap - self.get_band_gap()['energy']
        old_dict = self.as_dict()
        for spin in old_dict['bands']:
            for k in range(len(old_dict['bands'][spin])):
                for v in range(len(old_dict['bands'][spin][k])):
                    if old_dict['bands'][spin][k][v] >= \
                            old_dict['cbm']['energy']:
                        old_dict['bands'][spin][k][v] = \
                            old_dict['bands'][spin][k][v] + shift
        # Keep the Fermi level consistent with the shifted conduction bands.
        old_dict['efermi'] = old_dict['efermi'] + shift
    return LobsterBandStructureSymmLine.from_dict(old_dict)
Apply a scissor operator (shift of the CBM) to fit the given band gap. If it's a metal. We look for the band crossing the fermi level and shift this one up. This will not work all the time for metals! Args: new_band_gap: the band gap the scissor band structure need to have. Returns: a BandStructureSymmLine object with the applied scissor shift
juraj-google-style
def SetConfiguredUsers(self, users):
    """Set the list of configured Google user accounts.

    The list is written to a temporary file first and then copied into
    place, so the users file is replaced atomically from readers' view.

    Args:
        users: list, the username strings of the Linux accounts.
    """
    prefix = (self.logger.name + '-')
    with tempfile.NamedTemporaryFile(mode='w', prefix=prefix, delete=True) as updated_users:
        updated_users_file = updated_users.name
        # One username per line.
        for user in users:
            updated_users.write((user + '\n'))
        updated_users.flush()
        if (not os.path.exists(self.google_users_dir)):
            os.makedirs(self.google_users_dir)
        shutil.copy(updated_users_file, self.google_users_file)
    # mode=384 is 0o600: owner (root) read/write only.
    file_utils.SetPermissions(self.google_users_file, mode=384, uid=0, gid=0)
Set the list of configured Google user accounts. Args: users: list, the username strings of the Linux accounts.
codesearchnet
def get_model():
    """Get the NN model being analyzed from the Flask request context.

    Loads the model lazily and caches it on `g` so subsequent calls within
    the same request reuse the instance.

    Returns:
        instance of models.model.Model (or derived class).
    """
    if (not hasattr(g, 'model')):
        g.model = load_model(current_app.config['MODEL_CLS_PATH'], current_app.config['MODEL_CLS_NAME'], current_app.config['MODEL_LOAD_ARGS'])
    return g.model
Get the NN model that's being analyzed from the request context. Put the model in the request context if it is not yet there. Returns: instance of :class:`.models.model.Model` or derived class
codesearchnet
def price(self, market: pmd.ProcessedMarketData, name: Optional[str]=None):
    """Returns the present value of the stream on the valuation date.

    Args:
        market: An instance of `ProcessedMarketData`.
        name: Python str. The name to give the ops created by this function.
            Default value: `None` which maps to '<stream name>_price'.

    Returns:
        A `Tensor` of shape `batch_shape` containing the modeled price of
        each stream based on the input market data.
    """
    name = name or self._name + '_price'
    with tf.name_scope(name):
        discount_curve = get_discount_curve(self._discount_curve_type, market, self._mask)
        discount_factors = discount_curve.discount_factor(self._payment_dates)
        _, cashflows = self.cashflows(market)
        # PV = sum over payment dates of discounted cashflows.
        cashflow_pvs = cashflows * discount_factors
        return tf.math.reduce_sum(cashflow_pvs, axis=1)
Returns the present value of the stream on the valuation date. Args: market: An instance of `ProcessedMarketData`. name: Python str. The name to give to the ops created by this function. Default value: `None` which maps to 'price'. Returns: A `Tensor` of shape `batch_shape` containing the modeled price of each stream based on the input market data.
github-repos
def get_servo_torque(self):
    """Gets the current torque (PWM value) of the Herkulex servo.

    Sends a RAM read request for the PWM register and decodes the
    two-byte reply.

    Returns:
        int: the torque on the servo shaft, range -1023 to 1023.

    Raises:
        HerkulexError: if communication with the motors fails.
    """
    # RAM read request: packet size, servo id, command, register, length.
    data = []
    data.append(0x09)
    data.append(self.servoid)
    data.append(RAM_READ_REQ)
    data.append(PWM_RAM)
    data.append(BYTE2)
    send_data(data)
    rxdata = []
    try:
        rxdata = SERPORT.read(13)
        # Bytes 9 (low) and 10 (high) hold the PWM value; a high byte
        # > 127 flags a negative reading.
        # NOTE(review): the negative-value decode below looks off-by-one
        # (uses 0xFF where 0x100/256 would be conventional) — confirm
        # against the Herkulex protocol datasheet.
        if ord(rxdata[10])<=127:
            return ((ord(rxdata[10])&0x03)<<8) | (ord(rxdata[9])&0xFF)
        else:
            return (ord(rxdata[10])-0xFF)*0xFF + (ord(rxdata[9])&0xFF)-0xFF
    except HerkulexError:
        raise HerkulexError("could not communicate with motors")
Gets the current torque of Herkulex Gives the current load on the servo shaft. It is actually the PWM value to the motors Args: none Returns: int: the torque on servo shaft. range from -1023 to 1023 Raises: SerialException: Error occured while opening serial port
juraj-google-style
def _handle_client_exception():
    """Handle Swift client exceptions and convert them to IO exceptions.

    Raises:
        OSError subclasses: IO error mapped from the HTTP status code;
            unmapped statuses re-raise the original exception.
    """
    try:
        yield
    except _ClientException as exception:
        mapped_error = _ERROR_CODES.get(exception.http_status)
        if mapped_error is not None:
            raise mapped_error(exception.http_reason)
        raise
Handle Swift exception and convert to class IO exceptions Raises: OSError subclasses: IO error.
codesearchnet
def replace_species(self, species_mapping):
    """Swap species in place.

    Args:
        species_mapping (dict): dict of species to swap. Species can be
            elements too, e.g. {Element("Li"): Element("Na")} performs a
            Li-for-Na substitution. The target can also be a
            species-to-occupancy dict, e.g. mapping 0.5 Si through
            {Element('Si'): {Element('Ge'): 0.75, Element('C'): 0.25}}
            yields 0.375 Ge and 0.125 C on that site.
    """
    # Normalize keys to Element/Species objects.
    species_mapping = {get_el_sp(k): v for (k, v) in species_mapping.items()}
    sp_to_replace = set(species_mapping.keys())
    sp_in_structure = set(self.composition.keys())
    if (not sp_in_structure.issuperset(sp_to_replace)):
        warnings.warn(('Some species to be substituted are not present in structure. Pls check your input. Species to be substituted = %s; Species in structure = %s' % (sp_to_replace, sp_in_structure)))
    for site in self._sites:
        # Only rebuild sites that actually contain a species to replace.
        if sp_to_replace.intersection(site.species):
            c = Composition()
            for (sp, amt) in site.species.items():
                new_sp = species_mapping.get(sp, sp)
                # A target may be a single species or a dict of
                # species -> occupancy fractions.
                try:
                    c += (Composition(new_sp) * amt)
                except Exception:
                    c += {new_sp: amt}
            site.species = c
Swap species. Args: species_mapping (dict): dict of species to swap. Species can be elements too. E.g., {Element("Li"): Element("Na")} performs a Li for Na substitution. The second species can be a sp_and_occu dict. For example, a site with 0.5 Si that is passed the mapping {Element('Si): {Element('Ge'):0.75, Element('C'):0.25} } will have .375 Ge and .125 C.
codesearchnet
def _VerifyValues(self, image, ksizes, strides, padding, patches):
    """Tests input-output pairs for the ExtractVolumePatches op.

    Args:
        image: Input tensor with shape
            [batch, in_planes, in_rows, in_cols, depth].
        ksizes: Patch size as [ksize_planes, ksize_rows, ksize_cols].
        strides: Output strides as [stride_planes, stride_rows, stride_cols].
        padding: Padding type.
        patches: Expected output. (Rates are not supported.)
    """
    # Expand to the 5-D op format: no patching over batch or depth dims.
    ksizes = [1] + ksizes + [1]
    strides = [1] + strides + [1]
    # Verify behavior across all supported float dtypes.
    for dtype in [np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype]:
        out_tensor = array_ops.extract_volume_patches(constant_op.constant(image.astype(dtype)), ksizes=ksizes, strides=strides, padding=padding, name='im2col_3d')
        self.assertAllClose(patches.astype(dtype), self.evaluate(out_tensor))
Tests input-output pairs for the ExtractVolumePatches op. Args: image: Input tensor with shape: [batch, in_planes, in_rows, in_cols, depth]. ksizes: Patch size specified as: [ksize_planes, ksize_rows, ksize_cols]. strides: Output strides, specified as: [stride_planes, stride_rows, stride_cols]. padding: Padding type. patches: Expected output. Note: rates are not supported as of now.
github-repos
def __init__(self, window_size=zlib.MAX_WBITS):
    """Initializes a decompressor.

    Args:
        window_size (Optional[int]): base-2 logarithm of the size of the
            compression history buffer (aka window size). A negative value
            suppresses the standard zlib data header.
    """
    super(ZlibDecompressor, self).__init__()
    self._zlib_decompressor = zlib.decompressobj(window_size)
Initializes a decompressor. Args: window_size (Optional[int]): base two logarithm of the size of the compression history buffer (aka window size). When the value is negative, the standard zlib data header is suppressed.
juraj-google-style
def load_data(path, verbose=False, raise_errors=False):
    """Loads the data that has been saved with Script.save.

    Args:
        path: path to a folder saved by Script.save, or to its raw_data
            subfolder.
        verbose: if True, print additional information.
        raise_errors: if True, raise errors; if False, just print to stdout.

    Returns:
        dict of the form {data_name: data, ...} where data is either a
        numpy array or a dict of column-name -> numpy array; None when the
        path or data files are missing and raise_errors is False.
    """
    if not os.path.exists(path):
        if raise_errors:
            raise AttributeError('Path given does not exist!')
        print('Path given does not exist!')
        return None
    path = Script.check_filename(path)
    if verbose:
        print('script path', path)
    data = {}
    if 'raw_data' in os.listdir(path):
        if verbose:
            print('raw_data subfolder found')
        path = os.path.join(path, 'raw_data' + '/')
        data_files = os.listdir(path)
    else:
        data_files = glob.glob(os.path.join(path, '*.csv'))
    if verbose:
        print('data_files found', data_files)
    if not data_files:
        if raise_errors:
            raise AttributeError('Could not find data files in {:s}'.format(path))
        print('Could not find data files in {:s}'.format(path))
        return None
    for data_file in data_files:
        # File names look like "<prefix>-<data_name>.csv".
        data_name = data_file.split('-')[-1][0:-4]
        try:
            imported_data_df = pd.read_csv(os.path.join(path, data_file))
        except pd.errors.EmptyDataError as err:
            # BUG FIX: the original did `raise err('...')`, which calls the
            # caught exception *instance* and raises TypeError instead of
            # the intended error. Raise a fresh EmptyDataError instead.
            if raise_errors:
                raise pd.errors.EmptyDataError(
                    'data file ' + data_file + ' is empty: did not load!') from err
            print('data file ' + data_file + ' is empty: did not load!')
            continue
        column_headers = list(imported_data_df.columns.values)
        # All-numeric headers mean a plain array dump; otherwise keep the
        # named columns.
        if sum(int(x.isdigit()) for x in column_headers) != len(column_headers):
            data[data_name] = {h: imported_data_df[h].values for h in column_headers}
        else:
            data[data_name] = np.squeeze(imported_data_df.values)
    return data
loads the data that has been save with Script.save. Args: path: path to folder saved by Script.save or raw_data folder within verbose: if true print additional information raise_errors: if true raise errors if false just print to std out Returns: a dictionary with the data of form data = {param_1_name: param_1_data, ...}
juraj-google-style
def received_response(self, value):
    """Setter for the received_response property.

    Args:
        value (string): the property value. Setting the default value
            removes any stored override.
    """
    key = 'receivedResponse'
    matches_default = value == self._defaults[key]
    if matches_default and key in self._values:
        # Back to the default: drop the stored override.
        del self._values[key]
    else:
        self._values[key] = value
The received_response property. Args: value (string). the property value.
juraj-google-style
def write_fasta_file_from_dict(indict, outname, outdir=None, outext='.faa', force_rerun=False):
    """Write a FASTA file for a dictionary of IDs and their sequence strings.

    Args:
        indict: dict with keys as IDs and values as sequence strings.
        outname: Name of the output file (outext is appended to it).
        outdir: Directory to write the file to.
        outext: Extension of the FASTA file, default ".faa".
        force_rerun: If True, overwrite the file if it exists.

    Returns:
        str: Path to the output FASTA file.
    """
    if not outdir:
        outdir = ''
    outfile = ssbio.utils.outfile_maker(inname='', outname=outname, outdir=outdir, outext=outext)
    # Skip writing when the file already exists, unless forced.
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
        seqs = []
        for i, s in indict.items():
            # Accepts raw strings or Seq-like objects; coerced to SeqRecord.
            seq = ssbio.protein.sequence.utils.cast_to_seq_record(s, id=i)
            seqs.append(seq)
        SeqIO.write(seqs, outfile, "fasta")
    return outfile
Write a FASTA file for a dictionary of IDs and their sequence strings. Args: indict: Input dictionary with keys as IDs and values as sequence strings outname: Name of the output file which will have outext appended to it outdir: Path to directory to output sequences to outext: Extension of FASTA file, default ".faa" force_rerun: If file should be overwritten if it exists Returns: str: Path to output FASTA file.
juraj-google-style
def write_summaries(self, tagged_data, experiment_name, run_name):
    """Transactionally writes the given tagged summary data to the DB.

    Args:
        tagged_data: map from tag to TagData instances.
        experiment_name: name of experiment.
        run_name: name of run.
    """
    logger.debug('Writing summaries for %s tags', len(tagged_data))
    with self._db:
        self._db.execute('BEGIN TRANSACTION')
        run_id = self._maybe_init_run(experiment_name, run_name)
        tag_to_metadata = {
            tag: tagdata.metadata for tag, tagdata in six.iteritems(tagged_data)
        }
        tag_to_id = self._maybe_init_tags(run_id, tag_to_metadata)
        tensor_values = []
        for tag, tagdata in six.iteritems(tagged_data):
            tag_id = tag_to_id[tag]
            for step, wall_time, tensor_proto in tagdata.values:
                dtype = tensor_proto.dtype
                shape = ','.join(str(d.size) for d in tensor_proto.tensor_shape.dim)
                data = self._make_blob(
                    tensor_proto.tensor_content or
                    tensor_util.make_ndarray(tensor_proto).tobytes())
                tensor_values.append((tag_id, step, wall_time, dtype, shape, data))
        # BUG FIX: the SQL statement was missing entirely here
        # (`executemany( , tensor_values)` is a syntax error). Restored an
        # insert matching the tuple order built above.
        # NOTE(review): confirm the table and column names against the
        # actual Tensors schema before relying on this.
        self._db.executemany(
            'INSERT OR REPLACE INTO Tensors '
            '(series, step, computed_time, dtype, shape, data) '
            'VALUES (?, ?, ?, ?, ?, ?)',
            tensor_values)
Transactionally writes the given tagged summary data to the DB. Args: tagged_data: map from tag to TagData instances. experiment_name: name of experiment. run_name: name of run.
juraj-google-style
def find_all(container):
    """Find all annotated functions inside of a container.

    Annotated functions are identified as those that:
    - do not start with a _ character
    - are either annotated with metadata
    - or strings that point to lazily loaded modules

    Args:
        container (object): The container (dict or object) to search.

    Returns:
        dict: A BasicContext with all of the found functions in it.
    """
    container_is_dict = isinstance(container, dict)
    if container_is_dict:
        candidate_names = container.keys()
    else:
        candidate_names = dir(container)
    built_context = BasicContext()
    for name in candidate_names:
        # Private names are never exported.
        if name.startswith('_'):
            continue
        obj = container[name] if container_is_dict else getattr(container, name)
        if container_is_dict and isinstance(obj, str):
            # Strings in a dict are lazy module references.
            built_context[name] = obj
        elif hasattr(obj, 'metadata') and isinstance(getattr(obj, 'metadata'), AnnotatedMetadata):
            built_context[name] = obj
    return built_context
Find all annotated function inside of a container. Annotated functions are identified as those that: - do not start with a _ character - are either annotated with metadata - or strings that point to lazily loaded modules Args: container (object): The container to search for annotated functions. Returns: dict: A dict with all of the found functions in it.
juraj-google-style
class MCTCTProcessor(ProcessorMixin):
    """Constructs an MCTCT processor wrapping a feature extractor and a tokenizer.

    Offers all the functionalities of `MCTCTFeatureExtractor` (audio) and
    `AutoTokenizer` (text) behind a single object, dispatching calls to
    whichever component matches the given inputs.
    """
    feature_extractor_class = 'MCTCTFeatureExtractor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        """Store both components; audio processing is the default mode."""
        super().__init__(feature_extractor, tokenizer)
        # Switched to the tokenizer inside the deprecated
        # `as_target_processor` context manager.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Forward `audio` to the feature extractor and `text` to the tokenizer.

        When both are given, tokenized `input_ids` are attached to the
        audio features under the `labels` key. Raises ValueError when
        neither input is provided.
        """
        # Inside `as_target_processor`, delegate everything unchanged.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if 'raw_speech' in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        # A positional first argument is treated as the audio input.
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Delegate batch decoding of token ids to the tokenizer."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        """Pad `input_features` with the feature extractor and `labels` with the tokenizer.

        Mirrors `__call__`: when both are given, padded label `input_ids`
        are attached to the padded features under the `labels` key.
        """
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        # A positional first argument is treated as the features input.
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def decode(self, *args, **kwargs):
        """Delegate decoding of token ids to the tokenizer."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route calls to the tokenizer for label processing."""
        warnings.warn('`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your labels by using the argument `text` of the regular `__call__` method (either in the same call as your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        # Restore audio mode on exit, even though exceptions inside the
        # `with` body would skip this (original behavior preserved).
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
Constructs a MCTCT processor which wraps a MCTCT feature extractor and a MCTCT tokenizer into a single processor. [`MCTCTProcessor`] offers all the functionalities of [`MCTCTFeatureExtractor`] and [`AutoTokenizer`]. See the [`~MCTCTProcessor.__call__`] and [`~MCTCTProcessor.decode`] for more information. Args: feature_extractor (`MCTCTFeatureExtractor`): An instance of [`MCTCTFeatureExtractor`]. The feature extractor is a required input. tokenizer (`AutoTokenizer`): An instance of [`AutoTokenizer`]. The tokenizer is a required input.
github-repos
def run(self, dag):
    """Recursively expand all op nodes of `dag` to the given basis.

    Args:
        dag (DAGCircuit): input dag.

    Returns:
        DAGCircuit: the same dag with non-basis nodes expanded in place.

    Raises:
        QiskitError: if unable to unroll given the basis due to undefined
            decomposition rules (such as a bad basis) or excessive
            recursion.
    """
    for node in dag.op_nodes():
        # Basic instructions and directives are never decomposed.
        if node.name in ('measure', 'reset', 'barrier', 'snapshot'):
            continue
        # Already in the target basis; leave untouched.
        if node.name in self.basis:
            continue
        rule = node.op.definition
        if not rule:
            raise QiskitError(('Cannot unroll the circuit to the given basis, %s. No rule to expand instruction %s.' % (str(self.basis), node.op.name)))
        # Build a sub-DAG from the decomposition rule, unroll it
        # recursively, then splice it in place of the node.
        sub_dag = DAGCircuit()
        sub_dag.add_qreg(rule[0][1][0][0])
        for instruction in rule:
            sub_dag.apply_operation_back(*instruction)
        dag.substitute_node_with_dag(node, self.run(sub_dag))
    return dag
Expand all op nodes to the given basis. Args: dag(DAGCircuit): input dag Raises: QiskitError: if unable to unroll given the basis due to undefined decomposition rules (such as a bad basis) or excessive recursion. Returns: DAGCircuit: output unrolled dag
codesearchnet
def extract_build_info(exe_path, elf_section=ELF_SECTION):
    """Extract the build information from a given executable.

    The build information is expected to be JSON stored in `elf_section`;
    it is parsed and returned as a dictionary. If no (valid) build
    information is found, an empty dictionary is returned.

    This assumes binutils >= 2.25 in order to work.

    Args:
        exe_path (str): The full path to the executable to be examined.
        elf_section (str): Name of the ELF section holding the build info.

    Returns:
        dict: A dictionary of the extracted information.
    """
    build_info = {}
    with mkdtemp() as tempd, pushd(tempd):
        proc = subprocess.Popen(
            [OBJCOPY, DUMP_SECTION,
             '{secn}={ofile}'.format(secn=elf_section, ofile=BUILDINFO_FILE),
             exe_path],
            stderr=subprocess.PIPE)
        # communicate() waits AND drains stderr; the original
        # wait()-then-read could deadlock if objcopy filled the pipe.
        (_, stderr) = proc.communicate()
        errno = proc.returncode
        if errno or len(stderr):
            LOGGER.warning('objcopy failed with errno %s.', errno)
            if len(stderr):
                LOGGER.warning('objcopy failed with following msg:\n%s', stderr)
            return build_info
        with open(BUILDINFO_FILE) as build_info_f:
            try:
                build_info = json.load(build_info_f, object_hook=byteify)
            except JSONDcdError as jsde:
                LOGGER.warning('benchmark executable build is not valid json:')
                LOGGER.warning(jsde.msg)
                LOGGER.warning('build info section content:')
                LOGGER.warning(jsde.doc)
    return build_info
Extracts the build information from a given executable. The build information is expected to be in json format, which is parsed and returned as a dictionary. If no build information is found, an empty dictionary is returned. This assumes binutils >= 2.25 in order to work. Args: exe_path (str): The full path to the executable to be examined Returns: dict: A dictionary of the extracted information.
codesearchnet
def rsqrt(x):
    """Compute the reciprocal of the square root of `x`, element-wise.

    Args:
        x: input tensor.

    Returns:
        A tensor with the same dtype as `x`.

    Example:

    >>> x = keras.ops.convert_to_tensor([1.0, 10.0, 100.0])
    >>> keras.ops.rsqrt(x)
    array([1.0, 0.31622776, 0.1], dtype=float32)
    """
    # Symbolic inputs are routed through the op object for graph tracing.
    if any_symbolic_tensors((x,)):
        return Rsqrt().symbolic_call(x)
    return backend.math.rsqrt(backend.convert_to_tensor(x))
Computes reciprocal of square root of x element-wise. Args: x: input tensor Returns: A tensor with the same dtype as `x`. Example: >>> x = keras.ops.convert_to_tensor([1.0, 10.0, 100.0]) >>> keras.ops.rsqrt(x) array([1.0, 0.31622776, 0.1], dtype=float32)
github-repos
def async_decorator(func):
    """Asynchronous function decorator.

    Wraps `func` so that, when called with a truthy ``callback`` keyword
    argument, it runs on a daemon background thread and the callback is
    invoked with the outcome; without a callback it runs synchronously.

    Args:
        func (function): function to be called asynchronously.

    Returns:
        The wrapped function.
    """
    @functools.wraps(func)
    def async_wrapper(*args, **kwargs):
        """Dispatch the call to ``func`` either directly or via a thread.

        The callback, if given, is ultimately called as
        ``callback(exception, result)``: ``exception`` is ``None`` when
        the thread ran to completion, otherwise the raised exception;
        ``result`` is the return value of ``func``.

        Args:
            callback (function): keyword-only; the callback to call.
            args: positional arguments passed through to ``func``.
            kwargs: keyword arguments passed through to ``func``.

        Returns:
            A started thread when a callback was supplied, otherwise the
            return value of ``func``.

        Raises:
            TypeError: if ``callback`` is supplied but not callable.
        """
        # No (truthy) callback means a plain synchronous call.
        if (('callback' not in kwargs) or (not kwargs['callback'])):
            return func(*args, **kwargs)
        # Remove the callback so it is not forwarded to `func`.
        callback = kwargs.pop('callback')
        if (not callable(callback)):
            raise TypeError("Expected 'callback' is not callable.")

        def thread_func(*args, **kwargs):
            """Run ``func`` and report its outcome through the callback.

            Returns:
                Whatever the callback returns.
            """
            (exception, res) = (None, None)
            try:
                res = func(*args, **kwargs)
            except Exception as e:
                exception = e
            return callback(exception, res)
        thread = threads.ThreadReturn(target=thread_func, args=args, kwargs=kwargs)
        # Daemon thread: does not block interpreter shutdown.
        thread.daemon = True
        thread.start()
        return thread
    return async_wrapper
Asynchronous function decorator. Interprets the function as being asynchronous, so returns a function that will handle calling the Function asynchronously. Args: func (function): function to be called asynchronously Returns: The wrapped function. Raises: AttributeError: if ``func`` is not callable
codesearchnet
def get_python_executable(version: tuple[int, ...]) -> list[str] | None:
    """Get a python executable corresponding to `version`.

    Args:
        version: The required python version.

    Returns:
        - None: the current host interpreter can compile `version`.
        - [path-to-exe, args]: a valid python-`version` interpreter.

    Raises:
        PythonNotFoundError: if no suitable interpreter is found.
    """
    # The running interpreter handles this version itself.
    if can_compile_bytecode_natively(version):
        return None
    match = next(
        (candidate for candidate in _get_python_exes(version)
         if _get_python_exe_version(candidate) == version),
        None)
    if match is not None:
        return match
    raise PythonNotFoundError()
Get a python executable corresponding to version. Args: version: The required python version Returns: - None: The current host interpreter can compile `version` - [path-to-exe, args]: A valid python-`version` interpreter Raises: PythonNotFoundError: if no suitable interpreter is found.
github-repos
def connect():
    """Try to connect to the router via uPnP.

    Returns:
        tuple or None: (miniupnpc.UPnP instance, location string) on
        success, None on any failure.
    """
    client = miniupnpc.UPnP()
    client.discoverdelay = 200
    providers = client.discover()
    if providers > 1:
        log.debug('multiple upnp providers found', num_providers=providers)
    elif providers < 1:
        log.error('no upnp providers found')
        return None

    try:
        location = client.selectigd()
        log.debug('connected', upnp=client)
    except Exception as e:
        log.error('Error when connecting to uPnP provider', exception_info=e)
        return None

    # The LAN address must be a mappable IPv4 before we go further.
    if not valid_mappable_ipv4(client.lanaddr):
        log.error('could not query your lanaddr', reported=client.lanaddr)
        return None
    try:
        # Querying the external address can itself fail.
        if not valid_mappable_ipv4(client.externalipaddress()):
            log.error('could not query your externalipaddress', reported=client.externalipaddress())
            return None
        return (client, location)
    except Exception:
        log.error('error when connecting with uPnP provider', location=location)
        return None
Try to connect to the router. Returns: u (miniupnc.UPnP): the connected upnp-instance router (string): the connection information
codesearchnet
def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: batch_size, seq_length, three_times_hidden_size = fused_qkv.shape fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim) return (fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :])
Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory storage as `fused_qkv` Args: fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim] Returns: query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim] value: [batch_size, seq_length, num_heads, head_dim]
github-repos
def indent(text, n=2, ch=' '):
    """Indent all the lines in a given block of text.

    Args:
        text (str): The text to indent.
        n (int, optional): How many fill characters per line (default: 2).
        ch (char, optional): What character to fill the indentation with
            (default: " ").

    Returns:
        str: The indented text.
    """
    prefix = ch * n
    lines = text.split('\n')
    # Note: every line gets the prefix, including empty ones.
    return '\n'.join(prefix + line for line in lines)
Indent all the lines in a given block of text by a specified amount. Args: text (str) : The text to indent n (int, optional) : The amount to indent each line by (default: 2) ch (char, optional) : What character to fill the indentation with (default: " ")
codesearchnet
def index_in_block(self, channel_index: int) -> tuple:
    """Return the index a channel has within the sub-block it belongs to.

    I.e., only for reducible circuits, this gives a result different from
    the argument itself.

    Args:
        channel_index (int): The index of the external channel.

    Returns:
        tuple(int, int): ``(index_in_block, block_index)``.

    Raises:
        ValueError: for an invalid `channel_index`.
    """
    if channel_index < 0 or channel_index >= self.cdim:
        # Original raised a bare ValueError(); give callers a message.
        raise ValueError(
            'channel_index must be in [0, {}), got {}'
            .format(self.cdim, channel_index))
    struct = self.block_structure
    if len(struct) == 1:
        # Irreducible circuit: the single block starts at channel 0.
        return (channel_index, 0)
    # Walk the blocks, tracking the cumulative channel offset; the first
    # block whose range contains channel_index is the answer.
    offset = 0
    for block_index, block_size in enumerate(struct):
        if channel_index < offset + block_size:
            return (channel_index - offset, block_index)
        offset += block_size
    # Only reachable if block_structure is inconsistent with cdim.
    raise ValueError('block_structure inconsistent with cdim')
Return the index a channel has within the subblock it belongs to I.e., only for reducible circuits, this gives a result different from the argument itself. Args: channel_index (int): The index of the external channel Raises: ValueError: for an invalid `channel_index`
codesearchnet
def decode_response(data):
    """Decode the data from an SSDP response.

    Args:
        data (bytes): The encoded response.

    Returns:
        Case-insensitive dictionary of header name to header value pairs
        extracted from the response (the status line is skipped).
    """
    headers = CaseInsensitiveDict()
    # Skip the status line; parse the remaining "Name: value" headers.
    for raw in data.decode('utf-8').splitlines()[1:]:
        raw = raw.strip()
        if not raw:
            continue
        name, _, value = raw.partition(':')
        headers[name.strip()] = value.strip()
    return headers
Decodes the data from a SSDP response. Args: data (bytes): The encoded response. Returns: dict of string -> string: Case-insensitive dictionary of header name to header value pairs extracted from the response.
juraj-google-style
def leapfrog_step(leapfrog_step_state: LeapFrogStepState,
                  step_size: FloatTensor,
                  target_log_prob_fn: PotentialFn,
                  kinetic_energy_fn: PotentialFn
                  ) -> Tuple[LeapFrogStepState, LeapFrogStepExtras]:
    """Leapfrog `TransitionOperator`.

    Performs one leapfrog integration step: a half-step momentum update
    from the potential gradient, a full-step position update from the
    kinetic-energy gradient, then a second half-step momentum update.

    Args:
        leapfrog_step_state: LeapFrogStepState.
        step_size: Step size, structure broadcastable to the
            `target_log_prob_fn` state.
        target_log_prob_fn: Target log prob fn.
        kinetic_energy_fn: Kinetic energy fn.

    Returns:
        leapfrog_step_state: LeapFrogStepState.
        leapfrog_step_extras: LeapFrogStepExtras.
    """
    state = leapfrog_step_state.state
    state_grads = leapfrog_step_state.state_grads
    momentum = leapfrog_step_state.momentum
    step_size = maybe_broadcast_structure(step_size, state)

    state = tf.nest.map_structure(tf.convert_to_tensor, state)
    momentum = tf.nest.map_structure(tf.convert_to_tensor, momentum)
    # NOTE(review): this repeats the `state` conversion two lines above;
    # it is harmless but redundant.
    state = tf.nest.map_structure(tf.convert_to_tensor, state)

    # Reuse cached gradients when the caller provides them; otherwise
    # evaluate the target once to obtain them.
    if state_grads is None:
        _, _, state_grads = call_and_grads(target_log_prob_fn, state)
    else:
        state_grads = tf.nest.map_structure(tf.convert_to_tensor, state_grads)

    # First half-step momentum update.
    momentum = tf.nest.map_structure(lambda m, sg, s: m + 0.5 * sg * s,
                                     momentum, state_grads, step_size)

    kinetic_energy, kinetic_energy_extra, momentum_grads = call_and_grads(
        kinetic_energy_fn, momentum)

    # Full-step position update using the kinetic-energy gradient.
    state = tf.nest.map_structure(lambda x, mg, s: x + mg * s, state,
                                  momentum_grads, step_size)

    target_log_prob, state_extra, state_grads = call_and_grads(
        target_log_prob_fn, state)

    # Second half-step momentum update with the fresh gradients.
    momentum = tf.nest.map_structure(lambda m, sg, s: m + 0.5 * sg * s,
                                     momentum, state_grads, step_size)

    return LeapFrogStepState(state, state_grads, momentum), LeapFrogStepExtras(
        target_log_prob, state_extra, kinetic_energy, kinetic_energy_extra)
Leapfrog `TransitionOperator`. Args: leapfrog_step_state: LeapFrogStepState. step_size: Step size, structure broadcastable to the `target_log_prob_fn` state. target_log_prob_fn: Target log prob fn. kinetic_energy_fn: Kinetic energy fn. Returns: leapfrog_step_state: LeapFrogStepState. leapfrog_step_extras: LeapFrogStepExtras.
juraj-google-style
def after(self):
    """Return a deferred that will fire after the request is finished.

    Returns:
        Deferred: a new deferred that will fire appropriately.
    """
    deferred = Deferred()
    # Queued deferreds are fired when the request completes.
    self._after_deferreds.append(deferred)
    return deferred.chain
Return a deferred that will fire after the request is finished. Returns: Deferred: a new deferred that will fire appropriately
codesearchnet
def val_to_mrc(code, val):
    """Convert one single `val` to MRC.

    This function may be used for control fields in MARC records.

    Args:
        code (str): Code of the field.
        val (str): Value of the field.

    Returns:
        str: Correctly padded MRC line with field.
    """
    # Field codes are space-padded to a fixed width of 3.
    padded_code = str(code).ljust(3)
    return '%s L %s' % (padded_code, val)
Convert one single `val` to MRC. This function may be used for control fields in MARC records. Args: code (str): Code of the field. val (str): Value of the field. Returns: str: Correctly padded MRC line with field.
codesearchnet
def marshal_json(obj, types=JSON_TYPES, fields=None):
    """Recursively marshal a Python object to a JSON-compatible dict.

    The result can be passed to json.{dump,dumps}, a web client, or a
    web server, etc.

    Args:
        obj: object; its members can be nested Python objects which will
            be converted to dictionaries.
        types: tuple-of-types, the JSON primitive types; typically you
            would not change this.
        fields: None-or-list-of-str, explicitly marshal only these fields.

    Returns:
        dict
    """
    # All of the work happens in marshal_dict; this is a thin alias.
    return marshal_dict(obj, types, fields=fields)
Recursively marshal a Python object to a JSON-compatible dict that can be passed to json.{dump,dumps}, a web client, or a web server, etc... Args: obj: object, It's members can be nested Python objects which will be converted to dictionaries types: tuple-of-types, The JSON primitive types, typically you would not change this fields: None-list-of-str, Explicitly marshal only these fields Returns: dict
juraj-google-style
def _render_timestep(self, t: int, s: Fluents, a: Fluents, f: Fluents, r: np.float32) -> None:
    """Print fluents and reward for the given timestep `t`.

    Args:
        t (int): timestep.
        s: state fluents.
        a: action fluents.
        f: interm state fluents.
        r (np.float32): reward.
    """
    print("============================")
    print("TIME = {}".format(t))
    print("============================")
    rddl = self._compiler.rddl
    # Render each fluent category with its matching variable metadata.
    sections = (
        ('action', a, rddl.action_fluent_variables),
        ('interms', f, rddl.interm_fluent_variables),
        ('states', s, rddl.state_fluent_variables),
    )
    for label, fluents, fluent_variables in sections:
        self._render_fluent_timestep(label, fluents, fluent_variables)
    self._render_reward(r)
Prints fluents and rewards for the given timestep `t`. Args: t (int): timestep s (Sequence[Tuple[str], np.array]: State fluents. a (Sequence[Tuple[str], np.array]: Action fluents. f (Sequence[Tuple[str], np.array]: Interm state fluents. r (np.float32): Reward.
juraj-google-style
def __init__(self, json_data):
    """Init a new ImmutableData object from a dictionary or JSON string.

    Args:
        json_data (dict, basestring): Input JSON string or dictionary.

    Raises:
        TypeError: If the input object is not a dictionary or string.
    """
    super(ImmutableData, self).__init__()
    # json_dict normalizes the input (presumably parsing JSON text into a
    # dict and raising TypeError otherwise -- confirm in its definition).
    self._json_data = json_dict(json_data)
Init a new ImmutableData object from a dictionary or JSON string. Args: json_data(dict, basestring): Input JSON string or dictionary. Raises: TypeError: If the input object is not a dictionary or string.
juraj-google-style
def assign(self, institute, case, user, link):
    """Assign a user to a case.

    Creates an Event to log that a person has been assigned to a case,
    and adds the user to the case "assignees".

    Arguments:
        institute (dict): An institute.
        case (dict): A case.
        user (dict): A User object.
        link (str): The url to be used in the event.

    Returns:
        updated_case (dict)
    """
    LOG.info("Creating event for assigning {0} to {1}"
             .format(user['name'].encode('utf-8'), case['display_name']))
    self.create_event(
        institute=institute,
        case=case,
        user=user,
        link=link,
        category='case',
        verb='assign',
        subject=case['display_name'],
    )

    LOG.info("Updating {0} to be assigned with {1}"
             .format(case['display_name'], user['name']))
    # $addToSet keeps the assignees list duplicate-free; return the
    # post-update document.
    return self.case_collection.find_one_and_update(
        {'_id': case['_id']},
        {'$addToSet': {'assignees': user['_id']}},
        return_document=pymongo.ReturnDocument.AFTER,
    )
Assign a user to a case. This function will create an Event to log that a person has been assigned to a case. Also the user will be added to case "assignees". Arguments: institute (dict): A institute case (dict): A case user (dict): A User object link (str): The url to be used in the event Returns: updated_case(dict)
juraj-google-style
def get_keys_to_action(self):
    """Get mapping from keyboard keys to actions.

    Required by gym.utils.play in environment or top level wrapper.

    Returns:
        dict mapping a sorted tuple of Unicode code points for keyboard
        keys to the action id (formatted for step()), plus special keys
        for done/wait control.
    """
    keyword_to_key = {'UP': ord('w'), 'DOWN': ord('s'), 'LEFT': ord('a'),
                      'RIGHT': ord('d'), 'FIRE': ord(' ')}
    keys_to_action = {}
    for action_id, action_meaning in enumerate(self.action_meanings):
        # Every keyword appearing in the action's name contributes a key.
        pressed = [key for keyword, key in keyword_to_key.items()
                   if keyword in action_meaning]
        keys_tuple = tuple(sorted(pressed))
        assert keys_tuple not in keys_to_action
        keys_to_action[keys_tuple] = action_id
    # Special control keys outside the environment's action set.
    keys_to_action[(ord('r'),)] = self.RETURN_DONE_ACTION
    keys_to_action[(ord('c'),)] = self.TOGGLE_WAIT_ACTION
    keys_to_action[(ord('n'),)] = self.WAIT_MODE_NOOP_ACTION
    return keys_to_action
Get mapping from keyboard keys to actions. Required by gym.utils.play in environment or top level wrapper. Returns: { Unicode code point for keyboard key: action (formatted for step()), ... }
codesearchnet
def _hash_file(fpath, algorithm='sha256', chunk_size=65535): if isinstance(algorithm, str): hasher = _resolve_hasher(algorithm) else: hasher = algorithm with open(fpath, 'rb') as fpath_file: for chunk in iter(lambda: fpath_file.read(chunk_size), b''): hasher.update(chunk) return hasher.hexdigest()
Calculates a file sha256 or md5 hash. Example: ```python _hash_file('/path/to/file.zip') 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' ``` Args: fpath: path to the file being validated algorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`. The default `'auto'` detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. Returns: The file hash
github-repos
def acquire_resources(self, source):
    """Store the resources returned by ``source()``.

    If ``source`` has been acquired before, it will not be called a
    second time.

    Args:
        source (callable or Tag): A Tag, or a function that returns a
            resource, a list/tuple of resources, or None.

    Returns:
        None
    """
    # Each source is consulted at most once.
    if source in self.consulted:
        return
    self.consulted.add(source)
    if isinstance(source, Tag):
        resources = {source}
    else:
        produced = source(self.H)
        if produced is None:
            resources = set()
        elif isinstance(produced, (list, tuple)):
            resources = set(produced)
        elif isinstance(produced, Tag):
            resources = {produced}
        else:
            # Already a set (or set-like); merge as-is.
            resources = produced
    self.resources |= resources
Store the resources returned by ``source()``. If ``source`` has been acquired before, it will not be called a second time. Args: source (callable): A function that returns a resource or a list of resources. Returns: None
juraj-google-style
def register_frame_to_skip(cls, file_name, function_name, line_number=None):
    """Register a function name to skip when walking the stack.

    The ABSLLogger sometimes skips method calls on the stack to make the
    log messages meaningful in their appropriate context. This method
    registers a function from a particular file as one which should be
    skipped.

    Args:
        cls: the logger class holding the skip registry.
        file_name: str, the name of the file that contains the function.
        function_name: str, the name of the function to skip.
        line_number: int, if provided, only the function with this
            starting line number will be skipped. Otherwise, all
            functions with the same name in the file will be skipped.
    """
    entry = (file_name, function_name)
    if line_number is not None:
        # A 3-tuple pins the skip to one specific function start line.
        entry = entry + (line_number,)
    cls._frames_to_skip.add(entry)
Registers a function name to skip when walking the stack. The ABSLLogger sometimes skips method calls on the stack to make the log messages meaningful in their appropriate context. This method registers a function from a particular file as one which should be skipped. Args: file_name: str, the name of the file that contains the function. function_name: str, the name of the function to skip. line_number: int, if provided, only the function with this starting line number will be skipped. Otherwise, all functions with the same name in the file will be skipped.
codesearchnet