code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def __call__(self, shape, dtype=dtypes.float32, **kwargs):
    """Return a tensor of zeros as specified by this initializer.

    Args:
        shape: Shape of the tensor.
        dtype: Optional dtype of the tensor. Only numeric or boolean dtypes
            are supported.
        **kwargs: Additional keyword arguments; may carry a partition shape
            that overrides `shape`.

    Raises:
        ValueError: If the dtype is not numeric or boolean.
    """
    self._validate_kwargs(kwargs)
    dtype = dtypes.as_dtype(dtype)
    # Strings are numpy-compatible but not numeric, so reject them explicitly.
    unsupported = dtype == dtypes.string or not dtype.is_numpy_compatible
    if unsupported:
        raise ValueError(f'Argument `dtype` expected to be numeric or boolean. Received {dtype}.')
    # A partitioned variable passes its local partition shape via kwargs.
    shape = kwargs.get(_PARTITION_SHAPE, shape)
    return array_ops.zeros(shape, dtype)
Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only numeric or boolean dtypes are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not numeric or boolean.
github-repos
class SessionRunValues(collections.namedtuple('SessionRunValues', ['results', 'options', 'run_metadata'])):
Contains the results of `Session.run()`. In the future we may use this object to add more information about result of run without changing the Hook API. Args: results: The return values from `Session.run()` corresponding to the fetches attribute returned in the RunArgs. Note that this has the same shape as the RunArgs fetches. For example: fetches = global_step_tensor => results = nparray(int) fetches = [train_op, summary_op, global_step_tensor] => results = [None, nparray(string), nparray(int)] fetches = {'step': global_step_tensor, 'summ': summary_op} => results = {'step': nparray(int), 'summ': nparray(string)} options: `RunOptions` from the `Session.run()` call. run_metadata: `RunMetadata` from the `Session.run()` call.
github-repos
def write_payload(payload=None, objectInput=None):
    """Write a base64 payload or a file object to a temporary file on disk.

    Args:
        payload (string): payload in base64.
        objectInput (object): file object/standard input to analyze.

    Returns:
        Path of the temporary file containing the decoded payload.
    """
    import os
    # mkstemp returns an open OS-level file descriptor; the original code
    # discarded it (tempfile.mkstemp()[1]) and leaked one fd per call.
    fd, temp = tempfile.mkstemp()
    log.debug("Write payload in temp file {!r}".format(temp))
    with os.fdopen(fd, 'wb') as f:
        if payload:
            payload = base64.b64decode(payload)
        elif objectInput:
            # stdin exposes a binary buffer on Python 3 only.
            if six.PY3:
                payload = objectInput.buffer.read()
            elif six.PY2:
                payload = objectInput.read()
        f.write(payload)
    return temp
This function writes a base64 payload or file object on disk. Args: payload (string): payload in base64 objectInput (object): file object/standard input to analyze Returns: Path of file
juraj-google-style
def _factored_dims(self, shape):
    """Decide whether to use a factored second-moment estimator for `shape`.

    Args:
        shape: a Shape.

    Returns:
        A list of the two largest mtf.Dimensions to reduce over, or None when
        factoring is disabled, the shape has fewer than 2 dims, or the second
        largest dim is below the configured minimum size.
    """
    if not self._factored:
        return None
    if shape.ndims < 2:
        return None
    # Largest dims first; negated size keeps the sort stable for ties.
    by_size = sorted(shape.dims, key=lambda d: -d.size)
    if by_size[1].size < self._min_dim_size_to_factor:
        return None
    return by_size[:2]
Should we use a factored second moment estimator. Based on the shape of the variable. If we factor the accumulator, then this function returns a list of two mtf.Dimensions to reduce over. We always pick the two largest dimensions. If there are not two dimensions of size >= min_dim_size_to_factor, then we do not factor. Args: shape: a Shape Returns: either a list of 2 Dimensions or None
codesearchnet
def _set_options_from_file(self, file_handle):
    """Parse a unit file and update ``self._data['options']``.

    Args:
        file_handle (file): a file-like object (supporting read()) containing
            a unit.

    Returns:
        True: the file was successfully parsed and options were updated.

    Raises:
        ValueError: the unit contents are not valid.
    """
    options = []
    line_number = 0
    section = None
    for line in file_handle.read().splitlines():
        line_number += 1
        orig_line = line
        line = line.strip()
        # Skip blank lines and comment lines. (The comment test was garbled
        # in the source; '#' is the unit-file comment character.)
        if not line or line.startswith('#'):
            continue
        # "[Section]" headers start a new section.
        if line.startswith('[') and line.endswith(']'):
            section = line.strip('[]')
            continue
        if not section:
            # Fixed: the original format string was missing the closing ')'.
            raise ValueError(
                'Unable to parse unit file; '
                'Unexpected line outside of a section: {0} (line: {1})'.format(
                    line, line_number
                ))
        # A trailing backslash on the previous option's value means this
        # line continues that value.
        continuation = False
        try:
            if options[-1]['value'].endswith('\\'):
                options[-1]['value'] = options[-1]['value'][:-1]
                continuation = True
        except IndexError:
            pass
        try:
            if continuation:
                # Append the raw line to preserve original spacing.
                options[-1]['value'] += orig_line
                continue
            name, value = line.split('=', 1)
            options.append({
                'section': section,
                'name': name,
                'value': value
            })
        except ValueError:
            raise ValueError(
                'Unable to parse unit file; '
                'Malformed line in section {0}: {1} (line: {2})'.format(
                    section, line, line_number
                ))
    self._data['options'] = options
    return True
Parses a unit file and updates self._data['options'] Args: file_handle (file): a file-like object (supporting read()) containing a unit Returns: True: The file was successfuly parsed and options were updated Raises: IOError: from_file was specified and it does not exist ValueError: The unit contents specified in from_string or from_file is not valid
juraj-google-style
def short(cls, path):
    """Return the short form of `path`.

    Strips any registered prefix in ``cls.paths`` and replaces ``cls.home``
    with '~'. Example: short("/Users/joe/foo") => "foo" when "/Users/joe"
    is registered; otherwise "~/foo".

    Args:
        path: Path to represent in its short form.

    Returns:
        (str): Short form, using '~' if applicable.
    """
    if not path:
        return path
    text = str(path)
    for prefix in (cls.paths or ()):
        if prefix:
            text = text.replace(prefix + "/", "")
    return text.replace(cls.home, "~")
Example: short("examined /Users/joe/foo") => "examined ~/foo" Args: path: Path to represent in its short form Returns: (str): Short form, using '~' if applicable
juraj-google-style
def _previous_block_never_completed(self, current_block, previous_block, new_state):
    """Check whether the previous instrumentation method block never completed.

    Args:
        current_block: _InstrumentationBlock, the current block, checked for
            being a different instrumentation test method.
        previous_block: _InstrumentationBlock, the previous block, checked for
            an incomplete status.
        new_state: _InstrumentationBlockStates, the parser's next state, used
            to detect the run ending with an incomplete test.

    Returns:
        A boolean indicating whether the previous block completed executing.
    """
    if not previous_block:
        return False
    was_timing = previous_block.status_code in _InstrumentationStatusCodeCategories.TIMING
    starts_new = (
        current_block.status_code == _InstrumentationStatusCodes.START
        or new_state == _InstrumentationBlockStates.RESULT
    )
    return was_timing and starts_new
Checks if the previous instrumentation method block completed. Args: current_block: _InstrumentationBlock, the current instrumentation block to check for being a different instrumentation test method. previous_block: _InstrumentationBlock, the previous instrumentation block to check for an incomplete status. new_state: _InstrumentationBlockStates, the next state for the parser, used to check for the instrumentation run ending with an incomplete test. Returns: A boolean indicating whether the previous instrumentation block completed executing.
github-repos
def binary_crossentropy(target, output, from_logits=False):
    """Binary crossentropy between an output tensor and a target tensor.

    Args:
        target: A tensor with the same shape as `output`.
        output: A tensor.
        from_logits: Whether `output` is expected to be a logits tensor. By
            default, `output` is treated as a probability distribution.

    Returns:
        A tensor of the element-wise binary crossentropy.

    Raises:
        ValueError: If `target` and `output` differ in rank or in any
            statically known dimension.
    """
    target = tf.convert_to_tensor(target)
    output = tf.convert_to_tensor(output)
    # Rank must match exactly; shapes are compared dimension-by-dimension,
    # skipping dims that are not statically known.
    if len(target.shape) != len(output.shape):
        raise ValueError(f'Arguments `target` and `output` must have the same rank (ndim). Received: target.shape={target.shape}, output.shape={output.shape}')
    for e1, e2 in zip(target.shape, output.shape):
        if e1 is not None and e2 is not None and (e1 != e2):
            raise ValueError(f'Arguments `target` and `output` must have the same shape. Received: target.shape={target.shape}, output.shape={output.shape}')
    # May unwrap a trailing Sigmoid op so the fused logits kernel can be used.
    output, from_logits = _get_logits(output, from_logits, 'Sigmoid', 'binary_crossentropy')
    if from_logits:
        return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
    # Clip probabilities away from 0 and 1 to avoid log(0).
    output = tf.clip_by_value(output, backend.epsilon(), 1.0 - backend.epsilon())
    bce = target * tf.math.log(output)
    bce += (1 - target) * tf.math.log(1 - output)
    return -bce
Binary crossentropy between an output tensor and a target tensor. Args: target: A tensor with the same shape as `output`. output: A tensor. from_logits: Whether `output` is expected to be a logits tensor. By default, we consider that `output` encodes a probability distribution. Returns: A tensor.
github-repos
def from_api_repr(cls, resource):
    """Factory: create a policy from a JSON resource.

    Overrides the base class version to decode the ``etag`` field from
    base64 and store it as bytes.

    Args:
        resource (dict): JSON policy resource returned by the
            ``getIamPolicy`` REST API.

    Returns:
        :class:`Policy`: the parsed policy.
    """
    etag = resource.get("etag")
    if etag is None:
        return super(Policy, cls).from_api_repr(resource)
    # Copy before mutating so the caller's dict is left untouched.
    decoded = resource.copy()
    decoded["etag"] = base64.b64decode(etag.encode("ascii"))
    return super(Policy, cls).from_api_repr(decoded)
Factory: create a policy from a JSON resource. Overrides the base class version to store :attr:`etag` as bytes. Args: resource (dict): JSON policy resource returned by the ``getIamPolicy`` REST API. Returns: :class:`Policy`: the parsed policy
juraj-google-style
def _get_log_file(self, handler):
    """Generate the log file path for a given handler.

    Args:
        handler: The handler configuration dictionary for which a log file
            path should be generated. Uses 'log_dir', and optionally
            'file_name_pattern' and 'path'; remaining keys are available to
            the filename's str.format placeholders.

    Returns:
        str: the expanded log file path (strftime applied in UTC).
    """
    filename = handler.get('file_name_pattern', '%Y-%m-%d-%H-%M-%S-{name}.pcap')
    parts = [handler['log_dir']]
    if 'path' in handler:
        parts.append(handler['path'])
    parts.append(filename)
    log_file = os.path.join(*parts)
    # Expand strftime patterns first (UTC), then {name}-style placeholders.
    log_file = time.strftime(log_file, time.gmtime())
    return log_file.format(**handler)
Generate log file path for a given handler Args: handler: The handler configuration dictionary for which a log file path should be generated.
codesearchnet
def no_selenium_errors(func):
    """Decorator creating an `EmptyPromise` check satisfied only when `func`
    runs without a Selenium error.

    Protects against timing issues (e.g. `StaleElementException` after a
    JavaScript DOM update) by returning False so the promise retries.

    Args:
        func (callable): The function to execute, with retries if an error
            occurs.

    Returns:
        Decorated function.
    """
    def _inner(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except WebDriverException:
            # Swallow the error; a False return makes the promise retry.
            LOGGER.warning(u'Exception ignored during retry loop:', exc_info=True)
            return False
        return result
    return _inner
Decorator to create an `EmptyPromise` check function that is satisfied only when `func` executes without a Selenium error. This protects against many common test failures due to timing issues. For example, accessing an element after it has been modified by JavaScript ordinarily results in a `StaleElementException`. Methods decorated with `no_selenium_errors` will simply retry if that happens, which makes tests more robust. Args: func (callable): The function to execute, with retries if an error occurs. Returns: Decorated function
codesearchnet
def from_file(feff_inp_file='feff.inp', ldos_file='ldos'):
    """Create an LDos object from raw Feff ldos output files.

    Assumes the ldos files are numbered consecutively (ldos01.dat,
    ldos02.dat, ...).

    Args:
        feff_inp_file (str): input file of the run, used to obtain the
            structure and the potential-index mapping.
        ldos_file (str): prefix of the output ldos files of the run, used to
            obtain dos info, the Fermi level, etc.

    Returns:
        LDos: complete dos plus charge transfer parsed from the run.
    """
    header_str = Header.header_string_from_file(feff_inp_file)
    header = Header.from_string(header_str)
    structure = header.struct
    nsites = structure.num_sites
    parameters = Tags.from_file(feff_inp_file)
    if "RECIPROCAL" in parameters:
        # Reciprocal-space runs: the element -> potential-index map must be
        # recovered from pot.inp, between the "iz ... folp" header line and
        # the "ExternalPot" line.
        pot_dict = dict()
        pot_readstart = re.compile('.*iz.*lmaxsc.*xnatph.*xion.*folp.*')
        pot_readend = re.compile('.*ExternalPot.*switch.*')
        pot_inp = re.sub(r'feff.inp', r'pot.inp', feff_inp_file)
        dos_index = 1
        begin = 0
        with zopen(pot_inp, "r") as potfile:
            for line in potfile:
                if len(pot_readend.findall(line)) > 0:
                    break
                if begin == 1:
                    # Skip the single line following the header.
                    begin += 1
                    continue
                if begin == 2:
                    # Lines carry the atomic number first; keep the smallest
                    # dos index seen for each element.
                    z_number = int(line.strip().split()[0])
                    ele_name = Element.from_Z(z_number).name
                    if ele_name not in pot_dict:
                        pot_dict[ele_name] = dos_index
                    else:
                        pot_dict[ele_name] = min(dos_index, pot_dict[ele_name])
                    dos_index += 1
                if len(pot_readstart.findall(line)) > 0:
                    begin = 1
    else:
        # Real-space runs: the map is embedded in feff.inp's POTENTIALS block.
        pot_string = Potential.pot_string_from_file(feff_inp_file)
        dicts = Potential.pot_dict_from_string(pot_string)
        pot_dict = dicts[0]
    # The Fermi energy is the 5th field of the first line of ldos00.dat.
    with zopen(ldos_file + "00.dat", "r") as fobject:
        f = fobject.readlines()
        efermi = float(f[0].split()[4])
    dos_energies = []
    ldos = {}
    # Load one ldos file per potential; file names are zero-padded to 2 digits.
    for i in range(1, len(pot_dict) + 1):
        if len(str(i)) == 1:
            ldos[i] = np.loadtxt("{}0{}.dat".format(ldos_file, i))
        else:
            ldos[i] = np.loadtxt("{}{}.dat".format(ldos_file, i))
    # Column 0 of any ldos file is the shared energy grid.
    for i in range(0, len(ldos[1])):
        dos_energies.append(ldos[1][i][0])
    all_pdos = []
    vorb = {"s": Orbital.s, "p": Orbital.py, "d": Orbital.dxy, "f": Orbital.f0}
    forb = {"s": 0, "p": 1, "d": 2, "f": 3}
    dlength = len(ldos[1])
    # Per-site projected dos: column forb[k]+1 of the site's potential file.
    for i in range(nsites):
        pot_index = pot_dict[structure.species[i].symbol]
        all_pdos.append(defaultdict(dict))
        for k, v in vorb.items():
            density = [ldos[pot_index][j][forb[k] + 1] for j in range(dlength)]
            updos = density
            # Feff ldos output is not spin-polarized, so downdos is always
            # None and only the Spin.up branch is ever taken.
            downdos = None
            if downdos:
                all_pdos[-1][v] = {Spin.up: updos, Spin.down: downdos}
            else:
                all_pdos[-1][v] = {Spin.up: updos}
    pdos = all_pdos
    vorb2 = {0: Orbital.s, 1: Orbital.py, 2: Orbital.dxy, 3: Orbital.f0}
    pdoss = {structure[i]: {v: pdos[i][v] for v in vorb2.values()} for i in range(len(pdos))}
    forb = {"s": 0, "p": 1, "d": 2, "f": 3}
    # Total dos: sum every orbital column over every site.
    tdos = [0] * dlength
    for i in range(nsites):
        pot_index = pot_dict[structure.species[i].symbol]
        for v in forb.values():
            density = [ldos[pot_index][j][v + 1] for j in range(dlength)]
            for j in range(dlength):
                tdos[j] = tdos[j] + density[j]
    tdos = {Spin.up: tdos}
    dos = Dos(efermi, dos_energies, tdos)
    complete_dos = CompleteDos(structure, dos, pdoss)
    charge_transfer = LDos.charge_transfer_from_file(feff_inp_file, ldos_file)
    return LDos(complete_dos, charge_transfer)
Creates LDos object from raw Feff ldos files by assuming they are numbered consecutively, i.e. ldos01.dat ldos02.dat... Args: feff_inp_file (str): input file of run to obtain structure ldos_file (str): output ldos file of run to obtain dos info, etc.
juraj-google-style
def flag(self, diagnostic, thresh=None):
    """Return indices of `diagnostic` results that satisfy the threshold.

    Args:
        diagnostic (str): name of the diagnostic; a key into both
            ``self.results`` and the class-level ``self.defaults``.
        thresh (func): threshold function (boolean predicate) applied to each
            element; when None the class default for this diagnostic is used.

    Returns:
        Indices where the predicate holds.
    """
    if thresh is None:
        thresh = self.defaults[diagnostic]
    result = self.results[diagnostic]
    if isinstance(result, pd.DataFrame):
        if diagnostic == 'CorrelationMatrix':
            # Self-correlations are always 1; zero the diagonal on a copy so
            # they cannot trip the threshold.
            result = result.copy()
            np.fill_diagonal(result.values, 0)
        # NOTE(review): Series.nonzero() was removed in pandas 1.0; this code
        # requires an older pandas (modern equivalent: np.nonzero(...)).
        return result.applymap(thresh).sum().nonzero()[0]
    else:
        return result.apply(thresh).nonzero()[0]
Returns indices of diagnostic that satisfy (return True from) the threshold predicate. Will use class-level default threshold if None provided. Args: diagnostic (str): name of the diagnostic thresh (func): threshold function (boolean predicate) to apply to each element
juraj-google-style
def __init__(self, learning_rate, l1_regularization_strength=0.0, l2_regularization_strength=0.0, use_locking=False, name='ProximalGradientDescent'):
    """Construct a new proximal gradient descent optimizer.

    Args:
        learning_rate: A Tensor or a floating point value. The learning rate
            to use.
        l1_regularization_strength: A float value, must be >= 0.
        l2_regularization_strength: A float value, must be >= 0.
        use_locking: If True use locks for update operations.
        name: Optional name prefix for the operations created when applying
            gradients.
    """
    super(ProximalGradientDescentOptimizer, self).__init__(use_locking, name)
    # Raw hyperparameters; tensor versions are created lazily in _prepare.
    self._learning_rate = learning_rate
    self._l1_regularization_strength = l1_regularization_strength
    self._l1_regularization_strength_tensor = None
    self._l2_regularization_strength = l2_regularization_strength
    self._l2_regularization_strength_tensor = None
Construct a new proximal gradient descent optimizer. Args: learning_rate: A Tensor or a floating point value. The learning rate to use. l1_regularization_strength: A float value, must be greater than or equal to zero. l2_regularization_strength: A float value, must be greater than or equal to zero. use_locking: If True use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to "GradientDescent".
github-repos
def is_ordered(cat_id):
    """Check whether a catalog ID has been ordered.

    Args:
        cat_id (str): The catalog ID from the platform catalog.

    Returns:
        bool: whether the image has been ordered (HTTP 200 from the orders
        endpoint); False when the request failed entirely.
    """
    # NOTE(review): the URL literal below is truncated/garbled (unterminated
    # string) -- restore the full orders-API endpoint (built from cat_id)
    # before using this function.
    url = 'https:
    auth = Auth()
    r = _req_with_retries(auth.gbdx_connection, url)
    if r is not None:
        return r.status_code == 200
    return False
Checks to see if a CatalogID has been ordered or not. Args: cat_id (str): The catalog ID from the platform catalog. Returns: ordered (bool): Whether or not the image has been ordered
juraj-google-style
def solid_named(self, name):
    """Return the solid named `name`; raise if it does not exist.

    Args:
        name (str): Name of the solid.

    Returns:
        SolidDefinition: the solid with that name.

    Raises:
        DagsterInvariantViolationError: no solid with that name exists in
            this pipeline.
    """
    check.str_param(name, 'name')
    if name in self._solid_dict:
        return self._solid_dict[name]
    raise DagsterInvariantViolationError(
        'Pipeline {pipeline_name} has no solid named {name}.'.format(
            pipeline_name=self.name, name=name
        )
    )
Return the solid named "name". Throws if it does not exist. Args: name (str): Name of solid Returns: SolidDefinition: SolidDefinition with correct name.
juraj-google-style
def gpu_devices(devices=None):
    """Filter `devices` down to GPU devices.

    Args:
        devices: A device list (as a list of strings). If None, the list of
            all available devices is used.

    Returns:
        Those entries in `devices` that are GPUs.
    """
    device_kind = 'GPU'
    return find_devices(device_kind, devices)
Gets GPU devices out of `devices`. Args: devices: A device list (as a list of strings). If None, the list of all available devices will be used for it. Returns: Those in `devices` that are GPUs.
github-repos
def sys_save_screenshot(name: Optional[str] = None) -> None:
    """Save a screenshot to a file.

    When `name` is None the library picks the first free automatic name of
    the form screenshotNNN.png in the working directory.

    Args:
        name: File path to save the screenshot to, or None for automatic
            naming.
    """
    encoded = ffi.NULL if name is None else _bytes(name)
    lib.TCOD_sys_save_screenshot(encoded)
Save a screenshot to a file. By default this will automatically save screenshots in the working directory. The automatic names are formatted as screenshotNNN.png. For example: screenshot000.png, screenshot001.png, etc. Whichever is available first. Args: name (Optional[str]): File path to save screenshot.
juraj-google-style
def suggest(self, query):
    """Return a suggested page title for `query`, or None if none found.

    Args:
        query (str): Page title.

    Returns:
        String or None: the search suggestion, else the first search result,
        else None.
    """
    res, suggestion = self.search(query, results=1, suggestion=True)
    if suggestion:
        return suggestion
    # No explicit suggestion: fall back to the top search result, if any.
    try:
        return res[0]
    except IndexError:
        return None
Gather suggestions based on the provided title or None if no suggestions found Args: query (str): Page title Returns: String or None: Suggested page title or **None** if no \ suggestion found
codesearchnet
def change(script, layer_num=None):
    """Change the current layer by specifying the new layer number.

    Args:
        script: the mlx.FilterScript object or script filename to write the
            filter to.
        layer_num (int): the number of the layer to change to. Default is the
            last layer if script is a mlx.FilterScript object; if script is a
            filename the default is the first layer.

    Layer stack:
        Modifies current layer.

    MeshLab versions:
        2016.12, 1.3.4BETA
    """
    is_filter_script = isinstance(script, mlx.FilterScript)
    if layer_num is None:
        layer_num = script.last_layer() if is_filter_script else 0
    filter_xml = ''.join([' <filter name="Change the current layer">\n', ' <Param name="mesh" ', 'value="{:d}" '.format(layer_num), 'description="Mesh" ', 'type="RichMesh" ', '/>\n', ' </filter>\n'])
    util.write_filter(script, filter_xml)
    # Keep the FilterScript's notion of the current layer in sync.
    if is_filter_script:
        script.set_current_layer(layer_num)
    return None
Change the current layer by specifying the new layer number. Args: script: the mlx.FilterScript object or script filename to write the filter to. layer_num (int): the number of the layer to change to. Default is the last layer if script is a mlx.FilterScript object; if script is a filename the default is the first layer. Layer stack: Modifies current layer MeshLab versions: 2016.12 1.3.4BETA
codesearchnet
def from_file(cls, filename):
    """Read a Fiesta input from a file.

    Currently tested to work with files generated by this class itself.

    Args:
        filename: Filename to parse.

    Returns:
        FiestaInput object.
    """
    with zopen(filename) as handle:
        contents = handle.read()
    return cls.from_string(contents)
Read an Fiesta input from a file. Currently tested to work with files generated from this class itself. Args: filename: Filename to parse. Returns: FiestaInput object
codesearchnet
def _free_array(self, handle: int):
    """Free the memory for the array with the given handle.

    Args:
        handle: The handle of the array whose memory should be freed. This
            handle must come from the _create_array method. Freeing an
            already-freed handle is a no-op.
    """
    with self._lock:
        if self._arrays[handle] is None:
            return
        self._arrays[handle] = None
        self._count -= 1
Frees the memory for the array with the given handle. Args: handle: The handle of the array whose memory should be freed. This handle must come from the _create_array method.
codesearchnet
def _FormatSocketUnixToken(self, token_data):
    """Format a Unix socket token as a dictionary of values.

    Args:
        token_data (bsm_token_data_sockunix): AUT_SOCKUNIX token data.

    Returns:
        dict[str, str]: token values.
    """
    family = token_data.socket_family
    return {
        'protocols': bsmtoken.BSM_PROTOCOLS.get(family, 'UNKNOWN'),
        'family': family,
        'path': token_data.socket_path}
Formats an Unix socket token as a dictionary of values. Args: token_data (bsm_token_data_sockunix): AUT_SOCKUNIX token data. Returns: dict[str, str]: token values.
juraj-google-style
def convert_elementwise_add(
    params, w_name, scope_name, inputs, layers, weights, names
):
    """Convert elementwise addition.

    Args:
        params: dictionary with layer parameters.
        w_name: name prefix in state_dict.
        scope_name: pytorch scope name.
        inputs: pytorch node inputs.
        layers: dictionary with keras tensors.
        weights: pytorch state_dict.
        names: use short names for keras layers ('short' / 'keep' / other).
    """
    print('Converting elementwise_add ...')
    # The layer-name policy is identical in both branches; hoisted here to
    # remove the duplicated code of the original.
    if names == 'short':
        tf_name = 'A' + random_string(7)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())
    input_tensors = [layers[inputs[0]], layers[inputs[1]]]
    if 'broadcast' in params:
        # Broadcasting adds need tf.add; keras.layers.Add requires equal shapes.
        def target_layer(x):
            layer = tf.add(x[0], x[1])
            return layer
        lambda_layer = keras.layers.Lambda(target_layer, name=tf_name)
        layers[scope_name] = lambda_layer(input_tensors)
    else:
        add = keras.layers.Add(name=tf_name)
        layers[scope_name] = add(input_tensors)
Convert elementwise addition. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def disconnect_async(self, conn_id, callback):
    """Asynchronously disconnect from a previously connected device.

    Args:
        conn_id (int): a unique identifier for this connection on the
            DeviceManager that owns this adapter.
        callback (callable): called as callback(conn_id, adapter_id, success,
            failure_reason) when the disconnection finishes. Disconnection can
            only either succeed or time out.
    """
    try:
        context = self.conns.get_context(conn_id)
    except ArgumentError:
        callback(conn_id, self.id, False, "Could not find connection information")
        return
    self.conns.begin_disconnection(conn_id, callback, self.get_config('default_timeout'))
    disconn_message = {
        'key': context['key'],
        'client': self.name,
        'type': 'command',
        'operation': 'disconnect',
    }
    self.client.publish(context['topics'].action, disconn_message)
Asynchronously disconnect from a device that has previously been connected Args: conn_id (int): a unique identifier for this connection on the DeviceManager that owns this adapter. callback (callable): A function called as callback(conn_id, adapter_id, success, failure_reason) when the disconnection finishes. Disconnection can only either succeed or timeout.
juraj-google-style
def create_document(self, doc: Dict, mime_type: str = None, url: str = "http: doc_id=None, type_=None) -> Document:
    """Factory method to wrap input JSON docs in an ETK Document object.

    NOTE(review): the default value of `url` above is a truncated string
    literal (unterminated quote) from the source; restore the full default
    URL before use.

    Args:
        doc: a JSON object containing a document in CDR format.
        mime_type: if doc is a string, the mime_type tells what it is.
        url: if the doc came from the web, specifies the URL for it.
        doc_id: optional identifier for the document.
        type_: type applied to the wrapped document.

    Returns:
        The wrapped Document.
    """
    return Document(self, doc, mime_type, url, doc_id=doc_id).with_type(type_)
Factory method to wrap input JSON docs in an ETK Document object. Args: doc (object): a JSON object containing a document in CDR format. mime_type (str): if doc is a string, the mime_type tells what it is url (str): if the doc came from the web, specifies the URL for it doc_id type_ Returns: wrapped Document
juraj-google-style
def round(self, decimals=0):
    """Round element-wise, returning an object of the same type.

    Wrapper around numpy.round.

    Args:
        decimals: Number of decimal places to round to (default: 0). If
            negative, it specifies the number of positions to the left of the
            decimal point.

    Returns:
        (Tensor): rounded tensor of the same type.
    """
    rounded = np.round(self, decimals=decimals)
    return self.__class__(rounded)
Wrapper around numpy.round to ensure object of same type is returned Args: decimals :Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns (Tensor): rounded tensor of same type
juraj-google-style
def _check_disabled(self):
    """Check if the health check is disabled.

    Logs a message if the check is disabled and enqueues an add/del
    operation depending on the 'on_disabled' setting.

    Returns:
        True if check is disabled otherwise False.
    """
    if not self.config['check_disabled']:
        return False
    action = self.config['on_disabled']
    if action == 'withdraw':
        self.log.info('Check is disabled and ip_prefix will be withdrawn')
        self.log.info('adding %s in the queue', self.ip_with_prefixlen)
        self.action.put(self.del_operation)
        self.log.info('Check is now permanently disabled')
    elif action == 'advertise':
        self.log.info('check is disabled, ip_prefix wont be withdrawn')
        self.log.info('adding %s in the queue', self.ip_with_prefixlen)
        self.action.put(self.add_operation)
        self.log.info('check is now permanently disabled')
    return True
Check if health check is disabled. It logs a message if health check is disabled and it also adds an item to the action queue based on 'on_disabled' setting. Returns: True if check is disabled otherwise False.
codesearchnet
def set_task(project_, task_):
    """Set the active project and task.

    All subsequent logging is saved to the database under this project and
    task.

    Args:
        project_ (str): active project name; a project can have multiple
            tasks.
        task_ (str): active task name.
    """
    global project, task
    project, task = project_, task_
    msg.okay("Set project name to {}.{}".format(project, task), 2)
Sets the active project and task. All subsequent logging will be saved to the database with that project and task. Args: project_ (str): active project name; a project can have multiple tasks. task_ (str): active task name. Logging is separated at the project and task level.
juraj-google-style
def ch_start_time(self, *channels: List[Channel]) -> int:
    """Return the earliest start time over the given channels.

    Args:
        *channels: Channels over which to obtain the start time. Channels
            absent from the internal table are ignored.

    Returns:
        The minimum interval begin, or 0 when no intervals exist.
    """
    intervals = []
    for chan in channels:
        if chan in self._table:
            intervals.extend(self._table[chan])
    if not intervals:
        return 0
    return min(interval.begin for interval in intervals)
Return earliest start time in this collection. Args: *channels: Channels over which to obtain start_time.
juraj-google-style
def _to_bfloat16_unbiased(x, noise):
    """Convert a float32 to a bfloat16 using randomized roundoff.

    Args:
        x: A float32 Tensor.
        noise: a float32 Tensor with values in [0, 1), broadcastable to
            tf.shape(x).

    Returns:
        A float32 Tensor (x rounded to bfloat16 precision, sign restored).
    """
    # Work on magnitudes so the two rounding candidates bracket |x|; the
    # sign is multiplied back in at the end.
    x_sign = tf.sign(x)
    # Tiny epsilon keeps exact zeros away from degenerate candidates.
    x = ((x * x_sign) + 1e-30)
    cand1 = tf.to_bfloat16(x)
    cand1_f = tf.to_float(cand1)
    # Second candidate: nudge off cand1 toward the side |x| actually lies on,
    # so cand1/cand2 straddle the true value.
    cand2 = tf.to_bfloat16(tf.where(tf.greater(x, cand1_f), (cand1_f * 1.005), (cand1_f * 0.995)))
    # Pick between the candidates with probability proportional to distance,
    # making the rounding unbiased in expectation.
    ret = _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2)
    return (ret * tf.to_bfloat16(x_sign))
Convert a float32 to a bfloat16 using randomized roundoff. Args: x: A float32 Tensor. noise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x) Returns: A float32 Tensor.
codesearchnet
def get_layer_policy(layer):
    """Return the dtype policy of a layer.

    Warning: This function is deprecated. Use
    `tf.keras.layers.Layer.dtype_policy` instead.

    Args:
        layer: A `tf.keras.layers.Layer`.

    Returns:
        The `tf.keras.mixed_precision.Policy` of the layer.

    Raises:
        ValueError: `layer` is not a Layer instance.
    """
    if isinstance(layer, base_layer.Layer):
        return layer.dtype_policy
    raise ValueError('get_policy can only be called on a layer, but got: %s' % (layer,))
Returns the dtype policy of a layer. Warning: This function is deprecated. Use `tf.keras.layers.Layer.dtype_policy` instead. Args: layer: A `tf.keras.layers.Layer`. Returns: The `tf.keras.mixed_precision.Policy` of the layer.
github-repos
def __init__(self, expression, options=None, **kwargs):
    """Initialize a new instance of the ExpressionDescriptor class.

    Args:
        expression: The cron expression string.
        options: Options to control the output description.
        **kwargs: individual option overrides, applied onto `options`.

    Raises:
        WrongArgumentException: if a kwarg is not a known option.
    """
    if options is None:
        options = Options()
    self._expression = expression
    self._options = options
    self._expression_parts = []
    self._parsed = False
    for kwarg in kwargs:
        if hasattr(self._options, kwarg):
            setattr(self._options, kwarg, kwargs[kwarg])
        else:
            # Fixed typo in the error message ("Unknow" -> "Unknown").
            raise WrongArgumentException(
                "Unknown {} configuration argument".format(kwarg))
    # Install the translation catalog for the configured locale.
    GetText(options.locale_code)
Initializes a new instance of the ExpressionDescriptor class Args: expression: The cron expression string options: Options to control the output description Raises: WrongArgumentException: if kwarg is unknown
juraj-google-style
def put_async(self, path, value):
    """Put a value to a path and return immediately.

    Args:
        path (list): The path to put to.
        value (object): The value to set.

    Returns:
        Future: A single Future which will resolve to the result.
    """
    request = Put(self._get_next_id(), path, value)
    # Responses are delivered back through the internal queue.
    request.set_callback(self._q.put)
    return self._dispatch_request(request)
Puts a value to a path and returns immediately Args: path (list): The path to put to value (object): The value to set Returns: Future: A single Future which will resolve to the result
codesearchnet
def trace_flush(self):
    """Flush the trace buffer.

    After this method is called, the trace buffer is empty. Best called when
    the device is reset.

    Args:
        self (JLink): the ``JLink`` instance.

    Returns:
        ``None``

    Raises:
        JLinkException: the DLL reported a failure.
    """
    res = self._dll.JLINKARM_TRACE_Control(enums.JLinkTraceCommand.FLUSH, 0)
    # The DLL signals failure with a return value of 1.
    if res == 1:
        raise errors.JLinkException('Failed to flush the trace buffer.')
    return None
Flushes the trace buffer. After this method is called, the trace buffer is empty. This method is best called when the device is reset. Args: self (JLink): the ``JLink`` instance. Returns: ``None``
codesearchnet
def get_edgestore_handle(client: arango.client.ArangoClient, username=None, password=None, edgestore_db_name: str=edgestore_db_name, edgestore_edges_name: str=edgestore_edges_name, edgestore_nodes_name: str=edgestore_nodes_name, edgestore_pipeline_name: str=edgestore_pipeline_name, edgestore_pipeline_stats_name: str=edgestore_pipeline_stats_name, edgestore_pipeline_errors_name: str=edgestore_pipeline_errors_name) -> arango.database.StandardDatabase:
    """Get an Edgestore ArangoDB database handle, creating the database,
    collections and indexes on first use (idempotent).

    Args:
        client (arango.client.ArangoClient): connected Arango client.
        username: optional username; resolved via get_user_creds.
        password: optional password; resolved via get_user_creds.
        edgestore_db_name (str): database name.
        edgestore_edges_name (str): edge collection name.
        edgestore_nodes_name (str): node collection name.
        edgestore_pipeline_name (str): pipeline collection name.
        edgestore_pipeline_stats_name (str): pipeline stats collection name.
        edgestore_pipeline_errors_name (str): pipeline errors collection name.

    Returns:
        arango.database.StandardDatabase: handle to the edgestore database.
    """
    (username, password) = get_user_creds(username, password)
    sys_db = client.db('_system', username=username, password=password)
    # Create the database if missing; fall back to opening it when it
    # already exists (DatabaseCreateError).
    try:
        if (username and password):
            edgestore_db = sys_db.create_database(name=edgestore_db_name, users=[{'username': username, 'password': password, 'active': True}])
        else:
            edgestore_db = sys_db.create_database(name=edgestore_db_name)
    except arango.exceptions.DatabaseCreateError:
        if (username and password):
            edgestore_db = client.db(edgestore_db_name, username=username, password=password)
        else:
            edgestore_db = client.db(edgestore_db_name)
    # Each collection/index creation below is best-effort: a broad except
    # swallows "already exists" errors so repeated calls are harmless.
    try:
        nodes = edgestore_db.create_collection(edgestore_nodes_name, index_bucket_count=64)
        nodes.add_hash_index(fields=['name'], unique=False)
        nodes.add_hash_index(fields=['components'], unique=False)
    except Exception:
        pass
    try:
        edges = edgestore_db.create_collection(edgestore_edges_name, edge=True, index_bucket_count=64)
        edges.add_hash_index(fields=['relation'], unique=False)
        edges.add_hash_index(fields=['edge_types'], unique=False)
        edges.add_hash_index(fields=['nanopub_id'], unique=False)
        edges.add_hash_index(fields=['metadata.project'], unique=False)
        edges.add_hash_index(fields=['annotations[*].id'], unique=False)
    except Exception:
        pass
    try:
        edgestore_db.create_collection(edgestore_pipeline_name)
    except Exception:
        pass
    try:
        edgestore_db.create_collection(edgestore_pipeline_errors_name)
    except Exception:
        pass
    try:
        edgestore_db.create_collection(edgestore_pipeline_stats_name)
    except arango.exceptions.CollectionCreateError as e:
        pass
    return edgestore_db
Get Edgestore arangodb database handle Args: client (arango.client.ArangoClient): Description username (None, optional): Description password (None, optional): Description edgestore_db_name (str, optional): Description edgestore_edges_name (str, optional): Description edgestore_nodes_name (str, optional): Description Returns: arango.database.StandardDatabase: Description
codesearchnet
def __setRouterSelectionJitter(self, iRouterJitter):
    """Set the ROUTER_SELECTION_JITTER parameter for a REED to upgrade to Router.

    NOTE: this module uses Python 2 syntax (print statements,
    ``except Exception, e``).

    Args:
        iRouterJitter: a random period prior to requesting a Router ID for
            the REED.

    Returns:
        True: successfully set ROUTER_SELECTION_JITTER.
        False/None: failed to set ROUTER_SELECTION_JITTER (exceptions are
            logged and swallowed).
    """
    print 'call _setRouterSelectionJitter'
    try:
        cmd = 'routerselectionjitter %s' % str(iRouterJitter)
        print cmd
        # The device acknowledges a successful set with 'Done'.
        return self.__sendCommand(cmd) == 'Done'
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("setRouterSelectionJitter() Error: " + str(e))
set ROUTER_SELECTION_JITTER parameter for REED to upgrade to Router Args: iRouterJitter: a random period prior to request Router ID for REED Returns: True: successful to set the ROUTER_SELECTION_JITTER False: fail to set ROUTER_SELECTION_JITTER
juraj-google-style
def Skew(poly, dist=None, **kws):
    """Skewness operator: element-by-element 3rd order statistics.

    Args:
        poly (Poly, Dist): Input to take skewness of. A bare distribution is
            interpreted as "skewness of the identity variable".
        dist (Dist): Space the skewness is taken over. Ignored when ``poly``
            is a distribution.
        **kws: accepted for interface compatibility; unused here.

    Returns:
        (numpy.ndarray): skewness with ``out.shape == poly.shape``.
    """
    if isinstance(poly, distributions.Dist):
        # Replace the distribution by the identity polynomial over it.
        x = polynomials.variable(len(poly))
        (poly, dist) = (x, poly)
    else:
        poly = polynomials.Poly(poly)
    if (poly.dim < len(dist)):
        polynomials.setdim(poly, len(dist))
    shape = poly.shape
    poly = polynomials.flatten(poly)
    # First three raw moments over the distribution.
    m1 = E(poly, dist)
    m2 = E((poly ** 2), dist)
    m3 = E((poly ** 3), dist)
    # Standardized third central moment:
    # (m3 - 3*m2*m1 + 2*m1^3) / (m2 - m1^2)^1.5
    out = (((m3 - ((3 * m2) * m1)) + (2 * (m1 ** 3))) / ((m2 - (m1 ** 2)) ** 1.5))
    out = numpy.reshape(out, shape)
    return out
Skewness operator. Element by element 3rd order statistics of a distribution or polynomial. Args: poly (Poly, Dist): Input to take skewness on. dist (Dist): Defines the space the skewness is taken on. It is ignored if ``poly`` is a distribution. Returns: (numpy.ndarray): Element for element variance along ``poly``, where ``skewness.shape == poly.shape``. Examples: >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2)) >>> print(chaospy.Skew(dist)) [2. 0.] >>> x, y = chaospy.variable(2) >>> poly = chaospy.Poly([1, x, y, 10*x*y]) >>> print(chaospy.Skew(poly, dist)) [nan 2. 0. 0.]
codesearchnet
def fit_to_cols(what, indent='', cols=79):
    """Wrap the given text to `cols` columns, prepending `indent` to each
    line.

    Args:
        what (str): text to wrap.
        indent (str): indentation to use.
        cols (int): column to wrap to.

    Returns:
        str: Wrapped text.
    """
    wrapped = []
    remaining = what
    while remaining:
        remaining, chunk = split_line(what=remaining, cols=cols, indent=indent)
        wrapped.append(chunk)
    return '\n'.join(wrapped)
Wrap the given text to the columns, prepending the indent to each line. Args: what(str): text to wrap. indent(str): indentation to use. cols(int): colt to wrap to. Returns: str: Wrapped text
codesearchnet
def load_pos_model(lang="en", version="2"):
    """Return part-of-speech tagger parameters for `lang` at `version`.

    Args:
        lang (string): language code.
        version (string): version of the parameters to be used.

    Returns:
        dict: parameter name -> numpy array.
    """
    resource_path = locate_resource("pos{}".format(version), lang)
    handle = _open(resource_path)
    return dict(np.load(handle))
Return a part of speech tagger parameters for `lang` and of version `version` Args: lang (string): language code. version (string): version of the parameters to be used.
juraj-google-style
def static_lengths(self, ragged_lengths=True):
    """Returns a list of statically known axis lengths.

    For each row partition, the entry is the uniform row length (if
    statically known), the tuple of row lengths, or None. For the inner
    shape, each known dimension is reported, None otherwise; an unknown
    inner rank ends the list with an ellipsis.

    Args:
        ragged_lengths: If False, returns None for all ragged dimensions.

    Returns:
        A Sequence[Union[Sequence[int], int, None]] of lengths, with a
        possible Ellipsis at the end.
    """
    # No partitions: the shape is just the inner shape.
    if self.num_row_partitions == 0:
        return self._static_inner_shape_as_list(False)
    first_dim = self.row_partitions[0].static_nrows
    # Normalize Dimension objects to plain ints (or None).
    if isinstance(first_dim, tensor_shape.Dimension):
        first_dim = first_dim.value
    rp_dims = [first_dim]
    for rp in self.row_partitions:
        if rp.is_uniform():
            rp_dims.append(rp.static_uniform_row_length)
        elif ragged_lengths:
            # Report the concrete per-row lengths when they are constants.
            const_vals = tensor_util.constant_value(rp.row_lengths())
            if const_vals is None:
                rp_dims.append(None)
            else:
                rp_dims.append(tuple(const_vals.tolist()))
        else:
            rp_dims.append(None)
    return rp_dims + self._static_inner_shape_as_list(True)
Returns a list of statically known axis lengths. This represents what values are known. For each row partition, it presents either the uniform row length (if statically known), the list of row lengths, or none if it is not statically known. For the inner shape, if the rank is known, then each dimension is reported if known, and None otherwise. If the rank of the inner shape is not known, then the returned list ends with an ellipsis. Args: ragged_lengths: If false, returns None for all ragged dimensions. Returns: A Sequence[Union[Sequence[int],int, None]] of lengths, with a possible Ellipsis at the end.
github-repos
def set_result(self, result):
    """Set the result of the future to the provided result.

    Args:
        result (Any): The result.

    Raises:
        RuntimeError: the future already has a result.
    """
    if not self.done():
        self._result = result
        # Wake up anything waiting on this future.
        self._trigger()
        return
    raise RuntimeError('set_result can only be called once.')
Set the result of the future to the provided result. Args: result (Any): The result
codesearchnet
def stream(self, report, callback=None):
    """Queue data for streaming.

    Args:
        report (IOTileReport): A report object to stream to a client.
        callback (callable): An optional callback invoked with True when the
            report is actually streamed; if the client disconnected and the
            report was dropped, it is invoked with False.
    """
    conn_id = self._find_connection(self.conn_string)
    is_broadcast = isinstance(report, BroadcastReport)
    if is_broadcast:
        # Broadcasts go out regardless of an active connection.
        self.adapter.notify_event_nowait(self.conn_string, 'broadcast', report)
    elif conn_id is not None:
        self.adapter.notify_event_nowait(self.conn_string, 'report', report)
    if callback is not None:
        callback(is_broadcast or conn_id is not None)
Queue data for streaming Args: report (IOTileReport): A report object to stream to a client callback (callable): An optional callback that will be called with a bool value of True when this report actually gets streamed. If the client disconnects and the report is dropped instead, callback will be called with False
codesearchnet
def _unescape_token(token):
    r"""Replaces escaped characters in the token with their unescaped versions.

    Applies the inverse transformations of _escape_token():
    1. Replace "\u" with "_", and "\\" with "\".
    2. Replace "\###;" with the unicode character the ### refers to.

    Args:
        token: escaped string.

    Returns:
        unescaped string.
    """
    def match(m):
        # Group 1 absent means a literal "\u" or "\\" escape.
        if m.group(1) is None:
            return u"_" if m.group(0) == u"\\u" else u"\\"
        # Group 1 is a decimal codepoint; invalid values map to a sentinel.
        try:
            return six.unichr(int(m.group(1)))
        except (ValueError, OverflowError):
            return _UNDEFINED_UNICODE
    return _UNESCAPE_REGEX.sub(match, token)
r"""Replaces escaped characters in the token with their unescaped versions. Applies inverse transformations as _escape_token(): 1. Replace "\u" with "_", and "\\" with "\". 2. Replace "\###;" with the unicode character the ### refers to. Args: token: escaped string Returns: unescaped string
juraj-google-style
def get_sendback(self, uuid, key):
    """Return a function for sending progress messages back to the caller.

    Args:
        uuid (str): UUID of the received message.
        key (str): Routing key.

    Returns:
        fn reference: function taking a single data argument, which
        serializes it and sends it back over the original route.
    """
    def send_back_callback(data):
        payload = serializers.serialize(data)
        self.sendResponse(payload, uuid, key)
    return send_back_callback
Return function for sending progress messages back to original caller. Args: uuid (str): UUID of the received message. key (str): Routing key. Returns: fn reference: Reference to function which takes only one data \ argument.
codesearchnet
def truncate_too_long_number(numobj):
    """Truncate a number object that is too long.

    Attempts to extract a valid number from a phone number that is too long
    to be valid, by repeatedly stripping the last digit of the national
    number. On success, the PhoneNumber passed in is reset to that valid
    version; otherwise it is left unmodified.

    Arguments:
    numobj -- A PhoneNumber object which contains a number that is too long
              to be valid.

    Returns True if a valid phone number could be successfully extracted.
    """
    if is_valid_number(numobj):
        return True
    numobj_copy = PhoneNumber()
    numobj_copy.merge_from(numobj)
    national_number = numobj.national_number
    while not is_valid_number(numobj_copy):
        # Strip a digit from the right side each iteration. The original
        # code assigned the number to itself, which never terminated.
        national_number = national_number // 10
        numobj_copy.national_number = national_number
        validation_result = is_possible_number_with_reason(numobj_copy)
        if (validation_result == ValidationResult.TOO_SHORT or
                national_number == 0):
            return False
    numobj.national_number = national_number
    return True
Truncate a number object that is too long. Attempts to extract a valid number from a phone number that is too long to be valid, and resets the PhoneNumber object passed in to that valid version. If no valid number could be extracted, the PhoneNumber object passed in will not be modified. Arguments: numobj -- A PhoneNumber object which contains a number that is too long to be valid. Returns True if a valid phone number can be successfully extracted.
juraj-google-style
async def change_file(self, file_path: str, description: str = None):
    """Replace the file backing this attachment (coroutine).

    Args:
        file_path: Path to the file whose bytes become the new asset.
        description: Optional description for the attachment.
            NOTE(review): this argument is currently unused — only the
            asset bytes are sent to ``_change``; confirm intent.
    """
    # Read the whole file as bytes and hand it to the API layer.
    with open(file_path, 'rb') as f:
        await self._change(asset=f.read())
change the file of that attachment |methcoro| Warning: |unstable| Args: file_path: path to the file you want to add / modify description: *optional* description for your attachment Raises: ValueError: file_path must not be None APIException
juraj-google-style
def str_delimited(results, header=None, delimiter="\t"):
    """Render a 2-d sequence as delimiter-separated rows, one row per line.

    Args:
        results: 2-d sequence of arbitrary, str()-able values.
        header: optional sequence of column names, emitted as a first line
            followed by a newline.
        delimiter: separator placed between fields (tab by default).

    Returns:
        str: Aligned string output in a table-like format.
    """
    head = "" if header is None else delimiter.join(header) + "\n"
    body = "\n".join(
        delimiter.join(str(cell) for cell in row) for row in results
    )
    return head + body
Given a tuple of tuples, generate a delimited string form. >>> results = [["a","b","c"],["d","e","f"],[1,2,3]] >>> print(str_delimited(results,delimiter=",")) a,b,c d,e,f 1,2,3 Args: result: 2d sequence of arbitrary types. header: optional header Returns: Aligned string output in a table-like format.
juraj-google-style
def __init__(self, current):
    """Dump and clear the objects logged in ``sys.PYOKO_LOGS``.

    Collects the stored JSON bodies of every logged (model, key) pair,
    writes them to ``current.output`` as a plain-text response, then
    resets the per-model log lists.

    Args:
        current: Request/response wrapper; ``current.output`` receives a
            dict with the ``response`` text and plain-text ``http_headers``.
    """
    import sys
    from pyoko.modelmeta import model_registry
    out = []
    for mdl_name in sys.PYOKO_LOGS.copy():
        try:
            mdl = model_registry.get_model(mdl_name)
        except KeyError:
            # Skip registry keys that are not real models (e.g. 'new').
            continue
        bucket_name = mdl.objects.adapter.bucket.name
        # Temporarily decode stored JSON values as raw text.
        mdl.objects.adapter.bucket.set_decoder('application/json',
                                               lambda a: bytes_to_str(a))
        for k in set(sys.PYOKO_LOGS[mdl_name]):
            if k not in sys.PYOKO_LOGS['new']:
                obj = mdl.objects.data().get(k)
                print(obj)  # NOTE(review): looks like leftover debug output
                out.append("{}/|{}/|{}".format(
                    bucket_name, k, obj[0]))
        sys.PYOKO_LOGS[mdl_name] = []
        # Restore the original JSON decoder.
        mdl.objects.adapter.bucket.set_decoder('application/json',
                                               binary_json_decoder)
    sys.PYOKO_LOGS['new'] = []
    current.output = {
        'response': "\n".join(out),
        'http_headers': (('Content-Type', 'text/plain; charset=utf-8'), ),
    }
GET method handler Args: req: Request object. resp: Response object.
juraj-google-style
def get_asn_origin_whois(self, asn_registry='radb', asn=None,
                         retry_count=3, server=None, port=43):
    """Retrieve raw CIDR/origin info for an ASN via a whois query.

    Args:
        asn_registry (str): The source to query (key into ASN_ORIGIN_WHOIS).
        asn (str): The AS number (required).
        retry_count (int): Number of retries on socket errors or rate
            limiting. Defaults to 3.
        server (str): Optional whois server to connect to; defaults to the
            registry's configured server.
        port (int): Network port to connect on. Defaults to 43.

    Returns:
        str: The raw ASN origin whois response.

    Raises:
        WhoisLookupError: The ASN origin whois lookup failed.
        WhoisRateLimitError: Rate limited and retries were exhausted.
    """
    try:
        if (server is None):
            server = ASN_ORIGIN_WHOIS[asn_registry]['server']
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.settimeout(self.timeout)
        log.debug('ASN origin WHOIS query for {0} at {1}:{2}'.format(asn, server, port))
        conn.connect((server, port))
        # '-i origin' asks the whois server for routes originated by the ASN.
        query = ' -i origin {0}{1}'.format(asn, '\r\n')
        conn.send(query.encode())
        response = ''
        # Read until the server closes the connection (empty recv).
        while True:
            d = conn.recv(4096).decode()
            response += d
            if (not d):
                break
        conn.close()
        if ('Query rate limit exceeded' in response):
            if (retry_count > 0):
                # Back off for a second, then retry with one fewer attempt.
                log.debug('ASN origin WHOIS query rate limit exceeded. Waiting...')
                sleep(1)
                return self.get_asn_origin_whois(asn_registry=asn_registry, asn=asn, retry_count=(retry_count - 1), server=server, port=port)
            else:
                raise WhoisRateLimitError('ASN origin Whois lookup failed for {0}. Rate limit exceeded, wait and try again (possibly a temporary block).'.format(asn))
        elif (('error 501' in response) or ('error 230' in response)):
            # Server-side query errors; surface as ValueError to the
            # generic handler below.
            log.debug('ASN origin WHOIS query error: {0}'.format(response))
            raise ValueError
        return str(response)
    except (socket.timeout, socket.error) as e:
        log.debug('ASN origin WHOIS query socket error: {0}'.format(e))
        if (retry_count > 0):
            log.debug('ASN origin WHOIS query retrying (count: {0})'.format(str(retry_count)))
            return self.get_asn_origin_whois(asn_registry=asn_registry, asn=asn, retry_count=(retry_count - 1), server=server, port=port)
        else:
            raise WhoisLookupError('ASN origin WHOIS lookup failed for {0}.'.format(asn))
    except WhoisRateLimitError:
        raise
    except:
        # NOTE(review): bare except converts every other failure (including
        # the ValueError raised above) into WhoisLookupError.
        raise WhoisLookupError('ASN origin WHOIS lookup failed for {0}.'.format(asn))
The function for retrieving CIDR info for an ASN via whois. Args: asn_registry (:obj:`str`): The source to run the query against (asn.ASN_ORIGIN_WHOIS). asn (:obj:`str`): The AS number (required). retry_count (:obj:`int`): The number of times to retry in case socket errors, timeouts, connection resets, etc. are encountered. Defaults to 3. server (:obj:`str`): An optional server to connect to. port (:obj:`int`): The network port to connect on. Defaults to 43. Returns: str: The raw ASN origin whois data. Raises: WhoisLookupError: The ASN origin whois lookup failed. WhoisRateLimitError: The ASN origin Whois request rate limited and retries were exhausted.
codesearchnet
def db_wb004(self, value=None):
    """Set IDD Field `db_wb004`.

    Mean coincident dry-bulb temperature for the wet-bulb temperature at
    0.4% annual cumulative frequency of occurrence. Unit: C.

    Args:
        value (float): Value for IDD Field `db_wb004`. ``None`` marks a
            missing value and is stored without validation.

    Raises:
        ValueError: If `value` cannot be converted to float.
    """
    if value is None:
        self._db_wb004 = None
        return
    try:
        value = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float for field `db_wb004`'.format(value))
    self._db_wb004 = value
Corresponds to IDD Field `db_wb004`: mean coincident dry-bulb temperature for the wet-bulb temperature corresponding to 0.4% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `db_wb004` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def collect_results(rule, max_results=500, result_stream_args=None):
    """Fetch a list of tweets via a throwaway ``ResultStream``.

    Args:
        rule (str): Valid PowerTrack rule for your account, preferably
            generated by ``gen_rule_payload``.
        max_results (int): Maximum number of tweets or counts to return
            from the API / underlying ``ResultStream``.
        result_stream_args (dict): Connection configuration for the
            ``ResultStream`` object; required.

    Returns:
        list of results.

    Raises:
        KeyError: If ``result_stream_args`` is not provided.
    """
    if result_stream_args is None:
        logger.error('This function requires a configuration dict for the inner ResultStream object.')
        raise KeyError
    stream = ResultStream(rule_payload=rule, max_results=max_results,
                          **result_stream_args)
    return list(stream.stream())
Utility function to quickly get a list of tweets from a ``ResultStream`` without keeping the object around. Requires your args to be configured prior to using. Args: rule (str): valid powertrack rule for your account, preferably generated by the `gen_rule_payload` function. max_results (int): maximum number of tweets or counts to return from the API / underlying ``ResultStream`` object. result_stream_args (dict): configuration dict that has connection information for a ``ResultStream`` object. Returns: list of results Example: >>> from searchtweets import collect_results >>> tweets = collect_results(rule, max_results=500, result_stream_args=search_args)
codesearchnet
def audio_bottom(x, model_hparams, vocab_size):
    """Transform audio input from data space to model space.

    Args:
        x: A Tensor with shape [batch, ...] holding byte-valued audio.
        model_hparams: HParams; uses ``audio_compression`` (number of 2x
            downsampling blocks) and ``hidden_size``.
        vocab_size: int, vocabulary size (unused for audio).

    Returns:
        body_input: A Tensor with shape
        [batch, ?, ?, model_hparams.hidden_size].
    """
    del vocab_size
    inputs = x
    with tf.variable_scope("audio_modality"):
        def xnet_resblock(x, filters, res_relu, name):
            # Separable-conv block + 2x2 max-pool, with a strided 1x1 conv
            # residual shortcut so both paths downsample identically.
            with tf.variable_scope(name):
                y = common_layers.separable_conv_block(
                    x,
                    filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))],
                    first_relu=True,
                    padding="SAME",
                    force2d=True,
                    name="sep_conv_block")
                y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2))
                return y + common_layers.conv_block(
                    x,
                    filters, [((1, 1), (1, 1))],
                    padding="SAME",
                    strides=(2, 2),
                    first_relu=res_relu,
                    force2d=True,
                    name="res_conv0")

        # Scale byte-valued samples into [0, 1] and fix a single channel.
        x = tf.to_float(inputs) / 255.
        x.set_shape([None, None, None, 1])
        for i in range(model_hparams.audio_compression):
            x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i)
        return xnet_resblock(x, model_hparams.hidden_size, False,
                             "compress_block_final")
Transform input from data space to model space. Args: x: A Tensor with shape [batch, ...] model_hparams: HParams, model hyperparmeters. vocab_size: int, vocabulary size. Returns: body_input: A Tensor with shape [batch, ?, ?, model_hparams.hidden_size].
juraj-google-style
def _copy_fn(fn): if (not callable(fn)): raise TypeError('fn is not callable: {}'.format(fn)) return types.FunctionType(code=fn.__code__, globals=fn.__globals__, name=fn.__name__, argdefs=fn.__defaults__, closure=fn.__closure__)
Create a deep copy of fn. Args: fn: a callable Returns: A `FunctionType`: a deep copy of fn. Raises: TypeError: if `fn` is not a callable.
codesearchnet
def to_json_string(self):
    """Serialize this instance to a JSON formatted string.

    Returns:
        str: Pretty-printed (2-space indent) JSON representation of the
        instance's attributes, terminated by a newline.
    """
    return '{}\n'.format(json.dumps(self.__dict__, indent=2))
Serializes this instance to a JSON formatted string. Returns: str: JSON formatted string representing the configuration instance.
github-repos
def send_html(self, html, body=None, msgtype="m.text"):
    """Send an HTML-formatted message to this room.

    Args:
        html (str): The HTML-formatted message to be sent.
        body (str): The unformatted fallback body of the message.
        msgtype (str): The Matrix message type (defaults to "m.text").
    """
    content = self.get_html_content(html, body, msgtype)
    return self.client.api.send_message_event(
        self.room_id, "m.room.message", content)
Send an html formatted message. Args: html (str): The html formatted message to be sent. body (str): The unformatted body of the message to be sent.
juraj-google-style
def add_signature_block(src_fileobj, dest_fileobj, signing_algorithm, signature=None):
    """Add a signature block to a MAR file.

    Any existing signatures are overwritten; the rest of the archive
    (additional sections, data, index) is copied through with offsets
    adjusted for the new signature block.

    Args:
        src_fileobj (file object): The input MAR file to add a signature to.
        dest_fileobj (file object): File object to write the new MAR file
            to. Must be open in w+b mode.
        signing_algorithm (str): One of 'sha1' or 'sha384'.
        signature (bytes): Signature to write, or None to write a dummy
            signature of the right size.
    """
    # Map the algorithm name to its MAR signature algorithm id.
    algo_id = {'sha1': 1, 'sha384': 2}[signing_algorithm]
    if not signature:
        signature = make_dummy_signature(algo_id)
    src_fileobj.seek(0)
    mardata = mar.parse_stream(src_fileobj)
    # Copy the MAR header through unchanged for now; index_offset is
    # patched in at the end once the final layout is known.
    header = mardata.header
    dest_fileobj.write(mar_header.build(header))
    sig = dict(algorithm_id=algo_id,
               size=len(signature),
               signature=signature,
               )
    # filesize is unknown at this point; a placeholder block is written
    # here and rewritten with the real size at the end.
    filesize = 0
    sigs_offset = dest_fileobj.tell()
    sigs = sigs_header.build(dict(
        filesize=filesize,
        count=1,
        sigs=[sig],
    ))
    dest_fileobj.write(sigs)
    # Copy the additional (extras) section through unchanged.
    dest_fileobj.write(extras_header.build(mardata.additional))
    # Copy the data payload, remembering where it lands in the new file.
    data_offset = dest_fileobj.tell()
    src_fileobj.seek(mardata.data_offset)
    write_to_file(takeexactly(src_fileobj, mardata.data_length), dest_fileobj)
    index_offset = dest_fileobj.tell()
    index = mardata.index
    # Shift every index entry by how far the data moved.
    data_offset_delta = data_offset - mardata.data_offset
    for e in index.entries:
        e.offset += data_offset_delta
    dest_fileobj.write(index_header.build(index))
    filesize = dest_fileobj.tell()
    # Rewrite the MAR header with the real index offset...
    dest_fileobj.seek(0)
    header.index_offset = index_offset
    dest_fileobj.write(mar_header.build(header))
    # ...and the signature block with the real file size.
    dest_fileobj.seek(sigs_offset)
    sigs = sigs_header.build(dict(
        filesize=filesize,
        count=1,
        sigs=[sig],
    ))
    dest_fileobj.write(sigs)
Add a signature block to marfile, a MarReader object. Productversion and channel are preserved, but any existing signatures are overwritten. Args: src_fileobj (file object): The input MAR file to add a signature to dest_fileobj (file object): File object to write new MAR file to. Must be open in w+b mode. signing_algorithm (str): One of 'sha1', or 'sha384' signature (bytes): Signature to write, or None to use a dummy signature
juraj-google-style
def from_value(cls, ion_type, value, annotations=()):
    """Construct a copy of *value* with an associated Ion type and annotations.

    Args:
        ion_type (IonType): The associated Ion type.
        value (Any): The value to construct from, generally of type ``cls``;
            ``None`` produces an IonPyNull.
        annotations (Sequence[unicode]): Unicode strings decorating the value.

    Returns:
        The constructed value with ``ion_type`` and ``ion_annotations`` set.
    """
    if value is None:
        result = IonPyNull()
    else:
        args, kwargs = cls._to_constructor_args(value)
        result = cls(*args, **kwargs)
    result.ion_event = None
    result.ion_type = ion_type
    result.ion_annotations = annotations
    return result
Constructs a value as a copy with an associated Ion type and annotations. Args: ion_type (IonType): The associated Ion type. value (Any): The value to construct from, generally of type ``cls``. annotations (Sequence[unicode]): The sequence Unicode strings decorating this value.
juraj-google-style
def out_file_name(out_dir, fname, ext=None):
    """Return the output path for *fname* inside *out_dir*.

    If ``fname`` is a path, it is reduced to its basename when no extension
    is given; when ``ext`` is provided the original extension is replaced.

    Args:
        out_dir (str): Directory where output should be written.
        fname (str): Path to the input file.
        ext (str): File extension for the output file (defaults to None,
            which keeps the original extension).

    Returns:
        str: out_dir joined with fname, extension replaced if ``ext`` given.
    """
    if ext is None:
        return os.path.join(out_dir, os.path.basename(fname))
    stripped = remove_ext(fname)
    return os.path.join(out_dir, '{}.{}'.format(stripped, ext))
Return path of output file, given a directory, file name and extension. If fname is a path, it is converted to its basename. Args: out_dir (str): path to the directory where output should be written. fname (str): path to the input file. ext (str): file extension of the output file (defaults to None). Returns: str: out_dir + fname with extension replaced. If `ext` is `None`, the original extension is kept.
codesearchnet
def from_string(string):
    """Read a ZeoCssr object from its string representation.

    Args:
        string: A string representation of a ZeoCSSR.

    Returns:
        ZeoCssr object.
    """
    lines = string.split("\n")
    toks = lines[0].split()
    lengths = [float(i) for i in toks]
    toks = lines[1].split()
    angles = [float(i) for i in toks[0:3]]
    # The CSSR stores the lattice parameters rotated (last value belongs
    # first); rotate lengths and angles back into a, b, c / alpha, beta,
    # gamma order for Lattice.
    a = lengths.pop(-1)
    lengths.insert(0, a)
    alpha = angles.pop(-1)
    angles.insert(0, alpha)
    latt = Lattice.from_lengths_and_angles(lengths, angles)
    sp = []
    coords = []
    chrg = []
    for l in lines[4:]:
        # Site line: id, species, three coords, eight zeros, then charge.
        m = re.match(r'\d+\s+(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+' +
                     r'([0-9\-\.]+)\s+(?:0\s+){8}([0-9\-\.]+)', l.strip())
        if m:
            sp.append(m.group(1))
            # Reorder coordinate columns to match the rotated lattice.
            coords.append([float(m.group(i)) for i in [3, 4, 2]])
            chrg.append(m.group(5))
    return ZeoCssr(
        Structure(latt, sp, coords, site_properties={'charge': chrg})
    )
Reads a string representation to a ZeoCssr object. Args: string: A string representation of a ZeoCSSR. Returns: ZeoCssr object.
juraj-google-style
def is_registered(self, cuuid, host):
    """Check whether a client uuid is currently registered from *host*.

    Args:
        cuuid (string): The client uuid to look up.
        host (tuple): The (address, port) tuple of the client.

    Returns:
        bool: True if the client is registered from that host, else False.
    """
    return cuuid in self.registry and self.registry[cuuid]["host"] == host
This function will check to see if a given host with client uuid is currently registered. Args: cuuid (string): The client uuid that wishes to register. host (tuple): The (address, port) tuple of the client that is registering. Returns: Will return True if the client is registered and will return False if it is not.
juraj-google-style
def remove_interceptor(self, name):
    """Remove a specific interceptor by name.

    An interceptor matches if its class name equals *name* or if its
    ``name`` attribute equals *name*.

    Arguments:
        name (str): interceptor name to disable.

    Returns:
        bool: `True` if the interceptor was disabled, otherwise `False`.
    """
    for index, interceptor in enumerate(self.interceptors):
        matches = (
            type(interceptor).__name__ == name
            # Default to None so interceptors without a ``name`` attribute
            # do not raise AttributeError.
            or getattr(interceptor, 'name', None) == name
        )
        if matches:
            # Safe to mutate here: we return immediately after popping.
            self.interceptors.pop(index)
            return True
    return False
Removes a specific interceptor by name. Arguments: name (str): interceptor name to disable. Returns: bool: `True` if the interceptor was disabled, otherwise `False`.
juraj-google-style
def pause():
    """Pause the timer, preventing subsequent time from accumulating.

    Renders the timer inactive, disabling other timing commands until it
    is resumed.

    Returns:
        float: The current time.

    Raises:
        PausedError: If the timer is already paused.
        StoppedError: If the timer has been stopped.
    """
    t = timer()
    if f.t.stopped:
        raise StoppedError('Cannot pause stopped timer.')
    if f.t.paused:
        raise PausedError('Timer already paused.')
    f.t.paused = True
    # Fold the elapsed interval since the last start into the running total.
    f.t.tmp_total += (t - f.t.start_t)
    # Clear start markers so resumed timing starts from a fresh timestamp.
    f.t.start_t = None
    f.t.last_t = None
    return t
Pause the timer, preventing subsequent time from accumulating in the total. Renders the timer inactive, disabling other timing commands. Returns: float: The current time. Raises: PausedError: If timer already paused. StoppedError: If timer already stopped.
codesearchnet
def write_label_list(path, label_list):
    """Write the given label list to an Audacity label file.

    Args:
        path (str): Path to write the file to.
        label_list (audiomate.annotations.LabelList): Labels to write; each
            entry becomes a tab-separated (start, end, value) line.
    """
    entries = [[label.start, label.end, label.value] for label in label_list]
    textfile.write_separated_lines(path, entries, separator='\t')
Writes the given `label_list` to an audacity label file. Args: path (str): Path to write the file to. label_list (audiomate.annotations.LabelList): Label list
codesearchnet
def make_fixture(model_class, **kwargs):
    """Generate a fixture instance of *model_class*.

    Args:
        model_class (MongoEngine Document): model for which a fixture is
            needed.
        kwargs (dict): field overrides used instead of random values.

    Returns:
        An instance of ``model_class`` populated with the merged values.

    Raises:
        AssertionError: If the number of generated values does not match
            the number of model fields.
    """
    all_fields = get_fields(model_class)
    # Materialize as a list: the original used map(), whose result has no
    # .remove() in Python 3 and raised AttributeError on any override.
    fields_for_random_generation = [
        getattr(model_class, field_name) for field_name in all_fields
    ]
    overrides = {}
    for kwarg, value in kwargs.items():
        if kwarg in all_fields:
            kwarg_field = getattr(model_class, kwarg)
            fields_for_random_generation.remove(kwarg_field)
            overrides[kwarg_field] = value
    random_values = get_random_values(fields_for_random_generation)
    values = dict(overrides, **random_values)
    assert len(all_fields) == len(values), (
        "Mismatch in values, {} != {}".format(len(all_fields), len(values))
    )
    data = {k.name: v for k, v in values.items()}
    return model_class(**data)
Take the model_class and generate a fixture for it Args: model_class (MongoEngine Document): model for which a fixture is needed kwargs (dict): any overrides instead of random values Returns: dict for now, other fixture types are not implemented yet
juraj-google-style
def _scale_size(size, scale): (w, h) = size return (int(((w * float(scale)) + 0.5)), int(((h * float(scale)) + 0.5)))
Rescale a size by a ratio. Args: size (tuple): w, h. scale (float): Scaling factor. Returns: tuple[int]: scaled size.
codesearchnet
def lookup(self, obj):
    """Look up *obj* in the registry by isinstance match.

    Args:
        obj: The object to look up within the registry.

    Returns:
        Value registered for the first type that *obj* is an instance of.

    Raises:
        LookupError: if no registered type matches *obj*.
    """
    for registered_type, value in self._registry.items():
        if isinstance(obj, registered_type):
            return value
    raise LookupError(f'{type(obj)} has not been registered.')
Looks up 'obj'. Args: obj: The object to lookup within the registry. Returns: Value for 'obj' in the registry if found. Raises: LookupError: if 'obj' has not been registered.
github-repos
def myRank(grade, badFormat, year, length):
    """Return the 1-based rank of *grade* within its year.

    Arguments:
        grade (int): weighted average for a specific candidate and year.
        badFormat (dict): candNumber -> [results for candidate].
        year (int): the year being ranked.
        length (int): length of each row in badFormat divided by 2.

    Returns:
        int: rank of the grade within the year (1 = highest).
    """
    ranked = sorted(everyonesAverage(year, badFormat, length), reverse=True)
    return ranked.index(grade) + 1
rank of candidateNumber in year Arguments: grade {int} -- a weighted average for a specific candidate number and year badFormat {dict} -- candNumber : [results for candidate] year {int} -- year you are in length {int} -- length of each row in badFormat divided by 2 Returns: int -- rank of candidateNumber in year
juraj-google-style
def get_version_details(self, version_name):
    """Get details of a model version.

    Args:
        version_name: name of the version in short form, such as "v1".

    Returns:
        A dictionary containing the version details.
    """
    full_name = '{}/versions/{}'.format(self._full_model_name, version_name)
    request = self._api.projects().models().versions().get(name=full_name)
    return request.execute()
Get details of a version. Args: version: the name of the version in short form, such as "v1". Returns: a dictionary containing the version details.
juraj-google-style
def create_temp(node, namer):
    """Create a temporary variable node for storing *node*'s value.

    Args:
        node: The gast Name/Attribute/Subscript node to create a temporary
            for.
        namer: A naming object that guarantees the names are unique.

    Returns:
        A simple gast.Name annotated with `temp_var`; the annotation points
        back at the original node.

    Raises:
        TypeError: If *node* is not a Name, Attribute, or Subscript.
    """
    if isinstance(node, gast.Name):
        name = node.id
    elif isinstance(node, (gast.Attribute, gast.Subscript)):
        # Derive the temp name from the base value being accessed.
        name = node.value.id
    else:
        raise TypeError
    temp_node = gast.Name(id=namer.temp(name), annotation=None, ctx=None)
    anno.setanno(temp_node, 'temp_var', node)
    return temp_node
Create a temporary variable. Args: node: Create a temporary variable to store this variable in. namer: A naming object that guarantees the names are unique. Returns: node: See `create_grad`. Returns a temporary variable, which is always a simple variable annotated with `temp_var`.
juraj-google-style
def render_wrapper(self, region='us-east-1'):
    """Generate the base Pipeline wrapper.

    Renders the non-repeatable stages in a pipeline (jenkins trigger,
    baking, tagging and notifications).

    Args:
        region (str): AWS Region.

    Returns:
        dict: Rendered Pipeline wrapper.
    """
    base = self.settings['pipeline']['base']
    if self.base:
        # An explicit base on the instance overrides the settings value.
        base = self.base
    email = self.settings['pipeline']['notifications']['email']
    slack = self.settings['pipeline']['notifications']['slack']
    deploy_type = self.settings['pipeline']['type']
    # Reuse the id of an existing pipeline when one matches.
    pipeline_id = self.compare_with_existing(region=region)
    data = {
        'app': {
            'appname': self.app_name,
            'group_name': self.group_name,
            'repo_name': self.repo_name,
            'base': base,
            'deploy_type': deploy_type,
            'environment': 'packaging',
            'region': region,
            'triggerjob': self.trigger_job,
            'run_as_user': DEFAULT_RUN_AS_USER,
            'email': email,
            'slack': slack,
            'pipeline': self.settings['pipeline']
        },
        'id': pipeline_id
    }
    self.log.debug('Wrapper app data:\n%s', pformat(data))
    wrapper = get_template(template_file='pipeline/pipeline_wrapper.json.j2', data=data, formats=self.generated)
    return json.loads(wrapper)
Generate the base Pipeline wrapper. This renders the non-repeatable stages in a pipeline, like jenkins, baking, tagging and notifications. Args: region (str): AWS Region. Returns: dict: Rendered Pipeline wrapper.
juraj-google-style
def __init__(self, inputs, num_clusters, initial_clusters, distance_metric, random_seed, kmeans_plus_plus_num_retries, kmc2_chain_length, cluster_centers, cluster_centers_updated, cluster_centers_initialized):
    """Creates an op factory for k-means cluster initialization.

    Args:
        inputs: See KMeans constructor.
        num_clusters: An integer Tensor providing the number of clusters.
        initial_clusters: See KMeans constructor.
        distance_metric: See KMeans constructor.
        random_seed: See KMeans constructor.
        kmeans_plus_plus_num_retries: See KMeans constructor.
        kmc2_chain_length: See KMeans constructor.
        cluster_centers: The TF variable holding the initial centers; may
            already contain some centers when the op executes.
        cluster_centers_updated: A second TF variable holding a copy of the
            centers for full-batch mode; in mini-batch mode this is the
            same variable as cluster_centers.
        cluster_centers_initialized: A boolean TF variable set to true once
            all the initial centers have been chosen.
    """
    self._inputs = inputs
    self._num_clusters = num_clusters
    self._initial_clusters = initial_clusters
    self._distance_metric = distance_metric
    self._seed = random_seed
    self._kmeans_plus_plus_num_retries = kmeans_plus_plus_num_retries
    self._kmc2_chain_length = kmc2_chain_length
    self._cluster_centers = cluster_centers
    self._cluster_centers_updated = cluster_centers_updated
    self._cluster_centers_initialized = cluster_centers_initialized
    # Number of centers already chosen vs. still needed.
    self._num_selected = array_ops.shape(self._cluster_centers)[0]
    self._num_remaining = self._num_clusters - self._num_selected
    # Total number of input points across all input shards.
    self._num_data = math_ops.add_n([array_ops.shape(i)[0] for i in self._inputs])
Creates an op factory. Args: inputs: See KMeans constructor. num_clusters: An integer Tensor providing the number of clusters. initial_clusters: See KMeans constructor. distance_metric: See KMeans constructor. random_seed: See KMeans constructor. kmeans_plus_plus_num_retries: See KMeans constructor. kmc2_chain_length: See KMeans constructor. cluster_centers: The TF variable holding the initial centers. It may already contain some centers when the op is executed. cluster_centers_updated: A second TF variable to hold a copy of the initial centers, used for full-batch mode. In mini-batch mode, cluster_centers_updated is the same variable as cluster_centers. cluster_centers_initialized: A boolean TF variable that will be set to true when all the initial centers have been chosen.
github-repos
def formula_balance(model):
    """Calculate formula compositions for each reaction.

    Calls :func:`reaction_formula` for each reaction and yields
    (reaction, result) pairs, where result holds two formula compositions
    or `None`.

    Args:
        model: :class:`psamm.datasource.native.NativeModel`.
    """
    # Parse every compound formula up front; unparseable formulas are
    # logged and simply left out of the mapping.
    compound_formula = {}
    for compound in model.compounds:
        if (compound.formula is not None):
            try:
                f = Formula.parse(compound.formula).flattened()
                compound_formula[compound.id] = f
            except ParseError as e:
                msg = 'Error parsing formula for compound {}:\n{}\n{}'.format(compound.id, e, compound.formula)
                if (e.indicator is not None):
                    msg += '\n{}'.format(e.indicator)
                logger.warning(msg)
    for reaction in model.reactions:
        (yield (reaction, reaction_formula(reaction.equation, compound_formula)))
Calculate formula compositions for each reaction. Call :func:`reaction_formula` for each reaction. Yield (reaction, result) pairs, where result has two formula compositions or `None`. Args: model: :class:`psamm.datasource.native.NativeModel`.
codesearchnet
def sample_forecast_max_hail(self, dist_model_name, condition_model_name, num_samples, condition_threshold=0.5, query=None):
    """Sample an empirical distribution of maximum hail size per forecast.

    Hail sizes are sampled from each predicted gamma distribution; for
    every realization the maximum over the object's area is kept.

    Args:
        dist_model_name: Name of the distribution ML model being evaluated.
        condition_model_name: Name of the hail/no-hail model being
            evaluated.
        num_samples: Number of maximum hail samples to draw.
        condition_threshold: Threshold for drawing hail samples.
        query: A str that selects a subset of the data for evaluation.

    Returns:
        numpy array of shape (num_forecasts, num_samples) holding sorted
        maximum hail size samples for each forecast object.
    """
    if (query is not None):
        dist_forecasts = self.matched_forecasts['dist'][dist_model_name].query(query)
        dist_forecasts = dist_forecasts.reset_index(drop=True)
        condition_forecasts = self.matched_forecasts['condition'][condition_model_name].query(query)
        condition_forecasts = condition_forecasts.reset_index(drop=True)
    else:
        dist_forecasts = self.matched_forecasts['dist'][dist_model_name]
        condition_forecasts = self.matched_forecasts['condition'][condition_model_name]
    max_hail_samples = np.zeros((dist_forecasts.shape[0], num_samples))
    areas = dist_forecasts['Area'].values
    for f in np.arange(dist_forecasts.shape[0]):
        condition_prob = condition_forecasts.loc[(f, self.forecast_bins['condition'][0])]
        # Rows below the hail-probability threshold keep their zero samples.
        if (condition_prob >= condition_threshold):
            # Draw num_samples x area sizes from the fitted gamma, keep the
            # per-realization maximum, and sort ascending.
            max_hail_samples[f] = np.sort(gamma.rvs(*dist_forecasts.loc[(f, self.forecast_bins['dist'])].values, size=(num_samples, areas[f])).max(axis=1))
    return max_hail_samples
Samples every forecast hail object and returns an empirical distribution of possible maximum hail sizes. Hail sizes are sampled from each predicted gamma distribution. The total number of samples equals num_samples * area of the hail object. To get the maximum hail size for each realization, the maximum value within each area sample is used. Args: dist_model_name: Name of the distribution machine learning model being evaluated condition_model_name: Name of the hail/no-hail model being evaluated num_samples: Number of maximum hail samples to draw condition_threshold: Threshold for drawing hail samples query: A str that selects a subset of the data for evaluation Returns: A numpy array containing maximum hail samples for each forecast object.
codesearchnet
def __init__(self, http_error):
    """Create a ServerRequestException from a urllib2.HTTPError.

    Args:
        http_error: The HTTPError this exception is based on.
    """
    error_details = None
    error_response = None
    if http_error.fp:
        try:
            error_response = http_error.fp.read()
            error_body = json.loads(error_response)
            error_details = ['%s: %s' % (detail['message'], detail['debug_info']) for detail in error_body['error']['errors']]
        except (ValueError, TypeError, KeyError):
            # Body was not the expected JSON error structure; fall back to
            # reporting the raw response text below.
            pass
    if error_details:
        error_details_str = ', '.join(error_details)
        error_message = ('HTTP %s (%s) error when communicating with URL: %s. '
                         'Details: %s' % (http_error.code, http_error.reason, http_error.filename, error_details_str))
    else:
        error_message = ('HTTP %s (%s) error when communicating with URL: %s. '
                         'Response: %s' % (http_error.code, http_error.reason, http_error.filename, error_response))
    super(ServerRequestException, self).__init__(error_message)
Create a ServerRequestException from a given urllib2.HTTPError. Args: http_error: The HTTPError that the ServerRequestException will be based on.
juraj-google-style
def generate_block_graph(block_graph: blocks.BlockGraph, loader: jinja2.BaseLoader) -> str:
    """Generate the block-graph visualization webpage.

    Args:
        block_graph: blocks.BlockGraph. The block graph of the code.
        loader: A jinja2 loader.

    Returns:
        str. The rendered visualization page.
    """
    encoded_graph = block_serializer.encode_merged_graph(block_graph)
    return _generate_visualization(
        template_file=_BLOCKGRAPH_TEMPLATE_NAME,
        loader=loader,
        graph_data=encoded_graph)
Generate the visualization webpage. Args: block_graph: blocks.BlockGraph. The block graph of the code. loader: A jinja22 loader Returns: str. The rendered visualization page.
github-repos
def setPadding(self, padding):
    """Set new padding characters for the sequence.

    e.g. "#", "@@@", '%04d', or an empty string to disable range
    formatting.

    Args:
        padding (str): sequence padding to set.
    """
    self._pad = padding
    self._zfill = self.__class__.getPaddingNum(padding)
Set new padding characters for the sequence. i.e. "#" or "@@@" or '%04d', or an empty string to disable range formatting. Args: padding (str): sequence padding to set
juraj-google-style
def squad_v1_exact_match(y_true: List[List[str]], y_predicted: List[str]) -> float:
    """Calculate the SQuAD-v1 Exact Match score.

    The best matching gold answer is used: if the prediction equals at
    least one answer in y_true then EM = 1 for that example, else EM = 0.
    Examples without an answer are skipped.

    Args:
        y_true: list of correct answers (each a list of strings).
        y_predicted: list of predicted answers.

    Returns:
        Exact match score as a percentage (float).
    """
    matched = 0
    answered = 0
    for ground_truth, prediction in zip(y_true, y_predicted):
        if len(ground_truth[0]) == 0:
            # No gold answer for this question; skip it.
            continue
        answered += 1
        if any(normalize_answer(gt) == normalize_answer(prediction)
               for gt in ground_truth):
            matched += 1
    return 100 * matched / answered if answered > 0 else 0
Calculates Exact Match score between y_true and y_predicted EM score uses the best matching y_true answer: if y_pred equal at least to one answer in y_true then EM = 1, else EM = 0 Skips examples without an answer. Args: y_true: list of correct answers (correct answers are represented by list of strings) y_predicted: list of predicted answers Returns: exact match score : float
juraj-google-style
def _RemoveAuthorizedKeys(self, user):
    """Remove a Linux user account's authorized keys file to prevent login.

    Args:
        user: string, the Linux user account to remove access.
    """
    pw_entry = self._GetUser(user)
    if not pw_entry:
        # Unknown account: nothing to clean up.
        return
    home_dir = pw_entry.pw_dir
    authorized_keys_file = os.path.join(home_dir, '.ssh', 'authorized_keys')
    if os.path.exists(authorized_keys_file):
        try:
            os.remove(authorized_keys_file)
        except OSError as e:
            # Best effort: log and continue rather than failing cleanup.
            message = 'Could not remove authorized keys for user %s. %s.'
            self.logger.warning(message, user, str(e))
Remove a Linux user account's authorized keys file to prevent login. Args: user: string, the Linux user account to remove access.
juraj-google-style
async def disconnect(self, conn_id):
    """Asynchronously disconnect from a connected device.

    Args:
        conn_id (int): The unique identifier of the connection to close.
    """
    self._ensure_connection(conn_id, True)
    device = self._get_property(conn_id, 'device')
    device.connected = False
    self._teardown_connection(conn_id)
Asynchronously disconnect from a connected device Args: conn_id (int): A unique identifier that will refer to this connection callback (callback): A callback that will be called as callback(conn_id, adapter_id, success, failure_reason)
juraj-google-style
def __init__(self, vlan_id=None):
    """Create an ActionVlanVid with the optional parameters below.

    Args:
        vlan_id (int): VLAN id to set.
    """
    # Action type and length are fixed for OFPAT_SET_VLAN_VID.
    super().__init__(action_type=ActionType.OFPAT_SET_VLAN_VID, length=8)
    self.vlan_id = vlan_id
Create an ActionVlanVid with the optional parameters below. Args: vlan_id (int): VLAN priority.
juraj-google-style
def transform_feature(self, transformation_cache, state_manager):
    """See `FeatureColumn` base class.

    Fetches the input tensor for this column's key and applies
    `normalizer_fn` to it when one is configured.

    Args:
        transformation_cache: A `FeatureTransformationCache` object used
            to access features.
        state_manager: A `StateManager` to create / access resources such
            as lookup tables.

    Returns:
        Normalized input tensor.
    """
    tensor = transformation_cache.get(self.key, state_manager)
    if self.normalizer_fn is None:
        return tensor
    return self.normalizer_fn(tensor)
See `FeatureColumn` base class. In this case, we apply the `normalizer_fn` to the input tensor. Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. Returns: Normalized input tensor.
github-repos
def create_authors(project_dir=os.curdir):
    """Create the AUTHORS file, if not in a package.

    A PKG-INFO file marks a packaged distribution, in which case nothing
    is written.

    Returns:
        None

    Raises:
        RuntimeError: If the authors could not be retrieved.
    """
    pkg_info_file = os.path.join(project_dir, 'PKG-INFO')
    authors_file = os.path.join(project_dir, 'AUTHORS')
    if os.path.exists(pkg_info_file):
        return
    authors = get_authors(project_dir=project_dir)
    with open(authors_file, 'wb') as authors_fd:
        # One UTF-8 encoded author per line, with a trailing newline.
        authors_fd.write((b'\n'.join((a.encode('utf-8') for a in authors)) + b'\n'))
Creates the authors file, if not in a package. Returns: None Raises: RuntimeError: If the authors could not be retrieved
codesearchnet
async def get_records_for_zone(self, dns_zone, params=None):
    """Get all resource record sets for a managed zone, using the DNS zone.

    Args:
        dns_zone (str): Desired DNS zone to query.
        params (dict): (optional) Additional query parameters for HTTP
            requests to the GDNS API.

    Returns:
        list of dicts representing rrsets.
    """
    managed_zone = self.get_managed_zone(dns_zone)
    url = f'{self._base_url}/managedZones/{managed_zone}/rrsets'
    if not params:
        params = {}
    if 'fields' not in params:
        # Restrict the payload to the rrset fields we need plus the
        # pagination token.
        params['fields'] = ('rrsets/name,rrsets/kind,rrsets/rrdatas,'
                            'rrsets/type,rrsets/ttl,nextPageToken')
    next_page_token = None
    records = []
    # Follow nextPageToken until the API reports no more pages.
    while True:
        if next_page_token:
            params['pageToken'] = next_page_token
        response = await self.get_json(url, params=params)
        records.extend(response['rrsets'])
        next_page_token = response.get('nextPageToken')
        if not next_page_token:
            break
    logging.info(f'Found {len(records)} rrsets for zone "{dns_zone}".')
    return records
Get all resource record sets for a managed zone, using the DNS zone. Args: dns_zone (str): Desired DNS zone to query. params (dict): (optional) Additional query parameters for HTTP requests to the GDNS API. Returns: list of dicts representing rrsets.
juraj-google-style
class TFDebertaXSoftmax(keras.layers.Layer):
    """Masked softmax which is optimized for saving memory.

    Args:
        inputs (`tf.Tensor`): The input tensor that softmax is applied to.
        mask (`tf.Tensor`): Mask matrix; 0 marks elements ignored in the
            softmax calculation.
        axis (int): The dimension softmax is applied along.
    """

    def __init__(self, axis=-1, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis

    def call(self, inputs: tf.Tensor, mask: tf.Tensor):
        # Invert the mask: True where the element must be ignored.
        rmask = tf.logical_not(tf.cast(mask, tf.bool))
        # Masked positions become -inf so they contribute zero probability.
        output = tf.where(rmask, tf.cast(float('-inf'), dtype=self.compute_dtype), inputs)
        output = stable_softmax(tf.cast(output, dtype=tf.float32), self.axis)
        # Zero out masked positions in the final probabilities.
        output = tf.where(rmask, 0.0, output)
        return output
Masked Softmax which is optimized for saving memory Args: input (`tf.Tensor`): The input tensor that will apply softmax. mask (`tf.Tensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation. dim (int): The dimension that will apply softmax
github-repos
def AddValue(self, registry_value):
    """Adds a value to this Windows Registry key.

    Values are stored case-insensitively by upper-cased name.

    Args:
        registry_value (WinRegistryValue): Windows Registry value.

    Raises:
        KeyError: if the value already exists.
    """
    lookup_name = registry_value.name.upper()
    if lookup_name in self._values:
        raise KeyError('Value: {0:s} already exists.'.format(registry_value.name))
    self._values[lookup_name] = registry_value
Adds a value. Args: registry_value (WinRegistryValue): Windows Registry value. Raises: KeyError: if the value already exists.
codesearchnet
def add_profile_variants(self, profile_variants):
    """Insert several variants into the ``profile_variant`` collection.

    Args:
        profile_variants (list(models.ProfileVariant)): variants to insert.

    Returns:
        The result of the bulk insert operation.
    """
    collection = self.db.profile_variant
    return collection.insert_many(profile_variants)
Add several variants to the profile_variant collection in the database Args: profile_variants(list(models.ProfileVariant))
codesearchnet
def valueReadPreprocessor(valueString, replaceParamsFile=None):
    """Apply global pre-processing to values during reading throughout the project.

    Args:
        valueString (str): String representing the value to be preprocessed.
        replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance
            of the replace param file. Required if replacement variables are
            included in the project.

    Returns:
        str: Processed value as a string.
    """
    if (type(valueString) is bool):
        log.warning('Only numerical variable types can be handled by the valueReadPreprocessor function.')
        return valueString
    processedValue = valueString
    if ((replaceParamsFile is not None) and (valueString is not None)):
        # Bracketed tokens (e.g. "[var]") denote replacement variables.
        if (('[' in valueString) or (']' in valueString)):
            processedValue = '{0}'.format(REPLACE_NO_VALUE)
            # Map a matching token to the negative of its parameter id.
            for targetParam in replaceParamsFile.targetParameters:
                if (targetParam.targetVariable == valueString):
                    processedValue = '{0}'.format(((- 1) * targetParam.id))
                    break
    return processedValue
Apply global pre-processing to values during reading throughout the project. Args: valueString (str): String representing the value to be preprocessed. replaceParamsFile (gsshapy.orm.ReplaceParamFile, optional): Instance of the replace param file. Required if replacement variables are included in the project. Returns: str: Processed value as a string
codesearchnet
def index_min(x, idx, y):
    """Pure equivalent of `x[idx] = minimum(x[idx], y)`.

    Returns the value of x that would result from the NumPy-style indexed
    assignment `x[idx] = minimum(x[idx], y)`. Because it's a pure function,
    `x` itself won't be changed.

    Args:
        x: an array with the values to be updated.
        idx: a NumPy-style index (None, integers, slices, ellipses,
            integer ndarrays, or a tuple of the above).
        y: the array of updates; must be broadcastable to the shape of
            `x[idx]`.

    Returns:
        The updated version of `x`.
    """
    return _index_update_helper(tf_np.ndarray._with_index_min, x, idx, y)
Pure equivalent of `x[idx] = minimum(x[idx], y)`. Returns the value of x that would result from the NumPy-style indexed assignment `x[idx] = minimum(x[idx], y)`. Because it's a pure function, `x` itself won't be changed. Args: x: an array with the values to be updated. idx: a Numpy-style index, consisting of `None`, integers, slice objects, ellipses, ndarrays with integer dtypes, or a tuple of the above. y: the array of updates. `y` must be broadcastable to the shape of the array that would be returned by `x[idx]`. Returns: The updated version of `x`.
github-repos
def queuify_logger(logger, queue_handler, queue_listener):
    """Route a logger through a queue handler/listener pair.

    The logger's current handlers are moved onto *queue_listener* (skipping
    any it already has) and replaced by *queue_handler*. Useful to keep a
    default logging config but funnel a logger's output through a queue at
    runtime.

    Args:
        logger (mixed): Logger instance or string name of the logger.
        queue_handler (QueueHandler): Handler that enqueues records.
        queue_listener (QueueListener): Listener that drains the queue.
    """
    if isinstance(logger, str):
        logger = logging.getLogger(logger)
    # Only move handlers the listener does not already own.
    moved = [h for h in logger.handlers if h not in queue_listener.handlers]
    if moved:
        queue_listener.handlers = tuple(list(queue_listener.handlers) + moved)
    del logger.handlers[:]
    logger.addHandler(queue_handler)
Replace logger's handlers with a queue handler while adding existing handlers to a queue listener. This is useful when you want to use a default logging config but then optionally add a logger's handlers to a queue during runtime. Args: logger (mixed): Logger instance or string name of logger to queue-ify handlers. queue_handler (QueueHandler): Instance of a ``QueueHandler``. queue_listener (QueueListener): Instance of a ``QueueListener``.
codesearchnet
def process_user_info_response(self, response):
    """
    Process the user info response data.

    By default this simply maps the provider's user-info keys onto
    Django-friendly names. Providers returning different fields should
    subclass and override this method.

    Arguments:
        response (dict): User info data from the provider.

    Returns:
        dict: Mapping with ``username``, ``email``, ``last_name`` and
        ``first_name`` keys.
    """
    # Destination key (Django-friendly) <- source key (provider response).
    return {
        'username': response['preferred_username'],
        'email': response['email'],
        'last_name': response['family_name'],
        'first_name': response['given_name'],
    }
Process the user info response data. By default, this simply maps the edX user info key-values (example below) to Django-friendly names. If your provider returns different fields, you should sub-class this class and override this method. .. code-block:: python { "username": "jdoe", "email": "jdoe@example.com", "first_name": "Jane", "last_name": "Doe" } Arguments: response (dict): User info data Returns: dict
codesearchnet
def yaml(modules_to_register: Iterable[Any] = None, classes_to_register: Iterable[Any] = None) -> ruamel.yaml.YAML:
    """Create a YAML object for loading a YAML configuration.

    Args:
        modules_to_register: Modules containing classes to be registered
            with the YAML object. Default: None.
        classes_to_register: Classes to be registered with the YAML object.
            Default: None.

    Returns:
        A newly created YAML object, configured as appropriate.
    """
    # Round-trip ("rt") mode preserves comments and formatting on re-dump.
    yml = ruamel.yaml.YAML(typ="rt")
    # Teach the YAML object how to serialize and deserialize numpy arrays.
    yml.representer.add_representer(np.ndarray, numpy_to_yaml)
    yml.constructor.add_constructor("!numpy_array", numpy_from_yaml)
    # Register any caller-supplied modules and classes.
    yml = register_module_classes(yaml=yml, modules=modules_to_register)
    yml = register_classes(yaml=yml, classes=classes_to_register)
    return yml
Create a YAML object for loading a YAML configuration. Args: modules_to_register: Modules containing classes to be registered with the YAML object. Default: None. classes_to_register: Classes to be registered with the YAML object. Default: None. Returns: A newly created YAML object, configured as appropriate.
juraj-google-style
def _compute_transitions(self, corpus, order=1):
    """Compute the transition probabilities of a corpus.

    Args:
        corpus: the given corpus (each corpus entry needs to be iterable).
        order: the maximal Markov chain order.
    """
    self.transitions = defaultdict(lambda: defaultdict(int))

    for entry in corpus:
        # Seed the context window with start symbols so the first real
        # token already has a full-length history.
        history = utils.prefilled_buffer(self._start_symbol, length=self.order)

        # The end symbol terminates every entry so the final token's
        # transitions are counted as well.
        for token in chain(self.tokenize(entry), self._end_symbol):
            # Count the token against every suffix of the history, i.e.
            # against all lower-order contexts at once.
            for context in utils.get_suffixes(history):
                self.transitions[context][token] += 1
            history.append(token)

    # Convert raw counts into relative probabilities in place.
    self._compute_relative_probs(self.transitions)
Computes the transition probabilities of a corpus Args: corpus: the given corpus (a corpus_entry needs to be iterable) order: the maximal Markov chain order
juraj-google-style
def eval_image(image, height, width, scope=None):
    """Prepare one image for evaluation.

    Args:
        image: 3-D float Tensor.
        height: integer, target height.
        width: integer, target width.
        scope: Optional scope for name_scope.

    Returns:
        3-D float Tensor of the prepared image.
    """
    with tf.name_scope(values=[image, height, width], name=scope, default_name='eval_image'):
        # Keep the central 87.5% of the image (standard central eval crop).
        cropped = tf.image.central_crop(image, central_fraction=0.875)
        # resize_bilinear expects a 4-D batch, so add a batch dimension of 1
        # and squeeze it back out after resizing.
        batched = tf.expand_dims(cropped, 0)
        resized = tf.image.resize_bilinear(batched, [height, width], align_corners=False)
        return tf.squeeze(resized, [0])
Prepare one image for evaluation. Args: image: 3-D float Tensor height: integer width: integer scope: Optional scope for name_scope. Returns: 3-D float Tensor of prepared image.
codesearchnet
def check_upload_status(self, video_id):
    """Check the video upload status.

    Newly uploaded videos may still be in the processing state.
    Authentication is required.

    Returns:
        True if the video is available; otherwise a dict containing
        ``upload_state`` and ``detailed_message``, e.g.
        ``{"upload_state": "processing", "detailed_message": ""}``.

    Raises:
        ApiError: If the client is not authenticated.
    """
    if not self.authenticated:
        raise ApiError(_('Authentication is required'))

    # Fetch the video entry, then ask YouTube for its upload state.
    entry = self.fetch_video(video_id)
    upload_status = Api.yt_service.CheckUploadStatus(entry)

    if upload_status is None:
        # No status entry means processing finished and the video is live.
        return True

    state = upload_status[0]
    message = upload_status[1]
    return {'upload_state': state, 'detailed_message': message}
Checks the video upload status. Newly uploaded videos may be in the processing state. Authentication is required. Returns: True if the video is available; otherwise a dict that contains upload_state and a detailed message, i.e. {"upload_state": "processing", "detailed_message": ""}
codesearchnet
def check_par(chrom, pos):
    """Check if a coordinate falls inside the PAR region.

    Args:
        chrom(str): Chromosome name.
        pos(int): Position on the chromosome.

    Returns:
        par(bool): True if the position lies within any PAR interval.
    """
    # PAR maps chromosome name -> list of inclusive (start, end) intervals;
    # chromosomes without PAR intervals yield an empty list.
    return any(iv[0] <= pos <= iv[1] for iv in PAR.get(chrom, []))
Check if a coordinate is in the PAR region Args: chrom(str) pos(int) Returns: par(bool)
juraj-google-style
async def _on_event(self, event_):
    """Receive a hangouts_pb2.Event and fan out to Conversations.

    Args:
        event_: hangouts_pb2.Event instance.
    """
    conv_id = event_.conversation_id.id
    try:
        conv = await self._get_or_fetch_conversation(conv_id)
    except exceptions.NetworkError:
        # Best effort: drop the event rather than crash the event loop.
        logger.warning(
            'Failed to fetch conversation for event notification: %s',
            conv_id
        )
        return

    self._sync_timestamp = parsers.from_timestamp(event_.timestamp)
    conv_event = conv.add_event(event_)
    # add_event returns None for events that should not be surfaced.
    if conv_event is not None:
        await self.on_event.fire(conv_event)
        await conv.on_event.fire(conv_event)
Receive a hangouts_pb2.Event and fan out to Conversations. Args: event_: hangouts_pb2.Event instance
juraj-google-style
def _multiple_field(cls):
    """Return the "multiple" TypedField associated with this EntityList.

    Lazily sets the ``_entitylist_multifield`` class attribute on first
    use. The cached value is a 1-tuple: assigning the TypedField itself
    would install another descriptor on the class, which we don't want.

    Raises:
        AssertionError: If there is more than one multiple TypedField, or
            the TypedField's type_ is not a subclass of Entity.
    """
    # Fast path: return the cached field if this class already computed it.
    try:
        return cls.__dict__['_entitylist_multifield'][0]
    except (KeyError, IndexError, TypeError):
        pass

    from . import fields

    # Locate the single TypedField declared with multiple=True.
    multifields = tuple(fields.find(cls, multiple=True))
    assert len(multifields) == 1
    assert issubclass(multifields[0].type_, Entity)

    # Cache as a 1-tuple (see docstring) and return the field itself.
    cls._entitylist_multifield = multifields
    return multifields[0]
Return the "multiple" TypedField associated with this EntityList. This also lazily sets the ``_entitylist_multifield`` value if it hasn't been set yet. This is set to a tuple containing one item because if we set the class attribute to the TypedField, we would effectively add a TypedField descriptor to the class, which we don't want. Raises: AssertionError: If there is more than one multiple TypedField or the TypedField type_ is not a subclass of Entity.
codesearchnet
def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
    """
    Invert an attention mask (e.g., switches 0. and 1.).

    Args:
        encoder_attention_mask (`torch.Tensor`): An attention mask of shape
            `(batch, seq_len)` or `(batch, from_seq_len, to_seq_len)` where
            1 marks attended positions and 0 marks masked positions.

    Returns:
        `torch.Tensor`: The inverted, additive attention mask: 1 becomes 0.0
        and 0 becomes the minimum value of `self.dtype` (a large negative
        bias), broadcastable to `(batch, num_heads, from_seq, to_seq)`.

    Raises:
        ValueError: If `encoder_attention_mask` is not 2- or 3-dimensional.
    """
    if encoder_attention_mask.dim() == 3:
        # (batch, from_seq, to_seq) -> (batch, 1, from_seq, to_seq)
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    elif encoder_attention_mask.dim() == 2:
        # (batch, seq) -> (batch, 1, 1, seq)
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    else:
        # Previously any other rank fell through both ifs and raised an
        # UnboundLocalError below; fail loudly with a clear message instead.
        raise ValueError(
            f'Wrong shape for encoder_attention_mask (dim {encoder_attention_mask.dim()}); '
            'expected a 2D or 3D tensor.'
        )
    # Cast so the additive mask matches the model dtype (matters for fp16).
    encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype)
    # 1 -> 0.0 (attend), 0 -> dtype min (mask out) as an additive bias.
    encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(self.dtype).min
    return encoder_extended_attention_mask
Invert an attention mask (e.g., switches 0. and 1.). Args: encoder_attention_mask (`torch.Tensor`): An attention mask. Returns: `torch.Tensor`: The inverted attention mask.
github-repos
def _getScalesDiag(self, termx=0):
    """
    Internal helper for diagonal parameter initialization.

    Fits a two-term (term ``termx`` + noise) single-trait model to obtain
    average genetic and noise variances, then uses their square roots to
    scale the diagonal initialization of every random-effect term.

    Args:
        termx (int): Index of the non-noise random-effect term used for
            initialization. Defaults to 0.

    Returns:
        Concatenated array of initial scales for all random-effect terms.

    Raises:
        AssertionError: If the model is single-trait, the noise term is
            unset, ``termx`` is out of range, or either term uses a
            covariance parametrization without a diagonal.
    """
    assert self.P > 1, 'VarianceDecomposition:: diagonal init_method allowed only for multi trait models'
    assert self.noisPos is not None, 'VarianceDecomposition:: noise term has to be set'
    assert termx < self.n_randEffs - 1, 'VarianceDecomposition:: termx>=n_randEffs-1'
    assert self.trait_covar_type[self.noisPos] not in ['lowrank', 'block', 'fixed'], 'VarianceDecomposition:: diagonal initializaiton not posible for such a parametrization'
    assert self.trait_covar_type[termx] not in ['lowrank', 'block', 'fixed'], 'VarianceDecimposition:: diagonal initializaiton not posible for such a parametrization'

    # Average genetic/noise standard deviations from a two-term
    # single-trait decomposition of term ``termx``.
    h2 = self._getH2singleTrait(self.vd.getTerm(termx).getK())
    sd_gen = sp.sqrt(h2['varg'].mean())
    sd_noise = sp.sqrt(h2['varn'].mean())

    all_scales = []
    for idx in range(self.n_randEffs):
        if idx == termx:
            term_scales = sd_gen * self.diag[idx]
        elif idx == self.noisPos:
            term_scales = sd_noise * self.diag[idx]
        else:
            # All other random-effect terms start at zero variance.
            term_scales = 0.0 * self.diag[idx]
        if self.jitter[idx] > 0:
            # Append the jitter scale parameter for this term.
            term_scales = sp.concatenate((term_scales, sp.array([sp.sqrt(self.jitter[idx])])))
        all_scales.append(term_scales)
    return sp.concatenate(all_scales)
Internal function for parameter initialization Uses 2 term single trait model to get covar params for initialization Args: termx: non-noise term terms that is used for initialization
codesearchnet