code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def get_variable_dtype(master_dtype=tf.bfloat16,
                       slice_dtype=tf.float32,
                       activation_dtype=tf.float32):
  """Build the mtf.VariableDType describing dtypes for a run.

  Args:
    master_dtype: dtype or dtype name for checkpointed master weights; keep
      this the same between training and eval/inference.
    slice_dtype: dtype or dtype name for variable slices held in memory.
    activation_dtype: dtype or dtype name for activations.

  Returns:
    An mtf.VariableDType with every field normalized through tf.as_dtype.
  """
  return mtf.VariableDType(
      master_dtype=tf.as_dtype(master_dtype),
      slice_dtype=tf.as_dtype(slice_dtype),
      activation_dtype=tf.as_dtype(activation_dtype))
Datatypes to use for the run. Args: master_dtype: string, datatype for checkpoints; keep this the same between training and eval/inference. slice_dtype: string, datatype for variables in memory; must be tf.float32 for training. activation_dtype: string, datatype for activations; less memory usage if tf.bfloat16 but possible numerical issues. Returns: an mtf.VariableDType
juraj-google-style
def check_semidefinite_positiveness(A):
    """Check whether ``A`` is (numerically) positive semi-definite.

    A tiny jitter (square root of float eps) is added to the diagonal of a
    copy of ``A``; if the Cholesky factorization of the jittered matrix
    succeeds, ``A`` is positive semi-definite within numerical tolerance.

    Args:
        A (array_like): Matrix to test.

    Returns:
        bool: ``True`` if the factorization succeeds; ``False`` otherwise.
    """
    jittered = empty_like(A)
    jittered[:] = A
    jittered[diag_indices_from(jittered)] += sqrt(finfo(float).eps)
    try:
        cholesky(jittered)
    except LinAlgError:
        return False
    return True
Check if ``A`` is a positive semi-definite matrix. Args: A (array_like): Matrix. Returns: bool: ``True`` if ``A`` is positive semi-definite; ``False`` otherwise.
juraj-google-style
def compile_default_action(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:
    """Return tensors representing the default action fluents.

    Args:
        batch_size (Optional[int]): If given, the fluents are compiled in
            batched form; otherwise the plain default fluents are returned.

    Returns:
        Sequence[tf.Tensor]: A tuple of tensors.
    """
    with self.graph.as_default():
        with tf.name_scope('default_action'):
            self._initialize_default_action_fluents()
            if batch_size is None:
                return self.default_action_fluents
            return self._compile_batch_fluents(self.default_action_fluents, batch_size)
Returns a tuple of tensors representing the default action fluents. Args: batch_size (int): The batch size. Returns: Sequence[tf.Tensor]: A tuple of tensors.
codesearchnet
def single_gate_params(gate, params=None):
    """Map a single-qubit gate name to its U-gate parameters.

    Args:
        gate (str): single-qubit gate name ('U', 'u3', 'u2', 'u1' or 'id').
        params (list): the operation parameters, ``op['params']``.

    Returns:
        tuple: U gate parameters ``(theta, phi, lam)``.

    Raises:
        QiskitError: if the gate name is not among the valid types.
    """
    if gate in ('U', 'u3'):
        return params[0], params[1], params[2]
    if gate == 'u2':
        return np.pi / 2, params[0], params[1]
    if gate == 'u1':
        return 0, 0, params[0]
    if gate == 'id':
        return 0, 0, 0
    raise QiskitError('Gate is not among the valid types: %s' % gate)
Apply a single qubit gate to the qubit. Args: gate(str): the single qubit gate name params(list): the operation parameters op['params'] Returns: tuple: a tuple of U gate parameters (theta, phi, lam) Raises: QiskitError: if the gate name is not valid
codesearchnet
def _GetAPFSVolumeIdentifiers(self, scan_node):
  """Determines the APFS volume identifiers.

  Args:
    scan_node (SourceScanNode): scan node.

  Returns:
    list[str]: APFS volume identifiers, normalized with an 'apfs' prefix.

  Raises:
    ScannerError: if the scan node is invalid or if volumes are found but
        no mediator is available to choose among them.
    UserAbort: if the user requested to abort.
  """
  if not scan_node or not scan_node.path_spec:
    raise errors.ScannerError('Invalid scan node.')

  volume_system = apfs_volume_system.APFSVolumeSystem()
  volume_system.Open(scan_node.path_spec)

  volume_identifiers = self._source_scanner.GetVolumeIdentifiers(
      volume_system)
  if not volume_identifiers:
    return []

  # Only consult the mediator when there is an actual choice to make.
  if len(volume_identifiers) > 1:
    if not self._mediator:
      raise errors.ScannerError(
          'Unable to proceed. APFS volumes found but no mediator to '
          'determine how they should be used.')

    try:
      volume_identifiers = self._mediator.GetAPFSVolumeIdentifiers(
          volume_system, volume_identifiers)
    except KeyboardInterrupt:
      raise errors.UserAbort('File system scan aborted.')

  return self._NormalizedVolumeIdentifiers(
      volume_system, volume_identifiers, prefix='apfs')
Determines the APFS volume identifiers. Args: scan_node (SourceScanNode): scan node. Returns: list[str]: APFS volume identifiers. Raises: ScannerError: if the format of or within the source is not supported or the the scan node is invalid. UserAbort: if the user requested to abort.
juraj-google-style
def setWeekendHolidaySchedules(self, new_wknd, new_hldy, password="00000000"):
    """Serial call to set weekend and holiday schedules.

    Args:
        new_wknd (int): Schedules value to assign for weekends.
        new_hldy (int): Schedules value to assign for holidays.
        password (str): Optional password.

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    self.setContext("setWeekendHolidaySchedules")
    try:
        if not self.request(False):
            self.writeCmdMsg("Bad read CRC on setting")
        else:
            if not self.serialCmdPwdAuth(password):
                self.writeCmdMsg("Password failure")
            else:
                # Schedule indices are sent as two zero-padded hex digits.
                req_wkd = binascii.hexlify(str(new_wknd).zfill(2))
                req_hldy = binascii.hexlify(str(new_hldy).zfill(2))
                req_str = "015731023030433028" + req_wkd + req_hldy + "2903"
                req_str += self.calc_crc16(req_str[2:].decode("hex"))
                self.m_serial_port.write(req_str.decode("hex"))
                if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06":
                    self.writeCmdMsg("Success(setWeekendHolidaySchedules): 06 returned.")
                    result = True
        self.serialPostEnd()
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed so those propagate.
        # NOTE(review): traceback.format_exc takes a limit, not exc_info;
        # this call works only incidentally on Python 2 -- confirm intent.
        ekm_log(traceback.format_exc(sys.exc_info()))
    self.setContext("")
    return result
Serial call to set weekend and holiday :class:`~ekmmeters.Schedules`. Args: new_wknd (int): :class:`~ekmmeters.Schedules` value to assign. new_hldy (int): :class:`~ekmmeters.Schedules` value to assign. password (str): Optional password. Returns: bool: True on completion and ACK.
juraj-google-style
def get_git_commit_sha():
    """Return the git commit SHA for this build.

    Reads the ``GIT_COMMIT`` environment variable, which Jenkins build
    agents set for each build.

    Returns:
        The commit SHA string, or ``None`` when the variable is unset.
    """
    return os.environ.get('GIT_COMMIT')
Get git commit SHA for this build. Attempt to get the SHA from environment variable GIT_COMMIT, which should be available on Jenkins build agents. Returns: SHA hash of the git commit used for the build, if available
github-repos
def _force_float(v):
    """Convert ``v`` to float, logging a warning on failure.

    Args:
        v: value to convert to float.

    Returns:
        float: the converted value, or ``float('nan')`` if conversion failed.
    """
    try:
        return float(v)
    except Exception as exc:
        # Log BEFORE returning; previously this warning sat after ``return``
        # and was therefore unreachable. The message also claimed "0" while
        # the function returns NaN -- fixed to match actual behavior.
        logger.warning(
            'Failed to convert {} to float with {} error. Using nan instead.'.format(v, exc))
        return float('nan')
Converts given argument to float. On failure logs a warning and returns NaN. Args: v (any): value to convert to float Returns: float: converted v, or ``float('nan')`` if conversion failed.
codesearchnet
def output(self):
    """Retrieve the output tensor(s) of the layer.

    Returns:
        The stored nested output tensor or list of output tensors.
    """
    return self._nested_outputs
Retrieves the output tensor(s) of a layer. Only applicable if the layer has exactly one output, i.e. if it is connected to one incoming layer. Returns: Output tensor or list of output tensors. Raises: AttributeError: if the layer is connected to more than one incoming layers. RuntimeError: if called in Eager mode.
github-repos
def local_service(self, name_or_id):
    """Get the locally synced information for a service.

    Safe to call from outside the background event loop: when called from
    another thread the state lock is taken so the local copies of
    supervisor data cannot change mid-read.

    Args:
        name_or_id (str or int): Either a short name for the service or a
            numeric id.

    Returns:
        ServiceState: a copy of the service state at the time of the call.

    Raises:
        ArgumentError: if the id or name is not known.
    """
    # Only lock when NOT on the loop thread; the loop itself is presumably
    # the writer of this state, so it skips the lock.
    if (not self._loop.inside_loop()):
        self._state_lock.acquire()
    try:
        if isinstance(name_or_id, int):
            if (name_or_id not in self._name_map):
                raise ArgumentError('Unknown ID used to look up service', id=name_or_id)
            name = self._name_map[name_or_id]
        else:
            name = name_or_id
        if (name not in self.services):
            raise ArgumentError('Unknown service name', name=name)
        # Return a copy so callers cannot mutate the synced state.
        return copy(self.services[name])
    finally:
        if (not self._loop.inside_loop()):
            self._state_lock.release()
Get the locally synced information for a service. This method is safe to call outside of the background event loop without any race condition. Internally it uses a thread-safe mutex to protect the local copies of supervisor data and ensure that it cannot change while this method is iterating over it. Args: name_or_id (string or int): Either a short name for the service or a numeric id. Returns: ServiceState: the current state of the service synced locally at the time of the call.
codesearchnet
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
    """Assert that two numpy arrays have near values.

    Args:
        ndarray1: a numpy ndarray.
        ndarray2: a numpy ndarray.
        err: a float; the maximum absolute difference allowed.
        msg: optional message to report on failure.
    """
    near = self._NDArrayNear(ndarray1, ndarray2, err)
    self.assertTrue(near, msg=msg)
Asserts that two numpy arrays have near values. Args: ndarray1: a numpy ndarray. ndarray2: a numpy ndarray. err: a float. The maximum absolute difference allowed. msg: Optional message to report on failure.
github-repos
def get_ip_prefixes_from_bird(filename):
    """Build a list of IP prefixes found in a Bird configuration file.

    Args:
        filename (str): Absolute path of the Bird configuration file.

    Returns:
        list: IP prefix strings that pass ``valid_ip_prefix``.
    """
    prefixes = []
    with open(filename, 'r') as bird_conf:
        content = bird_conf.read()
    for raw_line in content.splitlines():
        candidate = raw_line.strip(', ')
        if valid_ip_prefix(candidate):
            prefixes.append(candidate)
    return prefixes
Build a list of IP prefixes found in Bird configuration. Arguments: filename (str): The absolute path of the Bird configuration file. Notes: It can only parse a file with the following format define ACAST_PS_ADVERTISE = [ 10.189.200.155/32, 10.189.200.255/32 ]; Returns: A list of IP prefixes.
codesearchnet
def memory_read16(self, addr, num_halfwords, zone=None):
    """Read memory from the target system in units of 16 bits.

    Args:
        self (JLink): the JLink instance.
        addr (int): start address to read from.
        num_halfwords (int): number of half-words to read.
        zone (str): memory zone to read from.

    Returns:
        List of half-words read from the target system.

    Raises:
        JLinkException: if memory could not be read.
    """
    # Delegates to the generic reader with a fixed 16-bit unit size.
    return self.memory_read(addr, num_halfwords, zone=zone, nbits=16)
Reads memory from the target system in units of 16-bits. Args: self (JLink): the ``JLink`` instance addr (int): start address to read from num_halfwords (int): number of half words to read zone (str): memory zone to read from Returns: List of halfwords read from the target system. Raises: JLinkException: if memory could not be read
juraj-google-style
def list(
    self,
    **kwargs
):
    """Get a list of all Accounts authorized for the provided token.

    Returns:
        v20.response.Response containing the results from submitting the
        request.
    """
    request = Request('GET', '/v3/accounts')
    response = self.ctx.request(request)

    # Pass through responses we cannot parse as JSON.
    if response.content_type is None:
        return response
    if not response.content_type.startswith("application/json"):
        return response

    jbody = json.loads(response.raw_body)
    parsed_body = {}

    status = str(response.status)
    if status == "200":
        if jbody.get('accounts') is not None:
            parsed_body['accounts'] = [
                self.ctx.account.AccountProperties.from_dict(d, self.ctx)
                for d in jbody.get('accounts')
            ]
    elif status in ("401", "405"):
        # Both error statuses carry the same optional error fields; the
        # two previously duplicated branches are merged here.
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = jbody.get('errorCode')
        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = jbody.get('errorMessage')
    else:
        # Unknown status: expose the raw JSON body unchanged.
        parsed_body = jbody

    response.body = parsed_body
    return response
Get a list of all Accounts authorized for the provided token. Args: Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
def get_project(self, resource):
    """Get attributes of the data model object named by ``resource``.

    Args:
        resource (intern.resource.boss.BossResource): resource.name as well
            as any parents must be identified to succeed.

    Returns:
        (intern.resource.boss.BossResource): resource of the type requested.

    Raises:
        requests.HTTPError: on failure.
    """
    service = self.project_service
    service.set_auth(self._token_project)
    return service.get(resource)
Get attributes of the data model object named by the given resource. Args: resource (intern.resource.boss.BossResource): resource.name as well as any parents must be identified to succeed. Returns: (intern.resource.boss.BossResource): Returns resource of type requested on success. Raises: requests.HTTPError on failure.
juraj-google-style
def __init__(self, task_name, queue_name, base_path):
    """Initializer.

    Args:
        task_name: name of the currently running task, or empty if there is
            no task running.
        queue_name: queue this pipeline should run on (may differ from the
            current queue this request is on).
        base_path: relative URL prefix for the pipeline's handlers.
    """
    self.task_name = task_name
    self.queue_name = queue_name
    self.base_path = base_path
    # Derive every handler endpoint from the shared base path.
    for attr, suffix in (
            ('barrier_handler_path', 'output'),
            ('pipeline_handler_path', 'run'),
            ('finalized_handler_path', 'finalized'),
            ('fanout_handler_path', 'fanout'),
            ('abort_handler_path', 'abort'),
            ('fanout_abort_handler_path', 'fanout_abort')):
        setattr(self, attr, '%s/%s' % (base_path, suffix))
    self.session_filled_output_names = set()
Initializer. Args: task_name: The name of the currently running task or empty if there is no task running. queue_name: The queue this pipeline should run on (may not be the current queue this request is on). base_path: Relative URL for the pipeline's handlers.
juraj-google-style
def multiply(self, other):
    """Return the operator ``other * self``.

    Args:
        other (complex): a complex number.

    Returns:
        Operator: the operator scaled by ``other``.

    Raises:
        QiskitError: if other is not a valid complex number.
    """
    if not isinstance(other, Number):
        raise QiskitError("other is not a number")
    scaled = other * self.data
    return Operator(scaled, self.input_dims(), self.output_dims())
Return the operator other * self. Args: other (complex): a complex number. Returns: Operator: the operator other * self. Raises: QiskitError: if other is not a valid complex number.
juraj-google-style
def __init__(self, parameter_name, value):
    """Constructor for InvalidParameterError.

    Args:
        parameter_name: str; the name of the parameter whose value was
            rejected.
        value: the actual value passed in for the parameter; usually str.
    """
    super(InvalidParameterError, self).__init__()
    self.parameter_name = parameter_name
    self.value = value
Constructor for InvalidParameterError. Args: parameter_name: String; the name of the parameter which had a value rejected. value: The actual value passed in for the parameter. Usually string.
juraj-google-style
def CreateServiceProto(job):
  """Create the Service protobuf.

  Args:
    job: Launchd job dict from the servicemanagement framework.

  Returns:
    sysinfo_pb2.OSXServiceInformation proto.
  """
  service = rdf_client.OSXServiceInformation(label=job.get('Label'),
                                             program=job.get('Program'),
                                             sessiontype=job.get('LimitLoadToSessionType'),
                                             lastexitstatus=int(job['LastExitStatus']),
                                             timeout=int(job['TimeOut']),
                                             ondemand=bool(job['OnDemand']))
  # NOTE(review): ``job.get`` accepts a ``stringify`` keyword, so ``job`` is
  # presumably a plist/ObjC mapping wrapper rather than a plain dict; values
  # such as job['PID'] expose ``.value`` -- confirm against the caller.
  for arg in job.get('ProgramArguments', '', stringify=False):
    service.args.Append(str(arg))
  mach_dict = job.get('MachServices', {}, stringify=False)
  for (key, value) in iteritems(mach_dict):
    service.machservice.Append(('%s:%s' % (key, value)))
  job_mach_dict = job.get('PerJobMachServices', {}, stringify=False)
  for (key, value) in iteritems(job_mach_dict):
    service.perjobmachservice.Append(('%s:%s' % (key, value)))
  if ('PID' in job):
    service.pid = job['PID'].value
  return service
Create the Service protobuf. Args: job: Launchdjobdict from servicemanagement framework. Returns: sysinfo_pb2.OSXServiceInformation proto
codesearchnet
def _copy_file_or_directory(self, source, destination_directory):
    """Recursively copy ``source`` into ``destination_directory``.

    Args:
        source: source file or directory to copy into destination_directory.
        destination_directory: existing directory to copy source into.
    """
    if os.path.isdir(source):
        for item in os.listdir(source):
            full_source = os.path.join(source, item)
            full_destination = os.path.join(destination_directory, item)
            if os.path.isdir(full_source):
                shutil.copytree(full_source, full_destination)
            else:
                # Bug fix: shutil.copytree raises NotADirectoryError when
                # given a plain file, so files inside ``source`` must be
                # copied with copy2 instead.
                shutil.copy2(full_source, full_destination)
    else:
        shutil.copy2(source, destination_directory)
Recursively copies files from source to destination_directory. Args: source: source file or directory to copy into destination_directory destination_directory: destination directory in which to copy source
juraj-google-style
def start_after(self, document_fields):
    """Start query results after a particular document value.

    The result set will **exclude** the document specified by
    ``document_fields``. Overwrites any start cursor set previously.

    Args:
        document_fields: a document snapshot or a dict/list/tuple of fields
            representing a query results cursor.

    Returns:
        A copy of the current query with a "start after" cursor.
    """
    return self._cursor_helper(document_fields, before=False, start=True)
Start query results after a particular document value. The result set will **exclude** the document specified by ``document_fields``. If the current query already has specified a start cursor -- either via this method or :meth:`~.firestore_v1beta1.query.Query.start_at` -- this will overwrite it. When the query is sent to the server, the ``document_fields`` will be used in the order given by fields set by :meth:`~.firestore_v1beta1.query.Query.order_by`. Args: document_fields (Union[~.firestore_v1beta1.\ document.DocumentSnapshot, dict, list, tuple]): a document snapshot or a dictionary/list/tuple of fields representing a query results cursor. A cursor is a collection of values that represent a position in a query result set. Returns: ~.firestore_v1beta1.query.Query: A query with cursor. Acts as a copy of the current query, modified with the newly added "start after" cursor.
codesearchnet
def update_state_wrapper(update_state_fn):
  """Decorator to wrap metric `update_state()` with `add_update()`.

  Args:
    update_state_fn: function that accumulates metric statistics.

  Returns:
    Decorated function that wraps `update_state_fn()` with `add_update()`.
  """
  def decorated(metric_obj, *args, **kwargs):
    """Decorated update_state_fn; registers the update op on the metric."""
    strategy = distribute_lib.get_strategy()
    # Metric weights not created inside the TPUStrategy scope cannot be
    # updated from replica context; fail fast with a clear error.
    for weight in metric_obj.weights:
      if backend.is_tpu_strategy(strategy) and (not strategy.extended.variable_created_in_scope(weight)) and (not distribute_lib.in_cross_replica_context()):
        raise ValueError('Trying to run metric.update_state in replica context when the metric was not created in TPUStrategy scope. Make sure the keras Metric is created in TPUstrategy scope. ')
    with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):
      update_op = update_state_fn(*args, **kwargs)
    # update_op is None in eager execution; only graph-mode ops are tracked.
    if update_op is not None:
      metric_obj.add_update(update_op)
    return update_op
  return tf_decorator.make_decorator(update_state_fn, decorated)
Decorator to wrap metric `update_state()` with `add_update()`. Args: update_state_fn: function that accumulates metric statistics. Returns: Decorated function that wraps `update_state_fn()` with `add_update()`.
github-repos
def track_trace(self, name, properties=None, severity=None):
    """Send a single trace statement.

    Args:
        name (str): the trace statement.
        properties (dict): custom properties to attach to this data item
            (defaults to None).
        severity (str): severity level of this trace, one of DEBUG, INFO,
            WARNING, ERROR, CRITICAL.
    """
    data = channel.contracts.MessageData()
    data.message = name or NULL_CONSTANT_STRING
    if properties:
        data.properties = properties
    if severity is not None:
        level_map = channel.contracts.MessageData.PYTHON_LOGGING_LEVELS
        data.severity_level = level_map.get(severity)
    self.track(data, self._context)
Sends a single trace statement. Args: name (str). the trace statement.\n properties (dict). the set of custom properties the client wants attached to this data item. (defaults to: None)\n severity (str). the severity level of this trace, one of DEBUG, INFO, WARNING, ERROR, CRITICAL
juraj-google-style
def automatic_density(cls, structure, kppa, chksymbreak=None, use_symmetries=True, use_time_reversal=True, shifts=(0.5, 0.5, 0.5)):
    """Return an automatic Kpoint object based on a structure and k-point density.

    Scales the number of divisions along each reciprocal lattice vector
    proportional to its length.

    Args:
        structure: Input structure.
        kppa: Grid density (k-points per reciprocal atom).
        chksymbreak: Abinit chksymbreak input variable (passed through).
        use_symmetries: Whether to use spatial symmetries.
        use_time_reversal: Whether to use time-reversal symmetry.
        shifts: Mesh shifts, reshaped to (-1, 3).
    """
    lattice = structure.lattice
    lengths = lattice.abc
    shifts = np.reshape(shifts, (-1, 3))
    ngrid = kppa / structure.num_sites / len(shifts)
    mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3.)
    num_div = [int(round(1.0 / lengths[i] * mult)) for i in range(3)]
    # Ensure at least one division per direction.
    num_div = [i if i > 0 else 1 for i in num_div]

    # NOTE(review): the hexagonal-cell detection below is computed but never
    # used -- the mesh is always Monkhorst-Pack. Kept for behavior parity;
    # confirm whether Gamma-centering for hexagonal cells was intended.
    angles = lattice.angles
    hex_angle_tol = 5
    hex_length_tol = 0.01
    right_angles = [i for i in range(3) if abs(angles[i] - 90) < hex_angle_tol]
    hex_angles = [i for i in range(3)
                  if abs(angles[i] - 60) < hex_angle_tol or abs(angles[i] - 120) < hex_angle_tol]
    is_hexagonal = (len(right_angles) == 2 and len(hex_angles) == 1 and
                    abs(lengths[right_angles[0]] - lengths[right_angles[1]]) < hex_length_tol)

    # Fixed typo in the generated comment string ("pymatge" -> "pymatgen").
    comment = "pymatgen.io.abinit generated KPOINTS with grid density = " + "{} / atom".format(kppa)

    return cls(
        mode="monkhorst",
        num_kpts=0,
        kpts=[num_div],
        kpt_shifts=shifts,
        use_symmetries=use_symmetries,
        use_time_reversal=use_time_reversal,
        chksymbreak=chksymbreak,
        comment=comment)
Returns an automatic Kpoint object based on a structure and a kpoint density. Uses Gamma centered meshes for hexagonal cells and Monkhorst-Pack grids otherwise. Algorithm: Uses a simple approach scaling the number of divisions along each reciprocal lattice vector proportional to its length. Args: structure: Input structure kppa: Grid density
juraj-google-style
def _probe_services(self, handle):
    """Probe for all primary services and characteristics in those services.

    Args:
        handle (int): the connection handle to probe.

    Returns:
        tuple(bool, dict or None): success flag plus, on success, a dict
        with a 'services' key; on some failures a dict with a 'reason'.
    """
    code = 0x2800  # GATT UUID of the "primary service" group type.

    def event_filter_func(event):
        # Matches attribute-value events for this connection handle.
        if (event.command_class == 4 and event.command == 2):
            event_handle, = unpack("B", event.payload[0:1])
            return event_handle == handle
        return False

    def end_filter_func(event):
        # Matches the procedure-completed event for this connection handle.
        if (event.command_class == 4 and event.command == 1):
            event_handle, = unpack("B", event.payload[0:1])
            return event_handle == handle
        return False

    # Read-by-group-type over the full attribute handle range 1..0xFFFF.
    payload = struct.pack('<BHHBH', handle, 1, 0xFFFF, 2, code)
    try:
        response = self._send_command(4, 1, payload)
    except InternalTimeoutError:
        return False, {'reason': 'Timeout waiting for command response'}
    handle, result = unpack("<BH", response.payload)
    if result != 0:
        return False, None
    # Collect value events until the end-of-procedure event arrives.
    events = self._wait_process_events(0.5, event_filter_func, end_filter_func)
    gatt_events = [x for x in events if event_filter_func(x)]
    end_events = [x for x in events if end_filter_func(x)]
    if len(end_events) == 0:
        return False, None
    end_event = end_events[0]
    _, result, _ = unpack("<BHH", end_event.payload)
    if result != 0:
        self._logger.warn("Error enumerating GATT table, protocol error code = %d (0x%X)" % (result, result))
        return False, None
    services = {}
    for event in gatt_events:
        process_gatt_service(services, event)
    return True, {'services': services}
Probe for all primary services and characteristics in those services Args: handle (int): the connection handle to probe
juraj-google-style
def assert_no_title(self, title, **kwargs):
    """Asserts that the page doesn't have the given title.

    Args:
        title (str | RegexObject): The string that the title should not
            include.
        **kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.

    Returns:
        True

    Raises:
        ExpectationNotMet: If the assertion hasn't succeeded during the
            wait time.
    """
    query = TitleQuery(title, **kwargs)

    # ``synchronize`` retries the inner function for up to ``query.wait``
    # seconds, so a title that is only briefly present can still pass.
    @self.synchronize(wait=query.wait)
    def assert_no_title():
        if query.resolves_for(self):
            raise ExpectationNotMet(query.negative_failure_message)
        return True

    return assert_no_title()
Asserts that the page doesn't have the given title. Args: title (str | RegexObject): The string that the title should include. **kwargs: Arbitrary keyword arguments for :class:`TitleQuery`. Returns: True Raises: ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
codesearchnet
def _do_refresh_request(self, http):
    """Refresh the access_token using the refresh_token.

    Args:
        http: an object to be used to make HTTP requests.

    Raises:
        HttpAccessTokenRefreshError: When the refresh fails.
    """
    body = self._generate_refresh_request_body()
    headers = self._generate_refresh_request_headers()
    logger.info('Refreshing access_token')
    (resp, content) = transport.request(http, self.token_uri, method='POST', body=body, headers=headers)
    content = _helpers._from_bytes(content)
    if (resp.status == http_client.OK):
        d = json.loads(content)
        self.token_response = d
        self.access_token = d['access_token']
        # Keep the existing refresh_token when the server does not rotate it.
        self.refresh_token = d.get('refresh_token', self.refresh_token)
        if ('expires_in' in d):
            delta = datetime.timedelta(seconds=int(d['expires_in']))
            self.token_expiry = (delta + _UTCNOW())
        else:
            self.token_expiry = None
        if ('id_token' in d):
            self.id_token = _extract_id_token(d['id_token'])
            self.id_token_jwt = d['id_token']
        else:
            self.id_token = None
            self.id_token_jwt = None
        self.invalid = False
        if self.store:
            self.store.locked_put(self)
    else:
        logger.info('Failed to retrieve access token: %s', content)
        error_msg = 'Invalid response {0}.'.format(resp.status)
        try:
            # Best effort to extract a structured OAuth error; the
            # credentials are marked invalid and persisted either way.
            d = json.loads(content)
            if ('error' in d):
                error_msg = d['error']
                if ('error_description' in d):
                    error_msg += (': ' + d['error_description'])
            self.invalid = True
            if (self.store is not None):
                self.store.locked_put(self)
        except (TypeError, ValueError):
            pass
        raise HttpAccessTokenRefreshError(error_msg, status=resp.status)
Refresh the access_token using the refresh_token. Args: http: an object to be used to make HTTP requests. Raises: HttpAccessTokenRefreshError: When the refresh fails.
codesearchnet
def _initialize_memory(self, policy_params):
    """Initialize temporary and permanent memory.

    Args:
        policy_params: Nested tuple of policy parameters with all dimensions
            set.

    Initializes the attributes `self._current_episodes`,
    `self._finished_episodes`, and `self._num_finished_episodes`. The
    episodes memory serves to collect multiple episodes in parallel.
    Finished episodes are copied into the next free slot of the second
    memory.
    """
    # One element per field (observation, action, params, reward); this is
    # presumably used by EpisodeMemory as a shape/dtype template -- confirm.
    template = (
        self._batch_env.observ[0],
        self._batch_env.action[0],
        tools.nested.map(lambda x: x[0, 0], policy_params),
        self._batch_env.reward[0])
    with tf.variable_scope('ppo_temporary'):
        self._current_episodes = parts.EpisodeMemory(
            template, len(self._batch_env), self._config.max_length, 'episodes')
        self._finished_episodes = parts.EpisodeMemory(
            template, self._config.update_every, self._config.max_length, 'memory')
        # Points to the next free slot of the finished-episodes memory.
        self._num_finished_episodes = tf.Variable(0, False)
Initialize temporary and permanent memory. Args: policy_params: Nested tuple of policy parameters with all dimensions set. Initializes the attributes `self._current_episodes`, `self._finished_episodes`, and `self._num_finished_episodes`. The episodes memory serves to collect multiple episodes in parallel. Finished episodes are copied into the next free slot of the second memory. The memory index points to the next free slot.
juraj-google-style
def wrap_sequence(sequence, books=None, tensor_shape=None):
    """Create an input layer representing the given sequence of tensors.

    Args:
        sequence: A sequence of tensors.
        books: The bookkeeper; defaults to the one for the default graph.
        tensor_shape: An optional shape that will be set on each tensor or
            verified to match it.

    Returns:
        A layer.
    """
    if books is None:
        books = bookkeeper.for_default_graph()
    wrapped = [wrap(item, books=books, tensor_shape=tensor_shape)
               for item in sequence]
    return Layer(books, sequence=wrapped, name=wrapped[0].name)
Creates an input layer representing the given sequence of tensors. Args: sequence: A sequence of tensors. books: The bookkeeper. tensor_shape: An optional shape that will be set on the Tensor or verified to match the tensor. Returns: A layer.
codesearchnet
def stops_when(iterable, condition):
    """Stop yielding items when a condition arises.

    Args:
        iterable: the iterable to filter.
        condition: stop yielding as soon as this callable returns True once.
            A non-callable value is converted to
            ``lambda item: item == condition``.

    Example:
        >>> list(stops_when(range(10), lambda x: x > 5))
        [0, 1, 2, 3, 4, 5]
        >>> list(stops_when(range(10), 7))
        [0, 1, 2, 3, 4, 5, 6]
    """
    if callable(condition):
        predicate = condition
    else:
        sentinel = condition

        def predicate(item):
            return item == sentinel

    return itertools.takewhile(lambda item: not predicate(item), iterable)
Stop yielding items when a condition arise. Args: iterable: the iterable to filter. condition: if the callable returns True once, stop yielding items. If it's not a callable, it will be converted to one as `lambda condition: condition == item`. Example: >>> list(stops_when(range(10), lambda x: x > 5)) [0, 1, 2, 3, 4, 5] >>> list(stops_when(range(10), 7)) [0, 1, 2, 3, 4, 5, 6]
juraj-google-style
def dbclass(self, value):
    """Set the connection's dbclass property.

    Args:
        value (str): New dbclass value.

    Raises:
        AttributeError: if ``value`` is not a valid database type.
    """
    if not is_valid_dbclass(value):
        raise AttributeError("'{}' is not a valid database type".format(value))
    self._class = value
    self._connectionXML.set('class', value)
Set the connection's dbclass property. Args: value: New dbclass value. String. Returns: Nothing.
codesearchnet
def _live_tensors(f, attr_name='inputs'):
  """Returns the indices of the used inputs.

  Note: This currently only handles direct index accesses, e.g.
  ``op.inputs[1]``. If the function slices or comprehends over attr_name,
  _ALL is returned, which is correct even if inefficient.

  Args:
    f: A grad function, taking the op as first argument.
    attr_name: op attr to track. "inputs" or "outputs".

  Returns:
    Either one of:
      * set of integers representing individual indices of inputs used
      * the value _ALL, if indices are used but cannot be determined which
      * empty set, if no inputs are used
  """
  node, _ = parser.parse_entity(f, ())
  entity_info = transformer.EntityInfo(name=f.__name__, source_code=None, source_file=None, future_features=(), namespace=sys.modules[f.__module__].__dict__)
  ctx = transformer.Context(entity_info, None, None)
  # Run the autograph static analyses needed for liveness information.
  graphs = cfg.build(node)
  node = qual_names.resolve(node)
  node = activity.resolve(node, ctx, None)
  node = reaching_fndefs.resolve(node, ctx, graphs)
  node = liveness.resolve(node, ctx, graphs)
  # Qualified name of the op argument, e.g. "op", and of "op.inputs".
  op_arg_name = anno.getanno(node.args.args[0], anno.Basic.QN)
  op_inputs_outputs_name = qual_names.QN(op_arg_name, attr=attr_name)
  special_tracker = _SubscriptUseTracker(ctx, (op_inputs_outputs_name,))
  node = special_tracker.visit(node)
  live_vars_in = anno.getanno(node.body[0], anno.Static.LIVE_VARS_IN)
  inputs_outputs_used_qns = set()
  for v in special_tracker.complex_reads:
    # Complex access patterns (e.g. slices) cannot be resolved to indices.
    if v == op_inputs_outputs_name:
      return _ALL
  for v in live_vars_in:
    if v in special_tracker.reads:
      if v.has_subscript() and v.parent == op_inputs_outputs_name:
        inputs_outputs_used_qns.add(v)
      elif v == op_inputs_outputs_name:
        # op.inputs used whole: assume every tensor is needed.
        return _ALL
  # Propagate usage through functions the op is passed to, recursively.
  function_calls_tracker = _FunctionCallsTracker(ctx, op_arg_name)
  node = function_calls_tracker.visit(node)
  input_output_indices = set()
  for called_f in function_calls_tracker.calls:
    child_indices = _live_tensors(called_f, attr_name=attr_name)
    if child_indices is _ALL:
      return _ALL
    input_output_indices |= child_indices
  for v in inputs_outputs_used_qns:
    assert v.has_subscript()
    _, subscript = v.qn
    if not subscript.is_simple():
      # Not a literal index; could be anything.
      return _ALL
    subscript_val, = subscript.qn
    if not isinstance(subscript_val, qual_names.Literal) and (not isinstance(subscript_val.value, int)):
      # Not an integer literal; could be anything.
      return _ALL
    input_output_indices.add(subscript_val.value)
  return input_output_indices
Returns the indices of the used inputs. Note: This currently only handles direct index accesses e.g. op.inputs[1]. If the function has slicing or list comprehension on attr_name then returns _ALL. This ensure that this is correct even if inefficient. Args: f: A grad function, taking the op as first argument. attr_name: op attr to track. "inputs" or "outputs". Returns: Either one of: * set of integers representing individual indices of inputs used * the value _ALL, if indices are used but cannot be determined which * empty set, if no inputs are used
github-repos
def run_std_server(self):
    """Start a TensorFlow server and join the serving thread.

    Typically used for parameter servers.

    Raises:
        ValueError: if not enough information is available in the
            estimator's config to create a server.
    """
    config = tf.estimator.RunConfig()
    server = tf.train.Server(
        config.cluster_spec,
        job_name=config.task_type,
        task_index=config.task_id,
        protocol=config.protocol)
    server.join()
Starts a TensorFlow server and joins the serving thread. Typically used for parameter servers. Raises: ValueError: if not enough information is available in the estimator's config to create a server.
codesearchnet
def get_extended_surface_mesh(self, repeat=(5, 5, 1)):
    """Get an extended surface mesh to use for adsorption site finding.

    Constructs a supercell of the surface sites.

    Args:
        repeat (3-tuple): repeat for getting the extended surface mesh.
    """
    extended = Structure.from_sites(self.surface_sites)
    extended.make_supercell(repeat)
    return extended
Gets an extended surface mesh for to use for adsorption site finding by constructing supercell of surface sites Args: repeat (3-tuple): repeat for getting extended surface mesh
codesearchnet
def tag(self, resource_id):
    """Update the request URI to include the tag for specific retrieval.

    Args:
        resource_id (str): The tag name.
    """
    safe_name = self.tcex.safetag(resource_id)
    self._request_uri = '{}/{}'.format(self._request_uri, safe_name)
Update the request URI to include the Tag for specific retrieval. Args: resource_id (string): The tag name.
juraj-google-style
def AddMemberDefinition(self, member_definition):
    """Add a member definition.

    Args:
        member_definition (DataTypeDefinition): member data type definition.
    """
    # Adding a member invalidates any cached byte size.
    self._byte_size = None
    self.members.append(member_definition)
    if self.sections:
        # The most recent section also tracks the new member.
        current_section = self.sections[-1]
        current_section.members.append(member_definition)
Adds a member definition. Args: member_definition (DataTypeDefinition): member data type definition.
juraj-google-style
def jt_aggregate(func, is_create=False, has_pk=False):
    """Decorator to aggregate unified_jt-related fields.

    Args:
        func: The CRUD method to be decorated.
        is_create: Boolean flag showing whether this method is create.
        has_pk: Boolean flag showing whether this method uses pk as argument.

    Returns:
        A function with necessary click-related attributes whose keyworded
        arguments are aggregated.

    Raises:
        exc.UsageError: Either more than one unified jt field is provided,
            or none is provided when the is_create flag is set.
    """
    def helper(kwargs, obj):
        """Aggregate unified jt fields from kwargs; returns the schedules
        endpoint for the single unified JT field found, if any.
        """
        unified_job_template = None
        for item in UNIFIED_JT:
            if (kwargs.get(item, None) is not None):
                jt_id = kwargs.pop(item)
                if (unified_job_template is None):
                    unified_job_template = (item, jt_id)
                else:
                    # At most one unified JT field may be supplied.
                    raise exc.UsageError('More than one unified job template fields provided, please tighten your criteria.')
        if (unified_job_template is not None):
            kwargs['unified_job_template'] = unified_job_template[1]
            obj.identity = tuple((list(obj.identity) + ['unified_job_template']))
            return '/'.join([UNIFIED_JT[unified_job_template[0]], str(unified_job_template[1]), 'schedules/'])
        elif is_create:
            raise exc.UsageError('You must provide exactly one unified job template field during creation.')

    def decorator_without_pk(obj, *args, **kwargs):
        # Temporarily swap the endpoint during create, then restore it.
        old_endpoint = obj.endpoint
        new_endpoint = helper(kwargs, obj)
        if is_create:
            obj.endpoint = new_endpoint
        result = func(obj, *args, **kwargs)
        obj.endpoint = old_endpoint
        return result

    def decorator_with_pk(obj, pk=None, *args, **kwargs):
        old_endpoint = obj.endpoint
        new_endpoint = helper(kwargs, obj)
        if is_create:
            obj.endpoint = new_endpoint
        result = func(obj, *args, pk=pk, **kwargs)
        obj.endpoint = old_endpoint
        return result

    decorator = (decorator_with_pk if has_pk else decorator_without_pk)
    # Preserve click attributes from the wrapped function.
    for item in CLICK_ATTRS:
        setattr(decorator, item, getattr(func, item, []))
    decorator.__doc__ = func.__doc__
    return decorator
Decorator to aggregate unified_jt-related fields. Args: func: The CURD method to be decorated. is_create: Boolean flag showing whether this method is create. has_pk: Boolean flag showing whether this method uses pk as argument. Returns: A function with necessary click-related attributes whose keyworded arguments are aggregated. Raises: exc.UsageError: Either more than one unified jt fields are provided, or none is provided when is_create flag is set.
codesearchnet
def mount_share_at_path(share_path, mount_path):
    """Mounts a share at the specified path.

    Args:
        share_path: String URL with all auth info to connect to file share.
        mount_path: Path to mount share on.

    Returns:
        The mount point (str), or raises an Exception on failure.
    """
    sh_url = CFURLCreateWithString(None, share_path, None)
    mo_url = CFURLCreateWithString(None, mount_path, None)
    # Suppress UI prompts during the mount (per the option name).
    open_options = {NetFS.kNAUIOptionKey: NetFS.kNAUIOptionNoUI}
    # Allow sub-mounts and mount at the given directory rather than a
    # system-generated one (per the option names).
    mount_options = {NetFS.kNetFSAllowSubMountsKey: True,
                     NetFS.kNetFSMountAtMountDirKey: True}
    (result, output) = NetFS.NetFSMountURLSync(sh_url, mo_url, None, None,
                                               open_options, mount_options, None)
    # A non-zero result is an error code from NetFSMountURLSync.
    if (result != 0):
        raise Exception(('Error mounting url "%s" at path "%s": %s'
                         % (share_path, mount_path, output)))
    # output[0] holds the actual mount point.
    return str(output[0])
Mounts a share at the specified path Args: share_path: String URL with all auth info to connect to file share. mount_path: Path to mount share on. Returns: The mount point or raises an error
codesearchnet
def _from_string(cls, serialized):
    """Return an instance of `cls` parsed from its `serialized` form.

    Args:
        cls: The :class:`OpaqueKey` subclass.
        serialized (unicode): A serialized :class:`OpaqueKey`, with the
            namespace already removed.

    Raises:
        InvalidKeyError: if `serialized` is not a valid serialized key
            understood by `cls`.
    """
    try:
        usage_key, aside_type = _split_keys_v1(serialized)
        return cls(UsageKey.from_string(usage_key), aside_type)
    except ValueError as exc:
        raise InvalidKeyError(cls, exc.args)
Return an instance of `cls` parsed from its `serialized` form. Args: cls: The :class:`OpaqueKey` subclass. serialized (unicode): A serialized :class:`OpaqueKey`, with namespace already removed. Raises: InvalidKeyError: Should be raised if `serialized` is not a valid serialized key understood by `cls`.
juraj-google-style
def get_raw_mempool(self, id=None, endpoint=None):
    """Return the transactions in the memory pool of the endpoint.

    Args:
        id (int, optional): id to use for response tracking.
        endpoint (RPCEndpoint, optional): endpoint to use.

    Returns:
        json object of the result or the error encountered in the RPC call.
    """
    return self._call_endpoint(GET_RAW_MEMPOOL, id=id, endpoint=endpoint)
Returns the tx that are in the memorypool of the endpoint Args: id: (int, optional) id to use for response tracking endpoint: (RPCEndpoint, optional) endpoint to specify to use Returns: json object of the result or the error encountered in the RPC call
juraj-google-style
def delete_folder(self, folder_id, recursive=True):
    """Delete an existing folder.

    Args:
        folder_id (int): ID of the folder to delete.
        recursive (bool): Delete all subfolders if True.

    Returns:
        dict. Response from Box.

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    params = {'recursive': unicode(recursive).lower()}
    return self.__request("DELETE",
                          "folders/%s" % (folder_id, ),
                          querystring=params)
Delete an existing folder Args: folder_id (int): ID of the folder to delete. recursive (bool): Delete all subfolder if True. Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
juraj-google-style
def setup(template, version=None):
    """Sets up a new project from a template.

    Args:
        template (str): The git SSH path to a template.
        version (str, optional): The version of the template to use.
            Defaults to the latest version.
    """
    temple.check.is_git_ssh_path(template)
    temple.check.not_in_git_repo()

    repo_path = temple.utils.get_repo_path(template)
    # NOTE(review): the docs URL in this message was truncated in the
    # extracted source; reconstructed conservatively -- confirm against the
    # original project before release.
    msg = (
        'You will be prompted for the parameters of your new project.'
        ' Please read the docs at https://github.com/CloverHealth/temple'
        ' before entering parameters for {}.'
    ).format(repo_path)
    print(msg)

    cc_repo_dir, config = temple.utils.get_cookiecutter_config(template, version=version)
    if not version:
        # Pin the template version to the currently checked-out commit.
        with temple.utils.cd(cc_repo_dir):
            ret = temple.utils.shell('git rev-parse HEAD', stdout=subprocess.PIPE)
            version = ret.stdout.decode('utf-8').strip()
    _generate_files(repo_dir=cc_repo_dir, config=config, template=template, version=version)
Sets up a new project from a template Note that the `temple.constants.TEMPLE_ENV_VAR` is set to 'setup' during the duration of this function. Args: template (str): The git SSH path to a template version (str, optional): The version of the template to use when updating. Defaults to the latest version
juraj-google-style
def __init__(self, name=None):
    """Menu constructor.

    Args:
        name: (str or None) name of this menu.
    """
    self._name = name
    self._items = []
Menu constructor. Args: name: (str or None) name of this menu.
github-repos
def _write(self, save_path, options=None):
    """Save the checkpointed variables asynchronously.

    Same logic as save(), but does not increment the underlying save
    counter — that is the caller's job (e.g. CheckpointManager).

    Args:
        save_path: The file prefix of the checkpoint file.
        options: Optional CheckpointOptions instance.

    Returns:
        The full path of the checkpoint file.
    """
    write_start_time = time.time()
    if not self._initialized:
        self._ensure_initialized()
    else:
        # Wait for any in-flight async save, snapshot variables to host
        # memory, and surface errors raised on the writer thread.
        self._queue.join()
        self._copy_to_cpu()
        self._check_async_thread_error()
    # Block until outstanding eager async ops have completed so the
    # snapshot is consistent.
    context.async_wait()
    self._save_file_prefix = save_path
    self._use_checkpoint_save = False
    # Copy the options so later mutations by the caller don't affect the
    # in-flight save; nested async checkpointing is explicitly disabled.
    self._checkpoint_options = copy.copy(options) if options else None
    if self._checkpoint_options:
        self._checkpoint_options.experimental_enable_async_checkpoint = False
    # Signal the background thread to perform the actual file write.
    self._queue.put(True)
    write_end_time = time.time()
    metrics.AddCheckpointWriteDuration(api_label=_ASYNC_CHECKPOINT, microseconds=_get_duration_microseconds(write_start_time, write_end_time))
    return save_path
Save the checkpointed variables. This method has exactly the same logic as save(), except it does not increment the underlying save_counter, which is done by the caller, e.g., CheckpointManager. Args: save_path: The file prefix of the checkpoint file. options: Optional CheckpointOption instance. Returns: The full path of the checkpoint file.
github-repos
def get_field(self, field_type):
    """Look up the value stored for ``field_type`` in ``oxm_match_fields``.

    Args:
        field_type: OXM field type to search for.

    Returns:
        The matching field's ``oxm_value``, or None when no field matches.
    """
    match = next(
        (f for f in self.oxm_match_fields if f.oxm_field == field_type),
        None,
    )
    return match.oxm_value if match is not None else None
Return the value for the 'field_type' field in oxm_match_fields. Args: field_type (~pyof.v0x04.common.flow_match.OxmOfbMatchField, ~pyof.v0x04.common.flow_match.OxmMatchFields): The type of the OXM field you want the value. Returns: The integer number of the 'field_type' if it exists. Otherwise return None.
juraj-google-style
def get_bond_order(sp1, sp2, dist, tol=0.2, default_bl=None):
    """Calculate the bond order for two species at a given distance.

    Args:
        sp1 (Specie): First specie.
        sp2 (Specie): Second specie.
        dist: Distance between the species in angstrom.
        tol (float): Relative tolerance applied to the single-bond length
            when deciding whether the species are bonded at all.
        default_bl: Bond length (bond order 1) to fall back on when this
            bond type is unknown; if None the lookup's error propagates.

    Returns:
        float: Fractional bond order (e.g. ~1.7 for C-C in benzene), or 0
        when the distance exceeds the tolerated single-bond length.
    """
    # NOTE(review): all_lengths appears to be keyed by integer bond order
    # starting at 1 (a dict), not a 0-based sequence — confirm against
    # obtain_all_bond_lengths.
    all_lengths = obtain_all_bond_lengths(sp1, sp2, default_bl)
    # First entry: single-bond length stretched by the tolerance; then the
    # exact lengths for orders 1..n.
    lengths_list = ([(all_lengths[1] * (1 + tol))] + [all_lengths[(idx + 1)] for idx in range(len(all_lengths))])
    trial_bond_order = 0
    while (trial_bond_order < len(lengths_list)):
        if (lengths_list[trial_bond_order] < dist):
            if (trial_bond_order == 0):
                # Farther than a tolerated single bond: not bonded.
                return trial_bond_order
            else:
                # Linearly interpolate between the two bracketing lengths.
                low_bl = lengths_list[trial_bond_order]
                high_bl = lengths_list[(trial_bond_order - 1)]
                return (trial_bond_order - ((dist - low_bl) / (high_bl - low_bl)))
        trial_bond_order += 1
    # Shorter than the highest-order bond length (minus tolerance): warn.
    if (dist < (lengths_list[(- 1)] * (1 - tol))):
        warnings.warn(('%.2f angstrom distance is too short for %s and %s' % (dist, sp1, sp2)))
    return (trial_bond_order - 1)
Calculate the bond order given the distance of 2 species Args: sp1 (Specie): First specie. sp2 (Specie): Second specie. dist: Their distance in angstrom tol (float): Relative tolerance to test. Basically, the code checks if the distance between the sites is larger than (1 + tol) * the longest bond distance or smaller than (1 - tol) * the shortest bond distance to determine if they are bonded or the distance is too short. Defaults to 0.2. default_bl: If a particular type of bond does not exist, use this bond length (bond order = 1). If None, a ValueError will be thrown. Returns: Float value of bond order. For example, for C-C bond in benzene, return 1.7.
codesearchnet
def fit(self, X):
    """Fit a univariate distribution per column, then the covariance matrix.

    Args:
        X (numpy.ndarray or pandas.DataFrame): Data to model.
    """
    LOGGER.debug('Fitting Gaussian Copula')
    dist_cls = import_object(self.distribution)
    for name in self.get_column_names(X):
        model = dist_cls()
        # Register first, then fit — matches the original update order.
        self.distribs[name] = model
        model.fit(self.get_column(X, name))
    self.covariance = self._get_covariance(X)
    self.fitted = True
Compute the distribution for each variable and then its covariance matrix. Args: X(numpy.ndarray or pandas.DataFrame): Data to model. Returns: None
juraj-google-style
def trace_stop(self):
    """Stop collecting trace data.

    Raises:
        errors.JLinkException: if the DLL reports failure (return code 1).
    """
    status = self._dll.JLINKARM_TRACE_Control(enums.JLinkTraceCommand.STOP, 0)
    if status == 1:
        raise errors.JLinkException('Failed to stop trace.')
    return None
Stops collecting trace data. Args: self (JLink): the ``JLink`` instance. Returns: ``None``
juraj-google-style
def events_from_multifile_logdir(logdir):
    """Map each `tfevents` filename in `logdir` to its list of events.

    Args:
        logdir: The directory from which to load events.

    Returns:
        Dict mapping relative filenames to lists of tf.Event protos.

    Raises:
        AssertionError: If logdir does not exist.
    """
    assert gfile.Exists(logdir)
    return {
        name: events_from_file(os.path.join(logdir, name))
        for name in gfile.ListDirectory(logdir)
        if 'tfevents' in name
    }
Returns map of filename to events for all `tfevents` files in the logdir. Args: logdir: The directory from which to load events. Returns: A dict mapping from relative filenames to lists of tf.Event protos. Raises: AssertionError: If logdir does not exist.
github-repos
def _restore_from_tensors(self, restored_tensors):
    """Restore checkpointed values to this `Trackable`.

    Args:
        restored_tensors: Dict mapping the names produced by
            `_serialize_to_tensors` to the restored tensors.

    Returns:
        An op that runs the restoration (when implemented by a subclass).
    """
    # Abstract hook: concrete Trackable subclasses must override this.
    raise NotImplementedError
Restores checkpointed values to this `Trackable`. Please see the documentation for `Trackable._serialize_to_tensors`. Args: restored_tensors: A dictionary mapping names to tensors. The keys to this dictionary matches the names passed to _serialize_to_tensors. Returns: An op that runs the restoration.
github-repos
def generate_defect_structure(self, supercell=(1, 1, 1)):
    """Return the defective (vacancy) structure, decorated with charge.

    Args:
        supercell: Supercell scaling — an int, 3x1 vector, or 3x3 matrix
            accepted by Structure.make_supercell.
    """
    defect_structure = self.bulk_structure.copy()
    defect_structure.make_supercell(supercell)
    # Build a one-site structure holding only the defect site so its
    # position can be mapped into the scaled cell.
    struct_for_defect_site = Structure(
        self.bulk_structure.copy().lattice, [self.site.specie],
        [self.site.frac_coords],
        to_unit_cell=True)
    struct_for_defect_site.make_supercell(supercell)
    defect_site = struct_for_defect_site[0]
    # Find the supercell site closest to the mapped defect position
    # (2 angstrom search radius), sorting candidates by distance.
    poss_deflist = sorted(
        defect_structure.get_sites_in_sphere(defect_site.coords, 2, include_index=True),
        key=lambda x: x[1])
    defindex = poss_deflist[0][2]
    # Remove that site to create the vacancy and tag the net charge.
    defect_structure.remove_sites([defindex])
    defect_structure.set_charge(self.charge)
    return defect_structure
Returns Defective Vacancy structure, decorated with charge Args: supercell (int, [3x1], or [[]] (3x3)): supercell integer, vector, or scaling matrix
juraj-google-style
def adversary(self, name, **kwargs):
    """Add Adversary data to this Batch.

    Args:
        name (str): Name for the Group.
        **kwargs: Optional Group fields such as ``date_added`` and ``xid``.

    Returns:
        Adversary: The registered group instance.
    """
    return self._group(Adversary(name, **kwargs))
Add Adversary data to Batch object. Args: name (str): The name for this Group. date_added (str, kwargs): The date timestamp the Indicator was created. xid (str, kwargs): The external id for this Group. Returns: obj: An instance of Adversary.
codesearchnet
def in_top_k(predictions, targets, k):
    """Return whether the `targets` are in the top `k` `predictions`.

    Args:
        predictions: float32 tensor of shape `(batch_size, classes)`.
        targets: 1D int32/int64 tensor of length `batch_size`.
        k: Number of top elements to consider.

    Returns:
        1D bool tensor of length `batch_size`; `output[i]` is True when
        `predictions[i, targets[i]]` is within the top-k of row i.
    """
    return nn.in_top_k(predictions, targets, k)
Returns whether the `targets` are in the top `k` `predictions`. Args: predictions: A tensor of shape `(batch_size, classes)` and type `float32`. targets: A 1D tensor of length `batch_size` and type `int32` or `int64`. k: An `int`, number of top elements to consider. Returns: A 1D tensor of length `batch_size` and type `bool`. `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k` values of `predictions[i]`.
github-repos
def status(self, **kwargs):
    """Fetch this geo node's status from the server.

    Args:
        **kwargs: Extra options forwarded to the server (e.g. sudo).

    Returns:
        dict: The status of the geo node.

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabGetError: If the server failed to perform the request.
    """
    endpoint = '/geo_nodes/%s/status' % self.get_id()
    return self.manager.gitlab.http_get(endpoint, **kwargs)
Get the status of the geo node. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabGetError: If the server failed to perform the request Returns: dict: The status of the geo node
juraj-google-style
def forward(self, inputs: torch.Tensor):
    """Apply the configured normalization.

    For batch-style norms the (batch, channel) axes are flattened first so
    statistics are computed across both, then the shape is restored.

    Args:
        inputs: Tensor of shape (batch_size, num_channels, num_patches,
            d_model).

    Returns:
        Tensor of the same shape, normalized.
    """
    if 'batch' not in self.norm_mlp.lower():
        return self.norm(inputs)
    b, c, p, d = inputs.shape
    flattened = self.norm(inputs.reshape(b * c, p, d))
    return flattened.reshape(b, c, p, d)
Args: inputs (`torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`): Input to the normalization layer. Returns: `torch.Tensor` of shape `((batch_size, num_channels, num_patches, d_model))`
github-repos
def get_passage(self, offset: int) -> "BioCPassage | None":
    """Return the passage whose offset equals ``offset``.

    Args:
        offset: Passage offset to look for.

    Returns:
        The matching passage, or None when no passage has that offset.
    """
    # The original annotation `BioCPassage or None` evaluated eagerly to
    # just `BioCPassage` (and required the name at def time); a string
    # annotation expresses the optional result without evaluation.
    for passage in self.passages:
        if passage.offset == offset:
            return passage
    return None
Gets passage Args: offset: passage offset Return: the passage with specified offset
juraj-google-style
def banner_print(msg, color='', width=60, file=sys.stdout, logger=_LOG):
    """Print ``msg`` as a fixed-width '=' banner; also log it un-bannered.

    Args:
        msg: The message to print.
        color: Optional colorama color/style string applied to the banner.
        width: Total width of the resulting banner.
        file: Output file object (e.g. sys.stdout).
        logger: Logger for the debug copy, or None to disable logging.
    """
    if logger:
        # Strip ANSI escape sequences so the log copy is plain text.
        logger.debug(ANSI_ESC_RE.sub('', msg))
    if CLI_QUIET:
        return
    # Split the '=' padding around the message; ceil on the left and floor
    # on the right keeps the total width exact when padding is odd.
    lpad = (int(math.ceil((((width - _printed_len(msg)) - 2) / 2.0))) * '=')
    rpad = (int(math.floor((((width - _printed_len(msg)) - 2) / 2.0))) * '=')
    file.write('{sep}{color}{lpad} {msg} {rpad}{reset}{sep}{sep}'.format(sep=_linesep_for_file(file), color=color, lpad=lpad, msg=msg, rpad=rpad, reset=colorama.Style.RESET_ALL))
    file.flush()
Print the message as a banner with a fixed width. Also logs the message (un-bannered) to the given logger at the debug level. Args: msg: The message to print. color: Optional colorama color string to be applied to the message. You can concatenate colorama color strings together in order to get any set of effects you want. width: Total width for the resulting banner. file: A file object to which the banner text will be written. Intended for use with CLI output file objects like sys.stdout. logger: A logger to use, or None to disable logging. Example: >>> banner_print('Foo Bar Baz') ======================== Foo Bar Baz =======================
codesearchnet
def get_items_by_ids(self, item_ids, item_type=None):
    """Fetch `Item` objects for the given ids, optionally filtered by type.

    Args:
        item_ids: Iterable of item IDs to query.
        item_type (str): Optional item type used to filter the results.

    Returns:
        list: `Item` objects for the given IDs (and type, when given).
    """
    urls = [urljoin(self.item_url, f'{item_id}.json') for item_id in item_ids]
    items = [Item(raw) for raw in self._run_async(urls=urls) if raw]
    if not item_type:
        return items
    return [item for item in items if item.item_type == item_type]
Given a list of item ids, return all the Item objects Args: item_ids (obj): List of item IDs to query item_type (str): (optional) Item type to filter results with Returns: List of `Item` objects for given item IDs and given item type
codesearchnet
def remove(self, block_id):
    """Remove a Processing Block from the queue.

    Args:
        block_id (str): Identifier of the block to drop.
    """
    # Look up and remove under the lock so concurrent readers never see a
    # half-updated queue.
    with self._mutex:
        self._queue.remove(self._block_map[block_id])
Remove a Processing Block from the queue. Args: block_id (str):
codesearchnet
def _start_services_on_ads(ads):
    """Start long running services on multiple AndroidDevice objects.

    If starting services fails on a required device, every device started
    so far is destroyed and the error is re-raised; a failure on an
    optional device is logged and the device is skipped.

    Args:
        ads: A list of AndroidDevice objects whose services to start.
    """
    running_ads = []
    for ad in ads:
        running_ads.append(ad)
        # Per-device opt-out of logcat collection.
        start_logcat = not getattr(ad, KEY_SKIP_LOGCAT, DEFAULT_VALUE_SKIP_LOGCAT)
        try:
            ad.services.register(
                SERVICE_NAME_LOGCAT, logcat.Logcat, start_service=start_logcat)
        except Exception:
            is_required = getattr(ad, KEY_DEVICE_REQUIRED,
                                  DEFAULT_VALUE_DEVICE_REQUIRED)
            if is_required:
                ad.log.exception('Failed to start some services, abort!')
                # Tear down everything started so far before propagating.
                destroy(running_ads)
                raise
            else:
                ad.log.exception('Skipping this optional device because some '
                                 'services failed to start.')
Starts long running services on multiple AndroidDevice objects. If any one AndroidDevice object fails to start services, cleans up all existing AndroidDevice objects and their services. Args: ads: A list of AndroidDevice objects whose services to start.
juraj-google-style
def isconst(cls, val): return isinstance(val, string_types) and \ ((len(val) == 7 and val[0] == "
Whether the value is a string color literal. Checks for a well-formed hexadecimal color value or a named color. Args: val (str) : the value to check Returns: True, if the value is a string color literal
juraj-google-style
def get_location(self, locations=None):
    """Return the dataset's locations.

    Args:
        locations (Optional[List[str]]): Valid locations list; defaults to
            the list downloaded from HDX when None.

    Returns:
        List[str]: Location names, or [] when the dataset has none.
    """
    groups = self.data.get('groups')
    if not groups:
        return []
    return [
        Locations.get_location_from_HDX_code(
            group['name'], locations=locations, configuration=self.configuration)
        for group in groups
    ]
Return the dataset's location Args: locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. Returns: List[str]: list of locations or [] if there are none
codesearchnet
def _update_seek(self, offset, whence):
    """Apply a seek and return the new absolute position.

    Args:
        offset (int): Offset relative to `whence`.
        whence (int): One of SEEK_SET, SEEK_CUR or SEEK_END.

    Returns:
        int: New seek position.

    Raises:
        ValueError: For an unsupported `whence` value.
    """
    with self._seek_lock:
        if whence == SEEK_SET:
            new_pos = offset
        elif whence == SEEK_CUR:
            new_pos = self._seek + offset
        elif whence == SEEK_END:
            new_pos = self._size + offset
        else:
            raise ValueError('whence value %s unsupported' % whence)
        self._seek = new_pos
        return new_pos
Update seek value. Args: offset (int): Offset. whence (int): Whence. Returns: int: Seek position.
juraj-google-style
def check_network_connection(server, port):
    """Check whether a TCP connection to ``server:port`` can be made.

    Arguments:
        server: Hostname to resolve and connect to.
        port: TCP port number.

    Returns:
        True when the connection succeeds, False otherwise.
    """
    logger = logging.getLogger(__name__)
    logger.debug("Checking network connection to server '%s'...", server)
    try:
        host_ip = socket.gethostbyname(server)
        socket.create_connection((host_ip, port), 2).close()
    except Exception:
        # Best-effort probe: any resolution/connect failure means "down".
        logger.debug("Network connection not working")
        return False
    else:
        logger.debug("Network connection working")
        return True
Checks if jasper can connect to a network server. Arguments: server -- the server to connect to port -- the TCP port to connect on Returns: True or False
juraj-google-style
def Process(self, path):
    """Expand Windows environment variables in ``path``.

    Args:
        path: Path (as a string) to post-process.

    Returns:
        A list of paths with environment variables replaced with their
        values; a variable with several values yields several paths.
    """
    # Canonicalize the system root / system32 prefixes first so the
    # generic %var% expansion below sees a normalized path.
    path = re.sub(self.SYSTEMROOT_RE, '%systemroot%', path, count=1)
    path = re.sub(self.SYSTEM32_RE, '%systemroot%\\\\system32', path, count=1)
    matches_iter = self.WIN_ENVIRON_REGEX.finditer(path)
    var_names = set((m.group(1).lower() for m in matches_iter))
    results = [path]
    for var_name in var_names:
        try:
            (var_regex, var_value) = self.vars_map[var_name]
        except KeyError:
            # Unknown variable: leave it in place.
            continue
        if isinstance(var_value, string_types):
            replacements = [var_value]
        else:
            replacements = var_value
        # Cross-product: every candidate path so far times every value of
        # this variable.
        processed_results = []
        for result in results:
            for repl in replacements:
                # A callable replacement avoids backslash-escape
                # interpretation in re.sub replacement strings.
                processed_results.append(var_regex.sub((lambda _: repl), result))
        results = processed_results
    return results
Processes a given path. Args: path: Path (as a string) to post-process. Returns: A list of paths with environment variables replaced with their values. If the mapping had a list of values for a particular variable, instead of just one value, then all possible replacements will be returned.
codesearchnet
def getConParams(virtualhost):
    """Build pika connection parameters for the configured RabbitMQ server.

    Args:
        virtualhost (str): Selected virtualhost in RabbitMQ.

    Returns:
        pika.ConnectionParameters: Object filled from constants in
        :class:`edeposit.amqp.settings`.
    """
    credentials = pika.PlainCredentials(
        settings.RABBITMQ_USER_NAME,
        settings.RABBITMQ_USER_PASSWORD,
    )
    return pika.ConnectionParameters(
        host=settings.RABBITMQ_HOST,
        port=int(settings.RABBITMQ_PORT),
        virtual_host=virtualhost,
        credentials=credentials,
    )
Connection object builder. Args: virtualhost (str): selected virtualhost in rabbitmq Returns: pika.ConnectionParameters: object filled by `constants` from :class:`edeposit.amqp.settings`.
codesearchnet
def __init__(
    self,
    function_approximator,
    batch_size=4,
    map_size=(10, 10),
    memory_num=4,
    repeating_penalty=0.5,
    enemy_num=2,
    enemy_init_dist=5
):
    """Init.

    Args:
        function_approximator: is-a `FunctionApproximator`.
        batch_size: Batch size used by the learner.
        map_size: Size of the map as (rows, cols).
        memory_num: Number of steps of the agent's route memory.
        repeating_penalty: Penalty applied when the agent revisits a cell.
        enemy_num: Number of enemies.
        enemy_init_dist: Minimum euclid distance between the agent's and
            the enemies' initial positions.
    """
    self.__map_arr = self.__create_map(map_size)
    self.__agent_pos = self.START_POS
    self.__enemy_num = enemy_num
    self.__enemy_pos_list = [None] * enemy_num
    self.__enemy_init_dist = enemy_init_dist
    self.__create_enemy(self.__map_arr)
    self.__reward_list = []
    self.__route_memory_list = []
    self.__memory_num = memory_num
    self.__repeating_penalty = repeating_penalty
    self.__batch_size = batch_size
    # Parent init last: environment state must exist before the learner
    # wiring in the superclass runs.
    super().__init__(function_approximator)
    self.__inferencing_flag = False
Init. Args: function_approximator: is-a `FunctionApproximator`. map_size: Size of map. memory_num: The number of step of agent's memory. repeating_penalty: The value of penalty in the case that agent revisit. enemy_num: The number of enemies. enemy_init_dist: Minimum euclid distance of initial position of agent and enemies.
juraj-google-style
def __init__(self, app_id=None):
    """Init a Skill with proxy request and response objects.

    Args:
        app_id: str, default None. Skill application ID, declared so it
            can be validated against the application ID in requests.
    """
    self.valid = Valid(app_id)
    self.request = RequestBody()
    self.response = ResponseBody()
    # Maps request/intent names to their handler functions.
    self.logic = dict()
    # Shortcuts for the standard request types; `register` presumably
    # returns a decorator for the named request type (confirm against
    # `register`), while `intent` is the bare registrar so callers pass
    # the intent name themselves.
    self.launch = self.register('LaunchRequest')
    self.intent = self.register
    self.session_ended = self.register('SessionEndedRequest')
Inits a Skill class with proxy request and response. Args: app_id: str, default None. Skill application ID, declare to validate against application ID in the request.
juraj-google-style
def __init__(self, func=None, *, animation_gen, step=.1):
    """Constructor.

    Args:
        func: The decorated function when Animate is used without kwargs;
            otherwise None. Should NOT be given via keyword assignment.
        animation_gen: A generator that yields strings for the animation.
        step: Seconds between each animation frame.

    Raises:
        TypeError: When ``func`` is not callable.
    """
    if not callable(func):
        raise TypeError("argument 'func' for {!r} must be "
                        "callable".format(self.__class__.__name__))
    # Reject functions this library has already processed (presumably —
    # confirm against _raise_if_annotated).
    self._raise_if_annotated(func)
    self._func = func
    self._animation_gen = animation_gen
    self._step = step
    # Preserve the wrapped function's metadata (__name__, __doc__, ...).
    functools.update_wrapper(self, func)
Constructor. Args: func: If Animate is used without kwargs, then the function it decorates is passed in here. Otherwise, this is None. This argument should NOT be given directly via keyword assignment. animation_gen: A generator that yields strings for the animation. step: Seconds between each animation frame.
juraj-google-style
def make_prior(num_topics, initial_value):
    """Create the trainable Dirichlet prior over topics.

    Args:
        num_topics: Number of topics.
        initial_value: The starting value for the prior concentration.

    Returns:
        prior: A callable returning a `tfd.Dirichlet` built from the
            current (trainable) concentration.
        prior_variables: A list of the prior's trainable `Variable`s.
    """

    def _softplus_inverse(x):
        # Inverse of softplus, so the variable's initial value maps back
        # to `initial_value` after the softplus below.
        return np.log(np.expm1(x))
    logit_concentration = tf.compat.v1.get_variable('logit_concentration', shape=[1, num_topics], initializer=tf.compat.v1.initializers.constant(_softplus_inverse(initial_value)))
    # Softplus keeps the concentration positive; clipping keeps it in a
    # numerically safe range for the Dirichlet.
    concentration = _clip_dirichlet_parameters(tf.nn.softplus(logit_concentration))

    def prior():
        return tfd.Dirichlet(concentration=concentration, name='topics_prior')
    prior_variables = [logit_concentration]
    return (prior, prior_variables)
Create the prior distribution. Args: num_topics: Number of topics. initial_value: The starting value for the prior parameters. Returns: prior: A `callable` that returns a `tf.distribution.Distribution` instance, the prior distribution. prior_variables: A `list` of `Variable` objects, the trainable parameters of the prior.
codesearchnet
def __init__(self, environ, base_paths=None):
    """Constructor.

    Args:
        environ: An environ dict for the request as defined in PEP-333.
        base_paths: Optional collection of path prefixes; the first one
            that matches is stripped from the request path.

    Raises:
        ValueError: If the path for the request is invalid (matches no
            base path).
    """
    self.headers = util.get_headers_from_environ(environ)
    self.http_method = environ['REQUEST_METHOD']
    self.url_scheme = environ['wsgi.url_scheme']
    self.server = environ['SERVER_NAME']
    self.port = environ['SERVER_PORT']
    self.path = environ['PATH_INFO']
    self.request_uri = environ.get('REQUEST_URI')
    # A REQUEST_URI shorter than PATH_INFO cannot be trusted; drop it.
    if self.request_uri is not None and len(self.request_uri) < len(self.path):
        self.request_uri = None
    self.query = environ.get('QUERY_STRING')
    self.body = environ['wsgi.input'].read()
    # Transparently inflate gzip bodies; on failure keep the raw bytes.
    if self.body and self.headers.get('CONTENT-ENCODING') == 'gzip':
        try:
            self.body = zlib.decompress(self.body, 16 + zlib.MAX_WBITS)
        except zlib.error:
            pass
    # Honor the HTTP method override header, then hide it from handlers.
    if _METHOD_OVERRIDE in self.headers:
        self.http_method = self.headers[_METHOD_OVERRIDE]
        del self.headers[_METHOD_OVERRIDE]
    self.source_ip = environ.get('REMOTE_ADDR')
    self.relative_url = self._reconstruct_relative_url(environ)
    if not base_paths:
        base_paths = set()
    elif isinstance(base_paths, list):
        base_paths = set(base_paths)
    # Strip the first matching base path from path and request_uri.
    for base_path in base_paths:
        if self.path.startswith(base_path):
            self.path = self.path[len(base_path):]
            if self.request_uri is not None:
                self.request_uri = self.request_uri[len(base_path):]
            self.base_path = base_path
            break
    else:
        raise ValueError('Invalid request path: %s' % self.path)
    if self.query:
        self.parameters = urlparse.parse_qs(self.query, keep_blank_values=True)
    else:
        self.parameters = {}
    self.body_json = self._process_req_body(self.body) if self.body else {}
    self.request_id = None
    # Single-element JSON-RPC style batches are unwrapped to a plain
    # request; larger batches are unsupported and only logged.
    if isinstance(self.body_json, list):
        if len(self.body_json) != 1:
            _logger.warning('Batch requests with more than 1 element aren\'t '
                            'supported in devappserver2. Only the first element '
                            'will be handled. Found %d elements.', len(self.body_json))
        else:
            _logger.info('Converting batch request to single request.')
            self.body_json = self.body_json[0]
            self.body = json.dumps(self.body_json)
        self._is_batch = True
    else:
        self._is_batch = False
Constructor. Args: environ: An environ dict for the request as defined in PEP-333. Raises: ValueError: If the path for the request is invalid.
juraj-google-style
def tag(self, name, formatter=None):
    """Return the Tag for ``name``, registering a new one when absent.

    Args:
        name (str): The value for this tag.
        formatter (method, optional): Maps a tag value to a formatted tag.

    Returns:
        Tag: The existing instance when one matches ``name``, otherwise a
        freshly registered one.
    """
    fresh = Tag(name, formatter)
    for existing in self._tags:
        if existing.name == name:
            return existing
    self._tags.append(fresh)
    return fresh
Return instance of Tag. Args: name (str): The value for this tag. formatter (method, optional): A method that take a tag value and returns a formatted tag. Returns: obj: An instance of Tag.
juraj-google-style
def ticker(self, contract: Contract) -> Ticker:
    """Return the ticker previously requested for ``contract``.

    The ticker must have been requested via reqMktData with this same
    contract object; it may not be populated yet if called right after
    :meth:`.reqMktData`.

    Args:
        contract: Contract to get the ticker for.
    """
    # Tickers are keyed by object identity, so the exact instance passed
    # to reqMktData is required.
    return self.wrapper.tickers.get(id(contract))
Get ticker of the given contract. It must have been requested before with reqMktData with the same contract object. The ticker may not be ready yet if called directly after :meth:`.reqMktData`. Args: contract: Contract to get ticker for.
juraj-google-style
def owned_by(self, owner, also_check_group=False):
    """Check whether the specified user (or user and group) owns the file.

    Args:
        owner (str): User (or group) name to test ownership against.
        also_check_group (bool): When True, require that both the user
            owner and the group owner equal ``owner``.

    Returns:
        bool: True when the ownership test passes.
    """
    if self.owner != owner:
        return False
    return self.group == owner if also_check_group else True
Checks if the specified user or user and group own the file. Args: owner (str): the user (or group) name for which we ask about ownership also_check_group (bool): if set to True, both user owner and group owner checked if set to False, only user owner checked Returns: bool: True if owner of the file is the specified owner
codesearchnet
def get_mock_ads(num):
    """Generate mock AndroidDevice objects with serials '0'..str(num - 1).

    Args:
        num: How many mock AndroidDevice objects to create.

    Returns:
        list: The mock device objects.
    """
    def _make(idx):
        device = mock.MagicMock(name='AndroidDevice', serial=str(idx), h_port=None)
        device.skip_logcat = False
        return device
    return [_make(idx) for idx in range(num)]
Generates a list of mock AndroidDevice objects. The serial number of each device will be integer 0 through num - 1. Args: num: An integer that is the number of mock AndroidDevice objects to create.
github-repos
def _request(self, method, resource_uri, **kwargs):
    """Perform ``method`` against a resource and return the decoded JSON.

    Args:
        method: A requests HTTP method callable (e.g. ``requests.get``).
        resource_uri: Resource endpoint appended to ``API_BASE_URL``.
        **kwargs: May contain ``data``, sent as the JSON request body.

    Returns:
        The JSON-decoded response body.

    Raises:
        HTTPError: On a non-success status code.
    """
    url = self.API_BASE_URL + resource_uri
    payload = kwargs.get('data')
    response = method(url, json=payload, headers=self.headers)
    response.raise_for_status()
    return response.json()
Perform a method on a resource. Args: method: requests.`method` resource_uri: resource endpoint Raises: HTTPError Returns: JSON Response
codesearchnet
def release_docs_side_effect(content):
    """Escape template braces while keeping known placeholders live.

    Every literal ``{``/``}`` is doubled so ``str.format`` treats it as
    text, then the recognized build placeholders are un-escaped so they
    still receive substitutions.

    Args:
        content (str): The template for ``docs/index.rst.release.template``.

    Returns:
        str: The template with properly escaped curly braces.
    """
    escaped = content.replace('{', '{{').replace('}', '}}')
    for placeholder in ('version', 'circleci_build', 'travis_build',
                        'appveyor_build', 'coveralls_build'):
        escaped = escaped.replace('{{%s}}' % placeholder, '{%s}' % placeholder)
    return escaped
Updates the template so that curly braces are escaped correctly. Args: content (str): The template for ``docs/index.rst.release.template``. Returns: str: The updated template with properly escaped curly braces.
codesearchnet
def rep1(parser: Union[Parser, Sequence[Input]]) -> RepeatedOnceParser:
    """Match ``parser`` one or more times repeatedly.

    The resulting parser yields a list with one value per repetition and
    fails when ``parser`` does not match even once.

    Args:
        parser: Parser, or a string literal promoted via ``lit``.
    """
    wrapped = lit(parser) if isinstance(parser, str) else parser
    return RepeatedOnceParser(wrapped)
Match a parser one or more times repeatedly. This matches ``parser`` multiple times in a row. If it matches as least once, it returns a list of values from each time ``parser`` matched. If it does not match ``parser`` at all, it fails. Args: parser: Parser or literal
juraj-google-style
def _from_row_partition(cls, values, row_partition, validate=True):
    """Create a `RaggedTensor` from values and a (shareable) RowPartition.

    The outer dimension of `values` must equal `row_partition.nvals()`.

    Args:
        values: A potentially ragged tensor.
        row_partition: A `RowPartition`; can be shared between tensors.
        validate: If true, add assertion ops checking that the arguments
            form a valid `RaggedTensor`.

    Returns:
        A `RaggedTensor` with rank/ragged_rank one higher than `values`.

    Raises:
        TypeError: If `row_partition` or `validate` has the wrong type.
        ValueError: If partition.nvals() != _nrows(values).
    """
    if not isinstance(row_partition, RowPartition):
        raise TypeError(f'Argument `row_partition` must be a RowPartition. Received {row_partition}.')
    if not isinstance(validate, bool):
        raise TypeError(f'Argument `validate` must have type bool. Received {validate}.')
    values, row_partition = cls._convert_values_and_partition(values, row_partition, 'partition')
    # Cheap static check when value_rowids were already computed.
    if row_partition._has_precomputed_value_rowids():
        value_rowids_shape = row_partition.value_rowids().shape
        values.shape[:1].assert_is_compatible_with(value_rowids_shape)
    if validate:
        msg = 'Arguments to _from_row_partition do not form a valid RaggedTensor'
        nvals = _nrows(values, row_partition.dtype)
        checks = [check_ops.assert_equal(math_ops.cast(row_partition.nvals(), row_partition.dtype), nvals, message=msg)]
        if not isinstance(values, RaggedTensor):
            checks.append(check_ops.assert_rank_at_least(values, 1))
        # Attach the runtime assertions to the partition's tensors.
        row_partition = row_partition._with_dependencies(checks)
    return cls(values=values, internal=True, row_partition=row_partition)
Creates a `RaggedTensor` with a row partition. This is used as a way for RaggedTensors to share row partitions. The outer dimension of values must be equal to `partition.nvals()`. Args: values: A potentially ragged tensor. row_partition: a `RowPartition`: can be shared between tensors. validate: If true, then use assertions to check that the arguments form a valid `RaggedTensor`. Returns: A `RaggedTensor`. `result.rank = values.rank + 1`. `result.ragged_rank = values.ragged_rank + 1`. Raises: ValueError: If partition.nvals() != _nrows(values)
github-repos
def __init__(self, *, allow_partial: bool, accessor_writable: bool, sealed: bool, root_path: Optional[utils.KeyPath], init_super: bool=True):
    """Constructor.

    Args:
        allow_partial: Whether required fields may be MISSING_VALUE or
            partial.
        accessor_writable: Whether attribute writes are allowed (False
            forces updates through `rebind`).
        sealed: Whether the object is sealed against changes.
        root_path: KeyPath of this object within its object tree.
        init_super: If True call super().__init__, otherwise only
            object.__init__ — useful for multi-inheritance setups that
            pass different arguments to different bases.
    """
    # Raw attribute writes bypass the symbolic accessor machinery, which
    # is not set up yet at this point.
    self._set_raw_attr('_allow_partial', allow_partial)
    self._set_raw_attr('_accessor_writable', accessor_writable)
    self._set_raw_attr('_sealed', sealed)
    self._set_raw_attr('_sym_parent', None)
    self._set_raw_attr('_sym_path', root_path or utils.KeyPath())
    # Lazily-computed caches start as None.
    self._set_raw_attr('_sym_puresymbolic', None)
    self._set_raw_attr('_sym_missing_values', None)
    self._set_raw_attr('_sym_nondefault_values', None)
    origin = Origin(None, '__init__') if flags.is_tracking_origin() else None
    self._set_raw_attr('_sym_origin', origin)
    if init_super:
        super().__init__()
    else:
        object.__init__(self)
Constructor. Args: allow_partial: Whether to allow required fields to be MISSING_VALUE or partial. accessor_writable: Whether to allow write access via attributes. This flag is useful when we want to enforce update of fields using `rebind` method, which leads to better trackability and batched field update notification. sealed: Whether object is sealed that cannot be changed. This flag is useful when we don't want downstream to modify the object. root_path: KeyPath of current object in its context (object tree). init_super: If True, call super.__init__, otherwise short-circuit. This flag is useful when user want to explicitly implement `__init__` for multi-inheritance, which is needed to pass different arguments to different bases. Please see `symbolic_test.py#testMultiInheritance` for more details.
github-repos
def is_scalar(value):
    """Test whether the given value is a scalar.

    Unlike ``np.isscalar`` alone, this also treats ndarrays that squeeze
    to 0-d (e.g. memory mapped array values) as scalars.

    Args:
        value: The value to test.

    Returns:
        bool: Whether the value is a scalar.
    """
    if np.isscalar(value):
        return True
    return isinstance(value, np.ndarray) and np.squeeze(value).ndim == 0
Test if the given value is a scalar. This function also works with memory mapped array values, in contrast to the numpy is_scalar method. Args: value: the value to test for being a scalar value Returns: boolean: if the given value is a scalar or not
juraj-google-style
def remove_option(self, section, name, value=None):
    """Remove matching options from this unit.

    Args:
        section (str): The section to remove from.
        name (str): The option name to remove.
        value (str, optional): When given, only options with this exact
            value are removed; otherwise every ``name`` in ``section``.

    Returns:
        bool: True when at least one option was removed.

    Raises:
        RuntimeError: When the unit has already been submitted.
    """
    if self._is_live():
        raise RuntimeError('Submitted units cannot update their options')
    removed = 0
    # Iterate over a snapshot so removal during iteration is safe.
    for option in list(self._data['options']):
        if option['section'] != section or option['name'] != name:
            continue
        if value is not None and option['value'] != value:
            continue
        self._data['options'].remove(option)
        removed += 1
    return removed > 0
Remove an option from a unit Args: section (str): The section to remove from. name (str): The item to remove. value (str, optional): If specified, only the option matching this value will be removed If not specified, all options with ``name`` in ``section`` will be removed Returns: True: At least one item was removed False: The item requested to remove was not found
codesearchnet
def determine_alert(self, action_schedule, issue_creation_time, last_alert):
    """Decide whether an alert needs to be triggered now.

    Args:
        action_schedule (list): Alert schedule (duration strings).
        issue_creation_time (int): Epoch time the issue was created.
        last_alert (str): Duration string of the last alert sent.

    Returns:
        str or None: The schedule entry to alert on, or None when no
        alert is due.
    """
    issue_age = time.time() - issue_creation_time
    schedule_by_seconds = {
        pytimeparse.parse(entry): entry for entry in action_schedule
    }
    last_alert_seconds = pytimeparse.parse(last_alert)
    for seconds in sorted(schedule_by_seconds):
        if last_alert_seconds < seconds <= issue_age and last_alert_seconds != seconds:
            return schedule_by_seconds[seconds]
    return None
Determine if we need to trigger an alert Args: action_schedule (`list`): A list contains the alert schedule issue_creation_time (`int`): Time we create the issue last_alert (`str`): Time we sent the last alert Returns: (`None` or `str`) None if no alert should be sent. Otherwise return the alert we should send
juraj-google-style
def update(self, identity, params=None, headers=None):
    """Update a payment (accepts only the metadata parameter).

    Args:
        identity (string): Unique identifier, beginning with "PM".
        params (dict, optional): Request body.

    Returns:
        The resource built from the API response.
    """
    path = self._sub_url_params('/payments/:identity', {'identity': identity})
    body = {self._envelope_key(): params} if params is not None else None
    response = self._perform_request('PUT', path, body, headers,
                                     retry_failures=True)
    return self._resource_for(response)
Update a payment. Updates a payment object. This accepts only the metadata parameter. Args: identity (string): Unique identifier, beginning with "PM". params (dict, optional): Request body. Returns: ListResponse of Payment instances
codesearchnet
def parameter_attention(x, total_key_depth, total_value_depth, output_depth, memory_rows, num_heads, dropout_rate, name=None):
    """Multi-headed attention where keys/values are trainable parameters.

    No linear transformation is applied to the keys or values, and memory
    usage is kept low since memory_rows may be very large.

    Args:
        x: A Tensor with shape [batch, length_q, channels].
        total_key_depth: an integer.
        total_value_depth: an integer.
        output_depth: an integer.
        memory_rows: an integer.
        num_heads: an integer dividing total_key_depth and total_value_depth.
        dropout_rate: a floating point number.
        name: an optional string.

    Returns:
        A Tensor with shape [batch, length_q, output_depth].
    """
    with tf.variable_scope(name, default_name='parameter_attention', values=[x]):
        # NOTE(review): the two lines below are visibly truncated in this
        # source (the per-head division by num_heads was lost during
        # extraction, leaving unbalanced parentheses) — restore from the
        # original before use.
        head_size_k = (total_key_depth
        head_size_v = (total_value_depth
        var_shape_k = [num_heads, memory_rows, head_size_k]
        var_shape_v = [num_heads, memory_rows, head_size_v]
        k = tf.get_variable('k', var_shape_k, initializer=tf.random_normal_initializer(0, ((output_depth ** (- 0.5)) * (num_heads ** 0.5))))
        v = tf.get_variable('v', var_shape_v, initializer=tf.random_normal_initializer(0, ((output_depth ** (- 0.5)) * (output_depth ** 0.5))))
        batch_size = common_layers.shape_list(x)[0]
        length = common_layers.shape_list(x)[1]
        q = common_layers.dense(x, total_key_depth, use_bias=False, name='q_transform')
        if dropout_rate:
            # Drop entire memory rows of v, shared across the value depth.
            v = tf.nn.dropout(v, (1.0 - dropout_rate), noise_shape=[num_heads, memory_rows, 1])
        # Fold batch and length together so the attention matmul is a
        # single [heads, batch*length, rows] product.
        q = tf.reshape(q, [batch_size, length, num_heads, head_size_k])
        q = tf.transpose(q, [2, 0, 1, 3])
        q = tf.reshape(q, [num_heads, (batch_size * length), head_size_k])
        weights = tf.matmul(q, k, transpose_b=True)
        weights = tf.nn.softmax(weights)
        y = tf.matmul(weights, v)
        # Unfold back to [batch, length, total_value_depth].
        y = tf.reshape(y, [num_heads, batch_size, length, head_size_v])
        y = tf.transpose(y, [1, 2, 0, 3])
        y = tf.reshape(y, [batch_size, length, total_value_depth])
        y.set_shape([None, None, total_value_depth])
        y = common_layers.dense(y, output_depth, use_bias=False, name='output_transform')
        return y
Attention over parameters. We use the same multi-headed attention as in the other layers, but the memory keys and values are model parameters. There are no linear transformation on the keys or values. We are also a bit more careful about memory usage, since the number of memory positions may be very large. Args: x: a Tensor with shape [batch, length_q, channels] total_key_depth: an integer total_value_depth: an integer output_depth: an integer memory_rows: an integer num_heads: an integer dividing total_key_depth and total_value_depth dropout_rate: a floating point number name: an optional string Returns: A Tensor with shape [batch, length_q, output_depth].
codesearchnet
def fixings(self, date: types.DateTensor, fixing_type: curve_types.RateIndexCurve) -> Tuple[tf.Tensor, daycount_conventions.DayCountConventions]:
    """Return past fixings of the market rates at the specified dates.

    Fixings are annualized simple rates. When no fixings are provided for
    a curve they are assumed zero for any date; otherwise they form a
    left-continuous piecewise-constant function of time.

    Args:
        date: Dates at which fixings are computed (should precede the
            valuation date). As an integer `Tensor`: shape
            `batch_shape + [3]` holding `[year, month, day]`.
        fixing_type: Rate index curve type the fixings are computed for.

    Returns:
        A tuple of (fixings tensor shaped like `date` with dtype
        `self.dtype`, day count convention or None when absent).
    """
    index_type = fixing_type.index.type.value
    currency = fixing_type.currency.value
    if isinstance(date, tf.Tensor):
        date = dateslib.dates_from_tensor(date)
    else:
        date = dateslib.convert_to_date_tensor(date)
    try:
        curve_data = self._market_data_dict['rates'][currency][index_type]
        fixing_dates = curve_data['fixing_dates']
        fixing_rates = curve_data['fixing_rates']
    except KeyError:
        # No fixings supplied for this curve: treated as zero everywhere.
        return (tf.zeros(tf.shape(date.ordinal()), dtype=self._dtype, name='fixings'), None)
    if isinstance(fixing_dates, tf.Tensor):
        fixing_dates = dateslib.dates_from_tensor(fixing_dates)
    else:
        fixing_dates = dateslib.convert_to_date_tensor(fixing_dates)
    if 'fixing_daycount' not in curve_data:
        raise ValueError(f'`fixing_daycount` should be specified for {index_type}.')
    fixing_daycount = curve_data['fixing_daycount']
    fixing_daycount = daycount_conventions.DayCountConventions(fixing_daycount)
    fixing_rates = tf.convert_to_tensor(fixing_rates, dtype=self._dtype)
    fixing_dates_ordinal = fixing_dates.ordinal()
    date_ordinal = date.ordinal()
    batch_shape = tf.shape(date_ordinal)[:-1]
    # Broadcast the fixing dates across the batch dimensions of `date`.
    fixing_dates_ordinal += tf.expand_dims(tf.zeros(batch_shape, dtype=tf.int32), axis=-1)
    # Piecewise-constant lookup via binary search, clamped to the table.
    inds = tf.searchsorted(fixing_dates_ordinal, date_ordinal)
    inds = tf.maximum(inds, 0)
    inds = tf.minimum(inds, tf.shape(fixing_dates_ordinal)[-1] - 1)
    return (tf.gather(fixing_rates, inds), fixing_daycount)
Returns past fixings of the market rates at the specified dates. The fixings are represented asannualized simple rates. When fixings are not provided for a curve, they are assumed to be zero for any date. Otherwise, it is assumed that the fixings are a left-continuous piecewise-constant of time with jumps being the supplied fixings. Args: date: The dates at which the fixings are computed. Should precede the valuation date. When passed as an integet `Tensor`, should be of shape `batch_shape + [3]` and contain `[year, month, day]` for each date. fixing_type: Rate index curve type for which the fixings are computed. Returns: A `Tensor` of the same shape of `date` and of `self.dtype` dtype. Represents fixings at the requested `date`.
github-repos
def to_las3(self, use_descriptions=False, dlm=",", source="Striplog"):
    """Render this striplog as an LAS 3.0 Lithology section string.

    Args:
        use_descriptions (bool): Prefer descriptions over summaries when
            available.
        dlm (str): The delimiter.
        source (str): The source of the data.

    Returns:
        str: The Lithology section of an LAS3 file.
    """
    csv_data = self.to_csv(use_descriptions=use_descriptions,
                           dlm=dlm,
                           header=False)
    return templates.section.format(name='Lithology',
                                    short="LITH",
                                    source=source,
                                    data=csv_data)
Returns an LAS 3.0 section string. Args: use_descriptions (bool): Whether to use descriptions instead of summaries, if available. dlm (str): The delimiter. source (str): The sourse of the data. Returns: str: A string forming Lithology section of an LAS3 file.
juraj-google-style
def next_state_fluent_ordering(self) -> List[str]:
    """The list of next state-fluent names in canonical (name-sorted) order.

    Returns:
        List[str]: The fluent names, sorted lexicographically.
    """
    # Sorting the names directly is equivalent to sorting the CPFs by name
    # and then extracting each name.
    return sorted(cpf.name for cpf in self.state_cpfs)
The list of next state-fluent names in canonical order. Returns: List[str]: A list of fluent names.
codesearchnet
def items_purchased(self, category=None):
    """Aggregate the items this user has purchased.

    Args:
        category (Optional[models.inventory.Category]): The category of
            items to restrict to, or None for all categories.

    Returns:
        [ProductAndQuantity, ...]: Product-quantity pairs aggregating like
        products from across multiple invoices.
    """
    paid_status = commerce.Cart.STATUS_PAID
    return self._items(paid_status, category=category)
Aggregates the items that this user has purchased. Arguments: category (Optional[models.inventory.Category]): the category of items to restrict to. Returns: [ProductAndQuantity, ...]: A list of product-quantity pairs, aggregating like products from across multiple invoices.
juraj-google-style
def repository_tree(self, path='', ref='', recursive=False, **kwargs):
    """Return a list of files in the repository.

    Args:
        path (str): Path of the top folder (repository root by default).
        ref (str): Reference to a commit or branch.
        recursive (bool): Whether to fetch the tree recursively.
        **kwargs: Extra options forwarded to the server (pagination,
            sudo, etc.).

    Returns:
        list: The representation of the tree.

    Raises:
        GitlabAuthenticationError: If authentication fails.
        GitlabGetError: If the server fails to perform the request.
    """
    endpoint = '/projects/%s/repository/tree' % self.get_id()
    params = {'recursive': recursive}
    # Only forward `path`/`ref` when they are non-empty.
    for key, value in (('path', path), ('ref', ref)):
        if value:
            params[key] = value
    return self.manager.gitlab.http_list(endpoint, query_data=params, **kwargs)
Return a list of files in the repository. Args: path (str): Path of the top folder (/ by default) ref (str): Reference to a commit or branch recursive (bool): Whether to get the tree recursively all (bool): If True, return all the items, without pagination per_page (int): Number of items to retrieve per request page (int): ID of the page to return (starts with page 1) as_list (bool): If set to False and no pagination option is defined, return a generator instead of a list **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabGetError: If the server failed to perform the request Returns: list: The representation of the tree
codesearchnet
def _execute(self, command, data=None, unpack=True):
    """Execute a WebDriver command against the remote end.

    Args:
        command (Command): The defined command.
        data (dict): The URI variables and body; session_id is injected
            when the session is active.
        unpack (bool): Whether to return only the unwrapped value field
            instead of the whole result object.

    Returns:
        The unwrapped value from the JSON response, or the full
        WebDriverResult when ``unpack`` is False.
    """
    payload = data or {}
    if self.session_id is not None:
        payload.setdefault('session_id', self.session_id)
    payload = self._wrap_el(payload)
    raw = self.remote_invoker.execute(command, payload)
    result = WebDriverResult.from_object(raw)
    result.raise_for_status()
    result.value = self._unwrap_el(result.value)
    return result.value if unpack else result
Private method to execute a command. Args: command(Command): The defined command. data(dict): The URI variables and body. unpack(bool): Whether to unpack the value from the result. Returns: The unwrapped value field in the JSON response.
juraj-google-style
def to_json(self, **kwargs):
    """Return a JSON string containing the network configuration.

    The resulting string can be restored with
    `keras.models.model_from_json(json_string, custom_objects={...})`.

    Args:
        **kwargs: Additional keyword arguments passed to `json.dumps()`.

    Returns:
        A JSON string.
    """
    from keras.src.saving import serialization_lib
    config = serialization_lib.serialize_keras_object(self)
    return json.dumps(config, **kwargs)
Returns a JSON string containing the network configuration. To load a network from a JSON save file, use `keras.models.model_from_json(json_string, custom_objects={...})`. Args: **kwargs: Additional keyword arguments to be passed to `json.dumps()`. Returns: A JSON string.
github-repos
def search(self, query_term):
    """DEPRECATED. Search the Fedora Repository's SPARQL search endpoint.

    Args:
        query_term (str): String to search the repository for.

    Returns:
        rdflib.Graph: Graph parsed from the Turtle-formatted results.

    Raises:
        urllib.error.URLError: If the search request fails.
    """
    fedora_search_url = '/'.join([self.base_url, 'rest', 'fcr:search'])
    fedora_search_url = '{}?{}'.format(fedora_search_url, urllib.parse.urlencode({'q': query_term}))
    search_request = urllib.request.Request(fedora_search_url, method='GET')
    search_request.add_header('Accept', 'text/turtle')
    # Close the HTTP response deterministically (the original leaked it), and
    # let URLError propagate unchanged — the previous catch-and-re-raise of
    # the same exception was a no-op.
    with urllib.request.urlopen(search_request) as search_response:
        body = search_response.read()
    return rdflib.Graph().parse(data=body, format='turtle')
DEPRECATED. Method takes a query term, searches the Fedora Repository using the SPARQL search endpoint, and returns an RDF graph of the search results. Args: query_term(str): String to search the repository for Returns: rdflib.Graph()
codesearchnet
def _select_in_voltage_range(self, min_voltage=None, max_voltage=None): min_voltage = (min_voltage if (min_voltage is not None) else self.min_voltage) max_voltage = (max_voltage if (max_voltage is not None) else self.max_voltage) return list(filter((lambda p: (min_voltage <= p.voltage <= max_voltage)), self.voltage_pairs))
Selects VoltagePairs within a certain voltage range. Args: min_voltage (float): The minimum allowable voltage for a given step. max_voltage (float): The maximum allowable voltage for a given step. Returns: A list of VoltagePair objects
codesearchnet
def from_rfc3339(cls, stamp):
    """Parse an RFC 3339-compliant timestamp, preserving nanoseconds.

    Args:
        stamp (str): RFC 3339 stamp, with up to nanosecond precision.

    Returns:
        :class:`DatetimeWithNanoseconds`: an instance matching the
        timestamp string.

    Raises:
        ValueError: if `stamp` does not match the expected format.
    """
    match = _RFC3339_NANOS.match(stamp)
    if match is None:
        raise ValueError(
            "Timestamp: {}, does not match pattern: {}".format(
                stamp, _RFC3339_NANOS.pattern
            )
        )
    # Parse the whole-second part, then widen the fractional digits (if any)
    # to a nanosecond count.
    whole = datetime.datetime.strptime(
        match.group("no_fraction"), _RFC3339_NO_FRACTION
    )
    fraction = match.group("nanos")
    nanos = 0 if fraction is None else int(fraction) * 10 ** (9 - len(fraction))
    return cls(
        whole.year,
        whole.month,
        whole.day,
        whole.hour,
        whole.minute,
        whole.second,
        nanosecond=nanos,
        tzinfo=pytz.UTC,
    )
Parse RFC 3339-compliant timestamp, preserving nanoseconds. Args: stamp (str): RFC 3339 stamp, with up to nanosecond precision Returns: :class:`DatetimeWithNanoseconds`: an instance matching the timestamp string Raises: ValueError: if `stamp` does not match the expected format
juraj-google-style
def tf2():
    """Provide the root module of a TF-2.0 API for use within TensorBoard.

    Returns:
        The root module of a TF-2.0 API, if available.

    Raises:
        ImportError: if a TF-2.0 API is not available.
    """
    # Native TF 2.x: the module itself is the 2.0 API.
    if tf.__version__.startswith('2.'):
        return tf
    # TF 1.x with a compat shim exposing the 2.0 surface.
    has_v2 = hasattr(tf, 'compat') and hasattr(tf.compat, 'v2')
    if has_v2:
        return tf.compat.v2
    raise ImportError('cannot import tensorflow 2.0 API')
Provide the root module of a TF-2.0 API for use within TensorBoard. Returns: The root module of a TF-2.0 API, if available. Raises: ImportError: if a TF-2.0 API is not available.
codesearchnet
def _starts_with_drive_letter(self, file_path): colon = self._matching_string(file_path, ':') return (self.is_windows_fs and (len(file_path) >= 2) and file_path[:1].isalpha and (file_path[1:2] == colon))
Return True if file_path starts with a drive letter. Args: file_path: the full path to be examined. Returns: `True` if drive letter support is enabled in the filesystem and the path starts with a drive letter.
codesearchnet
def add_timeline_to_sketch(self, sketch_id, index_id):
    """Associate the specified timeline with a sketch.

    Args:
        sketch_id (int): ID of the sketch.
        index_id (int): ID of the timeline to add to the sketch.
    """
    endpoint = '{0:s}/sketches/{1:d}/timelines/'.format(self.api_base_url, sketch_id)
    payload = {'timeline': [index_id]}
    self.session.post(endpoint, json=payload)
Associate the specified timeline and sketch. Args: sketch_id (int): ID of sketch index_id (int): ID of timeline to add to sketch
codesearchnet
def get_most_severe_consequence(transcripts):
    """Get the most severe consequence across all transcripts.

    Each transcript's '&'-separated consequence terms are scored with
    SEVERITY_DICT (a lower score means more severe) and the single most
    severe term found is returned.

    Args:
        transcripts (list): A list of transcript dicts, each carrying a
            'consequence' string of '&'-separated terms.

    Returns:
        most_severe_consequence (str): The most severe consequence, or
            None when no term in any transcript has a severity score.
    """
    most_severe_consequence = None
    most_severe_score = None
    for transcript in transcripts:
        for consequence in transcript['consequence'].split('&'):
            logger.debug("Checking severity score for consequence: {0}".format(consequence))
            severity_score = SEVERITY_DICT.get(consequence)
            logger.debug("Severity score found: {0}".format(severity_score))
            # Terms absent from SEVERITY_DICT are ignored.
            if severity_score is not None:
                # Bug fix: the original tested `if most_severe_score:` (truthy),
                # so a legitimate best score of 0 was treated as "no score yet"
                # and unconditionally overwritten by any later term.
                if most_severe_score is None or severity_score < most_severe_score:
                    most_severe_consequence = consequence
                    most_severe_score = severity_score
    return most_severe_consequence
Get the most severe consequence. Go through all transcripts and get the most severe consequence. Args: transcripts (list): A list of transcripts to evaluate Returns: most_severe_consequence (str): The most severe consequence
codesearchnet
def to_numpy_array(self, image, rescale=None, channel_first=True):
    """Converts `image` to a numpy array, optionally rescaling pixel values
    and putting the channel dimension first.

    Args:
        image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
            The image to convert to a NumPy array.
        rescale (`bool`, *optional*):
            Whether to multiply pixel values by 1/255. Defaults to `True`
            when the converted array holds integers, `False` otherwise.
        channel_first (`bool`, *optional*, defaults to `True`):
            Whether to transpose a 3-dim image so the channel dimension
            comes first (HWC -> CHW).

    Returns:
        `np.ndarray`: the converted image.
    """
    self._ensure_format_supported(image)
    # Convert PIL images and torch tensors to plain numpy arrays first, so
    # the dtype-based rescale default below sees the final array type.
    if isinstance(image, PIL.Image.Image):
        image = np.array(image)
    if is_torch_tensor(image):
        image = image.numpy()
    # Default: rescale integer-valued images (presumably 0-255 pixel data)
    # into floats.
    rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale
    if rescale:
        image = self.rescale(image.astype(np.float32), 1 / 255.0)
    # HWC -> CHW; non-3-dim (e.g. grayscale 2-dim) arrays are left as-is.
    if channel_first and image.ndim == 3:
        image = image.transpose(2, 0, 1)
    return image
Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first dimension. Args: image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): The image to convert to a NumPy array. rescale (`bool`, *optional*): Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will default to `True` if the image is a PIL Image or an array/tensor of integers, `False` otherwise. channel_first (`bool`, *optional*, defaults to `True`): Whether or not to permute the dimensions of the image to put the channel dimension first.
github-repos