code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
# Prompt-lookup candidate generator: scans the prompt for earlier occurrences of
# its trailing n-gram and proposes the tokens that followed the match as draft
# continuation tokens. Returns (candidate_ids, None) -- the heuristic produces
# no candidate logits. Tries the longest n-gram first (self.max_matching_ngram_size
# down to 1); the first window match wins. Candidates are clipped at the prompt
# end and at self.max_length, and truncated at the first EOS token found in the
# proposed span (isin_mps_friendly + torch.nonzero).
# NOTE(review): indexes input_ids[0] throughout, so this assumes batch_size == 1
# -- confirm against callers.
def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]: input_length = input_ids.size(1) if self.max_length == input_length + 1: return (input_ids, None) chosen_ids = None match_found = False for ngram_size in range(min(self.max_matching_ngram_size, input_length - 1), 0, -1): windows = input_ids.unfold(dimension=1, size=ngram_size, step=1) ngram_tensor = input_ids[0, -ngram_size:] matches = (windows == ngram_tensor).all(dim=2) match_indices = matches.nonzero(as_tuple=True)[1] for idx in match_indices: start_idx = idx + ngram_size end_idx = start_idx + self.num_output_tokens end_idx = min(end_idx, input_length, self.max_length) if start_idx < end_idx: chosen_ids = input_ids[0, start_idx:end_idx] match_found = True mask = isin_mps_friendly(chosen_ids, self.eos_token_id) match_indices_eos = torch.nonzero(mask) if match_indices_eos.numel() > 0: first_eos_index = match_indices_eos[0].item() chosen_ids = chosen_ids[:first_eos_index] break if match_found: break if chosen_ids is None or len(chosen_ids) == 0: return (input_ids, None) chosen_ids = chosen_ids.unsqueeze(0) candidate_input_ids = torch.cat((input_ids, chosen_ids), dim=1) return (candidate_input_ids, None)
Fetches the candidates to be tried for the current input. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) Return: `torch.LongTensor` of shape `(num_candidates, candidate_length)`: The candidate sequences to be tried.
github-repos
def _BuildEventData(self, record):
    """Build an FseventsdEventData container from a parsed record.

    Args:
        record (dls_record_v1|dls_record_v2): parsed record structure.

    Returns:
        FseventsdEventData: event data attribute container.
    """
    event_data = FseventsdEventData()
    # node_identifier only exists on some record versions; default to None.
    event_data.node_identifier = getattr(record, 'node_identifier', None)
    event_data.event_identifier = record.event_identifier
    event_data.flags = record.event_flags
    event_data.path = record.path
    return event_data
Builds an FseventsdData object from a parsed structure. Args: record (dls_record_v1|dls_record_v2): parsed record structure. Returns: FseventsdEventData: event data attribute container.
codesearchnet
def delete_note(self, note_id):
    """Permanently delete a note.

    The note is first moved to the trash; if that fails, the trash
    result is returned unchanged.

    Arguments:
        - note_id (string): key of the note to delete.

    Returns:
        A tuple `(note, status)`:
        - note (dict): an empty dict or an error message.
        - status (int): 0 on success and -1 otherwise.

    Raises:
        SimplenoteLoginFailed: if the API responds with HTTP 401.
    """
    note, status = self.trash_note(note_id)
    if status == -1:
        return note, status
    params = '/i/%s' % (str(note_id))
    request = Request(url=DATA_URL + params, method='DELETE')
    request.add_header(self.header, self.get_token())
    try:
        response = urllib2.urlopen(request)
    # Bug fix: HTTPError subclasses IOError, so it must be caught first.
    # The previous ordering made this branch -- including the 401 login
    # failure -- unreachable.
    except HTTPError as e:
        if e.code == 401:
            raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
        else:
            return e, -1
    except IOError as e:
        return e, -1
    return {}, 0
Method to permanently delete a note Arguments: - note_id (string): key of the note to delete Returns: A tuple `(note, status)` - note (dict): an empty dict or an error message - status (int): 0 on success and -1 otherwise
juraj-google-style
def __init__(self, name): self.name = name self.edges_in = set() self.edges_out = set()
Initialization method. Args: name (str): name of the vertex.
juraj-google-style
def RegisterCredentials(cls, credentials):
    """Register a path specification credentials object.

    Args:
        credentials (Credentials): credentials.

    Raises:
        KeyError: if a credentials object is already set for the
            corresponding type indicator.
    """
    type_indicator = credentials.type_indicator
    if type_indicator in cls._credentials:
        raise KeyError(
            'Credentials object already set for type indicator: {0:s}.'.format(
                type_indicator))
    cls._credentials[type_indicator] = credentials
Registers a path specification credentials. Args: credentials (Credentials): credentials. Raises: KeyError: if credentials object is already set for the corresponding type indicator.
codesearchnet
def _get_value(self, scalar_data_blob, dtype_enum):
    """Obtain the value for a scalar event given its blob and dtype enum.

    Args:
        scalar_data_blob: The blob obtained from the database.
        dtype_enum: The enum representing the dtype.

    Returns:
        The scalar value.
    """
    tensorflow_dtype = tf.DType(dtype_enum)
    buf = np.frombuffer(scalar_data_blob, dtype=tensorflow_dtype.as_numpy_dtype)
    # Bug fix: np.asscalar was deprecated in NumPy 1.16 and removed in
    # 1.23; ndarray.item() is the supported equivalent for a size-1 array.
    return buf.item()
Obtains value for scalar event given blob and dtype enum. Args: scalar_data_blob: The blob obtained from the database. dtype_enum: The enum representing the dtype. Returns: The scalar value.
codesearchnet
# Invalidates already-scheduled work that descends from a failed batch so it can
# be replayed: for each transaction in the batch, walks every currently scheduled
# possible successor and either (a) drops its stored result, removes it from the
# schedule and returns it to the available pool, or (b) marks it outstanding when
# it has no result yet. `seen` accumulates visited successors so
# _is_txn_to_replay can chain through successors-of-successors. Iterates over a
# copy of self._scheduled because the loop body mutates that set.
# NOTE(review): assumes the caller holds whatever lock guards this scheduler
# state -- confirm at call sites.
def _remove_subsequent_result_because_of_batch_failure(self, sig): batch = self._batches_by_txn_id[sig] seen = [] for txn in batch.transactions: txn_id = txn.header_signature for poss_successor in self._scheduled.copy(): if (not self.is_transaction_in_schedule(poss_successor)): continue if self._is_txn_to_replay(txn_id, poss_successor, seen): if self._txn_has_result(poss_successor): del self._txn_results[poss_successor] self._scheduled.remove(poss_successor) self._txns_available[poss_successor] = self._transactions[poss_successor] else: self._outstanding.add(poss_successor) seen.append(poss_successor)
Remove transactions from scheduled and txn_results for successors of txns in a failed batch. These transactions will now, or in the future be rescheduled in next_transaction; giving a replay ability. Args: sig (str): Transaction header signature
codesearchnet
def repeat(sequence):
    """Return a driver function that endlessly cycles through ``sequence``.

    .. code-block:: none

        seq = [0, 1, 2, 3]

        # repeat(seq) => [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, ...]

    Args:
        sequence (seq) : a sequence of values for the driver to repeat
    """
    period = len(sequence)

    def lookup(step):
        # Wrap the step index so the sequence repeats forever.
        return sequence[step % period]

    return partial(force, sequence=_advance(lookup))
Return a driver function that can advance a repeating sequence of values. .. code-block:: none seq = [0, 1, 2, 3] # repeat(seq) => [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, ...] Args: sequence (seq) : a sequence of values for the driver to repeat
codesearchnet
def unflatten(guide, falttened_input):
    """Rebuild a nested list from a flat iterator, following ``guide``.

    Args:
        guide: A nested list giving the structure to reproduce.
        falttened_input: A flattened iterator object.

    Usage:
        guide = [["a"], ["b","c","d"], [["e"]], ["f"]]
        input_list = [0, 1, 2, 3, 4, 5, 6, 7]
        unflatten(guide, iter(input_list))
        >> [[0], [1, 2, 3], [[4]], [5]]
    """
    result = []
    for element in guide:
        if isinstance(element, list):
            # Recurse to mirror the nested structure.
            result.append(unflatten(element, falttened_input))
        else:
            # Leaf position: consume the next flat value.
            result.append(next(falttened_input))
    return result
Unflatten a flattened iterator, following the structure of a guide list. Args: guide: A guide list to follow the structure falttened_input: A flattened iterator object Usage: guide = [["a"], ["b","c","d"], [["e"]], ["f"]] input_list = [0, 1, 2, 3, 4, 5, 6, 7] unflatten(guide, iter(input_list)) >> [[0], [1, 2, 3], [[4]], [5]]
juraj-google-style
def _get_access_token():
    """Attempt to get the access token from the environment.

    Tries the current environment variable first, then the legacy ones.
    If the token is found in a legacy variable, a deprecation warning is
    emitted.

    Returns:
        The access token found in the environment (str), or None.
    """
    access_token = os.environ.get(ACCESS_TOKEN_ENVIRONMENT_VARIABLE)
    if access_token:
        return access_token
    for access_token_variable in LEGACY_ACCESS_TOKEN_ENVIRONMENT_VARIABLES:
        access_token = os.environ.get(access_token_variable)
        if access_token:
            # Bug fix: the warning previously interpolated the token *value*
            # (`legacy=access_token`) instead of the legacy variable *name*,
            # leaking the secret into warning output.
            env_var_deprecation_warning = PendingDeprecationWarning(
                'Use of the `{legacy}` environment variable will be deprecated '
                'in the future. Please update your environment(s) to use the '
                'new `{new}` environment variable.'.format(
                    legacy=access_token_variable,
                    new=ACCESS_TOKEN_ENVIRONMENT_VARIABLE))
            warnings.warn(env_var_deprecation_warning)
            return access_token
Attempt to get the access token from the environment. Try using the current and legacy environment variables. If the access token is found in a legacy environment variable, raise a deprecation warning. Returns: The access token found in the environment (str), or None.
codesearchnet
def SetModifyTimestamp(self, value):
    """Set the last modify timestamp of this map.

    Args:
        value: An integer containing the number of seconds since epoch,
            or None.

    Raises:
        TypeError: The argument is not an int or None.
    """
    # Guard clause: reject anything that is neither None nor an int.
    if not (value is None or isinstance(value, int)):
        raise TypeError('timestamp can only be int or None, not %r' % value)
    self._last_modification_timestamp = value
Set the last modify timestamp of this map. Args: value: An integer containing the number of seconds since epoch, or None. Raises: TypeError: The argument is not an int or None.
github-repos
# Shared core of the tf.nn dropout variants: validates `rate`, scales the kept
# activations by 1 / (1 - rate), then zeroes elements where a uniform sample in
# [0, 1) falls below `rate`. When `rate` is the Python number 0 the whole
# computation is skipped, but dummy_rng_step() is still invoked so the sampler's
# RNG state does not depend on which path was taken. A scalar-tensor `rate` is
# cast to x.dtype when the dtypes are compatible; non-scalar or out-of-range
# rates and non-floating `x` raise ValueError. In graph mode the static shape
# is re-attached at the end because where_v2 can lose it.
def _dropout(x, rate, noise_shape, uniform_sampler, dummy_rng_step, name, default_name): with ops.name_scope(name, default_name, [x]) as name: is_rate_number = isinstance(rate, numbers.Real) if is_rate_number and (rate < 0 or rate >= 1): raise ValueError(f'`rate` must be a scalar tensor or a float in the range [0, 1). Received: rate={rate}') x = ops.convert_to_tensor(x, name='x') x_dtype = x.dtype if not x_dtype.is_floating: raise ValueError(f'`x.dtype` must be a floating point tensor as `x` will be scaled. Received: x_dtype={x_dtype}') if is_rate_number and rate == 0: dummy_rng_step() return x is_executing_eagerly = context.executing_eagerly() if not tensor_util.is_tf_type(rate): if is_rate_number: keep_prob = 1 - rate scale = 1 / keep_prob scale = ops.convert_to_tensor(scale, dtype=x_dtype) ret = gen_math_ops.mul(x, scale) else: raise ValueError(f'`rate` must be a scalar or scalar tensor. Received: rate={rate}') else: rate.get_shape().assert_has_rank(0) rate_dtype = rate.dtype if rate_dtype != x_dtype: if not rate_dtype.is_compatible_with(x_dtype): raise ValueError(f'`x.dtype` must be compatible with `rate.dtype`. Received: x.dtype={x_dtype} and rate.dtype={rate_dtype}') rate = gen_math_ops.cast(rate, x_dtype, name='rate') one_tensor = constant_op.constant(1, dtype=x_dtype) ret = gen_math_ops.real_div(x, gen_math_ops.sub(one_tensor, rate)) noise_shape = _get_noise_shape(x, noise_shape) random_tensor = uniform_sampler(shape=noise_shape, dtype=x_dtype) keep_mask = random_tensor >= rate zero_tensor = constant_op.constant(0, dtype=x_dtype) ret = array_ops.where_v2(keep_mask, ret, zero_tensor) if not is_executing_eagerly: ret.set_shape(x.get_shape()) return ret
Shared implementation of the various dropout functions. Args: x: same as the namesake in `dropout_v2`. rate: same as the namesake in `dropout_v2`. noise_shape: same as the namesake in `dropout_v2`. uniform_sampler: a callable of signature `(shape, dtype) -> Tensor`, used to generate a tensor of uniformly-distributed random numbers in the range `[0, 1)`, of the given shape and dtype. dummy_rng_step: a callable of signature `() -> None`, to make a dummy RNG call in the fast path. In the fast path where rate is 0, we don't need to generate random numbers, but some samplers still require you to make an RNG call, to make sure that RNG states won't depend on whether the fast path is taken. name: same as the namesake in `dropout_v2`. default_name: a default name in case `name` is `None`. Returns: A Tensor of the same shape and dtype of `x`.
github-repos
def encode_boxes(self, text: Union[TextInput, PreTokenizedInput, EncodedInput], text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]]=None, boxes: Optional[List[List[int]]]=None, word_labels: Optional[List[List[int]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, return_tensors: Optional[Union[str, TensorType]]=None, **kwargs) -> List[int]:
    """Convert a string (with word boxes / labels) to a sequence of ids.

    Same as doing `self.convert_tokens_to_ids(self.tokenize(text))` and is
    equivalent to ``self.encode_plus_boxes(...)['input_ids']``.

    Args:
        text (`str`, `List[str]` or `List[int]`):
            The first sequence to be encoded. This can be a string, a list
            of strings (tokenized string using the `tokenize` method) or a
            list of integers (tokenized string ids using the
            `convert_tokens_to_ids` method).
        text_pair (`str`, `List[str]` or `List[int]`, *optional*):
            Optional second sequence to be encoded, in the same formats.
    """
    # Delegate the full encoding and keep only the token ids.
    full_encoding = self.encode_plus_boxes(
        text,
        text_pair=text_pair,
        boxes=boxes,
        word_labels=word_labels,
        add_special_tokens=add_special_tokens,
        padding=padding,
        truncation=truncation,
        max_length=max_length,
        stride=stride,
        return_tensors=return_tensors,
        **kwargs,
    )
    return full_encoding['input_ids']
Args: Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary. Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`. text (`str`, `List[str]` or `List[int]`): The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). text_pair (`str`, `List[str]` or `List[int]`, *optional*): Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method).
github-repos
def noisy_moment(self, moment: 'cirq.Moment', system_qubits: Sequence['cirq.Qid']) -> 'cirq.OP_TREE':
    """Adds noise to the operations from a moment.

    Args:
        moment: The moment to add noise to.
        system_qubits: A list of all qubits in the system.

    Returns:
        An OP_TREE corresponding to the noisy operations for the moment.
    """
    # Base-class implementations of the sibling hooks carry a
    # `_not_overridden` marker; delegate to whichever hook the subclass
    # actually overrode.
    moments_hook_overridden = not hasattr(self.noisy_moments, '_not_overridden')
    if moments_hook_overridden:
        return self.noisy_moments([moment], system_qubits)
    operation_hook_overridden = not hasattr(self.noisy_operation, '_not_overridden')
    if operation_hook_overridden:
        return [self.noisy_operation(op) for op in moment]
    assert False, 'Should be unreachable.'
Adds noise to the operations from a moment. Args: moment: The moment to add noise to. system_qubits: A list of all qubits in the system. Returns: An OP_TREE corresponding to the noisy operations for the moment.
juraj-google-style
def __create_and_save_state(cls, job_config, mapreduce_spec):
    """Create the initial MapreduceState and persist it to datastore.

    Saving immediately makes the job visible in the UI before any shard
    has started.

    Args:
        job_config: map_job.JobConfig.
        mapreduce_spec: model.MapreduceSpec.

    Returns:
        model.MapreduceState for this job.
    """
    state = model.MapreduceState.create_new(job_config.job_id)
    state.mapreduce_spec = mapreduce_spec
    state.app_id = job_config._app
    # The job is live but no shards have been started yet.
    state.active = True
    state.active_shards = 0
    write_config = datastore_rpc.Configuration(
        force_writes=job_config._force_writes)
    state.put(config=write_config)
    return state
Save map job state to datastore. Save state to datastore so that UI can see it immediately. Args: job_config: map_job.JobConfig. mapreduce_spec: model.MapreduceSpec. Returns: model.MapreduceState for this job.
juraj-google-style
def __init__(self, shape, scope='distribution', summary_labels=None):
    """Distribution base constructor.

    Args:
        shape: Action shape.
        scope: TensorFlow variable scope name prefix for the templates.
        summary_labels: Iterable of summary labels; include 'variables'
            to emit histograms of created variables.
    """
    self.shape = shape
    self.scope = scope
    self.summary_labels = set(summary_labels or ())

    # Registries of variables created inside the templates below.
    self.variables = dict()
    self.all_variables = dict()

    def custom_getter(getter, name, registered=False, **kwargs):
        # Tag variables we fetch so nested calls skip re-registration.
        variable = getter(name=name, registered=True, **kwargs)
        if registered:
            return variable
        trainable = kwargs.get('trainable', True)
        if name in self.all_variables:
            # Template re-use must hand back the exact same variable.
            assert variable is self.all_variables[name]
            if trainable:
                assert variable is self.variables[name]
        else:
            self.all_variables[name] = variable
            if trainable:
                self.variables[name] = variable
        if 'variables' in self.summary_labels:
            tf.contrib.summary.histogram(name=name, tensor=variable)
        return variable

    def _template(suffix, func):
        # Each tf_* implementation is wrapped in a template sharing the
        # custom getter so variables are tracked and reused.
        return tf.make_template(
            name_=(scope + '/' + suffix),
            func_=func,
            custom_getter_=custom_getter
        )

    self.parameterize = _template('parameterize', self.tf_parameterize)
    self.sample = _template('sample', self.tf_sample)
    self.log_probability = _template('log-probability', self.tf_log_probability)
    self.entropy = _template('entropy', self.tf_entropy)
    self.kl_divergence = _template('kl-divergence', self.tf_kl_divergence)
    self.regularization_loss = _template(
        'regularization-loss', self.tf_regularization_loss)
Distribution. Args: shape: Action shape.
juraj-google-style
def set_xml(self, diagram, force=False):
    """Update the workflow XML unless running instances exist.

    Args:
        diagram: XMLDiagram object.
        force (bool): update even if running instances exist.

    Raises:
        RunningInstancesExist: if unfinished, started instances exist
            and ``force`` is False.
    """
    running = WFInstance.objects.filter(
        wf=self, finished=False, started=True).count()
    # Guard clause: refuse the update while instances are in flight.
    if running and not force:
        raise RunningInstancesExist(
            "Can't update WF diagram! Running %s WF instances exists for %s" % (
                running, self.name))
    self.xml = diagram
    parser = BPMNParser(diagram.body)
    self.description = parser.get_description()
    self.title = parser.get_name() or self.name.replace('_', ' ').title()
    extensions = dict(parser.get_wf_extensions())
    self.programmable = extensions.get('programmable', False)
    self.task_type = extensions.get('task_type', None)
    self.menu_category = extensions.get(
        'menu_category', settings.DEFAULT_WF_CATEGORY_NAME)
    self.save()
updates xml link if there aren't any running instances of this wf Args: diagram: XMLDiagram object
juraj-google-style
def __init__(self, sbn):
    """Initialise a new ``Sbn`` object.

    Args:
        sbn (str): SBN string.
    """
    # An SBN is the nine-digit precursor of ISBN-10; a leading '0'
    # converts it to ISBN form before delegating to the parent.
    super(Sbn, self).__init__('0' + sbn)
Initialise a new ``Sbn`` object. Args: sbn (str): SBN string
juraj-google-style
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    """Build model inputs from a sequence or a pair of sequence for sequence
    classification tasks by concatenating and adding special tokens. A BERT
    sequence has the following format:

    - single sequence: `[CLS] X [SEP]`
    - pair of sequences: `[CLS] A [SEP] B [SEP]`

    Args:
        token_ids_0 (`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: List of [input IDs](../glossary#input-ids) with the
        appropriate special tokens.
    """
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    # Bug fix: the previous truthiness test (`if token_ids_1:`) silently
    # dropped an *empty* second sequence; comparing against None keeps
    # the pair format `[CLS] A [SEP] B [SEP]` even when B is empty.
    return ([self.cls_token_id] + token_ids_0 + [self.sep_token_id]
            + token_ids_1 + [self.sep_token_id])
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A BERT sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
def plot_state_histogram(result: trial_result.TrialResult) -> np.ndarray:
    """Plot the state histogram from a single result with repetitions.

    States is a bitstring representation of all the qubit states in a
    single result. Currently this function assumes each measurement gate
    applies to only a single qubit.

    Args:
        result: The trial results to plot.

    Returns:
        The histogram. A list of values plotted on the y-axis.
    """
    import matplotlib.pyplot as plt

    num_qubits = len(result.measurements.keys())
    num_states = 2 ** num_qubits
    counts = np.zeros(num_states)
    # Stack the per-gate measurement columns into one row per repetition.
    measurement_by_result = np.array(
        [v.transpose()[0] for k, v in result.measurements.items()]).transpose()
    for bits in measurement_by_result:
        # Interpret each repetition's bits as a binary state index.
        state_index = int(''.join(str(int(bit)) for bit in bits), 2)
        counts[state_index] += 1
    tick_labels = [bin(i)[2:].zfill(num_qubits) for i in range(num_states)]
    plt.bar(np.arange(num_states), counts, tick_label=tick_labels)
    plt.xlabel('qubit state')
    plt.ylabel('result count')
    plt.show()
    return counts
Plot the state histogram from a single result with repetitions. States is a bitstring representation of all the qubit states in a single result. Currently this function assumes each measurement gate applies to only a single qubit. Args: result: The trial results to plot. Returns: The histogram. A list of values plotted on the y-axis.
juraj-google-style
def get_enterprise_customer_user(user_id, enterprise_uuid):
    """Return the EnterpriseCustomerUser for a user/enterprise pair, or None.

    Arguments:
        user_id (str): user identifier.
        enterprise_uuid (UUID): Universally unique identifier for the
            enterprise customer.

    Returns:
        (EnterpriseCustomerUser): enterprise customer user record, or
        None when no matching record exists.
    """
    EnterpriseCustomerUser = apps.get_model('enterprise', 'EnterpriseCustomerUser')
    try:
        return EnterpriseCustomerUser.objects.get(
            user_id=user_id,
            enterprise_customer__uuid=enterprise_uuid,
        )
    except EnterpriseCustomerUser.DoesNotExist:
        # Absence is an expected outcome, not an error.
        return None
Return the object for EnterpriseCustomerUser. Arguments: user_id (str): user identifier enterprise_uuid (UUID): Universally unique identifier for the enterprise customer. Returns: (EnterpriseCustomerUser): enterprise customer user record
juraj-google-style
def get(self):
    """API endpoint to retrieve a list of links to transaction outputs.

    Returns:
        A :obj:`list` of :cls:`str` of links to outputs.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('public_key', type=parameters.valid_ed25519, required=True)
    parser.add_argument('spent', type=parameters.valid_bool)
    args = parser.parse_args(strict=True)
    pool = current_app.config['bigchain_pool']
    with pool() as bigchain:
        matching_outputs = bigchain.get_outputs_filtered(
            args['public_key'], args['spent'])
    return [
        {'transaction_id': out.txid, 'output_index': out.output}
        for out in matching_outputs
    ]
API endpoint to retrieve a list of links to transaction outputs. Returns: A :obj:`list` of :cls:`str` of links to outputs.
codesearchnet
def path_is_empty(p: tcod.path.AStar) -> bool:
    """Return True if a path is empty.

    Args:
        p (AStar): An AStar instance.

    Returns:
        bool: True if a path is empty. Otherwise False.
    """
    # The C call returns an int; normalize it to a Python bool.
    is_empty = lib.TCOD_path_is_empty(p._path_c)
    return bool(is_empty)
Return True if a path is empty. Args: p (AStar): An AStar instance. Returns: bool: True if a path is empty. Otherwise False.
codesearchnet
def ToParameter(item: StackItem):
    """Convert a StackItem to a ContractParameter object.

    Args:
        item (neo.VM.InteropService.StackItem): the item to convert.

    Returns:
        ContractParameter: the converted parameter, or None when the
        stack item is not one of the handled kinds (implicit
        fall-through, preserved from the original behaviour).
    """
    if isinstance(item, (Array, Struct)):
        # Convert container items element by element.
        converted = [ContractParameter.ToParameter(element)
                     for element in item.GetArray()]
        return ContractParameter(type=ContractParameterType.Array, value=converted)
    if isinstance(item, Boolean):
        return ContractParameter(type=ContractParameterType.Boolean,
                                 value=item.GetBoolean())
    if isinstance(item, ByteArray):
        return ContractParameter(type=ContractParameterType.ByteArray,
                                 value=item.GetByteArray())
    if isinstance(item, Integer):
        return ContractParameter(type=ContractParameterType.Integer,
                                 value=str(item.GetBigInteger()))
    if isinstance(item, InteropInterface):
        return ContractParameter(type=ContractParameterType.InteropInterface,
                                 value=item.GetInterface())
Convert a StackItem to a ContractParameter object Args: item (neo.VM.InteropService.StackItem) The item to convert to a ContractParameter object Returns: ContractParameter
codesearchnet
def mixins(self, name):
    """Search mixins for name, retrying with '>' combinators ignored.

    '.a .b()' is treated the same as '.a > .b()'.

    Args:
        name (string): Search term

    Returns:
        Mixin object list OR False
    """
    found = self._smixins(name)
    if found:
        return found
    # '?>?' is the parser's marker for the child combinator; drop it
    # and retry with a plain descendant selector.
    return self._smixins(name.replace('?>?', ' '))
Search mixins for name. Allow '>' to be ignored. '.a .b()' == '.a > .b()' Args: name (string): Search term Returns: Mixin object list OR False
juraj-google-style
def _wrap_result(self, response):
    """Wrap the child handler's response in a HandlerResult.

    Args:
        response (enum or dict): either an integer status enum, or a
            dict of attributes to be added to the protobuf response.
    """
    # A bare status enum first gets expanded into response attributes.
    attributes = self._wrap_response(response) if isinstance(response, int) else response
    return HandlerResult(
        status=HandlerStatus.RETURN,
        message_out=self._response_proto(**attributes),
        message_type=self._response_type)
Wraps child's response in a HandlerResult to be sent back to client. Args: response (enum or dict): Either an integer status enum, or a dict of attributes to be added to the protobuf response.
codesearchnet
def output_reference(self, name):
    """Return a Reference to the named output of this Step.

    For a Step named ``echo`` that has an output called ``echoed``, the
    reference ``echo/echoed`` is returned, for use in an input of a
    next Step.

    Args:
        name (str): the name of the Step output.

    Raises:
        ValueError: The name provided is not a valid output name for
            this Step.
    """
    if name in self.output_names:
        return Reference(step_name=self.name_in_workflow, output_name=name)
    raise ValueError('Invalid output "{}"'.format(name))
Return a reference to the given output for use in an input of a next Step. For a Step named `echo` that has an output called `echoed`, the reference `echo/echoed` is returned. Args: name (str): the name of the Step output Raises: ValueError: The name provided is not a valid output name for this Step.
juraj-google-style
def register_many(self, *args):
    """Register several configuration names at once.

    Arguments:
        *args: Config names as strings.

    Returns:
        list: List of registered configs.
    """
    # Delegate each name to register() and collect the results in order.
    return [self.register(name) for name in args]
Register many configuration names. Arguments: *args: Config names as strings. Returns: list: List of registered configs.
codesearchnet
def call(self, inputs):
    """Run the model to produce an intermediate representation of x_t.

    Args:
        inputs: A batch of image sequences `x_{1:T}` of shape
            `[sample_shape, batch_size, timesteps, height, width, channels]`.

    Returns:
        A batch of intermediate representations of shape
        [sample_shape, batch_size, timesteps, hidden_size].
    """
    # Collapse all leading dimensions into one batch axis so the conv
    # stack sees a standard [N, H, W, C] tensor.
    image_shape = tf.shape(input=inputs)[(- 3):]
    collapsed_shape = tf.concat(([(- 1)], image_shape), axis=0)
    hidden = tf.reshape(inputs, collapsed_shape)
    for conv_layer in (self.conv1, self.conv2, self.conv3, self.conv4):
        hidden = conv_layer(hidden)
    # Restore the leading dimensions, flattening the conv output per step.
    expanded_shape = tf.concat((tf.shape(input=inputs)[:(- 3)], [(- 1)]), axis=0)
    return tf.reshape(hidden, expanded_shape)
Runs the model to generate an intermediate representation of x_t. Args: inputs: A batch of image sequences `x_{1:T}` of shape `[sample_shape, batch_size, timesteps, height, width, channels]`. Returns: A batch of intermediate representations of shape [sample_shape, batch_size, timesteps, hidden_size].
codesearchnet
def prepare_namespace(self, func):
    """Prepare the function to be run after deserializing it.

    Re-associates any previously bound modules, closure variables and
    functions into the callable's global namespace.

    Returns:
        callable: ready-to-call function.
    """
    target = getattr(self.obj, self.imethod_name) if self.is_imethod else func
    # Re-import and rebind the modules captured at serialization time.
    for varname, modulename in self.global_modules.items():
        target.__globals__[varname] = __import__(modulename)
    for extras in (self.global_closure, self.global_functions):
        if extras:
            target.__globals__.update(extras)
    return target
Prepares the function to be run after deserializing it. Re-associates any previously bound variables and modules from the closure Returns: callable: ready-to-call function
codesearchnet
# keras.ops dispatcher for rot90 (counterclockwise rotation in the plane given
# by `axes`, `k` times): in a symbolic (graph-building) context it defers to the
# Rot90 operation's symbolic_call; otherwise it executes eagerly through the
# active backend's numpy-compatible rot90.
def rot90(array, k=1, axes=(0, 1)): if any_symbolic_tensors((array,)): return Rot90(k=k, axes=axes).symbolic_call(array) return backend.numpy.rot90(array, k=k, axes=axes)
Rotate an array by 90 degrees in the plane specified by axes. This function rotates an array counterclockwise by 90 degrees `k` times in the plane specified by `axes`. Supports arrays of two or more dimensions. Args: array: Input array to rotate. k: Number of times the array is rotated by 90 degrees. axes: A tuple of two integers specifying the plane of rotation (defaults to `(0, 1)`). Returns: Rotated array. Examples: >>> import numpy as np >>> from keras import ops >>> m = np.array([[1, 2], [3, 4]]) >>> rotated = ops.rot90(m) >>> rotated array([[2, 4], [1, 3]]) >>> m = np.arange(8).reshape((2, 2, 2)) >>> rotated = ops.rot90(m, k=1, axes=(1, 2)) >>> rotated array([[[1, 3], [0, 2]], [[5, 7], [4, 6]]])
github-repos
def unravel_staff(staff_data):
    """Flatten a role->members mapping into a flat list of staff members.

    Each member dict gains a ``role`` key set to its role type (i.e.
    course_admin, instructor, TA, etc). Note: the member dicts are
    mutated in place, matching the original behaviour.

    Args:
        staff_data (dict): data returned from :py:meth:`get_staff`.

    Returns:
        list: flat list of staff members.
    """
    flattened = []
    for role, members in staff_data['data'].items():
        for member in members:
            member['role'] = role
            flattened.append(member)
    return flattened
Unravels staff role dictionary into flat list of staff members with ``role`` set as an attribute. Args: staff_data(dict): Data returned from :py:meth:`get_staff` Returns: list: Flat list of staff members with ``role`` set to role type (i.e. course_admin, instructor, TA, etc)
codesearchnet
def forecast(self, throughputs, backlog_size, num_simulations=10000, max_periods=10000, seed=None):
    """Monte-Carlo forecast of how long a backlog will take to complete.

    Each simulation repeatedly draws a historical throughput at random
    and subtracts it from the backlog until it is exhausted, counting
    the periods needed.

    Arguments:
        throughputs (List[int]): Number of units completed per unit of
            time (stories per week, story points per month, etc.)
        backlog_size (int): Units in the backlog (stories, points, etc.)
        num_simulations (int): number of simulation runs.
        max_periods (int): abort threshold for a single run.
        seed: optional seed for the global random module.

    Returns:
        Results: the distribution of simulated completion times.

    Exceptions:
        ValueError: If there aren't any positive throughputs, or the
            simulation takes too long.
    """
    self._check_throughputs(throughputs)
    if seed is not None:
        random.seed(seed)
    periods_per_run = []
    for _ in range(num_simulations):
        remaining = backlog_size
        periods = 0
        while remaining > 0:
            remaining -= random.choice(throughputs)
            periods += 1
            if periods > max_periods:
                raise ValueError(
                    "More than {} periods calculated".format(max_periods))
        periods_per_run.append(periods)
    return Results(periods_per_run)
Forecasts how long a backlog will take to complete given the historical values provided. Arguments: throughputs(List[int]): Number of units completed per unit of time (stories per week, story points per month, etc.) backlog_size(int): Units in the backlog (stories, points, etc.) Returns: results Exceptions: ValueError: If there aren't any positive throughputs, or the simulation takes too long.
juraj-google-style
def _predictResponseSize(mode, functioncode, payloadToSlave):
    """Calculate the number of bytes that should be received from the slave.

    Args:
        * mode (str): The Modbus protocol mode (MODE_RTU or MODE_ASCII)
        * functioncode (int): Modbus function code.
        * payloadToSlave (str): The raw request that is to be sent to the
          slave (not hex encoded string)

    Returns:
        The predicted number of bytes (int) in the response.

    Raises:
        ValueError, TypeError.
    """
    MIN_PAYLOAD_LENGTH = 4
    BYTERANGE_FOR_GIVEN_SIZE = slice(2, 4)
    NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION = 4
    NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD = 1
    RTU_TO_ASCII_PAYLOAD_FACTOR = 2
    NUMBER_OF_RTU_RESPONSE_STARTBYTES = 2
    NUMBER_OF_RTU_RESPONSE_ENDBYTES = 2
    NUMBER_OF_ASCII_RESPONSE_STARTBYTES = 5
    NUMBER_OF_ASCII_RESPONSE_ENDBYTES = 4
    _checkMode(mode)
    _checkFunctioncode(functioncode, None)
    _checkString(payloadToSlave, description='payload',
                 minlength=MIN_PAYLOAD_LENGTH)
    if functioncode in [5, 6, 15, 16]:
        # Write confirmations echo a fixed-size payload.
        response_payload_size = NUMBER_OF_PAYLOAD_BYTES_IN_WRITE_CONFIRMATION
    elif functioncode in [1, 2, 3, 4]:
        given_size = _twoByteStringToNum(payloadToSlave[BYTERANGE_FOR_GIVEN_SIZE])
        if functioncode == 1 or functioncode == 2:
            # Bit reads pack eight inputs per response byte, rounded up.
            # (This expression was truncated/corrupted in the original
            # source; restored per the Modbus specification.)
            number_of_inputs = given_size
            response_payload_size = (
                NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD
                + number_of_inputs // 8
                + (1 if number_of_inputs % 8 else 0))
        elif functioncode == 3 or functioncode == 4:
            number_of_registers = given_size
            response_payload_size = (
                NUMBER_OF_PAYLOAD_BYTES_FOR_BYTECOUNTFIELD
                + number_of_registers * _NUMBER_OF_BYTES_PER_REGISTER)
    else:
        raise ValueError('Wrong functioncode: {}. The payload is: {!r}'.format(
            functioncode, payloadToSlave))
    if mode == MODE_ASCII:
        # ASCII mode hex-encodes the payload, doubling its length.
        return (NUMBER_OF_ASCII_RESPONSE_STARTBYTES
                + response_payload_size * RTU_TO_ASCII_PAYLOAD_FACTOR
                + NUMBER_OF_ASCII_RESPONSE_ENDBYTES)
    else:
        return (NUMBER_OF_RTU_RESPONSE_STARTBYTES
                + response_payload_size
                + NUMBER_OF_RTU_RESPONSE_ENDBYTES)
Calculate the number of bytes that should be received from the slave. Args: * mode (str): The Modbus protocol mode (MODE_RTU or MODE_ASCII) * functioncode (int): Modbus function code. * payloadToSlave (str): The raw request that is to be sent to the slave (not hex encoded string) Returns: The predicted number of bytes (int) in the response. Raises: ValueError, TypeError.
codesearchnet
def create_detector(self, detector):
    """Create a new detector.

    Args:
        detector (object): the detector model object. Will be serialized
            as JSON.

    Returns:
        dictionary of the response (created detector model).
    """
    response = self._post(self._u(self._DETECTOR_ENDPOINT_SUFFIX), data=detector)
    # Propagate an exception for HTTP error statuses before decoding.
    response.raise_for_status()
    return response.json()
Creates a new detector. Args: detector (object): the detector model object. Will be serialized as JSON. Returns: dictionary of the response (created detector model).
codesearchnet
# Retrieves application configurations for this instance, optionally filtered by
# `name` (which may be a regular expression), by delegating to _get_elements over
# the REST resource held in self.applicationConfigurations.
# NOTE(review): when the instance object has no `applicationConfigurations`
# attribute, this silently returns None rather than a list -- callers should be
# prepared for that; confirm whether a [] fallback is wanted.
def get_application_configurations(self, name=None): if hasattr(self, 'applicationConfigurations'): return self._get_elements(self.applicationConfigurations, 'applicationConfigurations', ApplicationConfiguration, None, name)
Retrieves application configurations for this instance. Args: name (str, optional): Only return application configurations containing property **name** that matches `name`. `name` can be a regular expression. If `name` is not supplied, then all application configurations are returned. Returns: list(ApplicationConfiguration): A list of application configurations matching the given `name`. .. versionadded 1.12
juraj-google-style
# Returns a new MacroSubsystem identical to this one except for the applied cut;
# every other construction argument (network, network state, micro node indices,
# time scale, blackboxing and coarse-graining) is forwarded unchanged.
def apply_cut(self, cut): return MacroSubsystem(self.network, self.network_state, self.micro_node_indices, cut=cut, time_scale=self.time_scale, blackbox=self.blackbox, coarse_grain=self.coarse_grain)
Return a cut version of this |MacroSubsystem|. Args: cut (Cut): The cut to apply to this |MacroSubsystem|. Returns: MacroSubsystem: The cut version of this |MacroSubsystem|.
codesearchnet
def parse_user_data(variables, raw_user_data, blueprint_name):
    """Parse the given user data and render it as a template.

    Supports referencing template variables to create userdata that's
    supplemented with information from the stack, as commonly required
    when creating EC2 userdata files.

    For example, given a raw_user_data string 'open file ${file}' and a
    variables dictionary {'file': 'test.txt'}, this returns
    'open file test.txt'.

    Args:
        variables (dict): variables available to the template.
        raw_user_data (str): the user_data to be parsed.
        blueprint_name (str): the name of the blueprint.

    Returns:
        str: The parsed user data, with all the variables values and
        refs replaced with their resolved values.

    Raises:
        InvalidUserdataPlaceholder: Raised when a placeholder name in
            raw_user_data is not valid. E.g ${100} would raise this.
        MissingVariable: Raised when a variable is in the raw_user_data
            that is not given in the blueprint.
    """
    variable_values = {}
    for key, value in variables.items():
        # isinstance (rather than an exact `type(...) is` comparison)
        # also accepts CFNParameter subclasses.
        if isinstance(value, CFNParameter):
            variable_values[key] = value.to_parameter_value()
        else:
            variable_values[key] = value
    template = string.Template(raw_user_data)
    try:
        return template.substitute(variable_values)
    except ValueError as exp:
        raise InvalidUserdataPlaceholder(blueprint_name, exp.args[0])
    except KeyError as key:
        raise MissingVariable(blueprint_name, key)
Parse the given user data and renders it as a template It supports referencing template variables to create userdata that's supplemented with information from the stack, as commonly required when creating EC2 userdata files. For example: Given a raw_user_data string: 'open file ${file}' And a variables dictionary with: {'file': 'test.txt'} parse_user_data would output: open file test.txt Args: variables (dict): variables available to the template raw_user_data (str): the user_data to be parsed blueprint_name (str): the name of the blueprint Returns: str: The parsed user data, with all the variables values and refs replaced with their resolved values. Raises: InvalidUserdataPlaceholder: Raised when a placeholder name in raw_user_data is not valid. E.g ${100} would raise this. MissingVariable: Raised when a variable is in the raw_user_data that is not given in the blueprint
codesearchnet
def _genBgTerm_fromXX(self, vTot, vCommon, XX, a=None, c=None):
    """Generate background terms (common and independent) from a kinship matrix.

    Args:
        vTot: variance of Yc + Yi.
        vCommon: variance of Yc.
        XX: kinship matrix.
        a: common scales; it can be set for debugging purposes.
        c: independent scales; it can be set for debugging purposes.

    Returns:
        tuple: (Yc, Yi) -- the common and independent background terms.
    """
    vSpecific = vTot - vCommon
    # NOTE(review): the RNG is reseeded with a fixed seed on every call,
    # so repeated calls reproduce the same draws (preserved behaviour).
    SP.random.seed(0)
    # Bug fix: `c == None` compares elementwise (and raises on truth
    # testing) when c is an ndarray; identity comparison is correct.
    if c is None:
        c = SP.randn(self.P)
    # Jitter the kinship matrix so the Cholesky factorization is stable.
    # NOTE(review): this mutates the caller's XX in place.
    XX += 1e-3 * SP.eye(XX.shape[0])
    L = LA.cholesky(XX, lower=True)
    R = self.genWeights(self.N, self.P)
    A = self.genTraitEffect()
    if a is not None:
        A[0, :] = a
    Yc = SP.dot(L, SP.dot(R, A))
    Yc *= SP.sqrt(vCommon) / SP.sqrt(Yc.var(0).mean())
    R = SP.randn(self.N, self.P)
    Yi = SP.dot(L, SP.dot(R, SP.diag(c)))
    Yi *= SP.sqrt(vSpecific) / SP.sqrt(Yi.var(0).mean())
    return Yc, Yi
generate background term from SNPs Args: vTot: variance of Yc+Yi vCommon: variance of Yc XX: kinship matrix a: common scales, it can be set for debugging purposes c: independent scales, it can be set for debugging purposes
juraj-google-style
def EncodeEnv(env, encoding=None):
    """Encode all the key value pairs in env in preparation for subprocess.

    Args:
        env: {str: str}, The environment you are going to pass to
            subprocess.
        encoding: str, The encoding to use or None to use the default.

    Returns:
        {bytes: bytes}, The environment to pass to subprocess.
    """
    chosen_encoding = encoding or _GetEncoding()
    encoded = {}
    for key, value in env.items():
        encoded[Encode(key, encoding=chosen_encoding)] = Encode(
            value, encoding=chosen_encoding)
    return encoded
Encodes all the key value pairs in env in preparation for subprocess. Args: env: {str: str}, The environment you are going to pass to subprocess. encoding: str, The encoding to use or None to use the default. Returns: {bytes: bytes}, The environment to pass to subprocess.
github-repos
# X11 keyboard press (without release) via XTest fake_input, leaving the key in
# a held-down state. Keys absent from keyboardMapping (or mapped to None) are
# silently ignored. Integer keys are sent directly as raw keycodes. Characters
# that require shift (per pyautogui.isShiftCharacter) are bracketed by a
# synthetic shift press/release, and the display is synced so events flush
# immediately.
# NOTE: for some reason this does not cause key repeats the way a physically
# held keyboard key would in a text field.
def _keyDown(key): if key not in keyboardMapping or keyboardMapping[key] is None: return if type(key) == int: fake_input(_display, X.KeyPress, key) _display.sync() return needsShift = pyautogui.isShiftCharacter(key) if needsShift: fake_input(_display, X.KeyPress, keyboardMapping['shift']) fake_input(_display, X.KeyPress, keyboardMapping[key]) if needsShift: fake_input(_display, X.KeyRelease, keyboardMapping['shift']) _display.sync()
Performs a keyboard key press without the release. This will put that key in a held down state. NOTE: For some reason, this does not seem to cause key repeats like would happen if a keyboard key was held down on a text field. Args: key (str): The key to be pressed down. The valid names are listed in pyautogui.KEY_NAMES. Returns: None
juraj-google-style
def __init__(self, learning_rate, initial_accumulator_value=0.1, use_locking=False, name='Adagrad'):
    """Construct a new Adagrad optimizer.

    Args:
        learning_rate: A `Tensor` or a floating point value. The learning
            rate.
        initial_accumulator_value: A floating point value. Starting value
            for the accumulators, must be positive.
        use_locking: If `True` use locks for update operations.
        name: Optional name prefix for the operations created when
            applying gradients. Defaults to "Adagrad".

    Raises:
        ValueError: If the `initial_accumulator_value` is invalid.
    """
    # Validate before touching any state so a bad value leaves nothing
    # half-constructed.
    if initial_accumulator_value <= 0.0:
        raise ValueError('initial_accumulator_value must be positive: %s' %
                         initial_accumulator_value)
    super(AdagradOptimizer, self).__init__(use_locking, name)
    self._learning_rate = learning_rate
    self._initial_accumulator_value = initial_accumulator_value
    # Created lazily when the graph is built.
    self._learning_rate_tensor = None
Construct a new Adagrad optimizer. Args: learning_rate: A `Tensor` or a floating point value. The learning rate. initial_accumulator_value: A floating point value. Starting value for the accumulators, must be positive. use_locking: If `True` use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to "Adagrad". Raises: ValueError: If the `initial_accumulator_value` is invalid.
github-repos
# Projects `body_output` onto the vocabulary to produce logits.
# When `shared_embedding_and_softmax_weights` is set, the projection matrix is
# taken from the "shared" variable scope with AUTO_REUSE (shared with the
# embedding); otherwise a dedicated "softmax" scope is used.
# With `factored_logits` during TRAIN mode, the (potentially huge) matmul is
# deferred by returning a FactoredTensor; otherwise dense logits of shape
# body_output_shape[:-1] + [1, vocab_size] are returned.
# `targets` is accepted for interface uniformity and deleted immediately.
def symbol_top(body_output, targets, model_hparams, vocab_size): del targets if model_hparams.shared_embedding_and_softmax_weights: scope_name = 'shared' reuse = tf.AUTO_REUSE else: scope_name = 'softmax' reuse = False with tf.variable_scope(scope_name, reuse=reuse): body_output_shape = common_layers.shape_list(body_output) var = get_weights(model_hparams, vocab_size, body_output_shape[(- 1)]) if (model_hparams.factored_logits and (model_hparams.mode == tf.estimator.ModeKeys.TRAIN)): body_output = tf.expand_dims(body_output, 3) return common_layers.FactoredTensor(body_output, var) else: body_output = tf.reshape(body_output, [(- 1), body_output_shape[(- 1)]]) logits = tf.matmul(body_output, var, transpose_b=True) return tf.reshape(logits, (body_output_shape[:(- 1)] + [1, vocab_size]))
Generate logits. Args: body_output: A Tensor with shape [batch, p0, p1, model_hparams.hidden_size]. targets: Unused. model_hparams: HParams, model hyperparameters. vocab_size: int, vocabulary size. Returns: logits: A Tensor with shape [batch, p0, p1, ?, vocab_size].
codesearchnet
def concurrence(state):
    """Calculate the concurrence of a two-qubit state.

    Args:
        state (np.array): a two-qubit quantum state (length-4 state vector)
            or a density matrix (4x4 array).

    Returns:
        float: the concurrence, in [0, 1].

    Raises:
        Exception: if the input does not describe exactly two qubits.
    """
    rho = np.array(state)
    if rho.ndim == 1:
        # Promote a pure state vector to its density matrix.
        rho = outer(state)
    if len(state) != 4:
        # Bug fix: the message previously said concurrence was "only defined
        # for more than two qubits", which is the opposite of the check.
        raise Exception('Concurrence is only defined for a two-qubit state')
    # Spin-flip operator sigma_y (x) sigma_y as an anti-diagonal matrix.
    YY = np.fliplr(np.diag([(- 1), 1, 1, (- 1)]))
    A = rho.dot(YY).dot(rho.conj()).dot(YY)
    w = la.eigh(A, eigvals_only=True)
    # Clamp tiny negative eigenvalues caused by numerical noise before sqrt.
    w = np.sqrt(np.maximum(w, 0))
    # C = max(0, l1 - l2 - l3 - l4); eigh sorts ascending, so l1 = w[-1].
    return max(0.0, (w[(- 1)] - np.sum(w[0:(- 1)])))
Calculate the concurrence. Args: state (np.array): a quantum state (1x4 array) or a density matrix (4x4 array) Returns: float: concurrence. Raises: Exception: if attempted on more than two qubits.
codesearchnet
def create_course_completion(self, user_id, payload):
    """Send a completion status payload to the Degreed completion endpoint.

    Args:
        user_id: Unused.
        payload: JSON-encoded object (serialized from
            DegreedLearnerDataTransmissionAudit) with completion status fields.

    Returns:
        A tuple of (status code, response body).

    Raises:
        HTTPError: if Degreed returned a failure response code.
    """
    completion_url = urljoin(
        self.enterprise_configuration.degreed_base_url,
        self.global_degreed_config.completion_status_api_path,
    )
    return self._post(completion_url, payload, self.COMPLETION_PROVIDER_SCOPE)
Send a completion status payload to the Degreed Completion Status endpoint Args: user_id: Unused. payload: JSON encoded object (serialized from DegreedLearnerDataTransmissionAudit) containing completion status fields per Degreed documentation. Returns: A tuple containing the status code and the body of the response. Raises: HTTPError: if we received a failure response code from Degreed
codesearchnet
def add_nodes(self, nodes):
    """Add a node or list of nodes to ``self.node_list``.

    Args:
        nodes (Node or list[Node]): the node or list of nodes to add
            to the graph.

    Returns:
        None
    """
    if isinstance(nodes, list):
        self.node_list.extend(nodes)
    else:
        self.node_list.append(nodes)
Add a given node or list of nodes to self.node_list. Args: node (Node or list[Node]): the node or list of nodes to add to the graph Returns: None Examples: Adding one node: :: >>> from blur.markov.node import Node >>> graph = Graph() >>> node_1 = Node('One') >>> graph.add_nodes(node_1) >>> print([node.value for node in graph.node_list]) ['One'] Adding multiple nodes at a time in a list: :: >>> from blur.markov.node import Node >>> graph = Graph() >>> node_1 = Node('One') >>> node_2 = Node('Two') >>> graph.add_nodes([node_1, node_2]) >>> print([node.value for node in graph.node_list]) ['One', 'Two']
codesearchnet
# NOTE(review): this record is truncated by comment stripping — the default
# for `wall_label=` (presumably "#") and an `agent_label` parameter were lost,
# yet `agent_label` is referenced in the body. Recover the upstream source
# before reusing this code; it is not valid Python as-is.
# Intent (from the visible code): locate the start ("S") and goal ("G") cells
# in `map_arr`, remember the wall label, and register a reward value
# float(cell) for every non-wall, non-terminal cell via save_r_df((x, y), v).
def initialize(self, map_arr, start_point_label="S", end_point_label="G", wall_label=" np.set_printoptions(threshold=np.inf) self.__agent_label = agent_label self.__map_arr = map_arr self.__start_point_label = start_point_label start_arr_tuple = np.where(self.__map_arr == self.__start_point_label) x_arr, y_arr = start_arr_tuple self.__start_point_tuple = (x_arr[0], y_arr[0]) end_arr_tuple = np.where(self.__map_arr == self.__end_point_label) x_arr, y_arr = end_arr_tuple self.__end_point_tuple = (x_arr[0], y_arr[0]) self.__wall_label = wall_label for x in range(self.__map_arr.shape[1]): for y in range(self.__map_arr.shape[0]): if (x, y) == self.__start_point_tuple or (x, y) == self.__end_point_tuple: continue arr_value = self.__map_arr[y][x] if arr_value == self.__wall_label: continue self.save_r_df((x, y), float(arr_value))
Initialize map of maze and setup reward value. Args: map_arr: Map. the 2d- `np.ndarray`. start_point_label: Label of start point. end_point_label: Label of end point. wall_label: Label of wall. agent_label: Label of agent.
juraj-google-style
def expand_value_set_url_using_service(self, value_set_url: str, terminology_service_url: str) -> value_set_pb2.ValueSet:
    """Expand a value set using the requested terminology service.

    Requests the current definition and expansion of `value_set_url` (with
    any version pinned in the URL) from the terminology server at
    `terminology_service_url`. If the service requires credentials, the URL
    must have an entry in the `auth_per_terminology_server` mapping supplied
    to this class' constructor.

    Args:
        value_set_url: The url of the value set to expand.
        terminology_service_url: The url of the terminology service to use.

    Returns:
        The current definition of the value set from the server with its
        expanded codes present.
    """
    url, version = url_utils.parse_url_version(value_set_url)
    credentials = self.auth_per_terminology_server.get(terminology_service_url)
    return self._expand_value_set_url_using_service(
        value_set_url=url,
        value_set_version=version,
        terminology_service_url=terminology_service_url,
        auth=credentials,
    )
Expands the value set using the requested terminology service. Requests an expansion of the value set from the terminology server at `terminology_service_url` for the given URL and version if present on the URL. If the terminology service requires credentials to access, `terminology_service_url` must have an entry in the `auth_per_terminology_server` given to this class' constructor. Retrieves the current definition of the value set from the terminology service as well as its expansion. Args: value_set_url: The url of the value set to expand. terminology_service_url: The url of the terminology service to use when expanding `value_set_url`. Returns: The current definition of the value set from the server with its expanded codes present.
github-repos
async def teardown_client(self, client_id):
    """Release all resources held by a client.

    Must be called and awaited whenever a client is disconnected: removes
    the client's adapter monitor, cleanly disconnects every device the
    client was connected to, and drops its registry entry.

    Args:
        client_id (str): The client that we should tear down.

    Raises:
        ArgumentError: The client_id is unknown.
    """
    client_info = self._client_info(client_id)
    self.adapter.remove_monitor(client_info['monitor'])

    conns = client_info['connections']
    for conn_string, conn_id in conns.items():
        try:
            self._logger.debug('Disconnecting client %s from conn %s at teardown', client_id, conn_string)
            await self.adapter.disconnect(conn_id)
        # Bug fix: a bare `except:` here also swallowed asyncio.CancelledError
        # (a BaseException), preventing task cancellation during teardown.
        # Teardown is best-effort, so ordinary errors are logged and skipped.
        except Exception:
            self._logger.exception('Error disconnecting device during teardown_client: conn_string=%s', conn_string)

    del self._clients[client_id]
Release all resources held by a client. This method must be called and awaited whenever a client is disconnected. It ensures that all of the client's resources are properly released and any devices they have connected to are disconnected cleanly. Args: client_id (str): The client that we should tear down. Raises: ArgumentError: The client_id is unknown.
codesearchnet
# NOTE(review): this record is truncated — the 'https://...' image URLs and
# the tail of the literals (e.g. 'finalUrls': ['http://...']) were cut off,
# most likely by comment stripping on the '//' in the URLs. Recover the
# upstream AdWords example before reusing; it is not valid Python as-is.
# Intent (from the visible code): upload a marketing image and a logo via the
# MediaService, assemble a ResponsiveDisplayAd, attach it to `ad_group_id`,
# and submit a single ADD mutate operation, returning the created ad group ad.
def CreateAd(client, opener, ad_group_id): ad_group_ad_service = client.GetService('AdGroupAdService', 'v201809') media_service = client.GetService('MediaService', 'v201809') marketing_image_id = _CreateImage(media_service, opener, 'https: logo_image_id = _CreateImage(media_service, opener, 'https: ad = {'xsi_type': 'ResponsiveDisplayAd', 'marketingImage': {'xsi_type': 'Image', 'mediaId': marketing_image_id}, 'shortHeadline': 'Travel', 'longHeadline': 'Travel the World', 'description': 'Take to the air!', 'businessName': 'Interplanetary Cruises', 'finalUrls': ['http: ad_group_ad = {'ad': ad, 'adGroupId': ad_group_id} operations = [{'operation': 'ADD', 'operand': ad_group_ad}] return ad_group_ad_service.mutate(operations)['value'][0]
Creates a ResponsiveDisplayAd. Args: client: an AdWordsClient instance. opener: an OpenerDirector instance. ad_group_id: an int ad group ID. Returns: The ad group ad that was successfully created.
codesearchnet
def build_aspect_ratio_mask(aspect_ratios: List[List[Tuple[int, int]]], max_image_tiles: int) -> np.ndarray:
    """Build a per-tile validity mask for the aspect ratios of the images.

    Args:
        aspect_ratios (`List[List[Tuple[int, int]]]`): Per-sample lists of
            aspect ratios, one (width, height) tile-count pair per image.
        max_image_tiles (`int`): The maximum number of tiles any image can
            be split into.

    Returns:
        `np.ndarray`: int64 array of shape
        (batch_size, max_num_images, max_image_tiles) with 1s for valid
        tiles and 0s for padding.
    """
    batch_size = len(aspect_ratios)
    max_num_images = max(len(sample) for sample in aspect_ratios)

    mask = np.zeros((batch_size, max_num_images, max_image_tiles), dtype=np.int64)
    # The first tile of every (possibly padded) image slot is marked valid —
    # presumably so no row is all-zero downstream; padding slots keep only it.
    mask[:, :, 0] = 1

    for sample_idx, sample_ratios in enumerate(aspect_ratios):
        for image_idx, (tiles_w, tiles_h) in enumerate(sample_ratios):
            mask[sample_idx, image_idx, : tiles_w * tiles_h] = 1
    return mask
Builds a mask for the aspect ratios of the images. Args: aspect_ratios (`List[List[Tuple[int, int]]]`): A list of lists containing aspect ratios for each image in the batch. Each aspect ratio is represented as a tuple of (width, height) in terms of number of tiles. max_image_tiles (`int`): The maximum number of tiles any image can be split into. Returns: `np.ndarray`: A 3D numpy array of shape (batch_size, max_num_images, max_image_tiles). The mask contains 1s for valid tiles and 0s for padding.
github-repos
# Simplifies a country name by stripping descriptive text (e.g. DEMOCRATIC,
# REPUBLIC OF), returning (uppercase simplified name, list of removed words).
# Pipeline, in order: uppercase; record the original words; truncate at the
# first ',' and then ':'; drop any parenthesised text; remove every phrase in
# cls.abbreviations / cls.multiple_abbreviations (their expansions are added
# to the removal word list); remove all cls.simplifications words via a
# case-insensitive word-boundary regex; finally, if multiple words remain,
# keep only the first. The removal/regex order is significant — do not
# reorder these steps.
def simplify_countryname(cls, country): countryupper = country.upper() words = get_words_in_sentence(countryupper) index = countryupper.find(',') if (index != (- 1)): countryupper = countryupper[:index] index = countryupper.find(':') if (index != (- 1)): countryupper = countryupper[:index] regex = re.compile('\\(.+?\\)') countryupper = regex.sub('', countryupper) remove = copy.deepcopy(cls.simplifications) for (simplification1, simplification2) in cls.abbreviations.items(): countryupper = countryupper.replace(simplification1, '') remove.append(simplification2) for (simplification1, simplifications) in cls.multiple_abbreviations.items(): countryupper = countryupper.replace(simplification1, '') for simplification2 in simplifications: remove.append(simplification2) remove = '|'.join(remove) regex = re.compile((('\\b(' + remove) + ')\\b'), flags=re.IGNORECASE) countryupper = regex.sub('', countryupper) countryupper = countryupper.strip() countryupper_words = get_words_in_sentence(countryupper) if (len(countryupper_words) > 1): countryupper = countryupper_words[0] if countryupper: words.remove(countryupper) return (countryupper, words)
Simplifies country name by removing descriptive text eg. DEMOCRATIC, REPUBLIC OF etc. Args: country (str): Country name to simplify Returns: Tuple[str, List[str]]: Uppercase simplified country name and list of removed words
codesearchnet
def parse_verilog_file(fname):
    """Parse a named Verilog file.

    Args:
        fname (str): File to parse.

    Returns:
        List of parsed objects.
    """
    with open(fname, 'rt') as source_file:
        source_text = source_file.read()
    return parse_verilog(source_text)
Parse a named Verilog file Args: fname (str): File to parse. Returns: List of parsed objects.
juraj-google-style
# Creates the requested satpy readers and assigns input files to them.
# `filenames` may be a flat sequence or a {reader_name: [files]} mapping (in
# which case `reader` defaults to its keys). Input validation: no args -> {};
# reader given but `filenames` empty -> ValueError; files given but no reader
# match at the end -> warning. For each reader config, a reader instance is
# built (config errors are logged and skipped), matching files are selected
# and turned into file handlers, and matched files are removed from the
# remaining set; iteration stops early once all files are claimed.
# Raises ValueError if no reader could be created or no dataset could be
# loaded from the created readers. Returns {reader_name: reader_instance}.
# NOTE(review): the long ValueError message below is split across two record
# lines — it is a single multi-line string literal in the upstream source.
def load_readers(filenames=None, reader=None, reader_kwargs=None, ppp_config_dir=None): reader_instances = {} reader_kwargs = (reader_kwargs or {}) reader_kwargs_without_filter = reader_kwargs.copy() reader_kwargs_without_filter.pop('filter_parameters', None) if (ppp_config_dir is None): ppp_config_dir = get_environ_config_dir() if ((not filenames) and (not reader)): return {} elif (reader and (filenames is not None) and (not filenames)): raise ValueError("'filenames' was provided but is empty.") elif (not filenames): LOG.warning("'filenames' required to create readers and load data") return {} elif ((reader is None) and isinstance(filenames, dict)): reader = list(filenames.keys()) remaining_filenames = set((f for fl in filenames.values() for f in fl)) elif (reader and isinstance(filenames, dict)): filenames = filenames[reader] remaining_filenames = set((filenames or [])) else: remaining_filenames = set((filenames or [])) for (idx, reader_configs) in enumerate(configs_for_reader(reader, ppp_config_dir)): if isinstance(filenames, dict): readers_files = set(filenames[reader[idx]]) else: readers_files = remaining_filenames try: reader_instance = load_reader(reader_configs, **reader_kwargs) except (KeyError, IOError, yaml.YAMLError) as err: LOG.info('Cannot use %s', str(reader_configs)) LOG.debug(str(err)) continue if readers_files: loadables = reader_instance.select_files_from_pathnames(readers_files) if loadables: reader_instance.create_filehandlers(loadables, fh_kwargs=reader_kwargs_without_filter) reader_instances[reader_instance.name] = reader_instance remaining_filenames -= set(loadables) if (not remaining_filenames): break if remaining_filenames: LOG.warning("Don't know how to open the following files: {}".format(str(remaining_filenames))) if (not reader_instances): raise ValueError('No supported files found') elif (not any((list(r.available_dataset_ids) for r in reader_instances.values()))): raise ValueError('No dataset could be loaded. 
Either missing requirements (such as Epilog, Prolog) or none of the provided files match the filter parameters.') return reader_instances
Create specified readers and assign files to them. Args: filenames (iterable or dict): A sequence of files that will be used to load data from. A ``dict`` object should map reader names to a list of filenames for that reader. reader (str or list): The name of the reader to use for loading the data or a list of names. reader_kwargs (dict): Keyword arguments to pass to specific reader instances. ppp_config_dir (str): The directory containing the configuration files for satpy. Returns: Dictionary mapping reader name to reader instance
codesearchnet
def _find_root_dir(path, spor_dir): start_path = pathlib.Path((os.getcwd() if (path is None) else path)) paths = ([start_path] + list(start_path.parents)) for path in paths: data_dir = (path / spor_dir) if (data_dir.exists() and data_dir.is_dir()): return path raise ValueError('No spor repository found')
Search for a spor repo containing `path`. This searches for `spor_dir` in directories dominating `path`. If a directory containing `spor_dir` is found, then that directory is returned as a `pathlib.Path`. Returns: The dominating directory containing `spor_dir` as a `pathlib.Path`. Raises: ValueError: No repository is found.
codesearchnet
def last_updated(path):
    """Get the UNIX epoch time in seconds for `path` on its FileSystem.

    Args:
        path: string path of file.

    Returns:
        float UNIX epoch time.

    Raises:
        ``BeamIOError``: if path doesn't exist.
    """
    return FileSystems.get_filesystem(path).last_updated(path)
Get UNIX Epoch time in seconds on the FileSystem. Args: path: string path of file. Returns: float UNIX Epoch time Raises: ``BeamIOError``: if path doesn't exist.
github-repos
def log_histogram(self, name, value, step=None):
    """Log a histogram for the given name at the given step.

    Args:
        name (str): name of the variable (converted to a valid tensorflow
            summary name).
        value (tuple or list): either a list of numbers to summarize as a
            histogram, or a (bin_edges, bincounts) pair that defines one
            directly.
        step (int): non-negative integer used for visualization.

    Raises:
        TypeError: if `value` is a string instead of a number sequence.
    """
    # Strings are iterable and would silently produce a nonsense histogram.
    if isinstance(value, six.string_types):
        raise TypeError('"value" should be a number, got {}'.format(type(value)))
    self._check_step(step)
    tf_name = self._ensure_tf_name(name)
    histogram = self._histogram_summary(tf_name, value, step=step)
    self._log_summary(tf_name, histogram, value, step=step)
Log a histogram for given name on given step. Args: name (str): name of the variable (it will be converted to a valid tensorflow summary name). value (tuple or list): either list of numbers to be summarized as a histogram, or a tuple of bin_edges and bincounts that directly define a histogram. step (int): non-negative integer used for visualization
codesearchnet
def map_to_pdf(map_source, zoom, x, y, width, height):
    """Generate and serve a PDF of the map at the given position.

    Args:
        map_source (str): id of the map source to print.
        zoom (int): zoom-level to print.
        x (float): map center x-coordinate, mercator projection (EPSG:4326).
        y (float): map center y-coordinate, mercator projection (EPSG:4326).
        width (float): width of the pdf in mm.
        height (float): height of the pdf in mm.

    Returns:
        A Flask response serving the generated PDF as a ``map.pdf`` download.
    """
    source = app.config["mapsources"][map_source]
    pdf_file = print_map(
        source,
        x=float(x),
        y=float(y),
        zoom=int(zoom),
        width=float(width),
        height=float(height),
        format='pdf',
    )
    return send_file(pdf_file, attachment_filename="map.pdf", as_attachment=True)
Generate a PDF at the given position. Args: map_source (str): id of the map source to print. zoom (int): zoom-level to print x (float): Center of the Map in mercator projection (EPSG:4326), x-coordinate y (float): Center of the Map in mercator projection (EPSG:4326), y-coordinate width (float): width of the pdf in mm height (float): height of the pdf in mm Returns: The generated PDF served as a ``map.pdf`` file-download response.
juraj-google-style
def maybe(cls, val: Optional[T]) -> 'Option[T]':
    """Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``.

    Args:
        val: Some value.

    Returns:
        ``Some(val)`` if ``val`` is not None, otherwise :py:data:`NONE`.

    Examples:
        >>> Option.maybe(0)
        Some(0)
        >>> Option.maybe(None)
        NONE
    """
    if val is None:
        return cast('Option[T]', NONE)
    return cls.Some(val)
Shortcut method to return ``Some`` or :py:data:`NONE` based on ``val``. Args: val: Some value. Returns: ``Some(val)`` if the ``val`` is not None, otherwise :py:data:`NONE`. Examples: >>> Option.maybe(0) Some(0) >>> Option.maybe(None) NONE
juraj-google-style
def compress_encoder_2d(x, hparams, name=None):
    """Encoder that compresses 2-D inputs by 2**num_compress_steps.

    Args:
        x: Tensor of shape [batch, height, width, channels].
        hparams: HParams.
        name: string, variable scope.

    Returns:
        Tensor of shape [batch, latent_length, hparams.hidden_size], where
        latent_length is
        hparams.num_latents * (height*width) / 2**(hparams.num_compress_steps).
    """
    square_kernel = (hparams.kernel_size, hparams.kernel_size)
    return compress_encoder(x, hparams, strides=(2, 2), kernel_size=square_kernel, name=name)
Encoder that compresses 2-D inputs by 2**num_compress_steps. Args: x: Tensor of shape [batch, height, width, channels]. hparams: HParams. name: string, variable scope. Returns: Tensor of shape [batch, latent_length, hparams.hidden_size], where latent_length is hparams.num_latents * (height*width) / 2**(hparams.num_compress_steps).
juraj-google-style
def write_to_hdf5(self, filename_out, *args, **kwargs):
    """Write data to an HDF5 file, choosing a strategy by data size.

    Checks whether the container holds "heavy" data and dispatches to the
    heavy or light writer accordingly, logging the elapsed time.

    Args:
        filename_out (str): Name of output file.
    """
    t_start = time.time()
    self.__update_header()
    if self.container.isheavy():
        self.__write_to_hdf5_heavy(filename_out)
    else:
        self.__write_to_hdf5_light(filename_out)
    t_end = time.time()
    logger.info('Conversion time: %2.2fsec' % (t_end - t_start))
Write data to HDF5 file. It check the file size then decides how to write the file. Args: filename_out (str): Name of output file
juraj-google-style
def keep_doc_examples_only(content: str) -> str:
    """Remove everything from `content` except its doc examples.

    Used to determine whether a diff should trigger doc tests or not.

    Args:
        content (`str`): The code to clean.

    Returns:
        `str`: The cleaned code.
    """
    # Keep only the fenced code blocks: every odd element between ``` markers.
    splits = content.split('```')
    content = '```' + '```'.join(splits[1::2]) + '```'

    lines_to_keep = []
    for line in content.split('\n'):
        # NOTE(review): this statement was truncated in the original record
        # (the regex was lost to comment stripping); reconstructed as
        # removing trailing `#` comments — confirm against upstream.
        line = re.sub(r'#.*', '', line)
        # Drop blank / whitespace-only lines.
        if len(line) != 0 and (not line.isspace()):
            lines_to_keep.append(line)
    return '\n'.join(lines_to_keep)
Remove everything from the code content except the doc examples (used to determined if a diff should trigger doc tests or not). Args: content (`str`): The code to clean Returns: `str`: The cleaned code.
github-repos
def subscriber(address, topics, callback, message_type):
    """Create a Subscriber bound to `address` and subscribed to `topics`.

    The callback is invoked for every message received.

    Args:
        address: the address to bind the socket to.
        topics: the topics to subscribe to.
        callback: callable invoked per message; must accept two arguments,
            topic and message.
        message_type: the type of message to receive.

    Returns:
        The constructed Subscriber.
    """
    return Subscriber(address, topics, callback, message_type)
Creates a subscriber binding to the given address and subscribe the given topics. The callback is invoked for every message received. Args: - address: the address to bind the SUB socket to. - topics: the topics to subscribe - callback: the callback to invoke for every message. Must accept 2 variables - topic and message - message_type: the type of message to receive
juraj-google-style
def set_dataset_year_range(self, dataset_year, dataset_end_year=None):
    """Set the dataset date range from a start year and optional end year.

    Args:
        dataset_year (Union[str, int]): Dataset (start) year.
        dataset_end_year (Optional[Union[str, int]]): Dataset end year.
            Defaults to `dataset_year`.

    Returns:
        None

    Raises:
        HDXError: if either year is not a str or an int.
    """
    def _edge_date(year, prefix, param_name):
        # Years may be given as int or str; anything else is rejected.
        if isinstance(year, int):
            return '%s%d' % (prefix, year)
        if isinstance(year, str):
            return prefix + year
        raise hdx.data.hdxobject.HDXError('%s has type %s which is not supported!' % (param_name, type(year).__name__))

    dataset_date = _edge_date(dataset_year, '01/01/', 'dataset_year')
    if dataset_end_year is None:
        dataset_end_year = dataset_year
    dataset_end_date = _edge_date(dataset_end_year, '31/12/', 'dataset_end_year')
    self.set_dataset_date(dataset_date, dataset_end_date)
Set dataset date as a range from year or start and end year. Args: dataset_year (Union[str, int]): Dataset year given as string or int dataset_end_year (Optional[Union[str, int]]): Dataset end year given as string or int Returns: None
juraj-google-style
def dot(matrix, vector):
    """Compute the dot product between a matrix and a vector via Weld.

    TODO: Make this more generic.

    Args:
        matrix: LazyOpResult or np.ndarray operand.
        vector: LazyOpResult or np.ndarray operand.

    Returns:
        NumpyArrayWeld wrapping the lazy dot-product expression.
    """
    def _unwrap(operand):
        # Returns (expression, weld_type) for a lazy result or an ndarray;
        # anything else is passed through with no weld type (as before).
        if isinstance(operand, LazyOpResult):
            return operand.expr, operand.weld_type
        if isinstance(operand, np.ndarray):
            weld_type = numpy_weld_impl.numpy_to_weld_type_mapping[str(operand.dtype)]
            return operand, weld_type
        return operand, None

    matrix_expr, matrix_weld_type = _unwrap(matrix)
    vector_expr, vector_weld_type = _unwrap(vector)
    return NumpyArrayWeld(
        numpy_weld_impl.dot(matrix_expr, vector_expr, matrix_weld_type, vector_weld_type),
        WeldDouble())
Computes the dot product between a matrix and a vector. TODO: Make this more generic Args: matrix (TYPE): Description vector (TYPE): Description
juraj-google-style
def trailing_stop_loss(self, accountID, **kwargs):
    """Shortcut to create a Trailing Stop Loss Order in an Account.

    Args:
        accountID: The ID of the Account.
        kwargs: The arguments to create a TrailingStopLossOrderRequest.

    Returns:
        v20.response.Response containing the results from submitting the
        request.
    """
    request = TrailingStopLossOrderRequest(**kwargs)
    return self.create(accountID, order=request)
Shortcut to create a Trailing Stop Loss Order in an Account Args: accountID : The ID of the Account kwargs : The arguments to create a TrailingStopLossOrderRequest Returns: v20.response.Response containing the results from submitting the request
juraj-google-style
def CrowdsaleRegister(self, wallet, register_addresses, from_addr=None):
    """Register addresses for a crowd sale via a test contract invocation.

    Args:
        wallet (neo.Wallets.Wallet): a wallet instance.
        register_addresses (list): public addresses to register for the sale.
        from_addr: optional source address for the invocation.

    Returns:
        tuple: (InvocationTransaction, transaction fee,
        neo VM evaluation stack results).
    """
    parsed_addresses = [PromptUtils.parse_param(p, wallet) for p in register_addresses]
    invoke_args = [self.ScriptHash.ToString(), 'crowdsale_register', parsed_addresses]
    tx, fee, results, num_ops, engine_success = TestInvokeContract(wallet, invoke_args, None, True, from_addr)
    return tx, fee, results
Register for a crowd sale. Args: wallet (neo.Wallets.Wallet): a wallet instance. register_addresses (list): list of public addresses to register for the sale. Returns: tuple: InvocationTransaction: the transaction. int: the transaction fee. list: the neo VM evaluation stack results.
codesearchnet
def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Wrap one or two sequences with the tokenizer's prefix/suffix tokens.

    An NLLB sequence has the format ``prefix + X + suffix`` (encoder:
    `X [eos, src_lang_code]`; decoder: `X [eos, tgt_lang_code]`). BOS is
    never used. Pairs are not the expected use case but are handled by
    simple concatenation without a separator.

    Args:
        token_ids_0 (`List[int]`): IDs the special tokens are added around.
        token_ids_1 (`List[int]`, *optional*): second list of IDs for pairs.

    Returns:
        `List[int]`: input IDs with the appropriate special tokens.
    """
    sequence = token_ids_0 if token_ids_1 is None else token_ids_0 + token_ids_1
    return self.prefix_tokens + sequence + self.suffix_tokens
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An NLLB sequence has the following format, where `X` represents the sequence: - `input_ids` (for encoder) `X [eos, src_lang_code]` - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
github-repos
def decode(self, ids, strip_extraneous=False):
    """Transform a sequence of int ids into a human-readable string.

    EOS is not expected in `ids`.

    Args:
        ids: list of integers to be converted.
        strip_extraneous: bool, whether to strip off extraneous tokens
            (EOS and PAD) first.

    Returns:
        s: human-readable string.
    """
    if strip_extraneous:
        reserved = list(range(self._num_reserved_ids or 0))
        ids = strip_ids(ids, reserved)
    return ' '.join(self.decode_list(ids))
Transform a sequence of int ids into a human-readable string. EOS is not expected in ids. Args: ids: list of integers to be converted. strip_extraneous: bool, whether to strip off extraneous tokens (EOS and PAD). Returns: s: human-readable string.
codesearchnet
def from_dict(event_dict):
    """Create a CallbackEvent object from a dictionary.

    Args:
        event_dict: dict representing an event, with 'callbackId', 'name',
            'time' and 'data' keys.

    Returns:
        A CallbackEvent object.
    """
    return CallbackEvent(
        callback_id=event_dict['callbackId'],
        name=event_dict['name'],
        creation_time=event_dict['time'],
        data=event_dict['data'],
    )
Creates a CallbackEvent object from a dictionary. Args: event_dict: dict, a dictionary representing an event. Returns: A CallbackEvent object.
github-repos
def entry_dict_from_list(all_slab_entries):
    """Group SlabEntry objects by Miller index and clean-surface entry.

    An entry with no adsorbate is assumed to be a clean SlabEntry; an
    adsorbed SlabEntry must carry its clean slab in `clean_entry`.

    Args:
        all_slab_entries (list): List of SlabEntry objects.

    Returns:
        dict: {miller_index: {clean SlabEntry: [adsorbed SlabEntries]}}.
    """
    entry_dict = {}
    for entry in all_slab_entries:
        hkl = tuple(entry.miller_index)
        cleans = entry_dict.setdefault(hkl, {})
        clean = entry.clean_entry if entry.clean_entry else entry
        adsorbed = cleans.setdefault(clean, [])
        if entry.adsorbates:
            adsorbed.append(entry)
    return entry_dict
Converts a list of SlabEntry to an appropriate dictionary. It is assumed that if there is no adsorbate, then it is a clean SlabEntry and that adsorbed SlabEntry has the clean_entry parameter set. Args: all_slab_entries (list): List of SlabEntry objects Returns: (dict): Dictionary of SlabEntry with the Miller index as the main key to a dictionary with a clean SlabEntry as the key to a list of adsorbed SlabEntry.
codesearchnet
def _build_key_wrapping_specification(self, value):
    """Build a KeyWrappingSpecification struct from a dictionary.

    Args:
        value (dict): key/value pairs for a KeyWrappingSpecification struct,
            or None.

    Returns:
        KeyWrappingSpecification, or None when `value` is None.

    Raises:
        TypeError: if the input argument is not a dictionary.
    """
    if value is None:
        return None
    if not isinstance(value, dict):
        raise TypeError("Key wrapping specification must be a dictionary.")

    return cobjects.KeyWrappingSpecification(
        wrapping_method=value.get('wrapping_method'),
        encryption_key_information=self._build_encryption_key_information(
            value.get('encryption_key_information')),
        mac_signature_key_information=self._build_mac_signature_key_information(
            value.get('mac_signature_key_information')),
        attribute_names=value.get('attribute_names'),
        encoding_option=value.get('encoding_option'),
    )
Build a KeyWrappingSpecification struct from a dictionary. Args: value (dict): A dictionary containing the key/value pairs for a KeyWrappingSpecification struct. Returns: KeyWrappingSpecification: a KeyWrappingSpecification struct Raises: TypeError: if the input argument is invalid
juraj-google-style
def _DiscoverElementTypeFromLocalname(self, type_localname):
    """Searches all WSDL namespaces for a type by local name.

    Args:
        type_localname: The name of the type.

    Returns:
        A fully qualified SOAP type with the specified name.

    Raises:
        zeep.exceptions.LookupError: if the type cannot be found in any
            namespace, or there are no namespaces to search.
    """
    last_exception = None
    for ns_prefix in self.zeep_client.wsdl.types.prefix_map.values():
        try:
            return self.zeep_client.get_type('{%s}%s' % (ns_prefix, type_localname))
        except zeep.exceptions.LookupError as e:
            last_exception = e
    # Bug fix: with an empty prefix_map, `last_exception` stayed None and the
    # old code executed `raise None`, producing an unrelated TypeError.
    if last_exception is None:
        raise zeep.exceptions.LookupError(
            'No type named "%s" found: no namespaces available.' % type_localname)
    raise last_exception
Searches all namespaces for a type by name. Args: type_localname: The name of the type. Returns: A fully qualified SOAP type with the specified name. Raises: A zeep.exceptions.LookupError if the type cannot be found in any namespace.
codesearchnet
def decorate(self, record):
    """Build up HipChat specific values for a log record.

    Args:
        record (:obj:`logging.LogRecord`): log message object.

    Returns:
        dict: params for the POST request ('color', 'notify',
        'message_format').
    """
    if record.levelno >= logging.ERROR:
        color = 'red'
    elif record.levelno == logging.WARNING:
        color = 'yellow'
    elif record.levelno == logging.INFO:
        color = 'green'
    else:
        # DEBUG and any other level falls back to gray.
        color = 'gray'

    # Bug fix: this was previously assigned to a misspelled local ('nofiy'),
    # so 'notify' was always False even for errors.
    notify = record.levelno >= logging.ERROR

    return {
        'color': color,
        'notify': notify,
        'message_format': 'text',
    }
Build up HipChat specific values for log record Args: record (:obj:`logging.record`): log message object Returns: dict: params for POST request
juraj-google-style
# Abstract hook for one RNN step: subclasses must compute and return
# (output_for_this_timestep, state_for_next_step) from the current input
# slice and the previous state (or the initial state at timestep 0).
def call(self, inputs, states): raise NotImplementedError('Abstract method')
The function that contains the logic for one RNN step calculation. Args: inputs: the input tensor, which is a slide from the overall RNN input by the time dimension (usually the second dimension). states: the state tensor from previous step, which has the same shape as `(batch, state_size)`. In the case of timestep 0, it will be the initial state user specified, or zero filled tensor otherwise. Returns: A tuple of two tensors: 1. output tensor for the current timestep, with size `output_size`. 2. state tensor for next step, which has the shape of `state_size`.
github-repos
def factor_hatch(field_name, patterns, factors, start=0, end=None):
    """Create a ``DataSpec`` dict that applies a client-side
    ``CategoricalPatternMapper`` transformation to a ``ColumnDataSource``
    column.

    Args:
        field_name (str): a field name to configure ``DataSpec`` with.
        patterns (seq[string]): hatch patterns to map factors to.
        factors (seq): categorical factors corresponding to the patterns.
        start (int, optional): start slice index when the column data has
            factors with multiple levels. (default: 0)
        end (int, optional): end slice index when the column data has
            factors with multiple levels. (default: None)

    Returns:
        dict
    """
    mapper = CategoricalPatternMapper(patterns=patterns, factors=factors, start=start, end=end)
    return field(field_name, mapper)
Create a ``DataSpec`` dict that applies a client-side ``CategoricalPatternMapper`` transformation to a ``ColumnDataSource`` column. Args: field_name (str) : a field name to configure ``DataSpec`` with patterns (seq[string]) : a list of hatch patterns to use to map to factors (seq) : a sequences of categorical factors corresponding to the palette start (int, optional) : a start slice index to apply when the column data has factors with multiple levels. (default: 0) end (int, optional) : an end slice index to apply when the column data has factors with multiple levels. (default: None) Returns: dict Added in version 1.1.1
codesearchnet
# Validates that the declared transforms are compatible with the schema:
# numeric columns may only take NUMERIC_TRANSFORMS, string columns only
# categorical/text/image transforms; each column's transforms must all come
# from exactly one family; and exactly one TARGET_TRANSFORM must exist
# across the whole schema. Raises ValueError on any violation.
# NOTE(review): `message = %` below is truncated — the multi-line format
# string (an error message template) was lost from this record. Recover it
# from the upstream source before reusing; this is not valid Python as-is.
def check_schema_transforms_match(schema, inverted_features): num_target_transforms = 0 for col_schema in schema: col_name = col_schema['name'] col_type = col_schema['type'].lower() if col_name in inverted_features: for transform in inverted_features[col_name]: transform_name = transform['transform'] if transform_name == constant.TARGET_TRANSFORM: num_target_transforms += 1 continue elif col_type in constant.NUMERIC_SCHEMA: if transform_name not in constant.NUMERIC_TRANSFORMS: raise ValueError( 'Transform %s not supported by schema %s' % (transform_name, col_type)) elif col_type == constant.STRING_SCHEMA: if (transform_name not in constant.CATEGORICAL_TRANSFORMS + constant.TEXT_TRANSFORMS and transform_name != constant.IMAGE_TRANSFORM): raise ValueError( 'Transform %s not supported by schema %s' % (transform_name, col_type)) else: raise ValueError('Unsupported schema type %s' % col_type) if col_name in inverted_features: transform_set = {x['transform'] for x in inverted_features[col_name]} if 1 != sum([transform_set.issubset(set(constant.NUMERIC_TRANSFORMS)), transform_set.issubset(set(constant.CATEGORICAL_TRANSFORMS)), transform_set.issubset(set(constant.TEXT_TRANSFORMS)), transform_set.issubset(set([constant.IMAGE_TRANSFORM])), transform_set.issubset(set([constant.TARGET_TRANSFORM]))]): message = % (str(constant.TEXT_TRANSFORMS), str(constant.CATEGORICAL_TRANSFORMS), str(constant.NUMERIC_TRANSFORMS), constant.IMAGE_TRANSFORM, constant.TARGET_TRANSFORM, col_name, str(transform_set)) raise ValueError(message) if num_target_transforms != 1: raise ValueError('Must have exactly one target transform')
Checks that the transform and schema do not conflict. Args: schema: schema list inverted_features: inverted_features dict Raises: ValueError if transform cannot be applied given schema type.
juraj-google-style
def get_appliance_by_name(self, appliance_name):
    """Get the particular Image Streamer resource based on its name.

    Args:
        appliance_name: The Image Streamer resource name.

    Returns:
        dict: the matching Image Streamer resource, or None when not found.
    """
    for appliance in self.get_appliances() or []:
        if appliance['name'] == appliance_name:
            return appliance
    return None
Gets the particular Image Streamer resource based on its name. Args: appliance_name: The Image Streamer resource name. Returns: dict: Image Streamer resource.
codesearchnet
def register_frame_to_skip(method: Union[Callable[..., Any], List[Callable[..., Any]]]) -> bool:
    """Skip the source of the given method(s) when logging.

    Args:
        method: The method to skip. Can be a single method or a list.

    Returns:
        True if the method is registered to skip, False when the active
        logger does not support frame skipping.

    Raises:
        TypeError: The source file of the method cannot be inspected.
    """
    register_fn = getattr(_DEFAULT_LOGGER.__class__, 'register_frame_to_skip', None)
    if register_fn is None:
        return False
    targets = method if isinstance(method, list) else [method]
    for target in targets:
        register_fn(inspect.getsourcefile(target), target.__name__)
    return True
Skips the source of the given method when logging. Args: method: The method to skip. Can be a single method or a list of methods. Returns: True if the method is registered to skip. Raises: TypeError: The source file of the method cannot be inspected.
github-repos
def CmdRegister(self, challenge_param, app_param):
    """Ask the security key to register with a particular origin & client.

    Args:
        challenge_param: Arbitrary 32 byte challenge string.
        app_param: Arbitrary 32 byte application parameter.

    Returns:
        A binary structure containing the key handle, attestation, and a
        signature over that by the attestation key, in the format dictated
        by the FIDO U2F specs.

    Raises:
        errors.InvalidRequestError: a parameter is not exactly 32 bytes.
        TUPRequiredError: A Test of User Presence is required to proceed.
        ApduError: Something went wrong on the device.
    """
    self.logger.debug('CmdRegister')
    if len(challenge_param) != 32 or len(app_param) != 32:
        raise errors.InvalidRequestError()

    body = bytearray(challenge_param + app_param)
    response = self.InternalSendApdu(
        apdu.CommandApdu(0, apdu.CMD_REGISTER, 3, 0, body))
    response.CheckSuccessOrRaise()
    return response.body
Register security key. Ask the security key to register with a particular origin & client. Args: challenge_param: Arbitrary 32 byte challenge string. app_param: Arbitrary 32 byte application parameter. Returns: A binary structure containing the key handle, attestation, and a signature over that by the attestation key. The precise format is dictated by the FIDO U2F specs. Raises: TUPRequiredError: A Test of User Presence is required to proceed. ApduError: Something went wrong on the device.
codesearchnet
# Constructs a GlobalContextField referencing a field of the given type at
# the given location; delegates to the base class, then re-stores both
# attributes locally and validates the result.
def __init__(self, location, field_type): super(GlobalContextField, self).__init__(location, field_type) self.location = location self.field_type = field_type self.validate()
Construct a new GlobalContextField object that references a field at a given location. Args: location: Location, specifying where the field was declared. field_type: GraphQL type of the field at that location. Returns: new GlobalContextField object
juraj-google-style
def _Check3DImage(image, require_static=True):
    """Assert that we are working with a properly shaped 3-D image tensor.

    Args:
        image: 3-D Tensor of shape [height, width, channels].
        require_static: If `True`, requires that all dimensions of `image`
            are known and non-zero.

    Raises:
        ValueError: if `image.shape` is not a 3-vector, or violates the
            static-shape requirements above.

    Returns:
        An empty list if `image` has fully defined dimensions. Otherwise, a
        list containing an assert op is returned.
    """
    try:
        image_shape = image.get_shape().with_rank(3)
    except ValueError:
        raise ValueError("'image' (shape %s) must be three-dimensional." % image.shape)
    if require_static and (not image_shape.is_fully_defined()):
        raise ValueError("'image' (shape %s) must be fully defined." % image_shape)
    if any((dim == 0 for dim in image_shape)):
        raise ValueError("all dims of 'image.shape' must be > 0: %s" % image_shape)
    if image_shape.is_fully_defined():
        return []
    # Shape is only known at runtime: emit a runtime positivity assert.
    return [check_ops.assert_positive(array_ops.shape(image), ["all dims of 'image.shape' must be > 0."])]
Assert that we are working with a properly shaped image. Args: image: 3-D Tensor of shape [height, width, channels] require_static: If `True`, requires that all dimensions of `image` are known and non-zero. Raises: ValueError: if `image.shape` is not a 3-vector. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned.
github-repos
def on_moved(self, event):
    """Called when a file or a directory is moved or renamed.

    Many editors don't change a file directly; they write a transitional
    file like ``*.part`` and then move it to the final filename, so moves
    must trigger compilation too.

    Args:
        event: Watchdog event, either ``watchdog.events.DirMovedEvent`` or
            ``watchdog.events.FileModifiedEvent``.
    """
    if self._event_error:
        return
    pathtools_options = {
        'included_patterns': self.patterns,
        'excluded_patterns': self.ignore_patterns,
        'case_sensitive': self.case_sensitive,
    }
    if match_path(event.dest_path, **pathtools_options):
        self.logger.info(u'Change detected from a move on: %s', event.dest_path)
        self.compile_dependencies(event.dest_path)
Called when a file or a directory is moved or renamed. Many editors don't directly change a file, instead they make a transitional file like ``*.part`` then move it to the final filename. Args: event: Watchdog event, either ``watchdog.events.DirMovedEvent`` or ``watchdog.events.FileModifiedEvent``.
codesearchnet
def add_tensor_filter(self, filter_name, tensor_filter):
    """Add a tensor filter.

    Args:
        filter_name: (`str`) name of the filter.
        tensor_filter: (`callable`) the filter callable. See the doc string
            of `DebugDumpDir.find()` for more details about its signature.

    Raises:
        TypeError: if `filter_name` is not a str, or `tensor_filter` is not
            callable.
    """
    # Validate eagerly so that a bad filter fails at registration time
    # rather than later, when the filter is looked up and invoked.
    if not isinstance(filter_name, str):
        raise TypeError('filter_name expected to be a str, but got %s' % type(filter_name))
    if not callable(tensor_filter):
        raise TypeError('tensor_filter expected to be callable, but got %s' % type(tensor_filter))
    self._tensor_filters[filter_name] = tensor_filter
Add a tensor filter. Args: filter_name: (`str`) name of the filter. tensor_filter: (`callable`) the filter callable. See the doc string of `DebugDumpDir.find()` for more details about its signature.
github-repos
def generate_key(action, path_or_id, settings=None, default=" (default)"):
    """Generate the key used for caching a processed video.

    Args:
        action (str): how the video is being processed
            (e.g. COMPRESSED or DOWNLOADED).
        path_or_id (str): path to video or youtube_id.
        settings (dict): user-supplied compression/download settings.
        default (str): suffix used when settings are falsy
            (avoids overwriting keys).

    Returns:
        str: the cache key.
    """
    if settings:
        # Sort items so the same settings always yield the same key.
        suffix = " " + str(sorted(settings.items()))
    else:
        suffix = default
    return f"{action.upper()}: {path_or_id}{suffix}"
generate_key: generate key used for caching Args: action (str): how video is being processed (e.g. COMPRESSED or DOWNLOADED) path_or_id (str): path to video or youtube_id settings (dict): settings for compression or downloading passed in by user default (str): if settings are None, default to this suffix (avoid overwriting keys) Returns: cache key (str)
juraj-google-style
def predict_proba(self, a, b, nb_runs=6, nb_jobs=None, gpu=None, idx=0, verbose=None, ttest_threshold=0.01, nb_max_runs=16, train_epochs=1000, test_epochs=1000):
    """Run GNN multiple times to estimate the causal direction of a pair.

    Args:
        a (np.ndarray): Variable 1.
        b (np.ndarray): Variable 2.
        nb_runs (int): number of runs per batch, before testing for
            significance with the t-test.
        nb_jobs (int): number of runs to execute in parallel.
            (Initialized with ``cdt.SETTINGS.NB_JOBS``)
        gpu (bool): use gpu (Initialized with ``cdt.SETTINGS.GPU``)
        idx (int): (optional) index of the pair, for printing purposes.
        verbose (bool): verbosity (Initialized with ``cdt.SETTINGS.verbose``)
        ttest_threshold (float): threshold to stop the bootstraps before
            ``nb_max_runs`` if the difference is significant.
        nb_max_runs (int): Max number of bootstraps.
        train_epochs (int): Number of training epochs per run.
        test_epochs (int): Number of test epochs per run.

    Returns:
        float: Causal score of the pair (1 if a->b and -1 if b->a).
    """
    # BUG FIX: the resolved defaults used to be bound to a misnamed
    # `Nb_jobs`, so the raw `nb_jobs` argument (possibly None) leaked into
    # `Parallel(n_jobs=...)` and the dispatch test below. Bind to `nb_jobs`.
    (nb_jobs, verbose, gpu) = SETTINGS.get_default(('nb_jobs', nb_jobs), ('verbose', verbose), ('gpu', gpu))
    x = np.stack([a.ravel(), b.ravel()], 1)
    ttest_criterion = TTestCriterion(max_iter=nb_max_runs, runs_per_iter=nb_runs, threshold=ttest_threshold)
    AB = []
    BA = []
    # Keep bootstrapping until the t-test criterion says the difference is
    # significant (or nb_max_runs is reached).
    while ttest_criterion.loop(AB, BA):
        if nb_jobs != 1:
            result_pair = Parallel(n_jobs=nb_jobs)(
                delayed(GNN_instance)(
                    x, idx=idx,
                    device=('cuda:{}'.format(run % gpu) if gpu else 'cpu'),
                    verbose=verbose, train_epochs=train_epochs,
                    test_epochs=test_epochs)
                for run in range(ttest_criterion.iter, ttest_criterion.iter + nb_runs))
        else:
            result_pair = [GNN_instance(x, idx=idx,
                                        device=('cuda:0' if gpu else 'cpu'),
                                        verbose=verbose,
                                        train_epochs=train_epochs,
                                        test_epochs=test_epochs)
                           for run in range(ttest_criterion.iter, ttest_criterion.iter + nb_runs)]
        AB.extend([runpair[0] for runpair in result_pair])
        BA.extend([runpair[1] for runpair in result_pair])
        if verbose:
            print('P-value after {} runs : {}'.format(ttest_criterion.iter, ttest_criterion.p_value))
    score_AB = np.mean(AB)
    score_BA = np.mean(BA)
    # Normalized difference of the two directional scores.
    return (score_BA - score_AB) / (score_BA + score_AB)
Run multiple times GNN to estimate the causal direction. Args: a (np.ndarray): Variable 1 b (np.ndarray): Variable 2 nb_runs (int): number of runs to execute per batch (before testing for significance with t-test). nb_jobs (int): number of runs to execute in parallel. (Initialized with ``cdt.SETTINGS.NB_JOBS``) gpu (bool): use gpu (Initialized with ``cdt.SETTINGS.GPU``) idx (int): (optional) index of the pair, for printing purposes verbose (bool): verbosity (Initialized with ``cdt.SETTINGS.verbose``) ttest_threshold (float): threshold to stop the boostraps before ``nb_max_runs`` if the difference is significant nb_max_runs (int): Max number of bootstraps train_epochs (int): Number of epochs during which the model is going to be trained test_epochs (int): Number of epochs during which the model is going to be tested Returns: float: Causal score of the pair (Value : 1 if a->b and -1 if b->a)
codesearchnet
def compute_sub_structure(self, sub_structure, tol=0.001):
    """Gives total Ewald energy for a substructure in the same lattice.

    The sub_structure must be a subset of the original structure, with
    possibly different charges.

    Args:
        sub_structure (Structure): Substructure to compute Ewald sum for.
        tol (float): Tolerance for site matching in fractional coordinates.

    Returns:
        Ewald sum of substructure.

    Raises:
        ValueError: if any site of sub_structure cannot be matched to the
            original structure.
    """
    total_energy_matrix = self.total_energy_matrix.copy()

    def find_match(site):
        # Compare fractional coordinates modulo 1 so periodic images match.
        for test_site in sub_structure:
            frac_diff = abs(np.array(site.frac_coords) - np.array(test_site.frac_coords)) % 1
            frac_diff = [(abs(a) < tol) or (abs(a) > (1 - tol)) for a in frac_diff]
            if all(frac_diff):
                return test_site
        return None

    matches = []
    for i, site in enumerate(self._s):
        matching_site = find_match(site)
        if matching_site:
            new_charge = compute_average_oxidation_state(matching_site)
            old_charge = self._oxi_states[i]
            scaling_factor = new_charge / old_charge
            matches.append(matching_site)
        else:
            # Unmatched sites contribute nothing to the substructure energy.
            scaling_factor = 0
        # SYNTAX FIX: `m[(i, :)]` (bare colon inside a parenthesized tuple)
        # is a SyntaxError; use the standard `m[i, :]` / `m[:, i]` forms.
        total_energy_matrix[i, :] *= scaling_factor
        total_energy_matrix[:, i] *= scaling_factor
    if len(matches) != len(sub_structure):
        output = ['Missing sites.']
        for site in sub_structure:
            if site not in matches:
                output.append('unmatched = {}'.format(site))
        raise ValueError('\n'.join(output))
    return sum(sum(total_energy_matrix))
Gives total ewald energy for an sub structure in the same lattice. The sub_structure must be a subset of the original structure, with possible different charges. Args: substructure (Structure): Substructure to compute Ewald sum for. tol (float): Tolerance for site matching in fractional coordinates. Returns: Ewald sum of substructure.
codesearchnet
def from_conv_part_data(conv_part_data, self_user_id):
    """Construct user from ``ConversationParticipantData`` message.

    Args:
        conv_part_data: ``ConversationParticipantData`` message.
        self_user_id (~hangups.user.UserID or None): The ID of the current
            user. If ``None``, assume ``conv_part_data`` is the current user.

    Returns:
        :class:`~hangups.user.User` object.
    """
    user_id = UserID(chat_id=conv_part_data.id.chat_id,
                     gaia_id=conv_part_data.id.gaia_id)
    # A None self_user_id means this participant IS the current user.
    is_self = self_user_id == user_id or self_user_id is None
    return User(user_id, conv_part_data.fallback_name, None, None, [], is_self)
Construct user from ``ConversationParticipantData`` message. Args: conv_part_data: ``ConversationParticipantData`` message. self_user_id (~hangups.user.UserID or None): The ID of the current user. If ``None``, assume ``conv_part_data`` is the current user. Returns: :class:`~hangups.user.User` object.
juraj-google-style
def record_value(self, value, count=1):
    """Record a new value into the histogram.

    Args:
        value: the value to record (must be in the valid range).
        count: incremental count (defaults to 1).

    Returns:
        True when the value was recorded, False when it was out of range.
    """
    if value < 0:
        return False
    idx = self._counts_index_for(value)
    if not 0 <= idx < self.counts_len:
        return False
    self.counts[idx] += count
    self.total_count += count
    # Track observed extremes.
    if value < self.min_value:
        self.min_value = value
    if value > self.max_value:
        self.max_value = value
    return True
Record a new value into the histogram Args: value: the value to record (must be in the valid range) count: incremental count (defaults to 1)
juraj-google-style
def serialize_dtype(o):
    """Serialize a :obj:`numpy.dtype`.

    Args:
        o (:obj:`numpy.dtype`): dtype to be serialized.

    Returns:
        dict: a dictionary that can be passed to :obj:`json.dumps`.
    """
    # Scalar dtypes (len == 0) serialize as their name; structured dtypes
    # keep their full field description.
    descr = str(o) if len(o) == 0 else o.descr
    return {'_type': 'np.dtype', 'descr': descr}
Serializes a :obj:`numpy.dtype`. Args: o (:obj:`numpy.dtype`): :obj:`dtype` to be serialized. Returns: A dictionary that can be passed to :obj:`json.dumps`.
juraj-google-style
def post(self, path, body, headers=None):
    """Perform a POST request with a JSON-encoded body.

    Args:
        path (str): A path that gets appended to ``base_url``.
        body (dict): Dictionary that will be JSON-encoded and sent as
            the body.
        headers (dict): Optional extra headers merged via ``_headers``.

    Returns:
        A requests ``Response`` object.
    """
    payload = json.dumps(body)
    response = requests.post(
        self._url_for(path),
        data=payload,
        headers=self._headers(headers),
    )
    # Raise on API-level errors before handing the response back.
    self._handle_errors(response)
    return response
Perform a POST request, providing a body, which will be JSON-encoded. Args: path (str): A path that gets appended to ``base_url``. body (dict): Dictionary that will be JSON-encoded and sent as the body. Example: api_client.post('/users', body={'name': 'Billy Jean'}) Returns: A requests ``Response`` object.
codesearchnet
def process_node(layer, node_data):
    """Reconstruct a node by linking `layer` to its inbound layers.

    Args:
        layer: Layer to process.
        node_data: List of layer configs describing the node's inputs.
    """
    # `created_layers` is captured from the enclosing deserialization scope.
    args, kwargs = deserialize_node(node_data, created_layers)
    # Calling the layer wires it into the graph with the resolved inputs.
    layer(*args, **kwargs)
Reconstruct node by linking to inbound layers Args: layer: Layer to process node_data: List of layer configs
github-repos
def with_rank_at_most(self, rank):
    """Returns a shape based on `self` with at most the given rank.

    Args:
        rank: An integer.

    Returns:
        A shape that is at least as specific as `self` with at most the
        given rank.

    Raises:
        ValueError: If `self` does not represent a shape with at most the
            given `rank`.
    """
    # An unknown rank (None) is always compatible.
    if self.rank is None or self.rank <= rank:
        return self
    raise ValueError('Shape %s must have rank at most %d' % (self, rank))
Returns a shape based on `self` with at most the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with at most the given rank. Raises: ValueError: If `self` does not represent a shape with at most the given `rank`.
github-repos
def calculate_oobatake_dG(seq, temp):
    """Get free energy of unfolding (dG) using the Oobatake method.

    Args:
        seq (str, Seq, SeqRecord): Amino acid sequence.
        temp (float): Temperature in degrees C.

    Returns:
        float: Free energy of unfolding dG.
    """
    kelvin = temp + 273.15
    enthalpy = calculate_oobatake_dH(seq, temp)
    entropy = calculate_oobatake_dS(seq, temp)
    # 563.552 is a constant offset applied by this implementation
    # (presumably from the Oobatake parameterization — confirm vs. source).
    return (enthalpy - kelvin * entropy) - 563.552
Get free energy of unfolding (dG) using Oobatake method in units cal/mol. Args: seq (str, Seq, SeqRecord): Amino acid sequence temp (float): Temperature in degrees C Returns: float: Free energy of unfolding dG (J/mol)
juraj-google-style
def dump_stats(filename):
    """Write collected information to a file as indented JSON.

    Args:
        filename: absolute filename to write to.
    """
    res = _dump_impl()
    # `with` guarantees the handle is closed even if json.dump raises,
    # unlike the previous explicit open()/close() pair.
    with open(filename, 'w') as f:
        json.dump(res, f, indent=4)
Write collected information to file. Args: filename: absolute filename
juraj-google-style
def __init__(self, zoom):
    """Initialize the exception for an invalid zoom level.

    Args:
        zoom (int): the invalid zoom level.

    Attributes:
        zoom (int): the invalid zoom level.
    """
    self.zoom = zoom
    message = 'Zoom angle should be in [0,360] (received {})'.format(zoom)
    super().__init__(message)
Initialization of instances: Args: zoom (int): the invalid zoom level. Attributes: zoom (int): the invalid zoom level.
juraj-google-style
def featurize_row(self, x, y):
    """Project a causal pair to the RKHS via the sampled kernel approximation.

    Args:
        x (np.ndarray): Variable 1.
        y (np.ndarray): Variable 2.

    Returns:
        np.ndarray: the projected empirical distributions as a single
            fixed-size vector.
    """
    x = x.ravel()
    y = y.ravel()
    ones = np.ones(x.shape)
    # Marginal features of each variable (with a bias row appended).
    dx = np.cos(self.W2.dot(np.vstack((x, ones)))).mean(1)
    dy = np.cos(self.W2.dot(np.vstack((y, ones)))).mean(1)
    # Order the joint features canonically so the pair (x, y) and (y, x)
    # are stacked consistently.
    if sum(dx) > sum(dy):
        first, second = x, y
    else:
        first, second = y, x
    joint = np.cos(self.W.dot(np.vstack((first, second, ones)))).mean(1)
    return np.hstack((dx, dy, joint))
Projects the causal pair to the RKHS using the sampled kernel approximation. Args: x (np.ndarray): Variable 1 y (np.ndarray): Variable 2 Returns: np.ndarray: projected empirical distributions into a single fixed-size vector.
codesearchnet
def write_genotypes(self, genotypes):
    """Write genotypes to the binary BED file.

    Args:
        genotypes (numpy.ndarray): The genotypes to write in the BED file.

    Raises:
        UnsupportedOperation: if the file was opened in read ('r') mode.
        ValueError: if the number of genotypes differs from previous calls.
    """
    if self._mode != "w":
        raise UnsupportedOperation("not available in 'r' mode")

    # The first write fixes the expected number of samples; every later
    # call must provide exactly the same number of values.
    if self._nb_values is None:
        self._nb_values = len(genotypes)
    if self._nb_values != len(genotypes):
        raise ValueError("{:,d} samples expected, got {:,d}".format(
            self._nb_values, len(genotypes),
        ))

    # Pack four 2-bit recoded genotypes per byte, first genotype in the
    # low-order bits (presumably the PLINK .bed layout — confirm).
    byte_array = [
        g[0] | (g[1] << 2) | (g[2] << 4) | (g[3] << 6)
        for g in self._grouper((_byte_recode[geno] for geno in genotypes), 4)
    ]
    self._bed.write(bytearray(byte_array))
Write genotypes to binary file. Args: genotypes (numpy.ndarray): The genotypes to write in the BED file.
juraj-google-style
def brake_on(self):
    """Engage the brakes of the Herkulex servo.

    In braked mode, position control and velocity control will not work;
    torque must be enabled before using them.

    Args:
        none
    """
    # Packet: length, servo id, command, RAM address, data length, value.
    packet = [
        10,
        self.servoid,
        RAM_WRITE_REQ,
        TORQUE_CONTROL_RAM,
        1,
        64,
    ]
    send_data(packet)
Set the brakes of the Herkulex servo. In braked mode, position control and velocity control will not work; enable torque before using them. Args: none
codesearchnet
def from_json(cls, data):
    """Create a Data Collection from a dictionary.

    Args:
        data: a dict of the form
            {
                "header": A Ladybug Header,
                "values": An array of values,
            }
    """
    for key in ('header', 'values'):
        assert key in data, 'Required keyword "{}" is missing!'.format(key)
    return cls(Header.from_json(data['header']), data['values'])
Create a Data Collection from a dictionary. Args: { "header": A Ladybug Header, "values": An array of values, }
juraj-google-style