code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def generate_tuple_zip(self, token_list, n=2):
    '''
    Generate the N-grams of a token list.

    Args:
        token_list:    The list of tokens.
        n:             The N in N-gram.

    Returns:
        zip of Tuple(N-gram)
    '''
    # n shifted views of the list, zipped together, yield consecutive n-grams.
    shifted_views = (token_list[offset:] for offset in range(n))
    return zip(*shifted_views)
Generate the N-gram. Args: token_list: The list of tokens. n: The N in N-gram. Returns: zip of Tuple(N-gram)
juraj-google-style
def add_to_graph(self, g=None, overwrite=False):
    """Registers the function, adds it to the graph g or default graph.

    Args:
      g: If specified, registers the function with this graph. Defaults to the
        current context (either the default graph or the eager context).
      overwrite: A bool. If True, its forward function will overwrite any
        existing function of the same signature name in the graph `g`.
        NOTE(review): `overwrite` is not referenced in this body -- confirm it
        is consumed elsewhere or intentionally ignored.
    """
    # Only fall back to the default graph when not executing eagerly and no
    # explicit graph was supplied.
    if not context.executing_eagerly() and (not g):
        g = ops.get_default_graph()
    if g is not None:
        # Private graph API: registers the forward function and any functions
        # it transitively references.
        g._add_function_recursive(self._delayed_rewrite_functions.forward())
Registers the function, adds it to the graph g or default graph. Args: g: If specified, registers the function with this graph. Defaults to the current context (either the default graph or the eager context). overwrite: A bool. If True, its forward function will overwrite any existing function of the same signature name in the graph `g`.
github-repos
def concat_video(video_list, out_file, vcodec=None, acodec=None, log_level='info', print_cmd=False, **kwargs):
    """Concatenate multiple videos into a single one.

    Args:
        video_list (list): A list of video filenames.
        out_file (str): Output video filename.
        vcodec (None or str): Output video codec, None for unchanged.
        acodec (None or str): Output audio codec, None for unchanged.
        log_level (str): Logging level of ffmpeg.
        print_cmd (bool): Whether to print the final ffmpeg command.
    """
    # ffmpeg's concat demuxer reads the inputs from a text file containing
    # one "file <path>" line per video.
    _, list_filename = tempfile.mkstemp(suffix='.txt', text=True)
    with open(list_filename, 'w') as list_file:
        list_file.writelines('file {}\n'.format(osp.abspath(name)) for name in video_list)
    options = {'log_level': log_level}
    if vcodec is None:
        # Stream-copy instead of re-encoding when no codec is requested.
        options['vcodec'] = 'copy'
    if acodec is None:
        options['acodec'] = 'copy'
    convert_video(
        list_filename,
        out_file,
        print_cmd,
        pre_options='-f concat -safe 0',
        **options)
    os.remove(list_filename)
Concatenate multiple videos into a single one. Args: video_list (list): A list of video filenames out_file (str): Output video filename vcodec (None or str): Output video codec, None for unchanged acodec (None or str): Output audio codec, None for unchanged log_level (str): Logging level of ffmpeg. print_cmd (bool): Whether to print the final ffmpeg command.
juraj-google-style
def _map_column_names_to_types(self, row_type):
    """Return a dictionary of column names and types.

    Args:
        row_type: A type of the element. This could be a NamedTuple or a Row.

    Returns:
        A dictionary of column names and types; an empty dict when the type
        cannot be introspected.
    """
    try:
        if not isinstance(row_type, RowTypeConstraint):
            row_type = RowTypeConstraint.from_user_type(row_type)
        inferred_types = {name: typ for name, typ in row_type._fields}
        # Promote primitive types to their typing-container equivalents.
        for k, t in inferred_types.items():
            if t in _primitive_types_to_typing_container_type:
                inferred_types[k] = _primitive_types_to_typing_container_type[t]
        # Unwrap numpy dtypes to the underlying scalar type.
        for name, typ in inferred_types.items():
            if isinstance(typ, np.dtype):
                inferred_types[name] = typ.type
        return inferred_types
    except Exception:
        # Bug fix: the previous bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; restrict the best-effort fallback to Exception.
        return {}
Return a dictionary of column names and types. Args: element_type: A type of the element. This could be a NamedTuple or a Row. Returns: A dictionary of column names and types.
github-repos
def update_configuration(self, timeout=-1):
    """Reapplies the appliance's configuration on the enclosure.

    This includes running the same configure steps that were performed as
    part of the enclosure add.

    Args:
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView; it just
            stops waiting for its completion.

    Returns:
        Enclosure
    """
    configuration_uri = self.data['uri'] + '/configuration'
    return self.update_with_zero_body(uri=configuration_uri, timeout=timeout)
Reapplies the appliance's configuration on the enclosure. This includes running the same configure steps that were performed as part of the enclosure add. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: Enclosure
juraj-google-style
def update_configuration(self, timeout=(- 1)):
    """Reapplies the appliance's configuration on the enclosure.

    This includes running the same configure steps that were performed as
    part of the enclosure add.

    Args:
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView; it just
            stops waiting for its completion.

    Returns:
        Enclosure
    """
    uri = '%s/configuration' % self.data['uri']
    return self.update_with_zero_body(uri=uri, timeout=timeout)
Reapplies the appliance's configuration on the enclosure. This includes running the same configure steps that were performed as part of the enclosure add. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: Enclosure
codesearchnet
def remove_keywords_from_list(self, keyword_list):
    """Remove every keyword present in the given list.

    Args:
        keyword_list (list(str)): List of keywords to remove

    Examples:
        >>> keyword_processor.remove_keywords_from_list(["java", "python"])

    Raises:
        AttributeError: If `keyword_list` is not a list.
    """
    if not isinstance(keyword_list, list):
        # Kept as AttributeError for backward compatibility with callers.
        raise AttributeError('keyword_list should be a list')
    for word in keyword_list:
        self.remove_keyword(word)
To remove keywords present in list Args: keyword_list (list(str)): List of keywords to remove Examples: >>> keyword_processor.remove_keywords_from_list(["java", "python"]) Raises: AttributeError: If `keyword_list` is not a list.
codesearchnet
def generate(self, text):
    """Request the generated TTS audio for ``text`` and store the stream.

    Args:
        text: The text that you want to generate.

    Raises:
        Exception: If ``text`` is empty or its length is not below
            ``self.MAX_CHARS``.
    """
    if not text:
        raise Exception('No text to speak')
    if len(text) >= self.MAX_CHARS:
        # Bug fix: the old message hard-coded 2000 and silently went stale
        # if MAX_CHARS was configured to a different limit.
        raise Exception('Number of characters must be less than {}'.format(self.MAX_CHARS))
    params = self.__params.copy()
    params['text'] = text
    # NOTE(review): the raw iter_content() generator is stored un-consumed;
    # confirm downstream code reads self._data lazily.
    self._data = requests.get(self.TTS_URL, params=params, stream=False).iter_content()
Try to get the generated file. Args: text: The text that you want to generate.
codesearchnet
def check(self, orb):
    """Check whether or not the listener is triggered.

    Args:
        orb (Orbit):

    Return:
        bool: True if there is a zero-crossing for the parameter watched
            by the listener
    """
    # No previous value yet: nothing to compare against.
    if self.prev is None:
        return False
    # A sign change between the previous and current values marks a
    # zero-crossing of the watched parameter.
    return np.sign(self(orb)) != np.sign(self(self.prev))
Method that checks whether or not the listener is triggered Args: orb (Orbit): Return: bool: True if there is a zero-crossing for the parameter watched by the listener
codesearchnet
def _keys(self, pattern):
    """Execute the KEYS command on all Redis shards.

    Args:
        pattern: The KEYS pattern to query.

    Returns:
        The concatenated list of results from all shards.
    """
    matches = []
    for shard in self.redis_clients:
        # scan_iter iterates matches incrementally rather than issuing a
        # single blocking KEYS call.
        matches.extend(shard.scan_iter(match=pattern))
    return matches
Execute the KEYS command on all Redis shards. Args: pattern: The KEYS pattern to query. Returns: The concatenated list of results from all shards.
codesearchnet
def build_pipeline(cls, project, zones, min_cores, min_ram, disk_size, boot_disk_size, preemptible, accelerator_type, accelerator_count, image, script_name, envs, inputs, outputs, pipeline_name):
    """Builds a pipeline configuration for execution.

    Args:
      project: string name of project.
      zones: list of zone names for jobs to be run at.
      min_cores: int number of CPU cores required per job.
      min_ram: int GB of RAM required per job.
      disk_size: int GB of disk to attach under the data mount point.
      boot_disk_size: int GB of disk for boot.
      preemptible: use a preemptible VM for the job.
      accelerator_type: string GCE defined accelerator type.
      accelerator_count: int number of accelerators of the specified type.
      image: string Docker image name in which to run.
      script_name: file name of the script to run.
      envs: list of EnvParam objects specifying environment variables.
      inputs: list of FileParam objects specifying input variables.
      outputs: list of FileParam objects specifying output variables.
      pipeline_name: string name of pipeline.

    Returns:
      A nested dictionary with one entry under the key ephemeralPipeline
      containing the pipeline configuration.
    """
    # Fill in defaults for any resource settings the caller left unset.
    if (min_cores is None):
        min_cores = job_model.DEFAULT_MIN_CORES
    if (min_ram is None):
        min_ram = job_model.DEFAULT_MIN_RAM
    if (disk_size is None):
        disk_size = job_model.DEFAULT_DISK_SIZE
    if (boot_disk_size is None):
        boot_disk_size = job_model.DEFAULT_BOOT_DISK_SIZE
    if (preemptible is None):
        preemptible = job_model.DEFAULT_PREEMPTIBLE
    # Command executed inside the container; ties together script, inputs,
    # outputs and environment.
    docker_command = cls._build_pipeline_docker_command(script_name, inputs, outputs, envs)
    # The script itself arrives via an environment variable, alongside any
    # caller-supplied env vars that carry a value.
    input_envs = ([{'name': SCRIPT_VARNAME}] + [{'name': env.name} for env in envs if env.value])
    # Only non-recursive parameters with values become pipeline file params;
    # recursive copies are presumably handled by the docker command itself --
    # TODO(review): confirm.
    input_files = [cls._build_pipeline_input_file_param(var.name, var.docker_path) for var in inputs if ((not var.recursive) and var.value)]
    output_files = [cls._build_pipeline_file_param(var.name, var.docker_path) for var in outputs if ((not var.recursive) and var.value)]
    return {'ephemeralPipeline': {'projectId': project, 'name': pipeline_name, 'resources': {'minimumCpuCores': min_cores, 'minimumRamGb': min_ram, 'bootDiskSizeGb': boot_disk_size, 'preemptible': preemptible, 'zones': google_base.get_zones(zones), 'acceleratorType': accelerator_type, 'acceleratorCount': accelerator_count, 'disks': [{'name': 'datadisk', 'autoDelete': True, 'sizeGb': disk_size, 'mountPoint': providers_util.DATA_MOUNT_POINT}]}, 'inputParameters': (input_envs + input_files), 'outputParameters': output_files, 'docker': {'imageName': image, 'cmd': docker_command}}}
Builds a pipeline configuration for execution. Args: project: string name of project. zones: list of zone names for jobs to be run at. min_cores: int number of CPU cores required per job. min_ram: int GB of RAM required per job. disk_size: int GB of disk to attach under /mnt/data. boot_disk_size: int GB of disk for boot. preemptible: use a preemptible VM for the job accelerator_type: string GCE defined accelerator type. accelerator_count: int number of accelerators of the specified type to attach. image: string Docker image name in which to run. script_name: file name of the script to run. envs: list of EnvParam objects specifying environment variables to set within each job. inputs: list of FileParam objects specifying input variables to set within each job. outputs: list of FileParam objects specifying output variables to set within each job. pipeline_name: string name of pipeline. Returns: A nested dictionary with one entry under the key ephemeralPipeline containing the pipeline configuration.
codesearchnet
def _update_repo(repo_config, store, tags_only):
    """Updates a repository to the tip of `master`.

    If the repository cannot be updated because a hook that is configured
    does not exist in `master`, this raises a RepositoryCannotBeUpdatedError

    Args:
        repo_config - A config for a repository
        store - Store used to clone the repository locally.
        tags_only - If true, use the latest tag reachable from origin/master;
            otherwise require a tag pointing exactly at origin/master.
    """
    repo_path = store.clone(repo_config['repo'], repo_config['rev'])
    cmd_output('git', 'fetch', cwd=repo_path)
    tag_cmd = ('git', 'describe', 'origin/master', '--tags')
    if tags_only:
        # Latest tag reachable from origin/master.
        tag_cmd += ('--abbrev=0',)
    else:
        # Only accept a tag that points exactly at origin/master.
        tag_cmd += ('--exact',)
    try:
        rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()
    except CalledProcessError:
        # No suitable tag: fall back to the SHA of origin/master.
        tag_cmd = ('git', 'rev-parse', 'origin/master')
        rev = cmd_output(*tag_cmd, cwd=repo_path)[1].strip()
    if (rev == repo_config['rev']):
        # Already at the candidate revision; nothing to update.
        return repo_config
    try:
        # Clone at the candidate rev and verify its manifest still defines
        # every hook the config references.
        path = store.clone(repo_config['repo'], rev)
        manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))
    except InvalidManifestError as e:
        raise RepositoryCannotBeUpdatedError(six.text_type(e))
    hooks = {hook['id'] for hook in repo_config['hooks']}
    hooks_missing = (hooks - {hook['id'] for hook in manifest})
    if hooks_missing:
        raise RepositoryCannotBeUpdatedError('Cannot update because the tip of master is missing these hooks:\n{}'.format(', '.join(sorted(hooks_missing))))
    new_config = repo_config.copy()
    new_config['rev'] = rev
    return new_config
Updates a repository to the tip of `master`. If the repository cannot be updated because a hook that is configured does not exist in `master`, this raises a RepositoryCannotBeUpdatedError Args: repo_config - A config for a repository
codesearchnet
def count(self):
    """Estimate the cardinality of the data values seen so far.

    Returns:
        int: The estimated cardinality.
    """
    # Raw HyperLogLog estimate: alpha * m^2 / sum(2^-register).
    raw_estimate = (self.alpha * float(self.m ** 2)) / np.sum(2.0 ** (-self.reg))
    if raw_estimate <= 2.5 * self.m:
        # Small-range correction: use linear counting while empty registers
        # remain.
        empty_registers = self.m - np.count_nonzero(self.reg)
        return self._linearcounting(empty_registers)
    if raw_estimate <= (1.0 / 30.0) * (1 << 32):
        return raw_estimate
    # Large-range correction for estimates approaching 2^32.
    return self._largerange_correction(raw_estimate)
Estimate the cardinality of the data values seen so far. Returns: int: The estimated cardinality.
codesearchnet
def log_variable_sizes(var_list, tag, verbose=True, mesh_to_impl=None):
    """Log the sizes and shapes of variables, and the total size.

    Args:
      var_list: a list of variables
      tag: a string prefix for the summary log line
      verbose: bool, if True, log every weight; otherwise, log total size only.
      mesh_to_impl: an optional map from Mesh to MeshImpl; when given, each
        variable's per-device slice size is computed and logged as well.
    """
    if not var_list:
        return
    # Index by name and iterate sorted names so log output is deterministic.
    name_to_var = {v.name: v for v in var_list}
    total_size = 0
    total_slice_size = 0
    for v_name in sorted(list(name_to_var)):
        v = name_to_var[v_name]
        v_size = v.shape.size
        if mesh_to_impl is not None:
            slice_size = mesh_to_impl[v.mesh].slice_size(v.shape)
        else:
            slice_size = 0
        total_slice_size += slice_size
        if verbose:
            tf.logging.info(
                "Variable %s size %s slice_size %s %s",
                v.name.ljust(60),
                str(v_size).ljust(12),
                str(slice_size).ljust(12),
                str(v.shape).ljust(60))
            if isinstance(v, StackedVariable):
                # List the names folded into this stacked variable.
                for n in v.original_names:
                    tf.logging.info("    " + n)
        total_size += v_size
    tf.logging.info("%s count: %s  Total size: %s  Total slice_size: %s",
                    tag.ljust(30), str(len(var_list)).ljust(6),
                    str(total_size).ljust(15),
                    str(total_slice_size).ljust(15))
Log the sizes and shapes of variables, and the total size. Args: var_list: a list of variables; defaults to trainable_variables tag: a string; defaults to "Trainable Variables" verbose: bool, if True, log every weight; otherwise, log total size only. mesh_to_impl: an optional map from Mesh to MeshImpl
juraj-google-style
def list_attributes(self, name):
    """Look up the attributes of a list.

    Args:
        name (str): The name of the list

    Returns:
        dict: attributes of the list
    """
    response = self.client.service.getListAttributes(name, self.proxy_id)
    # The SOAP layer may wrap a single result dict in a one-element list;
    # unwrap it for convenience.
    if isinstance(response, list) and len(response) == 1:
        return response[0]
    return response
Look up the attributes of a list. Args: name (str): The name of the list Returns: dict: attributes of the list
codesearchnet
def resize_attention_map(attentions, height, width, align_corners=False):
    """
    Args:
        attentions (`torch.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width]
        height (`int`): height of the output attention map
        width (`int`): width of the output attention map
        align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`.

    Returns:
        `torch.Tensor`: resized attention map of shape [batch_size, groups, height, width]
    """
    # Bug fix: the previous version was syntactically broken
    # ("scale = (height * width if height > width:") and never recovered the
    # 2D feature grid. The flattened length L = feat_height * feat_width has
    # the same aspect ratio as the output, so scale = sqrt(H*W / L).
    scale = (height * width // attentions.shape[2]) ** 0.5
    if height > width:
        feat_width = int(np.round(width / scale))
        feat_height = attentions.shape[2] // feat_width
    else:
        feat_height = int(np.round(height / scale))
        feat_width = attentions.shape[2] // feat_height
    batch_size = attentions.shape[0]
    groups = attentions.shape[1]
    # [batch_size, groups, feat_height*feat_width] -> [batch_size, groups, feat_height, feat_width]
    attentions = attentions.reshape(batch_size, groups, feat_height, feat_width)
    attentions = nn.functional.interpolate(attentions, size=(height, width), mode='bilinear', align_corners=align_corners)
    return attentions
Args: attentions (`torch.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width] height (`int`): height of the output attention map width (`int`): width of the output attention map align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`. Returns: `torch.Tensor`: resized attention map of shape [batch_size, groups, height, width]
github-repos
def submit(self, **kwargs):
    """Submit a job script that will run the schedulers with `abirun.py`.

    Args:
        verbose: Verbosity level
        dry_run: Don't submit the script if dry_run. Default: False

    Returns:
        namedtuple with attributes:
            retcode: Return code as returned by the submission script.
            qjob: :class:`QueueJob` object.
            num_flows_inbatch: Number of flows executed by the batch script
        The early-exit paths below return 0 (nothing to do) or -1 (dry run).
    """
    (verbose, dry_run) = (kwargs.pop('verbose', 0), kwargs.pop('dry_run', False))
    if (not self.flows):
        print('Cannot submit an empty list of flows!')
        return 0
    if hasattr(self, 'qjob'):
        # A previous submission exists for this launcher.
        print(('BatchLauncher has qjob %s' % self.qjob))
        if (not self.batch_pid_file.exists):
            # NOTE(review): spelled `self.batch_pidfile` near the end of this
            # method -- confirm which attribute name is the correct one.
            print("It seems that the batch script reached the end. Wont' try to submit it again")
            return 0
        msg = 'Here I have to understand if qjob is in the queue. but I need an abstract API that can retrieve info from the queue id'
        raise RuntimeError(msg)
        # Unreachable: kept from the original after the raise above.
        if self.qjob.in_status('Running|Queued'):
            print('Job is still running. Cannot submit')
        else:
            del self.qjob
    (script, num_flows_inbatch) = self._get_script_nflows()
    if (num_flows_inbatch == 0):
        print("All flows have reached all_ok! Batch script won't be submitted")
        return 0
    if verbose:
        print('*** submission script ***')
        print(script)
    self.script_file.write(script)
    # 480 == 0o740: rwxr----- permissions for the submission script.
    self.script_file.chmod(480)
    # Persist every flow before handing control to the queue.
    for flow in self.flows:
        flow.build_and_pickle_dump()
    if dry_run:
        return (- 1)
    print(('Will submit %s flows in batch script' % len(self.flows)))
    (self.qjob, process) = self.qadapter.submit_to_queue(self.script_file.path)
    # Remember the queue id so later invocations can detect the running job.
    self.batch_pidfile.write(str(self.qjob.qid))
    self.pickle_dump()
    process.wait()
    return dict2namedtuple(retcode=process.returncode, qjob=self.qjob, num_flows_inbatch=num_flows_inbatch)
Submit a job script that will run the schedulers with `abirun.py`. Args: verbose: Verbosity level dry_run: Don't submit the script if dry_run. Default: False Returns: namedtuple with attributes: retcode: Return code as returned by the submission script. qjob: :class:`QueueJob` object. num_flows_inbatch: Number of flows executed by the batch script Return code of the job script submission.
codesearchnet
def indicator(self, indicator_type=None, owner=None, **kwargs):
    """Create the Indicator TI object.

    Args:
        owner: Owner of the indicator.
        indicator_type: Type name of the indicator (e.g. 'Address', 'File',
            'Host', 'URL', 'EmailAddress' or a registered custom type); None
            yields a generic Indicator.
        **kwargs: Passed through to the indicator class; the type-specific
            value keys ('ip', 'address', 'hostname', 'url') are popped here.

    Return:
        The created indicator object; None when a custom-type lookup fails.
    """
    if not indicator_type:
        return Indicator(self.tcex, None, owner=owner, **kwargs)
    upper_indicator_type = indicator_type.upper()
    indicator = None
    if upper_indicator_type == 'ADDRESS':
        indicator = Address(self.tcex, kwargs.pop('ip', None), owner=owner, **kwargs)
    elif upper_indicator_type == 'EMAILADDRESS':
        indicator = EmailAddress(self.tcex, kwargs.pop('address', None), owner=owner, **kwargs)
    elif upper_indicator_type == 'FILE':
        indicator = File(self.tcex, **kwargs)
    elif upper_indicator_type == 'HOST':
        indicator = Host(self.tcex, kwargs.pop('hostname', None), owner=owner, **kwargs)
    elif upper_indicator_type == 'URL':
        indicator = URL(self.tcex, kwargs.pop('url', None), owner=owner, **kwargs)
    else:
        # Fall back to dynamically registered custom indicator types.
        try:
            if upper_indicator_type in self._custom_indicator_classes.keys():
                custom_indicator_details = self._custom_indicator_classes[indicator_type]
                value_fields = custom_indicator_details.get('value_fields')
                # NOTE(review): `module` is not defined in this scope --
                # confirm it is a module-level name where this method lives.
                c = getattr(module, custom_indicator_details.get('branch'))
                if len(value_fields) == 1:
                    indicator = c(value_fields[0], owner=owner, **kwargs)
                elif len(value_fields) == 2:
                    indicator = c(value_fields[0], value_fields[1], owner=owner, **kwargs)
                elif len(value_fields) == 3:
                    # NOTE(review): skips value_fields[1]; looks like it
                    # should pass fields 0, 1 and 2 -- confirm upstream.
                    indicator = c(value_fields[0], value_fields[2], owner=owner, **kwargs)
        except Exception:
            return None
    return indicator
Create the Indicator TI object. Args: owner: indicator_type: **kwargs: Return:
juraj-google-style
def CopyFromDateTimeString(self, time_string):
    """Copies a SYSTEMTIME structure from a date and time string.

    Args:
        time_string (str): date and time value formatted as:
            YYYY-MM-DD hh:mm:ss.######[+-]##:##
            Where # are numeric digits ranging from 0 to 9 and the seconds
            fraction can be either 3 or 6 digits. The time of day, seconds
            fraction and time zone offset are optional. The default time
            zone is UTC.

    Raises:
        ValueError: if the date string is invalid or not supported.
    """
    date_time_values = self._CopyDateTimeFromString(time_string)
    year = date_time_values.get('year', 0)
    month = date_time_values.get('month', 0)
    day_of_month = date_time_values.get('day_of_month', 0)
    hours = date_time_values.get('hours', 0)
    minutes = date_time_values.get('minutes', 0)
    seconds = date_time_values.get('seconds', 0)
    microseconds = date_time_values.get('microseconds', 0)
    # SYSTEMTIME carries only millisecond precision; drop the remainder.
    (milliseconds, _) = divmod(microseconds, definitions.MICROSECONDS_PER_MILLISECOND)
    # 1601-30827 is the year range the structure supports.
    if ((year < 1601) or (year > 30827)):
        raise ValueError('Unsupported year value: {0:d}.'.format(year))
    # Invalidate the cached normalized timestamp before updating fields.
    self._normalized_timestamp = None
    self._number_of_seconds = self._GetNumberOfSecondsFromElements(year, month, day_of_month, hours, minutes, seconds)
    self.year = year
    self.month = month
    self.day_of_month = day_of_month
    # Day of week is not computed from the parsed string.
    self.day_of_week = None
    self.hours = hours
    self.minutes = minutes
    self.seconds = seconds
    self.milliseconds = milliseconds
    self.is_local_time = False
Copies a SYSTEMTIME structure from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC. Raises: ValueError: if the date string is invalid or not supported.
codesearchnet
def unique_flags(items, key=None):
    """Returns a list of booleans corresponding to the first instance of each
    unique item.

    Args:
        items (Sequence): indexable collection of items
        key (Callable, optional): custom normalization function.
            If specified returns items where `key(item)` is unique.

    Returns:
        List[bool]: flags the items that are unique
    """
    len_ = len(items)
    if (key is None):
        # Zip the reversed sequence with reversed indices: later-in-reversed
        # (i.e. earlier-in-original) duplicates overwrite prior dict entries,
        # so each item ends up mapped to the index of its FIRST occurrence.
        item_to_index = dict(zip(reversed(items), reversed(range(len_))))
        indices = item_to_index.values()
    else:
        # Delegate to argunique when a normalization key is supplied.
        indices = argunique(items, key=key)
    # Convert the kept indices into a boolean mask over the original items.
    flags = boolmask(indices, len_)
    return flags
Returns a list of booleans corresponding to the first instance of each unique item. Args: items (Sequence): indexable collection of items key (Callable, optional): custom normalization function. If specified returns items where `key(item)` is unique. Returns: List[bool] : flags the items that are unique Example: >>> import ubelt as ub >>> items = [0, 2, 1, 1, 0, 9, 2] >>> flags = unique_flags(items) >>> assert flags == [True, True, True, False, False, True, False] >>> flags = unique_flags(items, key=lambda x: x % 2 == 0) >>> assert flags == [True, False, True, False, False, False, False]
codesearchnet
def supervisor(self):
    """Return an authenticated connection for use, open new if required.

    Returns:
        SupervisorWebService: New or existing session with the Five9
        Statistics API.
    """
    client = self._cached_client('supervisor')
    # Lazily establish the session on first use; later calls reuse it.
    if not self._api_supervisor_session:
        self._api_supervisor_session = self.__create_supervisor_session(client)
    return client
Return an authenticated connection for use, open new if required. Returns: SupervisorWebService: New or existing session with the Five9 Statistics API.
codesearchnet
def __init__(self, liblightning=None, program=None):
    """Bindings to GNU Lightning library.

    Args:
        liblightning: Set to override path to liblightning.
        program: Set to override argument to init_jit, used with bfd.
            NOTE(review): `program` is not referenced in this body -- confirm
            it is consumed by `_init` or elsewhere.
    """
    # Load the shared library, bind the C signatures, then initialize.
    self._load(liblightning)
    self._set_signatures()
    self._init()
    # No program has been JIT-compiled yet.
    self._executable = None
Bindings to GNU Lightning library. Args: liblightning: Set to override path to liblightning. program: Set to override argument to init_jit, used with bfd.
juraj-google-style
def _checkpoint_adapter(self, path: str):
    """Returns a checkpoint adapter for this object.

    Needs to be overridden if the `Trackable` requires an adapter at restore.
    Override this method to define callbacks for checkpoint positions to be
    applied at restore time.

    Args:
        path: Checkpoint path.

    Returns:
        A subclass of AbstractCheckpointAdapter that defines callbacks at
        restore for this trackable, or None (default: no adapter).
    """
    del path  # Unused in the default implementation.
    return None
Returns a checkpoint adapter for this object. Needs to be overridden if the `Trackable` requires adapter at restore. Override this method to define callbacks for checkpoint positions to be applied at restore time. Args: path: Checkpoint path. Returns: A subclass of AbstractCheckpointAdapter that defines callbacks at restore for this trackable.
github-repos
def CacheFileObject(self, path_spec, file_object):
    """Caches a file-like object based on a path specification.

    Args:
        path_spec (PathSpec): path specification.
        file_object (FileIO): file-like object.
    """
    # The comparable string uniquely represents the path specification and
    # therefore serves as the cache key.
    cache_key = path_spec.comparable
    self._file_object_cache.CacheObject(cache_key, file_object)
Caches a file-like object based on a path specification. Args: path_spec (PathSpec): path specification. file_object (FileIO): file-like object.
codesearchnet
def __init__(self, gans_value_function=None):
    """Init.

    Args:
        gans_value_function: is-a `GANsValueFunction`; defaults to `MiniMax`.

    Raises:
        TypeError: when `gans_value_function` is not a `GANsValueFunction`.
    """
    if gans_value_function is None:
        # Default to the classic minimax GAN value function.
        gans_value_function = MiniMax()
    if isinstance(gans_value_function, GANsValueFunction) is False:
        raise TypeError("The type of `gans_value_function` must be `GANsValueFunction`.")
    self.__gans_value_function = gans_value_function
    self.__logger = getLogger("pygan")
    super().__init__(gans_value_function)
Init. Args: gans_value_function: is-a `GANsValueFunction`.
juraj-google-style
def __init__(self, data):
    """Take a date object and use that for comparison.

    Args:
        data: A string, datetime object or an integer containing the number
            of micro seconds since January 1, 1970, 00:00:00 UTC.

    Raises:
        ValueError: if the date string is invalid or the type is unsupported.
    """
    if isinstance(data, py2to3.INTEGER_TYPES):
        self.data = data
        self.text = '{0:d}'.format(data)
    elif isinstance(data, float):
        self.data = py2to3.LONG_TYPE(data)
        self.text = '{0:f}'.format(data)
    elif isinstance(data, py2to3.STRING_TYPES):
        if isinstance(data, py2to3.BYTES_TYPE):
            self.text = data.decode('utf-8', errors='ignore')
        else:
            self.text = data
        try:
            self.data = timelib.Timestamp.FromTimeString(self.text)
        except (ValueError, errors.TimestampError):
            raise ValueError('Wrongly formatted date string: {0:s}'.format(
                self.text))
    elif isinstance(data, datetime.datetime):
        posix_time = int(calendar.timegm(data.utctimetuple()))
        # utctimetuple() drops sub-second precision; add microseconds back.
        self.data = (
            posix_time * definitions.MICROSECONDS_PER_SECOND) + data.microsecond
        self.text = '{0!s}'.format(data)
    elif isinstance(data, DateCompareObject):
        self.data = data.data
        self.text = '{0!s}'.format(data)
    else:
        # Bug fix: '{0:s}'.format(type(data)) raised TypeError (a type does
        # not support the 's' format spec), masking the intended ValueError.
        raise ValueError('Unsupported type: {0!s}.'.format(type(data)))
Take a date object and use that for comparison. Args: data: A string, datetime object or an integer containing the number of micro seconds since January 1, 1970, 00:00:00 UTC. Raises: ValueError: if the date string is invalid.
juraj-google-style
def en020(self, value=None):
    """Corresponds to IDD Field `en020`
    mean coincident dry-bulb temperature to
    Enthalpy corresponding to 2.0% annual cumulative frequency of occurrence

    Args:
        value (float): value for IDD Field `en020`
            Unit: kJ/kg
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # None marks a missing value and bypasses validation.
        self._en020 = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float for field `en020`'.format(value))
    self._en020 = converted
Corresponds to IDD Field `en020` mean coincident dry-bulb temperature to Enthalpy corresponding to 2.0% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `en020` Unit: kJ/kg if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def _VerifyHMAC(self, comms=None):
    """Verifies the HMAC.

    This method raises a DecryptionError if the received HMAC does not
    verify. If the HMAC verifies correctly, True is returned.

    Args:
        comms: The comms RdfValue to verify.

    Raises:
        DecryptionError: The HMAC did not verify.

    Returns:
        True
    """
    if self.hmac_type == 'SIMPLE_HMAC':
        # Legacy mode: only the encrypted payload is authenticated.
        msg = comms.encrypted
        digest = comms.hmac
    elif self.hmac_type == 'FULL_HMAC':
        # Authenticate the payload together with the cipher, its metadata,
        # the packet IV and the API version.
        msg = b''.join([comms.encrypted, comms.encrypted_cipher, comms.encrypted_cipher_metadata, comms.packet_iv.SerializeToString(), struct.pack('<I', comms.api_version)])
        digest = comms.full_hmac
    else:
        # Bug fix: message previously read "HMAC type no supported."
        raise DecryptionError('HMAC type not supported.')
    try:
        rdf_crypto.HMAC(self.cipher.hmac_key).Verify(msg, digest)
    except rdf_crypto.VerificationError as e:
        raise DecryptionError('HMAC verification failed: %s' % e)
    return True
Verifies the HMAC. This method raises a DecryptionError if the received HMAC does not verify. If the HMAC verifies correctly, True is returned. Args: comms: The comms RdfValue to verify. Raises: DecryptionError: The HMAC did not verify. Returns: True
codesearchnet
def _cookiecutter_configs_have_changed(template, old_version, new_version):
    """Given an old version and new version, check if the cookiecutter.json
    files have changed.

    When the cookiecutter.json files change, it means the user will need to
    be prompted for new context.

    Args:
        template (str): The git SSH path to the template
        old_version (str): The git SHA of the old version
        new_version (str): The git SHA of the new version

    Returns:
        bool: True if the cookiecutter.json files have been changed in the
        old and new versions
    """
    temple.check.is_git_ssh_path(template)
    repo_path = temple.utils.get_repo_path(template)
    github_client = temple.utils.GithubClient()
    api = '/repos/{}/contents/cookiecutter.json'.format(repo_path)
    # Fetch the file at both revisions via the Github contents API.
    old_config_resp = github_client.get(api, params={'ref': old_version})
    old_config_resp.raise_for_status()
    new_config_resp = github_client.get(api, params={'ref': new_version})
    new_config_resp.raise_for_status()
    # Compare the (encoded) content fields directly; equal encodings imply
    # equal files.
    return (old_config_resp.json()['content'] != new_config_resp.json()['content'])
Given an old version and new version, check if the cookiecutter.json files have changed When the cookiecutter.json files change, it means the user will need to be prompted for new context Args: template (str): The git SSH path to the template old_version (str): The git SHA of the old version new_version (str): The git SHA of the new version Returns: bool: True if the cookiecutter.json files have been changed in the old and new versions
codesearchnet
def inputs(eval_data, data_dir, batch_size):
    """Construct input for CIFAR evaluation using the Reader ops.

    Args:
        eval_data: bool, indicating if one should use the train or eval data
            set.
        data_dir: Path to the CIFAR-10 data directory.
        batch_size: Number of images per batch.

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3]
            size.
        labels: Labels. 1D tensor of [batch_size] size.

    Raises:
        ValueError: If a data file cannot be found.
    """
    if not eval_data:
        # The training set is sharded into data_batch_1..5.
        filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]
        num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
    else:
        filenames = [os.path.join(data_dir, 'test_batch.bin')]
        num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
    # Fail fast if any expected shard is missing.
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)
    filename_queue = tf.train.string_input_producer(filenames)
    read_input = read_cifar10(filename_queue)
    reshaped_image = tf.cast(read_input.uint8image, tf.float32)
    height = IMAGE_SIZE
    width = IMAGE_SIZE
    # Center crop/pad to the model input size, then normalize per image.
    resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image, height, width)
    float_image = tf.image.per_image_standardization(resized_image)
    # Pin the static shapes so the batching op knows the tensor sizes.
    float_image.set_shape([height, width, 3])
    read_input.label.set_shape([1])
    # Queue capacity heuristic: hold 40% of an epoch's examples.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(num_examples_per_epoch * min_fraction_of_examples_in_queue)
    return _generate_image_and_label_batch(float_image, read_input.label, min_queue_examples, batch_size, shuffle=False)
Construct input for CIFAR evaluation using the Reader ops. Args: eval_data: bool, indicating if one should use the train or eval data set. data_dir: Path to the CIFAR-10 data directory. batch_size: Number of images per batch. Returns: images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size. labels: Labels. 1D tensor of [batch_size] size.
juraj-google-style
def _set_grid_info(self, which, low, high, num, scale, name):
    """Set the grid values for x or y.

    Create information for the grid of x and y values.

    Args:
        which (str): `x` or `y`.
        low/high (float): Lowest/highest value for the axis.
        num (int): Number of points on axis.
        scale (str): Scale of the axis. Choices are 'log' or 'lin'.
        name (str): Name representing the axis.

    Raises:
        ValueError: If scale is not 'log' or 'lin'.
    """
    # Bug fix: validate before mutating, so an invalid scale leaves
    # generate_info untouched (the old code wrote four attributes first).
    if scale not in ['lin', 'log']:
        raise ValueError('{} scale must be lin or log.'.format(which))
    setattr(self.generate_info, which + '_low', low)
    setattr(self.generate_info, which + '_high', high)
    setattr(self.generate_info, 'num_' + which, num)
    setattr(self.generate_info, which + 'val_name', name)
    setattr(self.generate_info, which + 'scale', scale)
    return
Set the grid values for x or y. Create information for the grid of x and y values. Args: which (str): `x` or `y`. low/high (float): Lowest/highest value for the axis. num (int): Number of points on axis. scale (str): Scale of the axis. Choices are 'log' or 'lin'. name (str): Name representing the axis. See GenerateContainer documentation for options for the name. unit (str): Unit for this axis quantity. See GenerateContainer documentation for options for the units. Raises: ValueError: If scale is not 'log' or 'lin'.
codesearchnet
def _get_schema(cls, schema):
    """Method that will fetch a Marshmallow schema flexibly.

    Args:
        schema (marshmallow.Schema|str): Either the schema class, an
            instance of a schema, or a Python path to a schema.

    Returns:
        marshmallow.Schema: The desired schema.

    Raises:
        TypeError: This is raised if the provided object isn't a Marshmallow
            schema.
    """
    # A string is treated as a dotted Python path to the schema object.
    if isinstance(schema, string_types):
        schema = cls._get_object_from_python_path(schema)
    # A class (rather than an instance) is instantiated with defaults.
    if isclass(schema):
        schema = schema()
    if not isinstance(schema, Schema):
        raise TypeError("The schema must be a path to a Marshmallow "
                        "schema or a Marshmallow schema.")
    return schema
Method that will fetch a Marshmallow schema flexibly. Args: schema (marshmallow.Schema|str): Either the schema class, an instance of a schema, or a Python path to a schema. Returns: marshmallow.Schema: The desired schema. Raises: TypeError: This is raised if the provided object isn't a Marshmallow schema.
juraj-google-style
def Serialize(self, writer):
    """Serialize object.

    Args:
        writer (neo.IO.BinaryWriter):
    """
    writer.WriteUInt32(self.Timestamp)
    writer.WriteUInt64(self.Services)
    # The IPv4 address occupies a 16-byte field: 4 octets followed by 12
    # zero bytes of padding.
    address_bytes = bytearray(int(octet) for octet in self.Address.split('.'))
    address_bytes.extend(bytearray(12))
    writer.WriteBytes(address_bytes)
    # Port is written big-endian.
    writer.WriteUInt16(self.Port, endian='>')
Serialize object. Args: writer (neo.IO.BinaryWriter):
juraj-google-style
def __init__(self, num_evals, log_progress=True):
    """Constructs the run hook.

    Args:
        num_evals: The number of evaluations to run for. if set to None,
            will iterate the dataset until all inputs are exhausted.
        log_progress: Whether to log evaluation progress, defaults to True.
    """
    self._num_evals = num_evals
    self._evals_completed = None
    self._log_progress = log_progress
    # Log roughly ten times over the run; for short or unbounded runs, log
    # on every evaluation.
    if num_evals is None or num_evals < 20:
        self._log_frequency = 1
    else:
        self._log_frequency = math.floor(num_evals / 10.0)
Constructs the run hook. Args: num_evals: The number of evaluations to run for. if set to None, will iterate the dataset until all inputs are exhausted. log_progress: Whether to log evaluation progress, defaults to True.
github-repos
def MakeCACert(private_key, common_name=u"grr", issuer_cn=u"grr_test", issuer_c=u"US"):
    """Generate a CA certificate.

    Args:
        private_key: The private key to use.
        common_name: Name for cert.
        issuer_cn: Name for issuer.
        issuer_c: Country for issuer.

    Returns:
        The certificate.
    """
    public_key = private_key.GetPublicKey()
    builder = x509.CertificateBuilder()
    issuer = x509.Name([
        x509.NameAttribute(oid.NameOID.COMMON_NAME, issuer_cn),
        x509.NameAttribute(oid.NameOID.COUNTRY_NAME, issuer_c)
    ])
    subject = x509.Name(
        [x509.NameAttribute(oid.NameOID.COMMON_NAME, common_name)])
    builder = builder.subject_name(subject)
    builder = builder.issuer_name(issuer)
    # Valid from one day in the past (presumably for clock-skew tolerance)
    # to roughly ten years out.
    valid_from = rdfvalue.RDFDatetime.Now() - rdfvalue.Duration("1d")
    valid_until = rdfvalue.RDFDatetime.Now() + rdfvalue.Duration("3650d")
    builder = builder.not_valid_before(valid_from.AsDatetime())
    builder = builder.not_valid_after(valid_until.AsDatetime())
    builder = builder.serial_number(1)
    builder = builder.public_key(public_key.GetRawPublicKey())
    # BasicConstraints ca=True marks this certificate as a CA.
    builder = builder.add_extension(
        x509.BasicConstraints(ca=True, path_length=None), critical=True)
    builder = builder.add_extension(
        x509.SubjectKeyIdentifier.from_public_key(public_key.GetRawPublicKey()),
        critical=False)
    certificate = builder.sign(
        private_key=private_key.GetRawPrivateKey(),
        algorithm=hashes.SHA256(),
        backend=openssl.backend)
    return rdf_crypto.RDFX509Cert(certificate)
Generate a CA certificate. Args: private_key: The private key to use. common_name: Name for cert. issuer_cn: Name for issuer. issuer_c: Country for issuer. Returns: The certificate.
juraj-google-style
def replace(s, pattern, replacement):
    """Replaces occurrences of a match string in a given string and returns
    the new string. The match string can be a regex expression.

    Args:
        s (str): the string to modify
        pattern (str): the search expression
        replacement (str): the string to replace each match with
    """
    # Substitute via a callback so `replacement` is inserted literally:
    # backslashes and group references in it are NOT expanded by re.sub.
    return re.sub(pattern, lambda match: replacement, s)
Replaces occurrences of a match string in a given string and returns the new string. The match string can be a regex expression. Args: s (str): the string to modify pattern (str): the search expression replacement (str): the string to replace each match with
codesearchnet
def is_workdir(cls, path):
    """Check if the given path is a workdir

    Args:
        path(str): Path to check

    Return:
        bool: True if the given path is a workdir
    """
    # A path is a workdir exactly when it can be loaded without raising
    # MalformedWorkdir.
    try:
        cls(path=path).load()
        return True
    except MalformedWorkdir:
        return False
Check if the given path is a workdir Args: path(str): Path to check Return: bool: True if the given path is a workdir
juraj-google-style
def notes_to_midi(self, notes: np.ndarray, beatstep: np.ndarray, offset_sec: int=0.0):
    """Converts notes to Midi.

    Args:
        notes (`numpy.ndarray`):
            This is used to create Pretty Midi objects.
        beatstep (`numpy.ndarray`):
            This is the extrapolated beatstep that we get from the feature
            extractor.
        offset_sec (`int`, *optional*, defaults to 0.0):
            This represents the offset seconds which is used while creating
            each Pretty Midi Note.
    """
    requires_backends(self, ['pretty_midi'])
    new_pm = pretty_midi.PrettyMIDI(resolution=384, initial_tempo=120.0)
    new_inst = pretty_midi.Instrument(program=0)
    new_notes = []
    # Each note row is (onset index, offset index, pitch, velocity); the
    # indices address `beatstep`, which maps them to times in seconds.
    for onset_idx, offset_idx, pitch, velocity in notes:
        new_note = pretty_midi.Note(velocity=velocity, pitch=pitch, start=beatstep[onset_idx] - offset_sec, end=beatstep[offset_idx] - offset_sec)
        new_notes.append(new_note)
    new_inst.notes = new_notes
    new_pm.instruments.append(new_inst)
    # Clean up any notes invalidated by the offset shift.
    new_pm.remove_invalid_notes()
    return new_pm
Converts notes to Midi. Args: notes (`numpy.ndarray`): This is used to create Pretty Midi objects. beatstep (`numpy.ndarray`): This is the extrapolated beatstep that we get from feature extractor. offset_sec (`int`, *optional*, defaults to 0.0): This represents the offset seconds which is used while creating each Pretty Midi Note.
github-repos
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    r"""
    [`FlaxLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.

    Args:
        min_length (`int`):
            The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
        eos_token_id (`int`):
            The id of the *end-of-sequence* token.
    """

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}')
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}')
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # apply_penalty is 1 while cur_len < min_length and 0 afterwards;
        # jnp.where keeps the computation branch-free so it stays traceable.
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float('inf')), scores)
        return scores
[`FlaxLogitsProcessor`] enforcing a min-length by setting EOS probability to 0. Args: min_length (`int`): The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`. eos_token_id (`int`): The id of the *end-of-sequence* token.
github-repos
def __init__(self, start_timestamp, end_timestamp):
    """Initializes a date and time range.

    The timestamps are integers containing the number of microseconds since
    January 1, 1970, 00:00:00 UTC.

    Args:
        start_timestamp (int): timestamp that marks the start of the range.
        end_timestamp (int): timestamp that marks the end of the range.

    Raises:
        ValueError: If the time range is badly formed.
    """
    if start_timestamp is None or end_timestamp is None:
        # Bug fix: the former message read "either a start and an end",
        # which contradicted itself; both bounds are required.
        raise ValueError(
            'Time range must have both a start and an end timestamp.')
    if start_timestamp > end_timestamp:
        # Bug fix: the former message was garbled ("Invalid start must
        # be earlier than end timestamp.").
        raise ValueError(
            'Invalid time range, start must be earlier than end timestamp.')
    super(TimeRange, self).__init__()
    self.duration = end_timestamp - start_timestamp
    self.end_timestamp = end_timestamp
    self.start_timestamp = start_timestamp
Initializes a date and time range. The timestamp are integers containing the number of microseconds since January 1, 1970, 00:00:00 UTC. Args: start_timestamp (int): timestamp that marks the start of the range. end_timestamp (int): timestamp that marks the end of the range. Raises: ValueError: If the time range is badly formed.
juraj-google-style
def _Verify(self):
    """Verify that all of the expected calls have been made.

    Raises:
        ExpectedMethodCallsError: if there are still more method calls in
            the expected queue.
    """
    if self._expected_calls_queue:
        # A lone, satisfied MultipleTimesGroup is allowed to remain in the
        # queue: it may legitimately match any number of further calls.
        if ((len(self._expected_calls_queue) == 1) and isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and self._expected_calls_queue[0].IsSatisfied()):
            pass
        else:
            raise ExpectedMethodCallsError(self._expected_calls_queue)
Verify that all of the expected calls have been made. Raises: ExpectedMethodCallsError: if there are still more method calls in the expected queue.
codesearchnet
def add_parameter(self, name, min_val, max_val):
    """Adds a parameter to the Population

    Args:
        name (str): name of the parameter
        min_val (int or float): minimum value for the parameter
        max_val (int or float): maximum value for the parameter
    """
    new_parameter = Parameter(name, min_val, max_val)
    self.__parameters.append(new_parameter)
Adds a paramber to the Population Args: name (str): name of the parameter min_val (int or float): minimum value for the parameter max_val (int or float): maximum value for the parameter
juraj-google-style
def make_decoder(num_topics, num_words): topics_words_logits = tf.compat.v1.get_variable('topics_words_logits', shape=[num_topics, num_words], initializer=tf.compat.v1.glorot_normal_initializer()) topics_words = tf.nn.softmax(topics_words_logits, axis=(- 1)) def decoder(topics): word_probs = tf.matmul(topics, topics_words) return tfd.OneHotCategorical(probs=word_probs, name='bag_of_words') return (decoder, topics_words)
Create the decoder function. Args: num_topics: The number of topics. num_words: The number of words. Returns: decoder: A `callable` mapping a `Tensor` of encodings to a `tfd.Distribution` instance over words.
codesearchnet
def _read(self, entry): start_time = time.time() content = self._zip.read(entry.filename) ctx = context.get() if ctx: operation.counters.Increment(COUNTER_IO_READ_BYTES, len(content))(ctx) operation.counters.Increment(COUNTER_IO_READ_MSEC, int(((time.time() - start_time) * 1000)))(ctx) return content
Read entry content. Args: entry: zip file entry as zipfile.ZipInfo. Returns: Entry content as string.
codesearchnet
def __init__(self, resolver_context):
    """Initializes an APFS container file system.

    Args:
        resolver_context (resolver.Context): resolver context.
    """
    super(APFSContainerFileSystem, self).__init__(resolver_context)
    # Populated lazily by the open/close logic elsewhere in the class.
    self._file_object = None
    self._fsapfs_container = None
Initializes an APFS container file system. Args: resolver_context (resolver.Context): resolver context.
juraj-google-style
def extrinsic_events(network, previous_state, current_state, next_state, indices=None, major_complex=None): if major_complex: mc_nodes = major_complex.subsystem.node_indices elif indices: mc_nodes = indices else: major_complex = compute.major_complex(network, current_state) mc_nodes = major_complex.subsystem.node_indices mechanisms = list(utils.powerset(mc_nodes, nonempty=True)) all_nodes = network.node_indices return events(network, previous_state, current_state, next_state, all_nodes, mechanisms=mechanisms)
Set of all mechanisms that are in the major complex but which have true causes and effects within the entire network. Args: network (Network): The network to analyze. previous_state (tuple[int]): The state of the network at ``t - 1``. current_state (tuple[int]): The state of the network at ``t``. next_state (tuple[int]): The state of the network at ``t + 1``. Keyword Args: indices (tuple[int]): The indices of the major complex. major_complex (AcSystemIrreducibilityAnalysis): The major complex. If ``major_complex`` is given then ``indices`` is ignored. Returns: tuple(actions): List of extrinsic events in the major complex.
codesearchnet
def rpm_versioned_name(cls, name, version, default_number=False): regexp = re.compile(r'^python(\d*|)-(.*)') auto_provides_regexp = re.compile(r'^python(\d*|)dist(.*)') if (not version or version == cls.get_default_py_version() and not default_number): found = regexp.search(name) if found and found.group(2) != 'devel': if 'epel' not in cls.template: return 'python-{0}'.format(regexp.search(name).group(2)) return name versioned_name = name if version: if regexp.search(name): versioned_name = re.sub(r'^python(\d*|)-', 'python{0}-'.format( version), name) elif auto_provides_regexp.search(name): versioned_name = re.sub( r'^python(\d*|)dist', 'python{0}dist'.format( version), name) else: versioned_name = 'python{0}-{1}'.format(version, name) if ('epel' in cls.template and version != cls.get_default_py_version()): versioned_name = versioned_name.replace('{0}'.format( version), '%{{python{0}_pkgversion}}'.format(version)) return versioned_name
Properly versions the name. For example: rpm_versioned_name('python-foo', '26') will return python26-foo rpm_versioned_name('pyfoo', '3') will return python3-pyfoo If version is same as settings.DEFAULT_PYTHON_VERSION, no change is done. Args: name: name to version version: version or None Returns: Versioned name or the original name if given version is None.
juraj-google-style
def method_exists(cls, method):
    """Whether a given dotted method name exists in the known API.

    Walks ``cls.API_METHODS`` (a nested dict whose leaves are
    description strings) one dotted component at a time.

    Arguments:
        method (:py:class:`str`): The name of the method, e.g.
            ``'chat.postMessage'``.

    Returns:
        :py:class:`bool`: Whether the method is in the known API.
    """
    node = cls.API_METHODS
    for key in method.split('.'):
        if not isinstance(node, dict):
            # A leaf (or missing branch) was reached before the dotted
            # name was fully consumed (e.g. 'a.b.c' where 'a.b' is a
            # leaf); the original code raised AttributeError on str.get.
            return False
        node = node.get(key)
        if node is None:
            break
    if isinstance(node, str):
        logger.debug('%r: %r', method, node)
        return True
    return False
Whether a given method exists in the known API. Arguments: method (:py:class:`str`): The name of the method. Returns: :py:class:`bool`: Whether the method is in the known API.
juraj-google-style
def __init__(self, adapter_id): super(ConnectionManager, self).__init__() self.id = adapter_id self._stop_event = threading.Event() self._actions = queue.Queue() self._connections = {} self._int_connections = {} self._data_lock = threading.Lock() self.daemon = True self._logger = logging.getLogger(__name__) self._logger.addHandler(logging.NullHandler()) self._logger.setLevel(logging.INFO)
Constructor. Args: adapter_id (int): Since the ConnectionManager responds to callbacks on behalf of a DeviceAdapter, it needs to know what adapter_id to send with the callbacks.
juraj-google-style
def plot_time_series(self, f_start=None, f_stop=None, if_id=0, logged=True, orientation='h', MJD_time=False, **kwargs): ax = plt.gca() plot_f, plot_data = self.grab_data(f_start, f_stop, if_id) if logged and self.header[b'nbits'] >= 8: plot_data = db(plot_data) if len(plot_data.shape) > 1: plot_data = plot_data.mean(axis=1) else: plot_data = plot_data.mean() extent = self._calc_extent(plot_f=plot_f,plot_t=self.timestamps,MJD_time=MJD_time) plot_t = np.linspace(extent[2],extent[3],len(self.timestamps)) if MJD_time: tlabel = "Time [MJD]" else: tlabel = "Time [s]" if logged: plabel = "Power [dB]" else: plabel = "Power [counts]" if 'v' in orientation: plt.plot(plot_data, plot_t, **kwargs) plt.xlabel(plabel) else: plt.plot(plot_t, plot_data, **kwargs) plt.xlabel(tlabel) plt.ylabel(plabel) ax.autoscale(axis='both',tight=True)
Plot the time series. Args: f_start (float): start frequency, in MHz f_stop (float): stop frequency, in MHz logged (bool): Plot in linear (False) or dB units (True), kwargs: keyword args to be passed to matplotlib imshow()
juraj-google-style
def GetAvailableClaimTotal(self):
    """Gets the total amount of Gas that this wallet is able to claim
    at a given moment.

    Returns:
        Fixed8: the amount of Gas available to claim as a Fixed8 number.
    """
    # Collect references to all spent-but-unclaimed coins, then let the
    # blockchain compute the claimable bonus for those references.
    coinrefs = [coin.Reference for coin in self.GetUnclaimedCoins()]
    bonus = Blockchain.CalculateBonusIgnoreClaimed(coinrefs, True)
    return bonus
Gets the total amount of Gas that this wallet is able to claim at a given moment. Returns: Fixed8: the amount of Gas available to claim as a Fixed8 number.
codesearchnet
def refresh(self, access_token=None, **kwargs): if not self.token_lock.locked(): with self.token_lock: if access_token == self.access_token or access_token is None: if self.developer_token is not None: r = self._httpclient.request( method='POST', url=self.developer_token_url, path='/request_token', headers={ 'Authorization': 'Bearer {}'.format( self.developer_token ) }, timeout=30, raise_for_status=True ) elif all( [ self.client_id, self.client_secret, self.refresh_token ] ): data = { 'client_id': self.client_id, 'client_secret': self.client_secret, 'refresh_token': self.refresh_token, 'grant_type': 'refresh_token' } r = self._httpclient.request( method='POST', url=self.token_url, json=data, path='/api/oauth2/RequestToken', **kwargs ) else: raise PartialCredentialsError( "Missing one or more required credentials" ) if r: if not r.ok: raise PanCloudError( '%s %s: %s' % ( r.status_code, r.reason, r.text) ) try: r_json = r.json() except ValueError as e: raise PanCloudError("Invalid JSON: %s" % e) else: if r.json().get( 'error_description' ) or r.json().get( 'error' ): raise PanCloudError(r.text) self.access_token = r_json.get( 'access_token', None ) self.jwt_exp = self._decode_exp( self.access_token_) if r_json.get('refresh_token', None): self.refresh_token = \ r_json.get('refresh_token') self.write_credentials() return self.access_token_
Refresh access and refresh tokens. Args: access_token (str): Access token to refresh. Defaults to ``None``. Returns: str: Refreshed access token.
juraj-google-style
def __init__(self, provider, template, **kwargs):
    """Constructor.

    Args:
        provider: The provider that created this template.
        template: The Jinja2 template to wrap.
    """
    # The wrapped template's filename is what the base class keys on.
    super(StatikJinjaTemplate, self).__init__(template.filename, **kwargs)
    self.provider = provider
    self.template = template
Constructor. Args: provider: The provider that created this template. template: The Jinja2 template to wrap.
juraj-google-style
def send_message(
    self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
    """Transport the message to the server and return the response.

    Args:
        request: The JSON-RPC request string.
        response_expected: Whether the request expects a response.
            (Not consulted here -- the HTTP reply is wrapped regardless.)

    Returns:
        A Response object wrapping the server's text reply.
    """
    response = self.session.post(self.endpoint, data=request.encode(), **kwargs)
    return Response(response.text, raw=response)
Transport the message to the server and return the response. Args: request: The JSON-RPC request string. response_expected: Whether the request expects a response. Returns: A Response object.
juraj-google-style
def from_lasio(cls, l, remap=None, funcs=None): params = {} funcs = funcs or {} funcs['location'] = str for field, (sect, code) in las_fields['location'].items(): params[field] = utils.lasio_get(l, sect, code, remap=remap, funcs=funcs) return cls(params)
Make a Location object from a lasio object. Assumes we're starting with a lasio object, l. Args: l (lasio). remap (dict): Optional. A dict of 'old': 'new' LAS field names. funcs (dict): Optional. A dict of 'las field': function() for implementing a transform before loading. Can be a lambda. Returns: Location. An instance of this class.
juraj-google-style
def _prepare_swaption_indices(tensor_shape): tensor_shape = np.array(tensor_shape, dtype=np.int64) batch_shape = tensor_shape[1:-1] batch_size = np.prod(batch_shape) index_list = [] for i in range(len(tensor_shape)): index = np.arange(0, tensor_shape[i], dtype=np.int64) if i == 0 or i == len(tensor_shape) - 1: index = tf.tile(index, [batch_size]) else: index = np.tile(np.repeat(index, np.prod(tensor_shape[i + 1:])), [np.prod(tensor_shape[1:i])]) index_list.append(index) return tf.stack(index_list, axis=-1)
Indices for `gather_nd` for analytic valuation. For a `Tensor` x of shape `tensor_shape` = [n] + batch_shape + [n], this function returns indices for tf.gather_nd to get `x[i,...,i]` Args: tensor_shape: A list of length `k` representing shape of the `Tensor`. Returns: A `Tensor` of shape (num_elements, k) where num_elements= n * batch_size of dtype tf.int64.
github-repos
def __similarity(s1, s2, ngrams_fn, n=3):
    """The fraction of n-grams matching between two sequences.

    Computes the Dice coefficient over the two n-gram sets.

    Args:
        s1: a string
        s2: another string
        ngrams_fn: callable producing the n-grams of a string, invoked
            as ``ngrams_fn(s, n=n)``
        n: an int for the n in n-gram

    Returns:
        float: the fraction of n-grams matching; 0.0 when neither
        string yields any n-gram (both shorter than ``n``).
    """
    ngrams1, ngrams2 = set(ngrams_fn(s1, n=n)), set(ngrams_fn(s2, n=n))
    total = len(ngrams1) + len(ngrams2)
    if not total:
        # Avoid ZeroDivisionError for inputs too short to have n-grams.
        return 0.0
    matches = ngrams1.intersection(ngrams2)
    return 2 * len(matches) / total
The fraction of n-grams matching between two sequences Args: s1: a string s2: another string n: an int for the n in n-gram Returns: float: the fraction of n-grams matching
juraj-google-style
def getISOSetupList(self): transfer_p = self.__transfer transfer = transfer_p.contents if transfer.type != TRANSFER_TYPE_ISOCHRONOUS: raise TypeError( 'This method cannot be called on non-iso transfers.' ) return [ { 'length': x.length, 'actual_length': x.actual_length, 'status': x.status, } for x in libusb1.get_iso_packet_list(transfer_p) ]
Get individual ISO transfer's setup. Returns a list of dicts, each containing an individual ISO transfer parameters: - length - actual_length - status (see libusb1's API documentation for their signification) Returned list is consistent with getISOBufferList return value. Should not be called on a submitted transfer (except for 'length' values).
juraj-google-style
def add_relations(spec_dict: Mapping[str, Any]) -> Mapping[str, Any]:
    """Add relation lookup keys to a BEL specification dictionary.

    Builds short/long relation-name lists and bidirectional
    short<->long translation maps from ``spec_dict['relations']['info']``.

    Args:
        spec_dict (Mapping[str, Any]): bel specification dictionary

    Returns:
        Mapping[str, Any]: bel specification dictionary with added relation keys
    """
    relations = spec_dict["relations"]
    info = relations["info"]

    long_names = list(info)
    short_names = [info[name]["abbreviation"] for name in long_names]

    # Interleaved (long, short) pairs, preserving the info ordering.
    combined = []
    to_short = {}
    to_long = {}
    for long_name, short_name in zip(long_names, short_names):
        combined.extend((long_name, short_name))
        to_short[long_name] = short_name
        to_short[short_name] = short_name
        to_long[short_name] = long_name
        to_long[long_name] = long_name

    relations["list"] = combined
    relations["list_long"] = long_names
    relations["list_short"] = short_names
    relations["to_short"] = to_short
    relations["to_long"] = to_long

    return spec_dict
Add relation keys to spec_dict Args: spec_dict (Mapping[str, Any]): bel specification dictionary Returns: Mapping[str, Any]: bel specification dictionary with added relation keys
juraj-google-style
def experimental_local_results(self, value):
    """Returns the list of all local per-replica values in `value`.

    Note: this only returns values on the worker initiated by this
    client; with a multi-worker strategy each worker is its own client.

    Args:
        value: A value returned by `experimental_run()`, `run()`, or a
            variable created in `scope`.

    Returns:
        A tuple of values contained in `value`, one per local replica;
        `(value,)` if `value` represents a single value.
    """
    return self._extended._local_results(value)
Returns the list of all local per-replica values contained in `value`. Note: This only returns values on the worker initiated by this client. When using a `tf.distribute.Strategy` like `tf.distribute.experimental.MultiWorkerMirroredStrategy`, each worker will be its own client, and this function will only return values computed on that worker. Args: value: A value returned by `experimental_run()`, `run(), or a variable created in `scope`. Returns: A tuple of values contained in `value` where ith element corresponds to ith replica. If `value` represents a single value, this returns `(value,).`
github-repos
def institutes(self, institute_ids=None):
    """Fetch all institutes, optionally restricted to the given ids.

    Args:
        institute_ids (list(str)): optional institute ids to filter on.

    Returns:
        res (pymongo.Cursor): cursor over the matching institute docs.
    """
    query = {}
    if institute_ids:
        # Restrict to the requested ids; a falsy value means "all".
        query['_id'] = {'$in': institute_ids}
    LOG.debug("Fetching all institutes")
    return self.institute_collection.find(query)
Fetch all institutes. Args: institute_ids(list(str)) Returns: res(pymongo.Cursor)
juraj-google-style
def features(self): buf = (ctypes.c_char * self.MAX_BUF_SIZE)() self._dll.JLINKARM_GetFeatureString(buf) result = ctypes.string_at(buf).decode().strip() if (len(result) == 0): return list() return result.split(', ')
Returns a list of the J-Link embedded features. Args: self (JLink): the ``JLink`` instance Returns: A list of strings, each a feature. Example: ``[ 'RDI', 'FlashBP', 'FlashDL', 'JFlash', 'GDB' ]``
codesearchnet
def _save_env(env): env_path = os.path.join(env['resultdir'], 'env') if os.path.isdir(env['resultdir']): with open(env_path, 'w') as f: yaml.dump(env, f)
Saves one environment. Args: env (dict): the env dict to save.
codesearchnet
def load(self, read_tuple_name): self.prefix_width = 0 self.read_tuple_id_width = 0 self.genome_id_width = 0 self.chr_id_width = 0 self.coor_width = 0 parts = read_tuple_name.split("__") self.prefix_width = len(parts[0]) self.read_tuple_id_width = len(parts[1]) segments = parts[2][1:-1].split("),(") for segment in segments: int_widths = list(map(len, segment.split(","))) self.genome_id_width = max(self.genome_id_width, int_widths[0]) self.chr_id_width = max(self.chr_id_width, int_widths[1]) self.coor_width = max(self.coor_width, int_widths[2], int_widths[3])
Load RNF values from a read tuple name. Args: read_tuple_name (str): Read tuple name which the values are taken from.
juraj-google-style
def shutdown(self, vm_names=None, reboot=False):
    """Shutdown this prefix.

    Args:
        vm_names (list of str): List of the vms to shutdown.
        reboot (bool): If true, reboot the requested vms.

    Returns:
        None
    """
    # Delegate the entire operation to the underlying virtual environment.
    env = self.virt_env
    env.shutdown(vm_names, reboot)
Shutdown this prefix Args: vm_names(list of str): List of the vms to shutdown reboot(bool): If true, reboot the requested vms Returns: None
codesearchnet
def subscribe(self, devices_to_bind=[]): if self.entity_api_key == "": return {'status': 'failure', 'response': 'No API key found in request'} self.bind(devices_to_bind) loop = asyncio.new_event_loop() t1 = threading.Thread(target=self.start_subscribe_worker, args=(loop,)) t1.daemon = True t1.start()
This function allows an entity to subscribe for data from the devices specified in the bind operation. It creates a thread with an event loop to manager the tasks created in start_subscribe_worker. Args: devices_to_bind (list): an array of devices to listen to
juraj-google-style
def npy_to_numpy(npy_array):
    """Deserialize NPY-formatted bytes into a numpy array.

    Args:
        npy_array (npy array): raw ``.npy`` payload to be converted.

    Returns:
        (np.array): converted numpy array.
    """
    # Wrap the raw bytes in a file-like object so np.load can parse the
    # NPY header and data; the array is fully read before the buffer closes.
    with BytesIO(npy_array) as buffer:
        return np.load(buffer, allow_pickle=True)
Convert an NPY array into numpy. Args: npy_array (npy array): to be converted to numpy array Returns: (np.array): converted numpy array.
juraj-google-style
def scale(p, factor, o=(0, 0)):
    """Scale point ``p`` by ``factor`` about origin ``o``.

    Args:
        p: point (x, y)
        factor: scaling factor
        o: origin (x, y)
    """
    v = vector(o, p)
    # Stretch the origin-to-point vector, then move it back to the origin.
    stretched = (v[0] * factor, v[1] * factor)
    return translate(stretched, o)
scale vector Args: p: point (x, y) factor: scaling factor o: origin (x, y)
juraj-google-style
def write_new_config(self, updates):
    """Write the given updates out to the new configuration file.

    Each update is written as a pip-requirements style
    ``name==version`` line to ``self._new_config``.

    Args:
        updates (list): List of Update objects.  Assumed to expose
            ``name`` and ``new_version`` attributes -- TODO confirm
            against the Update class definition.
    """
    with open(self._new_config, 'w') as config_file:
        for update in updates:
            # NOTE(review): the original source was garbled here
            # (unterminated string literal); reconstructed as a
            # standard requirements line per the docstring's intent.
            line = '{0}=={1}\n'.format(update.name, update.new_version)
            config_file.write(line)
Given a list of updates, write the updates out to the provided configuartion file. Args: updates (list): List of Update objects.
codesearchnet
def record(self, value=1.0, time_ms=None): if time_ms is None: time_ms = time.time() * 1000 self._last_record_time = time_ms with self._lock: for stat in self._stats: stat.record(self._config, value, time_ms) self._check_quotas(time_ms) for parent in self._parents: parent.record(value, time_ms)
Record a value at a known time. Arguments: value (double): The value we are recording time_ms (int): A POSIX timestamp in milliseconds. Default: The time when record() is evaluated (now) Raises: QuotaViolationException: if recording this value moves a metric beyond its configured maximum or minimum bound
juraj-google-style
def enroll_user_in_course(self, username, course_id, mode, cohort=None):
    """Enroll the user in the given course via the enrollment API.

    Args:
        username (str): The username by which the user goes on the
            OpenEdX platform.
        course_id (str): The string value of the course's unique identifier.
        mode (str): The enrollment mode which should be used for the
            enrollment.
        cohort (str): Add the user to this named cohort.

    Returns:
        dict: A dictionary containing details of the enrollment,
        including course details, mode, username, etc.
    """
    payload = {
        'user': username,
        'course_details': {'course_id': course_id},
        'mode': mode,
        'cohort': cohort,
    }
    return self.client.enrollment.post(payload)
Call the enrollment API to enroll the user in the course specified by course_id. Args: username (str): The username by which the user goes on the OpenEdX platform course_id (str): The string value of the course's unique identifier mode (str): The enrollment mode which should be used for the enrollment cohort (str): Add the user to this named cohort Returns: dict: A dictionary containing details of the enrollment, including course details, mode, username, etc.
codesearchnet
def encode_bu64(b):
    """Encode bytes to the URL-safe, unpadded Base64 flavor used by JWTs.

    - Reverse of decode_bu64().

    Args:
        b: bytes
            Bytes to Base64 encode.

    Returns:
        bytes: URL safe Base64 encoded version of input, with ``=``
        padding stripped and ``+``/``/`` mapped to ``-``/``_``.
    """
    # The original mixed bytes with str arguments (rstrip('='),
    # replace('+', '-')), which raises TypeError on Python 3.
    # urlsafe_b64encode already performs the +/- and /_ substitution.
    return base64.urlsafe_b64encode(b).rstrip(b'=')
Encode bytes to a URL safe flavor of Base64 used by JWTs. - Reverse of decode_bu64(). Args: b: bytes Bytes to Base64 encode. Returns: bytes: URL safe Base64 encoded version of input.
juraj-google-style
def setCTRatio(self, new_ct, password='00000000'): ret = False self.setContext('setCTRatio') try: self.clearCmdMsg() if ((new_ct != CTRatio.Amps_100) and (new_ct != CTRatio.Amps_200) and (new_ct != CTRatio.Amps_400) and (new_ct != CTRatio.Amps_600) and (new_ct != CTRatio.Amps_800) and (new_ct != CTRatio.Amps_1000) and (new_ct != CTRatio.Amps_1200) and (new_ct != CTRatio.Amps_1500) and (new_ct != CTRatio.Amps_2000) and (new_ct != CTRatio.Amps_3000) and (new_ct != CTRatio.Amps_4000) and (new_ct != CTRatio.Amps_5000)): self.writeCmdMsg(('Legal CT Ratios: 100, 200, 400, 600, ' + '800, 1000, 1200, 1500, 2000, 3000, 4000 and 5000')) self.setContext('') return ret if (len(password) != 8): self.writeCmdMsg('Invalid password length.') self.setContext('') return ret if (not self.request(False)): self.writeCmdMsg('Bad read CRC on setting') elif (not self.serialCmdPwdAuth(password)): self.writeCmdMsg('Password failure') else: req_str = (('015731023030443028' + binascii.hexlify(str(new_ct).zfill(4))) + '2903') req_str += self.calc_crc16(req_str[2:].decode('hex')) self.m_serial_port.write(req_str.decode('hex')) if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'): self.writeCmdMsg('Success(setCTRatio): 06 returned.') ret = True self.serialPostEnd() except: ekm_log(traceback.format_exc(sys.exc_info())) self.setContext('') return ret
Serial call to set CT ratio for attached inductive pickup. Args: new_ct (int): A :class:`~ekmmeters.CTRatio` value, a legal amperage setting. password (str): Optional password. Returns: bool: True on completion with ACK.
codesearchnet
def get_collectors(self, limit=1000, offset=0): options = { 'limit': limit, 'offset': offset, } request = requests.get(self.url, params=options, auth=self.auth) try: results = request.json()['collectors'] except KeyError: results = request.json() except json.decoder.JSONDecodeError: results = [] return results
Returns a dict of collectors. Args: limit (int): number of collectors to return offset (int): the offset of where the list of collectors should begin from
juraj-google-style
def daemonize(pidfile=None): resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) os.chdir("/") os.umask(0) pid = os.fork() if pid > 0: os._exit(0) os.setsid() pid = os.fork() if pid > 0: os._exit(0) def terminate(signal, stack_frame): msg = 'Terminating on signal {}'.format(signal) logger.info(msg) raise SystemExit(msg) signal.signal(signal.SIGTERM, terminate) streams = [sys.stdin, sys.stdout, sys.stderr] for stream in streams: devnull = os.open(os.devnull, os.O_RDWR) os.dup2(devnull, stream.fileno()) for fd in [stream.fileno() for stream in streams]: try: os.close(fd) except OSError as err: if err.errno == errno.EBADF: pass if pidfile is None or pidfile.strip() == '': logger.debug('Empty pidfile set') else: pid = os.getpid() try: with open(pidfile, 'w') as f: f.write('{}\n'.format(pid)) f.close() except EnvironmentError: logger.error('Failed to create pidfile at {}'.format(pidfile)) def remove_pid_file(): os.remove(pidfile) atexit.register(remove_pid_file) logger.debug('Process daemonized')
Turn the running process into a proper daemon according to PEP3143. Args: pidfile --The pidfile to create.
juraj-google-style
def assert_input_compatibility(input_spec, inputs, layer_name): if not input_spec: return input_spec = tree.flatten(input_spec) if isinstance(inputs, dict): names = [spec.name for spec in input_spec] if all(names): list_inputs = [] for name in names: if name not in inputs: raise ValueError(f'Missing data for input "{name}". You passed a data dictionary with keys {list(inputs.keys())}. Expected the following keys: {names}') list_inputs.append(inputs[name]) inputs = list_inputs inputs = tree.flatten(inputs) if len(inputs) != len(input_spec): raise ValueError(f'Layer "{layer_name}" expects {len(input_spec)} input(s), but it received {len(inputs)} input tensors. Inputs received: {inputs}') for input_index, (x, spec) in enumerate(zip(inputs, input_spec)): if spec is None: continue if x is None and spec.optional: continue if not hasattr(x, 'shape'): raise ValueError(f"Inputs to a layer should be tensors. Got '{x}' (of type {type(x)}) as input for layer '{layer_name}'.") shape = backend.standardize_shape(x.shape) ndim = len(shape) if spec.ndim is not None and (not spec.allow_last_axis_squeeze): if ndim != spec.ndim: raise ValueError(f'Input {input_index} of layer "{layer_name}" is incompatible with the layer: expected ndim={spec.ndim}, found ndim={ndim}. Full shape received: {shape}') if spec.max_ndim is not None: if ndim is not None and ndim > spec.max_ndim: raise ValueError(f'Input {input_index} of layer "{layer_name}" is incompatible with the layer: expected max_ndim={spec.max_ndim}, found ndim={ndim}') if spec.min_ndim is not None: if ndim is not None and ndim < spec.min_ndim: raise ValueError(f'Input {input_index} of layer "{layer_name}" is incompatible with the layer: expected min_ndim={spec.min_ndim}, found ndim={ndim}. 
Full shape received: {shape}') if spec.dtype is not None: dtype = backend.standardize_dtype(x.dtype) if dtype != spec.dtype: raise ValueError(f'Input {input_index} of layer "{layer_name}" is incompatible with the layer: expected dtype={spec.dtype}, found dtype={dtype}') if spec.axes: for axis, value in spec.axes.items(): if value is not None and shape[axis] not in {value, None}: raise ValueError(f'Input {input_index} of layer "{layer_name}" is incompatible with the layer: expected axis {axis} of input shape to have value {value}, but received input with shape {shape}') if spec.shape is not None: spec_shape = spec.shape if spec.allow_last_axis_squeeze: if shape and shape[-1] == 1: shape = shape[:-1] if spec_shape and spec_shape[-1] == 1: spec_shape = spec_shape[:-1] for spec_dim, dim in zip(spec_shape, shape): if spec_dim is not None and dim is not None: if spec_dim != dim: raise ValueError(f'Input {input_index} of layer "{layer_name}" is incompatible with the layer: expected shape={spec.shape}, found shape={shape}')
Checks compatibility between the layer and provided inputs. This checks that the tensor(s) `inputs` verify the input assumptions of a layer (if any). If not, a clear and actional exception gets raised. Args: input_spec: An InputSpec instance, list of InputSpec instances, a nested structure of InputSpec instances, or None. inputs: Input tensor, list of input tensors, or a nested structure of input tensors. layer_name: String, name of the layer (for error message formatting). Raises: ValueError: in case of mismatch between the provided inputs and the expectations of the layer.
github-repos
def detect_phantomjs(version='2.1'): if settings.phantomjs_path() is not None: phantomjs_path = settings.phantomjs_path() else: if hasattr(shutil, "which"): phantomjs_path = shutil.which("phantomjs") or "phantomjs" else: phantomjs_path = "phantomjs" try: proc = Popen([phantomjs_path, "--version"], stdout=PIPE, stderr=PIPE) proc.wait() out = proc.communicate() if len(out[1]) > 0: raise RuntimeError('Error encountered in PhantomJS detection: %r' % out[1].decode('utf8')) required = V(version) installed = V(out[0].decode('utf8')) if installed < required: raise RuntimeError('PhantomJS version to old. Version>=%s required, installed: %s' % (required, installed)) except OSError: raise RuntimeError('PhantomJS is not present in PATH or BOKEH_PHANTOMJS_PATH. Try "conda install phantomjs" or \ "npm install -g phantomjs-prebuilt"') return phantomjs_path
Detect if PhantomJS is avaiable in PATH, at a minimum version. Args: version (str, optional) : Required minimum version for PhantomJS (mostly for testing) Returns: str, path to PhantomJS
juraj-google-style
def _convert_rnn_weights(layer, weights): def transform_kernels(kernels, func, n_gates): return np.hstack([func(k) for k in np.hsplit(kernels, n_gates)]) def transpose_input(from_cudnn): order = 'F' if from_cudnn else 'C' def transform(kernel): return kernel.T.reshape(kernel.shape, order=order) return transform target_class = layer.__class__.__name__ if target_class in ['LSTM', 'CuDNNLSTM'] and len(weights) == 3: units = weights[1].shape[0] bias_shape = weights[2].shape n_gates = 4 if bias_shape == (2 * units * n_gates,): source = 'CuDNNLSTM' elif bias_shape == (units * n_gates,): source = 'LSTM' else: raise ValueError('Invalid bias shape: ' + str(bias_shape)) def convert_lstm_weights(weights, from_cudnn=True): kernels = transform_kernels(weights[0], transpose_input(from_cudnn), n_gates) recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates) if from_cudnn: biases = np.sum(np.split(weights[2], 2, axis=0), axis=0) else: biases = np.tile(0.5 * weights[2], 2) return [kernels, recurrent_kernels, biases] if source != target_class: weights = convert_lstm_weights(weights, from_cudnn=source == 'CuDNNLSTM') if target_class in ['GRU', 'CuDNNGRU'] and len(weights) == 3: units = weights[1].shape[0] bias_shape = weights[2].shape n_gates = 3 def convert_gru_weights(weights, from_cudnn=True): kernels = transform_kernels(weights[0], transpose_input(from_cudnn), n_gates) recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates) biases = np.array(weights[2]).reshape((2, -1) if from_cudnn else -1) return [kernels, recurrent_kernels, biases] if bias_shape == (2 * units * n_gates,): source = 'CuDNNGRU' elif bias_shape == (2, units * n_gates): source = 'GRU(reset_after=True)' elif bias_shape == (units * n_gates,): source = 'GRU(reset_after=False)' else: raise ValueError('Invalid bias shape: ' + str(bias_shape)) if target_class == 'CuDNNGRU': target = 'CuDNNGRU' elif layer.reset_after: target = 'GRU(reset_after=True)' else: target = 
'GRU(reset_after=False)' if source != target: types = (source, target) if 'GRU(reset_after=False)' in types: raise ValueError('%s is not compatible with %s' % types) if source == 'CuDNNGRU': weights = convert_gru_weights(weights, from_cudnn=True) elif source == 'GRU(reset_after=True)': weights = convert_gru_weights(weights, from_cudnn=False) return weights
Converts weights for RNN layers between native and CuDNN format. Input kernels for each gate are transposed and converted between Fortran and C layout, recurrent kernels are transposed. For LSTM biases are summed/ split in half, for GRU biases are reshaped. Weights can be converted in both directions between `LSTM` and`CuDNNSLTM` and between `CuDNNGRU` and `GRU(reset_after=True)`. Default `GRU` is not compatible with `CuDNNGRU`. For missing biases in `LSTM`/`GRU` (`use_bias=False`) no conversion is made. Args: layer: Target layer instance. weights: List of source weights values (input kernels, recurrent kernels, [biases]) (Numpy arrays). Returns: A list of converted weights values (Numpy arrays). Raises: ValueError: for incompatible GRU layer/weights or incompatible biases
github-repos
class DistributionMetric(Metric): def __init__(self, dist_metric, submit_timestamp, metric_id, metric_type): custom_label = dist_metric.key.metric.namespace + '_' + parse_step(dist_metric.key.step) + '_' + metric_type + '_' + dist_metric.key.metric.name value = getattr(dist_metric.result, metric_type) if value is None: msg = '%s: the result is expected to be an integer, not None.' % custom_label _LOGGER.debug(msg) raise ValueError(msg) super().__init__(submit_timestamp, metric_id, value, dist_metric, custom_label)
The Distribution Metric in ready-to-publish format. Args: dist_metric (object): distribution metric object from MetricResult submit_timestamp (float): date-time of saving metric to database metric_id (uuid): unique id to identify test run
github-repos
def predict_features(self, df_features, df_target, idx=0, **kwargs): X = df_features.values y = df_target.values clf = ard(compute_score=True) clf.fit(X, y.ravel()) return np.abs(clf.coef_)
For one variable, predict its neighbouring nodes. Args: df_features (pandas.DataFrame): df_target (pandas.Series): idx (int): (optional) for printing purposes kwargs (dict): additional options for algorithms Returns: list: scores of each feature relatively to the target
juraj-google-style
def get_angle(self, i: int, j: int, k: int) -> float: v1 = self[i].coords - self[j].coords v2 = self[k].coords - self[j].coords return get_angle(v1, v2, units="degrees")
Returns angle specified by three sites. Args: i: Index of first site. j: Index of second site. k: Index of third site. Returns: Angle in degrees.
juraj-google-style
def getall(self): interfaces_re = re.compile('(?<=^interface\\s)([Et|Po].+)$', re.M) response = dict() for name in interfaces_re.findall(self.config): interface = self.get(name) if interface: response[name] = interface return response
Returns a dict object to all Switchports This method will return all of the configured switchports as a dictionary object keyed by the interface identifier. Returns: A Python dictionary object that represents all configured switchports in the current running configuration
codesearchnet
def convert(self, vroot, entry_variables):
    """Convert a given graph.

    Applies each registered converter to the graph sequentially, in
    registration order, feeding each converter's output to the next.

    Args:
        vroot (:obj:`Variable`): NNabla Variable
        entry_variables (:obj:`Variable`): Entry variable from which the
            conversion starts.
    """
    result = vroot
    for conv in self.converters:
        result = conv.convert(result, entry_variables)
    return result
Convert a given graph. Convert a given graph using the `converters` in the order of the registeration, i.e., sequentially. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
juraj-google-style
def remove_item(self, **kwargs): path = self._get_id_path('remove_item') kwargs.update({'session_id': self.session_id}) payload = { 'media_id': kwargs.pop('media_id', None), } response = self._POST(path, kwargs, payload) self._set_attrs_to_values(response) return response
Delete movies from a list that the user created. A valid session id is required. Args: media_id: A movie id. Returns: A dict respresentation of the JSON returned from the API.
juraj-google-style
def assign_group_v2(group_assignment, device_index, base_key): group_size, group_key = gen_collective_ops.collective_assign_group_v2(group_assignment=group_assignment, device_index=device_index, base_key=base_key) return (group_size, group_key)
Assign group key based on group_assignment. Args: group_assignment: a 2 dimensional integer Tensor that encodes which devices belong to the same group. The values are indices of the devices within 0 to number of devices. device_index: integer for the index of the current device base_key: integer to offset the resulted group_key. The base key shall be unique for different values of group_assignment in the same tf.function. Notes: The device_index argument must be consistent with the index of the device of this Op in the device assignment list. The behavior of this Op is undefined if they are inconsistent. Returns: group_size, group_key: The group size and group key for the current device.
github-repos
def div(x, y, name=None):
    """Divides x / y elementwise using Python 2 division operator semantics.

    Deprecated thin wrapper that delegates to the module-private
    `_div_python2` helper: if `x` and `y` are both integers the result is an
    integer (floor division), unlike Python 3's true division `/`.

    Args:
        x: `Tensor` numerator of real numeric type.
        y: `Tensor` denominator of real numeric type.
        name: Optional name for the operation.

    Returns:
        `x / y` computed with Python 2 division semantics.
    """
    return _div_python2(x, y, name)
Divides x / y elementwise (using Python 2 division operator semantics). @compatibility(TF2) This function is deprecated in TF2. Prefer using the Tensor division operator, `tf.divide`, or `tf.math.divide`, which obey the Python 3 division operator semantics. @end_compatibility This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x` and `y` are both integers then the result will be an integer. This is in contrast to Python 3, where division with `/` is always a float while division with `//` is always an integer. Args: x: `Tensor` numerator of real numeric type. y: `Tensor` denominator of real numeric type. name: A name for the operation (optional). Returns: `x / y` returns the quotient of x and y.
github-repos
def get_dataset(dataset='s2l1c'):
    """Get a specific sample dataset to play around with.

    Supported datasets:
      * 's2l1c': One Sentinel-2 Level 1C scene with reference vector data.
      * 'lsts': A time series of 105 Landsat scenes each with the bands
        b3 (red), b4 (nir), b5 (swir1) and fmask.

    Keyword Arguments:
        dataset {str} -- The name of the dataset (default: {'s2l1c'}).

    Returns:
        [dict] -- A dictionary with file paths and band/time information
        about the sample data.

    Raises:
        ValueError: If ``dataset`` is not one of the supported names.
        IOError: If no raster files could be found for the dataset.
    """
    if dataset == 's2l1c':
        search_string = os.path.join(DIR_DATA, dataset, '**', '*_B??.jp2')
        files = glob.glob(search_string, recursive=True)
        if not files:
            raise IOError(f'Could not find raster files of the s2l1c dataset. Search string: {search_string}')
        # Band id and acquisition time are the last two '_'-separated parts
        # of the file name (extension stripped).
        basename_splitted = [pth.replace('.jp2', '').split('_')[-2:]
                             for pth in files]
        dset = {
            'raster_files': files,
            'raster_bands': [ele[1] for ele in basename_splitted],
            'raster_times': [ele[0] for ele in basename_splitted],
            'vector_file': os.path.join(DIR_DATA, 's2l1c', 's2l1c_ref.gpkg'),
            'vector_file_osm': os.path.join(
                DIR_DATA, 's2l1c',
                'gis_osm_landuse-water_a_free_1_area-10000-to-500000.gpkg'),
        }
    elif dataset == 'lsts':
        search_string = os.path.join(DIR_DATA, dataset, '**', '*.tif')
        files = glob.glob(search_string, recursive=True)
        if not files:
            raise IOError(f'Could not find raster files of the lsts dataset. Search string: {search_string}')
        basename_splitted = [os.path.basename(pth).replace('.tif', '').split('_')
                             for pth in files]
        # Characters 9:16 of the scene id carry the acquisition time stamp.
        dset = {
            'raster_files': files,
            'raster_bands': [ele[1] for ele in basename_splitted],
            'raster_times': [ele[0][9:16] for ele in basename_splitted],
        }
    else:
        # Previously an unknown name fell through to ``return dset`` and
        # raised UnboundLocalError; fail explicitly instead.
        raise ValueError(
            f"Unknown dataset name: {dataset!r}. "
            f"Supported datasets are 's2l1c' and 'lsts'.")
    return dset
Get a specific sampledata to play around. So far the following sampledata exist: * 's2l1c': One Sentinel-2 Level 1C scene with a reference dataset. * 'lsts': A time series of 105 Landsat scenes each with the bands b3 (red), b4 (nir), b5 (swir1) and fmask. Keyword Arguments: dataset {str} -- The name of the dataset (default: {'s2l1c'}). Returns: [dict] -- A dictionary with paths and information about the sampledata.
codesearchnet
def get_user_groups(self, user):
    """Get a user's group memberships.

    Authenticates the project service with the project token before the
    lookup.

    Args:
        user (string): User name.

    Returns:
        (list): User's groups.

    Raises:
        requests.HTTPError on failure.
    """
    service = self.project_service
    service.set_auth(self._token_project)
    return service.get_user_groups(user)
Get user's group memberships. Args: user (string): User name. Returns: (list): User's groups. Raises: requests.HTTPError on failure.
codesearchnet
def forward(self, hidden_states: List[torch.Tensor], patch_height: Optional[int]=None, patch_width: Optional[int]=None, prompt_depth: Optional[torch.Tensor]=None) -> List[torch.Tensor]:
    """Run the neck: reassemble backbone features, project, then fuse.

    Args:
        hidden_states (`List[torch.FloatTensor]`): List of hidden states
            from the backbone; must have one entry per configured neck
            hidden size.
        patch_height: Optional patch height forwarded to the reassemble
            stage.
        patch_width: Optional patch width forwarded to the reassemble stage.
        prompt_depth: Optional prompt depth forwarded to the fusion stage.

    Raises:
        TypeError: If ``hidden_states`` is not a tuple or list.
        ValueError: If its length does not match
            ``config.neck_hidden_sizes``.
    """
    if not isinstance(hidden_states, (tuple, list)):
        raise TypeError('hidden_states should be a tuple or list of tensors')
    if len(hidden_states) != len(self.config.neck_hidden_sizes):
        raise ValueError('The number of hidden states should be equal to the number of neck hidden sizes.')
    reassembled = self.reassemble_stage(hidden_states, patch_height, patch_width)
    projected = [self.convs[idx](feature) for idx, feature in enumerate(reassembled)]
    return self.fusion_stage(projected, prompt_depth=prompt_depth)
Args: hidden_states (`List[torch.FloatTensor]`, each of shape `(batch_size, sequence_length, hidden_size)` or `(batch_size, hidden_size, height, width)`): List of hidden states from the backbone.
github-repos
def _HasId(self, schedule, entity_id):
    """Check whether the schedule contains an entity with the given id.

    Args:
        schedule: The transitfeed.Schedule instance to look in.
        entity_id: The id of the entity.

    Returns:
        True if an entity with that id exists, False otherwise.
    """
    # _GetById raises KeyError for unknown ids; translate that into False.
    try:
        self._GetById(schedule, entity_id)
    except KeyError:
        return False
    return True
Check if the schedule has an entity with the given id. Args: schedule: The transitfeed.Schedule instance to look in. entity_id: The id of the entity. Returns: True if the schedule has an entity with the id or False if not.
codesearchnet
def __init__(self, children: Optional[List['AbstractSyntaxTree']]=None) -> None:
    """Initializes the node with an optional list of children.

    Each child's ``parent`` is set here, as a weak reference so the
    parent/child link does not create a reference cycle.

    Args:
        children: The optional list of children belonging to this node.
    """
    self.data_type: Optional[_fhir_path_data_types.FhirPathDataType] = None
    self.parent = None
    self._children = children
    for child in children or []:
        child.parent = weakref.proxy(self)
Initializes an `AbstractSyntaxTree` with an optional list of children. Note that the `parent` property is set for children at the time their parent is initialized. It is set as a weak reference to avoid retain cycles. Args: children: The optional list of children belonging to this node.
github-repos
def new(self, index=None):
    """Get the new corrected annotation.

    Without ``index`` the first ``New`` element found is returned.  When
    ``index`` is given, it is used to subscript that first ``New`` element.

    Returns:
        an annotation element (:class:`AbstractElement`)

    Raises:
        :class:`NoSuchAnnotation`: if no ``New`` element exists.
    """
    if index is None:
        try:
            return next(self.select(New, None, False))
        except StopIteration:
            raise NoSuchAnnotation
    for candidate in self.select(New, None, False):
        return candidate[index]
    raise NoSuchAnnotation
Get the new corrected annotation. This returns only one annotation if multiple exist, use `index` to select another in the sequence. Returns: an annotation element (:class:`AbstractElement`) Raises: :class:`NoSuchAnnotation`
codesearchnet
def call_with_captures(self, args, kwargs, captures):
    """Calls this function with explicitly supplied captured tensor values.

    NOTE(review): no implementation is visible here -- this reads as an
    interface/abstract stub whose concrete behavior is provided elsewhere;
    confirm against the defining class.

    Args:
        args: Tuple containing positional arguments.
        kwargs: Dict containing keyword arguments.
        captures: Captured tensor values to substitute -- presumably a tuple
            matching the function's capture signature; verify with callers.
    """
Calls this AtomicFunction with captures as defined by its FunctionType. Args: args: Tuple containing positional arguments kwargs: Dict containing keyword arguments captures: Tuple of tensors supplying captured tensor values. Returns: A structured output value based on the inputs.
github-repos
def add_keywords_from_list(self, keyword_list):
    """Add every keyword in ``keyword_list`` to the processor.

    Args:
        keyword_list (list(str)): List of keywords to add.

    Examples:
        >>> keyword_processor.add_keywords_from_list(["java", "python"])

    Raises:
        AttributeError: If ``keyword_list`` is not a list.
    """
    # Reject non-list inputs up front (e.g. a bare string would otherwise
    # be iterated character by character).
    if not isinstance(keyword_list, list):
        raise AttributeError("keyword_list should be a list")
    for word in keyword_list:
        self.add_keyword(word)
To add keywords from a list Args: keyword_list (list(str)): List of keywords to add Examples: >>> keyword_processor.add_keywords_from_list(["java", "python"]) Raises: AttributeError: If `keyword_list` is not a list.
juraj-google-style
def __init__(self, offsets, max_values_count, max_values_size):
    """Constructor.

    Args:
        offsets: Per-input-file start offsets, as a list of ints.
        max_values_count: Maximum number of values to yield for a single
            value at a time; ignored if -1.
        max_values_size: Maximum total size of yielded values; ignored
            if -1.
    """
    self._max_values_size = max_values_size
    self._max_values_count = max_values_count
    self._offsets = offsets
Constructor. Args: offsets: offsets for each input file to start from as list of ints. max_values_count: maximum number of values to yield for a single value at a time. Ignored if -1. max_values_size: maximum total size of yielded values. Ignored if -1
juraj-google-style
def simple_generate_batch(cls, create, size, **kwargs):
    """Generate a batch of instances, either 'built' or 'created'.

    Args:
        create (bool): whether to 'create' (True) or 'build' (False) the
            instances.
        size (int): the number of instances to generate.

    Returns:
        object list: the generated instances.
    """
    if create:
        strategy = enums.CREATE_STRATEGY
    else:
        strategy = enums.BUILD_STRATEGY
    return cls.generate_batch(strategy, size, **kwargs)
Generate a batch of instances. These instances will be either 'built' or 'created'. Args: size (int): the number of instances to generate create (bool): whether to 'build' or 'create' the instances. Returns: object list: the generated instances
juraj-google-style
def get(cls, issue_type):
    """Return the IssueType object for ``issue_type``.

    If no existing object is found, a new type is created in the database
    and returned.

    Args:
        issue_type (str,int,IssueType): Issue type name, id or instance.

    Returns:
        :obj:`IssueType`
    """
    # Dispatch on input kind; an existing IssueType instance is returned
    # as-is without touching the database.
    if isinstance(issue_type, str):
        found = getattr(db, cls.__name__).find_one(cls.issue_type == issue_type)
    elif isinstance(issue_type, int):
        found = getattr(db, cls.__name__).find_one(cls.issue_type_id == issue_type)
    elif isinstance(issue_type, cls):
        return issue_type
    else:
        found = None
    if found:
        return found
    # Nothing matched: persist a new IssueType row and return it.
    new_type = cls()
    new_type.issue_type = issue_type
    db.session.add(new_type)
    db.session.commit()
    db.session.refresh(new_type)
    return new_type
Returns the IssueType object for `issue_type`. If no existing object was found, a new type will be created in the database and returned Args: issue_type (str,int,IssueType): Issue type name, id or class Returns: :obj:`IssueType`
juraj-google-style
def bind(self, attribute, cls, buffer, fmt, *, offset=0, stride=0, divisor=0, normalize=False) -> None:
    """Bind an individual attribute to a buffer.

    Args:
        attribute (int): The attribute location.
        cls (str): The attribute class. Valid values are ``f``, ``i``
            or ``d``.
        buffer (Buffer): The buffer to bind.
        fmt (str): The buffer format.

    Keyword Args:
        offset (int): The offset.
        stride (int): The stride.
        divisor (int): The divisor.
        normalize (bool): The normalize parameter, if applicable.
    """
    low_level = self.mglo
    low_level.bind(attribute, cls, buffer.mglo, fmt, offset, stride, divisor, normalize)
Bind individual attributes to buffers. Args: attribute (int): The attribute location. cls (str): The attribute class. Valid values are ``f``, ``i`` or ``d``. buffer (Buffer): The buffer. fmt (str): The buffer format. Keyword Args: offset (int): The offset. stride (int): The stride. divisor (int): The divisor. normalize (bool): The normalize parameter, if applicable.
codesearchnet
def getConstraint(self, name):
    """Get the constraint with the corresponding name.

    The underlying lookup runs under ``self._lock`` via ``lock_and_call``.

    Args:
        name: Name of the constraint to be found.

    Raises:
        TypeError: if the specified constraint does not exist.
    """
    def _build():
        return Constraint(self._impl.getConstraint(name))
    return lock_and_call(_build, self._lock)
Get the constraint with the corresponding name. Args: name: Name of the constraint to be found. Raises: TypeError: if the specified constraint does not exist.
juraj-google-style
def _CheckLocation(self, file_entry, search_depth):
  """Checks the location find specification.

  Compares the location path segment at `search_depth` against the name of
  `file_entry`.  On first use, a segment is converted into its comparison
  form (compiled regex, or lowercased string for case-insensitive matching)
  and cached back into `self._location_segments` so later calls skip the
  conversion.

  Args:
    file_entry (FileEntry): file entry.
    search_depth (int): number of location path segments to compare.

  Returns:
    bool: True if the file entry matches the find specification, False if
        not.
  """
  # Without location segments there is nothing to match against.
  if self._location_segments is None:
    return False

  # Depths outside the specification's segment range never match.
  if search_depth < 0 or search_depth > self._number_of_location_segments:
    return False

  if search_depth == 0:
    segment_name = ''
  else:
    segment_name = self._location_segments[search_depth - 1]

  if self._is_regex:
    # A plain string here means the segment has not been compiled yet.
    if isinstance(segment_name, py2to3.STRING_TYPES):
      flags = re.DOTALL | re.UNICODE
      if not self._is_case_sensitive:
        flags |= re.IGNORECASE

      try:
        # Anchor the pattern so the segment must match the whole name.
        segment_name = r'^{0:s}$'.format(segment_name)
        segment_name = re.compile(segment_name, flags=flags)
      except sre_constants.error:
        # An invalid regular expression can never match.
        return False

      # Cache the compiled pattern for subsequent calls.
      # NOTE(review): when search_depth == 0 this writes into
      # _location_segments[-1] (the last segment) -- looks unintended;
      # confirm callers never reach this branch with depth 0.
      self._location_segments[search_depth - 1] = segment_name

  elif not self._is_case_sensitive:
    segment_name = segment_name.lower()

    # Cache the lowercased segment for subsequent calls (same depth-0
    # caveat as above).
    self._location_segments[search_depth - 1] = segment_name

  # Depth 0 (no segment) is vacuously a match; otherwise compare the
  # segment against the file entry's name.
  if search_depth > 0:
    if self._is_regex:
      if not segment_name.match(file_entry.name):
        return False

    elif self._is_case_sensitive:
      if segment_name != file_entry.name:
        return False

    elif segment_name != file_entry.name.lower():
      return False

  return True
Checks the location find specification. Args: file_entry (FileEntry): file entry. search_depth (int): number of location path segments to compare. Returns: bool: True if the file entry matches the find specification, False if not.
juraj-google-style