code stringlengths 20 4.93k | docstring stringlengths 33 1.27k | source stringclasses 3 values |
|---|---|---|
def _construct(configdict, prefix, ua):
if not ua:
raise UserAgentError("User_agent parameter missing. It can be your project's name for example.")
preprefix = configdict.get('preprefix')
if preprefix:
user_agent = '%s:' % preprefix
else:
user_agent = ''
if prefix:
user_agent = '%s%s-' % (user_agent, prefix)
user_agent = '%s%s' % (user_agent, ua)
return user_agent | Construct user agent
Args:
configdict (str): Additional configuration for user agent
prefix (str): Text to put at start of user agent
ua (str): Custom user agent text
Returns:
str: Full user agent string | juraj-google-style |
def load(self):
    """Load the prefixes that are available in the workdir.

    Populates ``self.prefixes`` with one prefix object per directory found
    in ``self.path`` and resolves the ``current`` soft link.

    Returns:
        None

    Raises:
        MalformedWorkdir: if the workdir is empty or malformed.
    """
    if self.loaded:
        LOGGER.debug('Already loaded')
        return
    try:
        # Bug fix: the Python 2-only ``.next()`` method raises
        # AttributeError on Python 3; use the builtin ``next()`` instead.
        basepath, dirs, _ = next(os.walk(self.path))
    except StopIteration:
        raise MalformedWorkdir('Empty dir %s' % self.path)
    full_path = partial(os.path.join, basepath)
    found_current = False
    for dirname in dirs:
        if dirname == 'current':
            if not os.path.islink(full_path('current')):
                raise MalformedWorkdir(
                    '"%s/current" should be a soft link' % self.path)
            # Only the link target's basename identifies the current prefix.
            self.current = os.path.basename(os.readlink(full_path('current')))
            found_current = True
            continue
        self.prefixes[dirname] = self.prefix_class(prefix=self.join(dirname))
    if not found_current:
        raise MalformedWorkdir(
            '"%s/current" should exist and be a soft link' % self.path)
    self._update_current()
def init_app(self, app):
    """Configure this extension with the given app.

    Registers a teardown callback and attaches this ``LDAP3LoginManager``
    to the app as ``app.ldap3_login_manager``.

    Args:
        app (flask.Flask): The flask app to initialise with.
    """
    app.ldap3_login_manager = self
    # Drain any previously-added servers so config-driven ones take over.
    for server in list(self._server_pool):
        self._server_pool.remove(server)
    self.init_config(app.config)
    if hasattr(app, 'teardown_appcontext'):
        app.teardown_appcontext(self.teardown)
    else:
        # Older Flask versions only support per-request teardown hooks.
        app.teardown_request(self.teardown)
    self.app = app
def _get_cached_time(self):
if (not self._cached_time):
self._cached_time = self._meta.datetime.utcnow()
return self._cached_time | Method that will allow for consistent modified and archived
timestamps.
Returns:
self.Meta.datetime: This method will return a datetime that is
compatible with the current class's datetime library. | codesearchnet |
def create_datastore_for_topline(self, delete_first=0, path=None):
    """Create a resource in the HDX datastore for a topline.

    For tabular data, enables data preview in HDX using the built-in YAML
    definition for a topline. If ``path`` is not supplied, the file is
    first downloaded from HDX.

    Args:
        delete_first (int): Delete datastore before creation. 0 = No,
            1 = Yes, 2 = If no primary key. Defaults to 0.
        path (Optional[str]): Local path to file that was uploaded.
            Defaults to None.

    Returns:
        None
    """
    yaml_path = script_dir_plus_file(join('..', 'hdx_datasource_topline.yml'), Resource)
    schema = load_yaml(yaml_path)
    self.create_datastore_from_dict_schema(schema, delete_first, path=path)
def registry_key(self, key_name, value_name, value_type, **kwargs):
    """Add Registry Key data to Batch object.

    Args:
        key_name (str): The key_name value for this Indicator.
        value_name (str): The value_name value for this Indicator.
        value_type (str): The value_type value for this Indicator.
        confidence (str, kwargs): The threat confidence for this Indicator.
        date_added (str, kwargs): The date timestamp the Indicator was created.
        last_modified (str, kwargs): The date timestamp the Indicator was last modified.
        rating (str, kwargs): The threat rating for this Indicator.
        xid (str, kwargs): The external id for this Indicator.

    Returns:
        obj: An instance of RegistryKey.
    """
    return self._indicator(RegistryKey(key_name, value_name, value_type, **kwargs))
def step(self, input_stream, value):
    """Step the sensor graph through one single input.

    The internal tick count is not advanced, so this function may be
    called as many times as desired to inject specific conditions
    without simulation time passing.

    Args:
        input_stream (DataStream): The input stream to push the value into.
        value (int): The reading value to push as an integer.
    """
    self.sensor_graph.process_input(
        input_stream,
        IOTileReading(input_stream.encode(), self.tick_count, value),
        self.rpc_executor,
    )
def df(self):
    """Return a pandas DataFrame of curve data for all wells in the Project.

    The DataFrame has a dual index of well UWI and curve depths.
    Requires ``pandas``.

    Returns:
        pandas.DataFrame
    """
    import pandas as pd
    frames = [well.df(uwi=True) for well in self]
    return pd.concat(frames)
def stack_colormap(lower, upper, n=256):
    """Stack two colormaps into one.

    The low half of the result maps to ``lower`` colors and the high half
    maps to ``upper`` colors.

    Args:
        lower (colormap): colormap for the lower half of the stacked colormap.
        upper (colormap): colormap for the upper half of the stacked colormap.
        n (int): Number of colormap steps. Default is ``256``.
    """
    bottom = get_cmap(lower)
    top = get_cmap(upper)
    samples = np.linspace(0, 1, n)
    stacked = np.vstack((bottom(samples), top(samples)))
    return array_cmap(stacked, "%s-%s" % (bottom.name, top.name), n=n)
def get_site_orbital_dos(self, site, orbital):
    """Get the Dos for a particular orbital of a particular site.

    Args:
        site: Site in Structure associated with CompleteDos.
        orbital: Orbital in the site.

    Returns:
        Dos containing densities for orbital of site.
    """
    densities = self.pdos[site][orbital]
    return Dos(self.efermi, self.energies, densities)
def get_symmetric_wallace_tensor(self, tau):
    """Get the symmetrized Wallace tensor for determining yield
    strength criteria.

    Args:
        tau (3x3 array-like): stress at which to evaluate the Wallace tensor.
    """
    wallace = self.get_wallace_tensor(tau)
    # Symmetrize by averaging with the (kl, ij)-transposed tensor.
    transposed = np.transpose(wallace, [2, 3, 0, 1])
    return Tensor((wallace + transposed) * 0.5)
def frames2video(frame_dir, video_file, fps=30, fourcc='XVID', filename_tmpl='{:06d}.jpg', start=0, end=0, show_progress=True):
    """Read the frame images from a directory and join them as a video.

    Args:
        frame_dir (str): The directory containing video frames.
        video_file (str): Output filename.
        fps (float): FPS of the output video.
        fourcc (str): Fourcc of the output video, this should be compatible
            with the output file type.
        filename_tmpl (str): Filename template with the index as the variable.
        start (int): Starting frame index.
        end (int): Ending frame index.
        show_progress (bool): Whether to show a progress bar.
    """
    if end == 0:
        # Infer the end index from the number of frame files on disk.
        ext = filename_tmpl.split('.')[-1]
        end = len([name for name in scandir(frame_dir, ext)])
    first_file = osp.join(frame_dir, filename_tmpl.format(start))
    check_file_exist(first_file, 'The start frame not found: ' + first_file)
    img = cv2.imread(first_file)
    height, width = img.shape[:2]
    resolution = (width, height)
    vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps,
                              resolution)

    def write_frame(file_idx):
        # Read one frame image and append it to the video.
        filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
        img = cv2.imread(filename)
        vwriter.write(img)

    if show_progress:
        track_progress(write_frame, range(start, end))
    else:
        # DRY fix: reuse write_frame instead of duplicating the
        # read-and-write logic inline in this branch.
        for i in range(start, end):
            write_frame(i)
    vwriter.release()
def _FormatDate(self, event):
    """Formats the date.

    Args:
        event (EventObject): event.

    Returns:
        str: date field, or "0000-00-00" when the timestamp cannot be
        converted to a human readable date.
    """
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
        timestamp=event.timestamp)
    year, month, day_of_month = date_time.GetDate()
    try:
        formatted = '{0:04d}-{1:02d}-{2:02d}'.format(year, month, day_of_month)
    except (TypeError, ValueError):
        # GetDate() may yield None components for invalid timestamps.
        self._ReportEventError(event, (
            'unable to copy timestamp: {0!s} to a human readable date. '
            'Defaulting to: "0000-00-00"').format(event.timestamp))
        formatted = '0000-00-00'
    return formatted
def custom_line_color_map(self, values):
    """Set the custom line color map.

    Args:
        values (list): list of colors.

    Raises:
        TypeError: If ``values`` is not a list.
    """
    if isinstance(values, list):
        self.options['custom_line_color_map'] = values
    else:
        raise TypeError('custom_line_color_map must be a list')
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    """Build model inputs from a sequence or a pair of sequences for
    sequence classification tasks by concatenating and adding special
    tokens. A Lxmert sequence has the following format:

    - single sequence: `[CLS] X [SEP]`
    - pair of sequences: `[CLS] A [SEP] B [SEP]`

    Args:
        token_ids_0 (`List[int]`):
            List of IDs to which the special tokens will be added.
        token_ids_1 (`List[int]`, *optional*):
            Optional second list of IDs for sequence pairs.

    Returns:
        `List[int]`: List of input IDs with the appropriate special tokens.
    """
    sequence = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    if token_ids_1 is None:
        return sequence
    return sequence + token_ids_1 + [self.sep_token_id]
def solve_sweep_wavelength(self, structure, wavelengths, filename='wavelength_n_effs.dat', plot=True):
    """Solve for the effective indices of a fixed structure at
    different wavelengths.

    Args:
        structure (Slabs): The target structure to solve for modes.
        wavelengths (list): A list of wavelengths to sweep over.
        filename (str): The nominal filename to use when saving the
            effective indices. Defaults to 'wavelength_n_effs.dat'.
        plot (bool): `True` if plots should be generated,
            otherwise `False`. Default is `True`.

    Returns:
        list: A list of the effective indices found for each wavelength.
    """
    n_effs = []
    for w in tqdm.tqdm(wavelengths, ncols=70):
        structure.change_wavelength(w)
        self.solve(structure)
        n_effs.append(np.real(self.n_effs))
    if filename:
        self._write_n_effs_to_file(n_effs, (self._modes_directory + filename), wavelengths)
    if plot:
        if MPL:
            title = '$n_{eff}$ vs Wavelength'
            y_label = '$n_{eff}$'
        else:
            # Bug fix: the original interpolated an undefined ``x_label``
            # into a format string with no placeholder, raising NameError
            # whenever MPL was falsy.
            title = 'n_{effs} vs Wavelength'
            y_label = 'n_{eff}'
        # Pass the computed y_label (previously a dead variable: the
        # literal 'n_{eff}' was hard-coded regardless of the MPL branch).
        self._plot_n_effs((self._modes_directory + filename),
                          (self._modes_directory + 'fraction_te.dat'),
                          'Wavelength', y_label, title)
    return n_effs
def index(self, value, start=0, end=None):
    """Return the index of value between start and end.

    By default, the entire setlist is searched. This runs in O(1).

    Args:
        value: The value to find the index of.
        start (int): The index to start searching at (defaults to 0).
        end (int): The index to stop searching at (defaults to the end
            of the list).

    Returns:
        int: The index of the value.

    Raises:
        ValueError: If the value is not in the list or outside of start - end.
        IndexError: If start or end are out of range.
    """
    try:
        found = self._dict[value]
    except KeyError:
        raise ValueError
    lo = self._fix_neg_index(start)
    hi = self._fix_end_index(end)
    if not (lo <= found < hi):
        raise ValueError
    return found
def GetAdGroups(self, client_customer_id, campaign_id):
    """Retrieve all AdGroups for the given campaign that haven't been removed.

    Args:
        client_customer_id: str Client Customer Id being used in API request.
        campaign_id: str id of the campaign for which to fetch ad groups.

    Returns:
        list List of AdGroup data objects, or None if there are none.
    """
    self.client.SetClientCustomerId(client_customer_id)
    selector = {
        'fields': ['Id', 'Name', 'Status'],
        'predicates': [
            {'field': 'CampaignId', 'operator': 'EQUALS',
             'values': [campaign_id]},
            {'field': 'Status', 'operator': 'NOT_EQUALS',
             'values': ['REMOVED']},
        ],
    }
    adgroups = self.client.GetService('AdGroupService').get(selector)
    if int(adgroups['totalNumEntries']) > 0:
        return adgroups['entries']
    return None
def indicator(self, indicator_type, summary, **kwargs):
    """Add Indicator data to Batch object.

    Args:
        indicator_type (str): The ThreatConnect define Indicator type.
        summary (str): The value for this Indicator.
        confidence (str, kwargs): The threat confidence for this Indicator.
        date_added (str, kwargs): The date timestamp the Indicator was created.
        last_modified (str, kwargs): The date timestamp the Indicator was last modified.
        rating (str, kwargs): The threat rating for this Indicator.
        xid (str, kwargs): The external id for this Indicator.

    Returns:
        obj: An instance of Indicator.
    """
    return self._indicator(Indicator(indicator_type, summary, **kwargs))
def memory_read8(self, addr, num_bytes, zone=None):
    """Reads memory from the target system in units of bytes.

    Thin wrapper delegating to ``memory_read`` with ``nbits=8``.

    Args:
        self (JLink): the ``JLink`` instance
        addr (int): start address to read from
        num_bytes (int): number of bytes to read
        zone (str): memory zone to read from

    Returns:
        List of bytes read from the target system.

    Raises:
        JLinkException: if memory could not be read.
    """
    return self.memory_read(addr, num_bytes, zone=zone, nbits=8)
def compile_dependencies(self, sourcepath, include_self=False):
    """Apply compile on all dependencies.

    Args:
        sourcepath (string): Sass source path to compile to its
            destination using project settings.

    Keyword Arguments:
        include_self (bool): If ``True`` the given sourcepath is added to
            items to compile, else only its dependencies are compiled.
    """
    targets = self.inspector.parents(sourcepath)
    if include_self:
        targets.add(sourcepath)
    # Drop falsy results (sources that produced no compilation output).
    compiled = [self.compile_source(target) for target in targets]
    return filter(None, compiled)
def _ParseFileEntry(self, knowledge_base, file_entry):
    """Parses artifact file system data for a preprocessing attribute.

    Args:
        knowledge_base (KnowledgeBase): to fill with preprocessing information.
        file_entry (dfvfs.FileEntry): file entry that contains the artifact
            value data.

    Raises:
        errors.PreProcessFail: if the preprocessing fails.
    """
    root_key = self._GetPlistRootKey(file_entry)
    if not root_key:
        location = getattr(file_entry.path_spec, 'location', '')
        raise errors.PreProcessFail((
            'Unable to read: {0:s} plist: {1:s} with error: missing root '
            'key.').format(self.ARTIFACT_DEFINITION_NAME, location))
    try:
        match = self._GetKeysDefaultEmpty(root_key, self._KEYS)
    except KeyError as exception:
        location = getattr(file_entry.path_spec, 'location', '')
        raise errors.PreProcessFail(
            'Unable to read: {0:s} plist: {1:s} with error: {2!s}'.format(
                self.ARTIFACT_DEFINITION_NAME, location, exception))

    def _first_value(key):
        # Each plist key maps to a list; only the first entry is used.
        return match.get(key, [None])[0]

    name = _first_value('name')
    uid = _first_value('uid')
    if not name or not uid:
        # Without both a username and a uid there is nothing to record.
        return
    user_account = artifacts.UserAccountArtifact(
        identifier=uid, username=name)
    user_account.group_identifier = _first_value('gid')
    user_account.full_name = _first_value('realname')
    user_account.shell = _first_value('shell')
    user_account.user_directory = _first_value('home')
    try:
        knowledge_base.AddUserAccount(user_account)
    except KeyError:
        # The account is already tracked; this is best-effort, so ignore.
        pass
def to_api_repr(self):
    """Build an API representation of the query job config.

    Returns:
        dict: A dictionary in the format used by the BigQuery API.
    """
    resource = copy.deepcopy(self._properties)
    parameters = resource['query'].get('queryParameters')
    if parameters:
        # Named parameters carry a ``name`` field; positional ones do not.
        first_name = parameters[0].get('name')
        mode = 'POSITIONAL' if first_name is None else 'NAMED'
        resource['query']['parameterMode'] = mode
    return resource
def imread(path, grayscale=False, size=None, interpolate='bilinear', channel_first=False, as_uint16=False, num_channels=(- 1)):
    """Read image by cv2 module.

    Args:
        path (str or 'file object'): File path or object to read.
        grayscale (bool):
        size (tupple of int): (width, height). If None, output img shape
            depends on the files to read.
        channel_first (bool): This argument specifies the shape of img is
            whether (height, width, channel) or (channel, height, width).
            Default value is False, which means the img shape is
            (height, width, channel).
        interpolate (str): must be one of ["nearest", "box", "bilinear",
            "hamming", "bicubic", "lanczos"].
        as_uint16 (bool): If True, this function reads image as uint16.
        num_channels (int): channel size of output array. Default is -1
            which preserves raw image shape.

    Returns:
        numpy.ndarray
    """
    _imread_before(grayscale, num_channels)
    read_mode = cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_UNCHANGED
    img = _imread_helper(path, read_mode)
    if as_uint16 and img.dtype != np.uint16:
        if img.dtype != np.uint8:
            raise ValueError('casting {} to uint16 is not safe.'.format(img.dtype))
        logger.warning('You want to read image as uint16, but the original bit-depth is 8 bit.All pixel values are simply increased by 256 times.')
        # Widen 8-bit samples into the 16-bit range.
        img = img.astype(np.uint16) * 256
    img = _cvtColor_helper(img, num_channels)
    return _imread_after(img, size, interpolate, channel_first, imresize)
def _get_genes(self, variant):
    """Collect the genes for a variant.

    Gathers the ensembl ids and hgnc symbols from all transcripts and
    looks up the corresponding gene information.

    Args:
        variant (dict): A variant dictionary.

    Returns:
        genes (list): A list of Genes.
    """
    ensembl_ids = [t.ensembl_id for t in variant.transcripts if t.ensembl_id]
    hgnc_symbols = [t.hgnc_symbol for t in variant.transcripts if t.hgnc_symbol]
    return get_gene_info(ensembl_ids=ensembl_ids, hgnc_symbols=hgnc_symbols)
def start(self, request: Request) -> Response:
    """Start a file or directory listing download.

    Args:
        request: Request.

    Returns:
        A Response populated with the initial data connection reply.
        Once the response is received, call :meth:`download`.

    Coroutine.
    """
    if self._session_state != SessionState.ready:
        raise RuntimeError('Session not ready')
    response = Response()
    yield from self._prepare_fetch(request, response)
    response.file_transfer_size = yield from self._fetch_size(request)
    if request.restart_value:
        # Best-effort resume: fall back to a full download on failure.
        try:
            yield from self._commander.restart(request.restart_value)
            response.restart_value = request.restart_value
        except FTPServerError:
            _logger.debug('Could not restart file.', exc_info=1)
    yield from self._open_data_stream()
    yield from self._begin_stream(Command('RETR', request.file_path))
    self._session_state = SessionState.file_request_sent
    return response
def indicator(self, data):
    """Update the request URI to include the Indicator for specific indicator retrieval.

    Args:
        data (string): The indicator value
    """
    first_hash = self.get_first_hash(data)
    super(File, self).indicator(first_hash)
def audio(self, tag, audiodata, step=None, sample_rate=44100):
    """Saves audio.

    NB: single channel only right now.

    Args:
        tag: str: label for this data
        audiodata: ndarray [Nsamples,]: data between (-1.0,1.0) to save as wave
        step: int: training step
        sample_rate: sample rate of passed in audio buffer
    """
    audiodata = onp.array(audiodata)
    if step is None:
        step = self._step
    else:
        self._step = step
    audiodata = onp.clip(onp.squeeze(audiodata), -1, 1)
    if audiodata.ndim != 1:
        raise ValueError('Audio data must be 1D.')
    # Scale float samples in [-1, 1] to signed 16-bit PCM.
    sample_list = (32767.0 * audiodata).astype(int).tolist()
    wio = io.BytesIO()
    wav_buf = wave.open(wio, 'wb')
    wav_buf.setnchannels(1)
    wav_buf.setsampwidth(2)
    wav_buf.setframerate(sample_rate)
    wav_buf.writeframes(b''.join(struct.pack('<h', v) for v in sample_list))
    wav_buf.close()
    encoded_audio_bytes = wio.getvalue()
    wio.close()
    audio = Summary.Audio(sample_rate=sample_rate,
                          num_channels=1,
                          length_frames=len(sample_list),
                          encoded_audio_string=encoded_audio_bytes,
                          content_type='audio/wav')
    summary = Summary(value=[Summary.Value(tag=tag, audio=audio)])
    self.add_summary(summary, step)
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the data encoding the Locate request payload and decode it
    into its constituent parts.

    Args:
        input_buffer (stream): A data buffer containing encoded object
            data, supporting a read method.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        InvalidKmipEncoding: Raised if the attributes structure is missing
            from the encoded payload for KMIP 2.0+ encodings.
    """
    super(LocateRequestPayload, self).read(input_buffer, kmip_version=kmip_version)
    buffer = utils.BytearrayStream(input_buffer.read(self.length))
    # Optional header fields; each is decoded only when its tag is next.
    if self.is_tag_next(enums.Tags.MAXIMUM_ITEMS, buffer):
        self._maximum_items = primitives.Integer(tag=enums.Tags.MAXIMUM_ITEMS)
        self._maximum_items.read(buffer, kmip_version=kmip_version)
    if self.is_tag_next(enums.Tags.OFFSET_ITEMS, buffer):
        self._offset_items = primitives.Integer(tag=enums.Tags.OFFSET_ITEMS)
        self._offset_items.read(buffer, kmip_version=kmip_version)
    if self.is_tag_next(enums.Tags.STORAGE_STATUS_MASK, buffer):
        self._storage_status_mask = primitives.Integer(tag=enums.Tags.STORAGE_STATUS_MASK)
        self._storage_status_mask.read(buffer, kmip_version=kmip_version)
    if self.is_tag_next(enums.Tags.OBJECT_GROUP_MEMBER, buffer):
        self._object_group_member = primitives.Enumeration(enums.ObjectGroupMember, tag=enums.Tags.OBJECT_GROUP_MEMBER)
        self._object_group_member.read(buffer, kmip_version=kmip_version)
    if kmip_version < enums.KMIPVersion.KMIP_2_0:
        # Pre-2.0 payloads carry zero or more individual Attribute structures.
        while self.is_tag_next(enums.Tags.ATTRIBUTE, buffer):
            attribute = objects.Attribute()
            attribute.read(buffer, kmip_version=kmip_version)
            self._attributes.append(attribute)
    elif self.is_tag_next(enums.Tags.ATTRIBUTES, buffer):
        # KMIP 2.0+ wraps everything in a single Attributes structure.
        attributes = objects.Attributes()
        attributes.read(buffer, kmip_version=kmip_version)
        template = objects.convert_attributes_to_template_attribute(attributes)
        self._attributes = template.attributes
    else:
        raise exceptions.InvalidKmipEncoding('The Locate request payload encoding is missing the attributes structure.')
def changes(self, **kwargs):
    """List the merge request changes.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabListError: If the list could not be retrieved

    Returns:
        RESTObjectList: List of changes
    """
    path = '{0}/{1}/changes'.format(self.manager.path, self.get_id())
    return self.manager.gitlab.http_get(path, **kwargs)
def _default_static_lib(self, obj_files):
    """Create a static library (i.e. a ``.a`` / ``.lib`` file).

    Args:
        obj_files (List[str]): List of paths of compiled object files.
    """
    c_compiler = self.F90_COMPILER.c_compiler
    static_lib_dir = os.path.join(self.build_lib, "bezier", "lib")
    if not os.path.exists(static_lib_dir):
        os.makedirs(static_lib_dir)
    c_compiler.create_static_lib(
        obj_files, "bezier", output_dir=static_lib_dir)
    # Re-point every extension's extra objects into the temp build tree.
    for extension in self.extensions:
        extension.extra_objects[:] = [
            os.path.join(self.build_temp, rel_path)
            for rel_path in extension.extra_objects
        ]
def _get_accumulator(tensor):
    """Returns TensorList if any containing accumulated values of tensor.

    We try to find a pattern of the form:

        input_tl   tensor
            \\       /
        (TensorListPushBack)
              |
          output_tl

    which satisfies the following conditions:

    1. input_tl must be in tensor.graph.inputs.
    2. output_tl or Identity(output_tl) must be in tensor.graph.outputs.
    3. tensor.graph.input_index(input_tl) ==
       tensor.graph.output_index(output_t).

    output_tl or Identity(output_tl) (whichever is in tensor.graph.outputs)
    is returned if such a pattern is found else None is returned.

    Args:
        tensor: The Tensor to be accumulated.

    Returns:
        A variant tensor in the same graph as `tensor` or None if no
        accumulator is found.
    """
    assert isinstance(tensor.graph, func_graph_module.FuncGraph)

    def _as_graph_output(t):
        # Map ``t`` to the graph output it feeds: either ``t`` itself or the
        # output of a trailing Identity op; None if neither is a graph output.
        for output in tensor.graph.outputs:
            if output is t:
                return t
        identity_op = t.consumers()[0]
        if identity_op.type == 'Identity' and any(
                identity_op.outputs[0] is out for out in tensor.graph.outputs):
            return identity_op.outputs[0]
        return None

    for consumer in tensor.consumers():
        if consumer.type != 'TensorListPushBack':
            continue
        # Locate which graph input feeds the push-back's list operand.
        accum_input_idx = -1
        for accum_input_idx, inp in enumerate(tensor.graph.inputs):
            if inp is consumer.inputs[0]:
                break
        else:
            continue
        output = _as_graph_output(consumer.outputs[0])
        if output is None:
            continue
        for accum_output_idx, out in enumerate(tensor.graph.outputs):
            if out is output:
                # Input and output indices must line up for an accumulator.
                if accum_input_idx == accum_output_idx:
                    return output
                break
    return None
def wait_until_finished(
    self, refresh_period=DEFAULT_TASK_INSTANCE_WAIT_REFRESH_PERIOD
):
    """Wait until a task instance with the given UUID is finished.

    Delegates to the manager's polling loop.

    Args:
        refresh_period (int, optional): How many seconds to wait
            before checking the task's status. Defaults to 5 seconds.

    Returns:
        :class:`saltant.models.base_task_instance.BaseTaskInstance`:
        This task instance model after it finished.
    """
    return self.manager.wait_until_finished(
        uuid=self.uuid,
        refresh_period=refresh_period,
    )
def _semicircle_integral(dist_bins, idx):
r = 1
x1 = dist_bins[idx]
x2 = dist_bins[idx + 1]
if dist_bins[idx] == 1:
area1 = 0.25 * math.pi * r ** 2
else:
area1 = 0.5 * ((x1 * math.sqrt(r ** 2 - x1 ** 2)) + (
r ** 2 * math.atan(x1 / math.sqrt(r ** 2 - x1 ** 2))))
area2 = 0.5 * ((x2 * math.sqrt(r ** 2 - x2 ** 2)) + (
r ** 2 * math.atan(x2 / math.sqrt(r ** 2 - x2 ** 2))))
return (area1 - area2) / (0.25 * math.pi * r ** 2) | An internal method to get an integral between two bounds of a unit
semicircle. Used in algorithm to determine bond probabilities.
Args:
dist_bins: (float) list of all possible bond weights
idx: (float) index of starting bond weight
Returns:
(float) integral of portion of unit semicircle | juraj-google-style |
def _check_edgemap_registers(self, edge_map, keyregs, valregs, valreg=True):
    """Check that wiremap neither fragments nor leaves duplicate registers.

    1. There are no fragmented registers. A register in keyregs is
       fragmented if not all of its (qu)bits are renamed by edge_map.
    2. There are no duplicate registers. A register is duplicate if it
       appears in both self and keyregs but not in edge_map.

    Args:
        edge_map (dict): map from (reg, idx) in keyregs to (reg, idx) in valregs
        keyregs (dict): a map from register names to Register objects
        valregs (dict): a map from register names to Register objects
        valreg (bool): if False the method ignores valregs and does not
            add regs for bits in the edge_map image that don't appear in valregs

    Returns:
        set(Register): the set of regs to add to self

    Raises:
        DAGCircuitError: if the wiremap fragments, or duplicates exist
    """
    add_regs = set()
    # Track, per register, which bit indices are touched by the edge map.
    reg_frag_chk = {}
    for reg in keyregs.values():
        reg_frag_chk[reg] = {j: False for j in range(len(reg))}
    for k in edge_map.keys():
        if k[0].name in keyregs:
            reg_frag_chk[k[0]][k[1]] = True
    for reg, bit_flags in reg_frag_chk.items():
        flags = set(bit_flags.values())
        if len(flags) == 2:
            # Some bits mapped and some not: the register is fragmented.
            raise DAGCircuitError('edge_map fragments reg %s' % reg)
        if flags == set([False]):
            # Entirely unmapped: either a duplicate or a new register.
            if reg in self.qregs.values() or reg in self.cregs.values():
                raise DAGCircuitError('unmapped duplicate reg %s' % reg)
            add_regs.add(reg)
        elif valreg:
            # Fully mapped register: ensure its image register exists.
            if not edge_map[(reg, 0)][0].name in valregs:
                size = max(map(lambda x: x[1],
                               filter(lambda x: x[0] == edge_map[(reg, 0)][0],
                                      edge_map.values())))
                qreg = QuantumRegister(size + 1, edge_map[(reg, 0)][0].name)
                add_regs.add(qreg)
    return add_regs
def sin(duration: int, amp: complex, freq: float = None,
        phase: float = 0, name: str = None) -> SamplePulse:
    """Generates sine wave `SamplePulse`.

    Args:
        duration: Duration of pulse. Must be greater than zero.
        amp: Pulse amplitude.
        freq: Pulse frequency, units of 1/dt. If `None` defaults to single cycle.
        phase: Pulse phase.
        name: Name of pulse.
    """
    # A single full cycle over the pulse duration is the default frequency.
    cycle_freq = freq if freq is not None else 1 / duration
    return _sampled_sin_pulse(duration, amp, cycle_freq, phase=phase, name=name)
def assert_no_new_pyobjects_executing_eagerly(warmup_iters: int=2) -> Callable[[Callable[..., Any]], Callable[..., None]]:
    """Decorator for asserting that no new Python objects persist after a test.

    Returns a decorator that runs the test multiple times executing eagerly,
    first as a warmup and then to let objects accumulate. The warmup helps
    ignore caches which do not grow as the test is run repeatedly.

    Useful for checking that there are no missing Py_DECREFs in the C
    exercised by a bit of Python.

    Args:
        warmup_iters: The number of warmup iterations, excluded from measuring.

    Returns:
        A decorator function which can be applied to the test function.
    """
    def wrap_f(f: Callable[..., Any]) -> Callable[..., None]:
        def decorator(self: 'TensorFlowTestCase', *args, **kwargs) -> None:
            with context.eager_mode():
                gc.disable()
                # Outcome bookkeeping objects differ across unittest versions.
                test_errors = None
                test_skipped = None
                if hasattr(self._outcome, 'errors'):
                    test_errors = self._outcome.errors
                    test_skipped = self._outcome.skipped
                else:
                    test_errors = self._outcome.result.errors
                    test_skipped = self._outcome.result.skipped
                # Warmup runs populate caches so they do not count as leaks.
                for _ in range(warmup_iters):
                    f(self, *args, **kwargs)
                self.doCleanups()
                obj_count_by_type = _get_object_count_by_type()
                gc.collect()
                registered_function_names = context.context().list_function_names()
                obj_count_by_type = _get_object_count_by_type(
                    exclude=gc.get_referents(test_errors, test_skipped))
                if ops.has_default_graph():
                    collection_sizes_before = {
                        collection: len(ops.get_collection(collection))
                        for collection in ops.get_default_graph().collections}
                for _ in range(3):
                    f(self, *args, **kwargs)
                self.doCleanups()
                if ops.has_default_graph():
                    # Collections must not accumulate entries across runs.
                    for collection_key in ops.get_default_graph().collections:
                        collection = ops.get_collection(collection_key)
                        size_before = collection_sizes_before.get(collection_key, 0)
                        if len(collection) > size_before:
                            raise AssertionError(
                                'Collection %s increased in size from %d to %d (current items %s).'
                                % (collection_key, size_before, len(collection), collection))
                        # Drop references so they are not counted as leaks.
                        del collection
                        del collection_key
                        del size_before
                    del collection_sizes_before
                gc.collect()
                obj_count_by_type = _get_object_count_by_type(
                    exclude=gc.get_referents(test_errors, test_skipped)) - obj_count_by_type
                leftover_functions = context.context().list_function_names() - registered_function_names
                assert not leftover_functions, (
                    'The following functions were newly created: %s' % leftover_functions)
                assert not obj_count_by_type, (
                    'The following objects were newly created: %s' % str(obj_count_by_type))
                gc.enable()
        return tf_decorator.make_decorator(f, decorator)
    return wrap_f
def _safe_initial_value_from_tensor(name, tensor, op_cache):
    """Replace dependencies on variables with their initialized values.

    Args:
        name: Variable name.
        tensor: A `Tensor`. The tensor to replace.
        op_cache: A dict mapping operation names to `Operation`s. Used to
            memoize the results so as to avoid creating redundant operations.

    Returns:
        A `Tensor` compatible with `tensor`. Any inputs that lead to variable
        values will be replaced with a corresponding graph that uses the
        variable's initialized values. This is done on a best-effort basis.
        If no modifications need to be made then `tensor` will be returned
        unchanged.
    """
    producer = tensor.op
    safe_op = op_cache.get(producer.name)
    if safe_op is None:
        # Memoize so repeated tensors from the same op share one rewrite.
        safe_op = _safe_initial_value_from_op(name, producer, op_cache)
        op_cache[producer.name] = safe_op
    return safe_op.outputs[tensor.value_index]
async def get_messages(self, name):
    """Get stored messages for a service.

    Args:
        name (string): The name of the service to get messages from.

    Returns:
        list(ServiceMessage): A list of the messages stored for this service.
    """
    resp = await self.send_command(OPERATIONS.CMD_QUERY_MESSAGES, {'name': name},
                                   MESSAGES.QueryMessagesResponse, timeout=5.0)
    return [states.ServiceMessage.FromDictionary(msg) for msg in resp]
def get_scalar_arg_dtypes(self):
    """Get the location and types of the input scalars.

    Returns:
        list: for every kernel input element either None if the data is a
        buffer or the numpy data type if it is a scalar.
    """
    dtypes = []
    # Only the values are needed, so iterate .values() rather than
    # unpacking .items() and discarding the key.
    for data in self._kernel_data.values():
        dtypes.extend(data.get_scalar_arg_dtypes())
    return dtypes
def get_estimator(output_dir, train_config, args):
    """Returns a tf.learn estimator.

    Only {DNN, Linear}Regressor and {DNN, Linear}Classifier are supported,
    selected by the value of args.model_type.

    Args:
        output_dir: Models are saved into output_dir/train.
        train_config: Training config dict (column lists, target, vocab stats).
        args: Parsed command line parameters.

    Returns:
        A tf.contrib.learn estimator.

    Raises:
        ValueError: If the config is inconsistent with the model type.
    """
    target_name = train_config['target_column']
    # Sanity checks: target column type must match the model family, and
    # layer sizes only make sense for DNN models.
    if (is_classification_model(args.model_type) and (target_name not in train_config['categorical_columns'])):
        raise ValueError('When using a classification model, the target must be a categorical variable.')
    if (is_regression_model(args.model_type) and (target_name not in train_config['numerical_columns'])):
        raise ValueError('When using a regression model, the target must be a numerical variable.')
    if (is_dnn_model(args.model_type) and (not args.layer_sizes)):
        raise ValueError('--layer-size* must be used with DNN models')
    if (is_linear_model(args.model_type) and args.layer_sizes):
        raise ValueError('--layer-size* cannot be used with linear models')
    feature_columns = _tflearn_features(train_config, args)
    config = tf.contrib.learn.RunConfig(save_checkpoints_secs=args.save_checkpoints_secs)
    train_dir = os.path.join(output_dir, 'train')
    # Dispatch to the concrete estimator class; classifiers additionally
    # need the number of target classes from the vocab statistics.
    if (args.model_type == 'dnn_regression'):
        estimator = tf.contrib.learn.DNNRegressor(feature_columns=feature_columns, hidden_units=args.layer_sizes, config=config, model_dir=train_dir, optimizer=tf.train.AdamOptimizer(args.learning_rate, epsilon=args.epsilon))
    elif (args.model_type == 'linear_regression'):
        estimator = tf.contrib.learn.LinearRegressor(feature_columns=feature_columns, config=config, model_dir=train_dir, optimizer=tf.train.AdamOptimizer(args.learning_rate, epsilon=args.epsilon))
    elif (args.model_type == 'dnn_classification'):
        estimator = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns, hidden_units=args.layer_sizes, n_classes=train_config['vocab_stats'][target_name]['n_classes'], config=config, model_dir=train_dir, optimizer=tf.train.AdamOptimizer(args.learning_rate, epsilon=args.epsilon))
    elif (args.model_type == 'linear_classification'):
        estimator = tf.contrib.learn.LinearClassifier(feature_columns=feature_columns, n_classes=train_config['vocab_stats'][target_name]['n_classes'], config=config, model_dir=train_dir, optimizer=tf.train.AdamOptimizer(args.learning_rate, epsilon=args.epsilon))
    else:
        raise ValueError('bad --model-type value')
    return estimator
We only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is
controlled by the values of model_type in the args.
Args:
        output_dir: Models are saved into output_dir/train
train_config: our training config
args: command line parameters
Returns:
        A TF learn estimator
Raises:
ValueError: if config is wrong. | codesearchnet |
def add(self, row):
    """Add a row to the table.

    Args:
        row: A dict whose keys match the keys added in set_columns, and
            whose values can be cast to the types added in set_columns.

    Raises:
        wandb.Error: If set_columns was never called, a key was not
            declared in set_columns, or a value cannot be converted and
            encoded to its declared type.
    """
    if not self._types:
        raise wandb.Error(
            'TypedTable.set_columns must be called before add.')
    mapped_row = {}
    for key, val in row.items():
        try:
            typed_val = self._types[key](val)
            if hasattr(typed_val, 'encode'):
                typed_val = typed_val.encode()
            mapped_row[key] = typed_val
        except KeyError:
            raise wandb.Error(
                'TypedTable.add received key ("%s") which wasn\'t provided to set_columns' % key)
        except Exception:
            # Catch only genuine conversion/encoding errors; the original
            # bare `except:` also swallowed KeyboardInterrupt/SystemExit.
            raise wandb.Error('TypedTable.add couldn\'t convert and encode ("{}") provided for key ("{}") to type ({})'.format(
                val, key, self._types[key]))
    self._output.add(mapped_row)
    self._count += 1
Args:
row: A dict whose keys match the keys added in set_columns, and whose
values can be cast to the types added in set_columns. | juraj-google-style |
def download(cls, root, check=None):
    """Download and unzip an online archive (.zip, .gz, or .tgz).

    Args:
        root (str): Folder to download data to.
        check (str or None): Folder whose existence indicates that the
            dataset has already been downloaded, or None to check the
            existence of root/{cls.name}.

    Returns:
        str: Path to the extracted dataset.
    """
    path = os.path.join(root, cls.name)
    check = path if check is None else check
    if not os.path.isdir(check):
        for url in cls.urls:
            # Each url may be a plain string, or a (url, filename) tuple
            # when the local name differs from the url basename.
            if isinstance(url, tuple):
                url, filename = url
            else:
                filename = os.path.basename(url)
            zpath = os.path.join(path, filename)
            if not os.path.isfile(zpath):
                if not os.path.exists(os.path.dirname(zpath)):
                    os.makedirs(os.path.dirname(zpath))
                print('downloading {}'.format(filename))
                download_from_url(url, zpath)
            zroot, ext = os.path.splitext(zpath)
            _, ext_inner = os.path.splitext(zroot)
            # Extract according to archive type: .zip, .tar.gz/.tgz, or plain .gz.
            if ext == '.zip':
                with zipfile.ZipFile(zpath, 'r') as zfile:
                    print('extracting')
                    zfile.extractall(path)
            # tarfile cannot handle bare .gz files; handle .tar.gz/.tgz here.
            elif ext == '.tgz' or ext == '.gz' and ext_inner == '.tar':
                with tarfile.open(zpath, 'r:gz') as tar:
                    dirs = [member for member in tar.getmembers()]
                    tar.extractall(path=path, members=dirs)
            elif ext == '.gz':
                with gzip.open(zpath, 'rb') as gz:
                    with open(zroot, 'wb') as uncompressed:
                        shutil.copyfileobj(gz, uncompressed)
    return os.path.join(path, cls.dirname)
Arguments:
root (str): Folder to download data to.
check (str or None): Folder whose existence indicates
that the dataset has already been downloaded, or
None to check the existence of root/{cls.name}.
Returns:
str: Path to extracted dataset. | juraj-google-style |
def Delete(self, request, global_params=None):
    """Deletes a snapshot.

    Args:
        request: (DataflowProjectsLocationsSnapshotsDeleteRequest) input message.
        global_params: (StandardQueryParameters, default: None) global arguments.

    Returns:
        (DeleteSnapshotResponse) The response message.
    """
    config = self.GetMethodConfig('Delete')
    return self._RunMethod(config, request, global_params=global_params)
Args:
request: (DataflowProjectsLocationsSnapshotsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeleteSnapshotResponse) The response message. | github-repos |
def _parse_networks(self, config):
networks = list()
regexp = r'network (.+)/(\d+) area (\d+\.\d+\.\d+\.\d+)'
matches = re.findall(regexp, config)
for (network, netmask, area) in matches:
networks.append(dict(network=network, netmask=netmask, area=area))
return dict(networks=networks) | Parses config file for the networks advertised
by the OSPF process
Args:
config(str): Running configuration
Returns:
        dict: A dict with key ``networks`` mapping to a list of dicts,
            each with string keys ``network``, ``netmask`` and ``area``.
def regularizer(name, regularization_fn, name_filter='weights'):
    """Wraps a regularizer in a parameter-modification function.

    Args:
        name: The name scope for this regularizer.
        regularization_fn: A function with signature
            fn(variable) -> loss `Tensor` or `None`.
        name_filter: A regex used to select variables by name.

    Returns:
        A parameter modification function that adds the loss to the
        REGULARIZATION_LOSSES graph key.
    """
    regex = re.compile(name_filter)
    def fn(var_name, variable, phase):
        # Regularize only matching variables, and only during training.
        if phase is pt.Phase.train and regex.search(var_name):
            with tf.name_scope(None, name, [variable]):
                loss = regularization_fn(variable)
                # regularization_fn may return None to signal "no loss".
                if loss is not None:
                    tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, loss)
        return variable
    return fn
Args:
name: The name scope for this regularizer.
regularization_fn: A function with signature:
fn(variable) -> loss `Tensor` or `None`.
name_filter: A regex that will be used to filter variables by name.
Returns:
A parameter modification function that adds the loss to the
REGULARIZATION_LOSSES graph key. | juraj-google-style |
def __init__(self, comment=None):
    """Initializes an event tag attribute container.

    Args:
        comment (Optional[str]): comments.
    """
    super(EventTag, self).__init__()
    # Identifier of the tagged event; None until the tag is associated.
    self._event_identifier = None
    self.comment = comment
    # Location of the tagged event within its storage; None until set.
    self.event_entry_index = None
    self.event_row_identifier = None
    self.event_stream_number = None
    # Labels applied to the tagged event.
    self.labels = []
Args:
comment (Optional[str]): comments. | juraj-google-style |
def set_input_embeddings(self, value: nn.Module):
    """Set model's input embeddings.

    Args:
        value (`nn.Module`): A module mapping vocabulary to hidden states.

    Raises:
        NotImplementedError: If called on a base model class that does not
            override this method.
    """
    base_model = getattr(self, self.base_model_prefix, self)
    if base_model is not self:
        # Head/wrapper models delegate to the wrapped base model.
        base_model.set_input_embeddings(value)
    else:
        raise NotImplementedError
Args:
value (`nn.Module`): A module mapping vocabulary to hidden states. | github-repos |
def get_releasenotes(project_dir=os.curdir, bugtracker_url=''):
    """Retrieve the release notes.

    Reads the RELEASE_NOTES file when running from a packaged source tree
    (PKG-INFO present), otherwise generates the notes from the git history.

    Args:
        project_dir (str): Path to the git repo of the project.
        bugtracker_url (str): Url to the bug tracker for the issues.

    Returns:
        str: Release notes.
    """
    releasenotes = ''
    pkg_info_file = os.path.join(project_dir, 'PKG-INFO')
    releasenotes_file = os.path.join(project_dir, 'RELEASE_NOTES')
    if os.path.exists(pkg_info_file) and os.path.exists(releasenotes_file):
        # Packaged distribution: the notes were generated at package time.
        with open(releasenotes_file) as releasenotes_fd:
            releasenotes = releasenotes_fd.read()
    else:
        # Working git checkout: derive the notes from the repo history.
        releasenotes = api.get_releasenotes(
            repo_path=project_dir,
            bugtracker_url=bugtracker_url,
        )
    return releasenotes
or generates it from the git history.
Args:
project_dir(str): Path to the git repo of the project.
bugtracker_url(str): Url to the bug tracker for the issues.
Returns:
str: release notes
Raises:
RuntimeError: If the release notes could not be retrieved | juraj-google-style |
def embeddings(idx):
    """Build weight-rename pairs for one stage's patch embedding layer.

    Args:
        idx: Stage number in the original model.

    Returns:
        list[tuple[str, str]]: (HF name, original name) pairs covering the
        projection and normalization weights and biases of stage ``idx``.
    """
    hf_prefix = f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings'
    orig_prefix = f'stage{idx}.patch_embed'
    return [
        (f'{hf_prefix}.projection.weight', f'{orig_prefix}.proj.weight'),
        (f'{hf_prefix}.projection.bias', f'{orig_prefix}.proj.bias'),
        (f'{hf_prefix}.normalization.weight', f'{orig_prefix}.norm.weight'),
        (f'{hf_prefix}.normalization.bias', f'{orig_prefix}.norm.bias'),
    ]
Args:
idx: stage number in original model | github-repos |
def apply(self, func, axis, *args, **kwargs):
    """Apply func across the given axis.

    Args:
        func: A callable, a dict of callables, or a list-like of callables.
        axis: Target axis to apply the function along.

    Returns:
        A new PandasQueryCompiler, or None for unsupported func types.
    """
    if callable(func):
        return self._callable_func(func, axis, *args, **kwargs)
    if isinstance(func, dict):
        return self._dict_func(func, axis, *args, **kwargs)
    if is_list_like(func):
        return self._list_like_func(func, axis, *args, **kwargs)
    # Unsupported func type: fall through and return None, matching the
    # previous silent no-op behavior.
    return None
Args:
func: The function to apply.
axis: Target axis to apply the function along.
Returns:
A new PandasQueryCompiler. | juraj-google-style |
def cast_if_floating_dtype(x, dtype=None):
    """Casts the given data tensors to the default floating point type.

    Casts only if the input is already a floating point type; each element
    of the nested structure is handled by `cast_single_tensor`.

    Args:
        x: tensor or list/tuple (nested structure) of tensors.
        dtype: The dtype to which Tensors should be cast.

    Returns:
        Converted input, with the same nested structure as `x`.
    """
    return nest.map_structure(functools.partial(cast_single_tensor, dtype=dtype), x)
Casts only if the input is already a floating point type.
Args:
x: tensor or list/tuple of tensors.
dtype: The dtype to which Tensors should be cast.
Returns:
Converted input. | github-repos |
def evaluate_model(self, accuracy, num_steps, feed_vars=(), feed_data=None, summary_tag=None, print_every=0):
    """Evaluates the given model.

    Args:
        accuracy: The metric that is being evaluated or a tuple of metrics.
        num_steps: The number of steps to run in the evaluator.
        feed_vars: A list or tuple of the variables that will be fed.
        feed_data: A generator that produces tuples of the same length as
            feed_vars.
        summary_tag: If provided, the final result of running the model
            will be published to this tag; one tag per accuracy node.
        print_every: Print a summary every so many steps; use 0 to disable.

    Returns:
        The accuracy value(s), one per requested metric.

    Raises:
        ValueError: If the model was not initialized first, or the wrong
            number of summary tags is provided.
    """
    if (not hasattr(self, '_saver')):
        raise ValueError('Before evaluating, you must initialize the model with load_from_checkpoint, prepare or saver.')
    self._run_init_test_vars_op()
    # Normalize a single metric (which may be a string) into a 1-tuple so
    # the remaining code can treat metrics uniformly.
    if ((not isinstance(accuracy, collections.Sequence)) or isinstance(accuracy, six.string_types)):
        accuracy = (accuracy,)
        if summary_tag:
            summary_tag = (summary_tag,)
    if (summary_tag and (len(summary_tag) != len(accuracy))):
        raise ValueError('If summaries are requested, there must be a tag per accuracy node.')
    result = self.run_model(accuracy, num_steps, feed_vars=feed_vars, feed_data=feed_data, print_every=print_every, allow_initialize=False)
    # run_model prepends one extra value to the metric results.
    assert (len(result) == (len(accuracy) + 1)), ('results is wrong length, was %s but should be 1 longer than %s' % (result, accuracy))
    if summary_tag:
        self.add_summaries(result[0], *zip(summary_tag, result[1:]))
    return result[1:]
Args:
accuracy: The metric that is being evaluated or a tuple of metrics.
num_steps: The number of steps to run in the evaluator.
feed_vars: A list or tuple of the variables that will be fed.
feed_data: A generator that produces tuples of the same length as
feed_vars.
summary_tag: If provided, the final result of running the model will be
published to this tag.
print_every: Print a summary every so many steps, use 0 to disable.
Returns:
The accuracy.
Raises:
ValueError: If the wrong number of summary tags are provided or previously
running QueueRunners haven't been stopped. | codesearchnet |
def GetStandardAddress(self):
    """Get the Wallet's default address.

    Returns:
        UInt160: Script hash of the first standard contract found.

    Raises:
        Exception: If the wallet holds no standard contract.
    """
    match = next(
        (contract for contract in self._contracts.values() if contract.IsStandard),
        None)
    if match is None:
        raise Exception('Could not find a standard contract address')
    return match.ScriptHash
Raises:
Exception: if no default contract address is set.
Returns:
UInt160: script hash. | codesearchnet |
def transform(self, value):
    """Normalize a single or batch tensor.

    Applies the transformations activated in the constructor (centering,
    scaling, clipping) using current estimates of mean and variance.

    Args:
        value: Batch or single value tensor.

    Returns:
        Normalized batch or single value tensor.
    """
    with tf.name_scope(self._name + '/transform'):
        # A non-batched input is detected by comparing ranks against the
        # stored mean and is temporarily given a leading batch dimension.
        no_batch_dim = value.shape.ndims == self._mean.shape.ndims
        if no_batch_dim:
            value = value[None, ...]
        if self._center:
            value -= self._mean[None, ...]
        if self._scale:
            # Until more than one sample has been observed the std estimate
            # is unusable, so divide by ones; the epsilon avoids division
            # by zero afterwards.
            value /= tf.cond(
                self._count > 1, lambda: self._std() + 1e-8,
                lambda: tf.ones_like(self._var_sum))[None]
        if self._clip:
            value = tf.clip_by_value(value, -self._clip, self._clip)
        if no_batch_dim:
            value = value[0]
        return tf.check_numerics(value, 'value')
Applies the activated transformations in the constructor using current
estimates of mean and variance.
Args:
value: Batch or single value tensor.
Returns:
Normalized batch or single value tensor. | juraj-google-style |
def setData(self, data, setName=None):
    """Assign the data in the dataframe to the AMPL entities.

    Entity names are matched against the dataframe's column names.

    Args:
        data: The dataframe containing the data to be assigned; a pandas
            DataFrame is converted to an AMPL DataFrame first.
        setName: The name of the set to which the indices values of the
            DataFrame are to be assigned, or None.

    Raises:
        AMPLException: if the data assignment procedure was not successful.
    """
    if (not isinstance(data, DataFrame)):
        # Accept pandas DataFrames transparently when pandas is available.
        if ((pd is not None) and isinstance(data, pd.DataFrame)):
            data = DataFrame.fromPandas(data)
    # The underlying engine call is serialized through the instance lock.
    if (setName is None):
        lock_and_call((lambda : self._impl.setData(data._impl)), self._lock)
    else:
        lock_and_call((lambda : self._impl.setData(data._impl, setName)), self._lock)
corresponding to the column names.
Args:
data: The dataframe containing the data to be assigned.
setName: The name of the set to which the indices values of the
DataFrame are to be assigned.
Raises:
AMPLException: if the data assignment procedure was not successful. | codesearchnet |
def get_name_scope() -> str:
    """Returns the current name scope in the default graph.

    In eager mode the scope is read from the eager context instead of the
    default graph.

    Returns:
        A string representing the current name scope, without a trailing '/'.
    """
    if context.executing_eagerly():
        return context.context().scope_name.rstrip('/')
    return get_default_graph().get_name_scope()
return get_default_graph().get_name_scope() | Returns the current name scope in the default_graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope. | github-repos |
def register_flag_by_module(self, module_name, flag):
    """Records the module that defines a specific flag.

    Tracking which flag is defined by which module allows the flags to be
    sorted by module later.

    Args:
        module_name: str, the name of a Python module.
        flag: Flag, the Flag instance defined by that module.
    """
    per_module = self.flags_by_module_dict()
    if module_name not in per_module:
        per_module[module_name] = []
    per_module[module_name].append(flag)
We keep track of which flag is defined by which module so that we
can later sort the flags by module.
Args:
module_name: str, the name of a Python module.
flag: Flag, the Flag instance that is key to the module. | juraj-google-style |
def _is_variant(self, gemini_variant, ind_objs):
indexes = (ind.ind_index for ind in ind_objs)
for index in indexes:
gt_call = gemini_variant['gt_types'][index]
if ((gt_call == 1) or (gt_call == 3)):
return True
return False | Check if the variant is a variation in any of the individuals
Args:
gemini_variant (GeminiQueryRow): The gemini variant
ind_objs (list(puzzle.models.individual)): A list of individuals to check
Returns:
bool : If any of the individuals has the variant | codesearchnet |
def run(stream_spec, cmd='ffmpeg', capture_stdout=False, capture_stderr=False, input=None, quiet=False, overwrite_output=False):
    """Invoke ffmpeg for the supplied node graph and wait for it to finish.

    Args:
        stream_spec: The ffmpeg node graph to execute.
        cmd: ffmpeg executable name or path.
        capture_stdout: if True, capture stdout (to be used with
            ``pipe:`` ffmpeg outputs).
        capture_stderr: if True, capture stderr.
        input: text to be sent to stdin (to be used with ``pipe:``
            ffmpeg inputs).
        quiet: shorthand for setting ``capture_stdout`` and ``capture_stderr``.
        overwrite_output: forwarded to ``run_async`` to allow overwriting
            existing output files.

    Returns:
        (out, err) tuple containing captured stdout and stderr data.

    Raises:
        Error: If ffmpeg exits with a non-zero return code.
    """
    process = run_async(stream_spec, cmd, pipe_stdin=(input is not None), pipe_stdout=capture_stdout, pipe_stderr=capture_stderr, quiet=quiet, overwrite_output=overwrite_output)
    (out, err) = process.communicate(input)
    retcode = process.poll()
    if retcode:
        raise Error('ffmpeg', out, err)
    return (out, err)
Args:
capture_stdout: if True, capture stdout (to be used with
``pipe:`` ffmpeg outputs).
capture_stderr: if True, capture stderr.
quiet: shorthand for setting ``capture_stdout`` and ``capture_stderr``.
input: text to be sent to stdin (to be used with ``pipe:``
ffmpeg inputs)
**kwargs: keyword-arguments passed to ``get_args()`` (e.g.
``overwrite_output=True``).
Returns: (out, err) tuple containing captured stdout and stderr data. | codesearchnet |
def visit_Call(self, node):
    """Propagate the 'debug' wrapper into inner function calls if needed.

    Rewrites ``f(args)`` so that ``f`` is replaced by
    ``debug(f, ignore_exceptions, catch_exception, depth)`` before being
    called, allowing nested calls to be wrapped as well.

    Args:
        node (ast.Call): Call node to rewrite.

    Returns:
        ast.Call: The (possibly rewritten) node.
    """
    if self.depth == 0:
        # Depth exhausted: stop propagating the wrapper deeper.
        return node
    if self.ignore_exceptions is None:
        ignore_exceptions = ast.Name("None", ast.Load())
    else:
        ignore_exceptions = ast.List(self.ignore_exceptions, ast.Load())
    catch_exception_type = self.catch_exception \
        if self.catch_exception else "None"
    catch_exception = ast.Name(catch_exception_type, ast.Load())
    # A depth of -1 encodes "unlimited" for the propagated wrapper.
    depth = ast.Num(self.depth - 1 if self.depth > 0 else -1)
    debug_node_name = ast.Name("debug", ast.Load())
    # Python 2's ast.Call takes two extra (starargs, kwargs) arguments.
    call_extra_parameters = [] if IS_PYTHON_3 else [None, None]
    node.func = ast.Call(debug_node_name,
                         [node.func, ignore_exceptions,
                          catch_exception, depth],
                         [], *call_extra_parameters)
    return node
Args:
node (ast.AST): node statement to surround. | juraj-google-style |
def CreateRetryTask(self):
    """Creates a new task to retry a previously abandoned task.

    The retry task will have a new identifier but most of the attributes
    will be a copy of the previously abandoned task.

    Returns:
        Task: a task to retry a previously abandoned task.
    """
    retry_task = Task(session_identifier=self.session_identifier)
    retry_task.file_entry_type = self.file_entry_type
    retry_task.merge_priority = self.merge_priority
    retry_task.path_spec = self.path_spec
    retry_task.storage_file_size = self.storage_file_size
    # Record on the original task that a retry has been created.
    self.has_retry = True
    return retry_task
The retry task will have a new identifier but most of the attributes
will be a copy of the previously abandoned task.
Returns:
Task: a task to retry a previously abandoned task. | codesearchnet |
def FromDateTimeToTimestamp(datetime_obj):
    """Converts a datetime object to an internal nss_cache timestamp.

    Naive datetimes are interpreted as UTC. Aware datetimes are converted
    to UTC first; the previous behavior of simply stripping tzinfo shifted
    the result by the datetime's UTC offset.

    Args:
        datetime_obj: datetime.datetime instance, naive or timezone-aware.

    Returns:
        int: number of seconds since epoch.
    """
    if datetime_obj.tzinfo is not None:
        # Normalize aware datetimes to UTC before dropping tzinfo so the
        # subtraction below yields a true epoch offset.
        datetime_obj = datetime_obj.astimezone(datetime.timezone.utc)
    dt = datetime_obj.replace(tzinfo=None)
    return int((dt - datetime.datetime(1970, 1, 1)).total_seconds())
Args:
        datetime_obj: a datetime.datetime instance to convert.
Returns:
number of seconds since epoch | github-repos |
async def get_action_context_and_template(chain, parent_link, decision_link):
    """Get the appropriate json-e context and template for an action task.

    Args:
        chain (ChainOfTrust): the chain of trust.
        parent_link (LinkOfTrust): the parent link to test.
        decision_link (LinkOfTrust): the parent link's decision task link.

    Returns:
        (dict, dict): the json-e context and template.

    Raises:
        CoTError: if the action definition has an unknown kind.
    """
    actions_path = decision_link.get_artifact_full_path('public/actions.json')
    all_actions = load_json_or_yaml(actions_path, is_path=True)['actions']
    action_name = get_action_callback_name(parent_link.task)
    action_defn = _get_action_from_actions_json(all_actions, action_name)
    jsone_context = (await populate_jsone_context(chain, parent_link, decision_link, 'action'))
    # Legacy (cot v2 and earlier) actions embed the task template directly.
    if (('task' in action_defn) and (chain.context.config['min_cot_version'] <= 2)):
        tmpl = {'tasks': [action_defn['task']]}
    elif (action_defn.get('kind') == 'hook'):
        # Hook actions wrap the in-tree template and rebuild the context
        # around the rendered hook payload.
        in_tree_tmpl = (await get_in_tree_template(decision_link))
        action_perm = _get_action_perm(action_defn)
        tmpl = _wrap_action_hook_with_let(in_tree_tmpl, action_perm)
        jsone_context = {'payload': _render_action_hook_payload(action_defn, jsone_context, parent_link), 'taskId': parent_link.task_id, 'now': jsone_context['now'], 'as_slugid': jsone_context['as_slugid'], 'clientId': jsone_context.get('clientId')}
    elif (action_defn.get('kind') == 'task'):
        # Task actions reuse the in-tree template and copy selected keys
        # from the action's decision hook payload into the context.
        tmpl = (await get_in_tree_template(decision_link))
        for k in ('action', 'push', 'repository'):
            jsone_context[k] = deepcopy(action_defn['hookPayload']['decision'].get(k, {}))
        jsone_context['action']['repo_scope'] = get_repo_scope(parent_link.task, parent_link.name)
    else:
        raise CoTError('Unknown action kind `{kind}` for action `{name}`.'.format(kind=action_defn.get('kind', '<MISSING>'), name=action_defn.get('name', '<MISSING>')))
    return (jsone_context, tmpl)
Args:
chain (ChainOfTrust): the chain of trust.
parent_link (LinkOfTrust): the parent link to test.
decision_link (LinkOfTrust): the parent link's decision task link.
tasks_for (str): the reason the parent link was created (cron,
hg-push, action)
Returns:
(dict, dict): the json-e context and template. | codesearchnet |
def autocorrect(query, possibilities, delta=0.75):
    """Attempts to figure out which possibility the query refers to.

    The query is first completed as far as possible against possibilities
    sharing its prefix, then fuzzy-matched against the candidate list.

    Args:
        query (unicode): query to attempt to complete.
        possibilities (list): list of possible answers for the query.
        delta (float): minimum similarity (difflib cutoff) between the
            query and a possibility for it to be considered.

    Returns:
        unicode: best guess of the correct answer.

    Raises:
        AssertionError: raised if no matches are found.
    """
    candidates = [candidate.lower() for candidate in possibilities]
    if query in candidates:
        return query
    prefixed = [word for word in candidates if word.startswith(query)]
    if prefixed:
        # Narrow to prefix matches and extend the query to their longest
        # common substring before fuzzy matching.
        candidates = prefixed
        query = max_substring(prefixed)
    matches = get_close_matches(query, candidates, cutoff=delta)
    try:
        assert len(matches) > 0
    except AssertionError:
        raise AssertionError('No matches for "{0}" found'.format(query))
    return matches[0]
This autocorrect function is rather simple right now with plans for later
improvement. Right now, it just attempts to finish spelling a word as much
as possible, and then determines which possibility is closest to said word.
Args:
query (unicode): query to attempt to complete
possibilities (list): list of unicodes of possible answers for query
delta (float): Minimum delta similarity between query and
any given possibility for possibility to be considered.
Delta used by difflib.get_close_matches().
Returns:
unicode: best guess of correct answer
Raises:
AssertionError: raised if no matches found
Example:
.. code-block:: Python
>>> autocorrect('bowtei', ['bowtie2', 'bot'])
'bowtie2' | codesearchnet |
def get_device_name(self, cached=True):
    """Returns the SK8 device BLE name.

    Args:
        cached (bool): if True and a name is already cached locally, the
            cached copy is returned. If False, or no name is cached, the
            name is read from the device instead.

    Returns:
        str. The current device name. May be `None` if an error occurs.
    """
    if cached and self.name is not None:
        return self.name
    device_name = self.get_characteristic_handle_from_uuid(UUID_DEVICE_NAME)
    if device_name is None:
        logger.warn('Failed to find handle for device name')
        return None
    # Read the device-name attribute over BLE and cache it for later calls.
    self.name = self.dongle._read_attribute(self.conn_handle, device_name)
    return self.name
Args:
cached (bool): if True, returns the locally cached copy of the name. If this is
set to False, or the name is not cached, it will read from the device instead.
Returns:
str. The current device name. May be `None` if an error occurs. | juraj-google-style |
def union(cls, *mhs):
    """Create a MinHash which is the union of the given MinHash objects.

    Args:
        *mhs: Two or more MinHash objects sharing the same seed and
            number of permutation functions.

    Returns:
        datasketch.MinHash: A new union MinHash.

    Raises:
        ValueError: If fewer than two MinHash objects are given, or their
            seeds / permutation counts differ.
    """
    if len(mhs) < 2:
        raise ValueError('Cannot union less than 2 MinHash')
    first = mhs[0]
    num_perm = len(first)
    seed = first.seed
    for other in mhs:
        if other.seed != seed or len(other) != num_perm:
            raise ValueError('The unioning MinHash must have the same seed and number of permutation functions')
    # The union's signature is the elementwise minimum of all signatures.
    hashvalues = np.minimum.reduce([m.hashvalues for m in mhs])
    return cls(num_perm=num_perm, seed=seed, hashvalues=hashvalues,
               permutations=first.permutations)
Args:
*mhs: The MinHash objects to be united. The argument list length is variable,
but must be at least 2.
Returns:
datasketch.MinHash: A new union MinHash. | codesearchnet |
def restart(self, **kwargs):
    """Restart this container. Similar to the ``docker restart`` command.

    Args:
        timeout (int): Number of seconds to try to stop for before killing
            the container. Once killed it will then be restarted. Default
            is 10 seconds.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    return self.client.api.restart(self.id, **kwargs)
Args:
timeout (int): Number of seconds to try to stop for before killing
the container. Once killed it will then be restarted. Default
is 10 seconds.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | juraj-google-style |
async def _auth_cram_md5(self, username, password):
    """Performs an authentication attempt using the CRAM-MD5 mechanism.

    The server challenge (from the 334 reply) is base64-decoded, hashed
    with HMAC-MD5 using the password as key, hex-encoded, prefixed with
    the username and a space, and sent back base64-encoded.

    Args:
        username (str): Identifier of the user trying to authenticate.
        password (str): Password for the user.

    Returns:
        (int, str): A (code, message) 2-tuple containing the server
            response.

    Raises:
        SMTPAuthenticationError: If the authentication attempt fails.
    """
    mechanism = 'CRAM-MD5'
    (code, message) = (await self.do_cmd('AUTH', mechanism, success=(334,)))
    # The 334 reply message carries the base64-encoded server challenge.
    decoded_challenge = base64.b64decode(message)
    challenge_hash = hmac.new(key=password.encode('utf-8'), msg=decoded_challenge, digestmod='md5')
    hex_hash = challenge_hash.hexdigest()
    response = '{} {}'.format(username, hex_hash)
    encoded_response = SMTP.b64enc(response)
    try:
        (code, message) = (await self.do_cmd(encoded_response, success=(235, 503)))
    except SMTPCommandFailedError as e:
        raise SMTPAuthenticationError(e.code, e.message, mechanism)
    return (code, message)
Protocol:
1. Send 'AUTH CRAM-MD5' to server ;
2. If the server replies with a 334 return code, we can go on:
1) The challenge (sent by the server) is base64-decoded ;
2) The decoded challenge is hashed using HMAC-MD5 and the user
password as key (shared secret) ;
3) The hashed challenge is converted to a string of lowercase
hexadecimal digits ;
4) The username and a space character are prepended to the hex
digits ;
5) The concatenation is base64-encoded and sent to the server.
6) If the server replies with a return code of 235, user is
authenticated.
Args:
username (str): Identifier of the user trying to authenticate.
password (str): Password for the user.
Raises:
ConnectionResetError: If the connection with the server is
unexpectedely lost.
SMTPAuthenticationError: If the authentication attempt fails.
Returns:
(int, str): A (code, message) 2-tuple containing the server
response. | codesearchnet |
def prefer_static_broadcast_shape(shape1, shape2, name='prefer_static_broadcast_shape'):
    """Convenience function which statically broadcasts shape when possible.

    Args:
        shape1: `1-D` integer `Tensor` or `TensorShape`.
        shape2: `1-D` integer `Tensor` or `TensorShape`.
        name: A string name to prepend to created ops.

    Returns:
        The broadcast shape, either as `TensorShape` (if broadcast can be
        done statically), or as a `Tensor`.
    """
    with tf.name_scope(name):
        def make_shape_tensor(x):
            # Canonicalize any shape-like input into an int32 tensor.
            return tf.convert_to_tensor(value=x, name='shape', dtype=tf.int32)
        def get_tensor_shape(s):
            # Return a static TensorShape for `s`, or None if unknown.
            if isinstance(s, tf.TensorShape):
                return s
            s_ = tf.get_static_value(make_shape_tensor(s))
            if (s_ is not None):
                return tf.TensorShape(s_)
            return None
        def get_shape_tensor(s):
            # Return a dynamic shape tensor; only fully defined
            # TensorShapes can be converted.
            if (not isinstance(s, tf.TensorShape)):
                return make_shape_tensor(s)
            if tensorshape_util.is_fully_defined(s):
                return make_shape_tensor(tensorshape_util.as_list(s))
            raise ValueError('Cannot broadcast from partially defined `TensorShape`.')
        # Prefer the static path: when both shapes are statically known the
        # broadcast happens at graph-construction time.
        shape1_ = get_tensor_shape(shape1)
        shape2_ = get_tensor_shape(shape2)
        if ((shape1_ is not None) and (shape2_ is not None)):
            return tf.broadcast_static_shape(shape1_, shape2_)
        # Otherwise fall back to a dynamic broadcast op.
        shape1_ = get_shape_tensor(shape1)
        shape2_ = get_shape_tensor(shape2)
        return tf.broadcast_dynamic_shape(shape1_, shape2_)
Args:
shape1: `1-D` integer `Tensor`. Already converted to tensor!
shape2: `1-D` integer `Tensor`. Already converted to tensor!
name: A string name to prepend to created ops.
Returns:
The broadcast shape, either as `TensorShape` (if broadcast can be done
statically), or as a `Tensor`. | codesearchnet |
def validate_layout_display(self, table, display_condition):
    """Check to see if the display condition passes.

    Args:
        table (str): The name of the DB table which holds the App data.
        display_condition (str): The "where" clause of the DB SQL
            statement, or None to always display.

    Returns:
        bool: True if no condition is given, or the row count matching the
        condition is greater than 0.
    """
    display = False
    if (display_condition is None):
        display = True
    else:
        # NOTE(review): the table name and condition are interpolated
        # directly into the SQL string; this is only safe if both come
        # from trusted configuration, not user input — confirm.
        display_query = 'select count(*) from {} where {}'.format(table, display_condition)
        try:
            cur = self.db_conn.cursor()
            cur.execute(display_query.replace('"', ''))
            rows = cur.fetchall()
            if (rows[0][0] > 0):
                display = True
        except sqlite3.Error as e:
            # A malformed condition aborts the program with a diagnostic.
            print('"{}" query returned an error: ({}).'.format(display_query, e))
            sys.exit(1)
    return display
Args:
table (str): The name of the DB table which hold the App data.
display_condition (str): The "where" clause of the DB SQL statement.
Returns:
bool: True if the row count is greater than 0. | codesearchnet |
def BuildFindSpecs(self, artifact_filter_names, environment_variables=None):
    """Builds find specifications from artifact definitions.

    Args:
        artifact_filter_names (list[str]): names of artifact definitions
            that are used for filtering file system and Windows Registry
            key paths.
        environment_variables (Optional[list[EnvironmentVariableArtifact]]):
            environment variables.
    """
    find_specs = []
    for name in artifact_filter_names:
        definition = self._artifacts_registry.GetDefinitionByName(name)
        if (not definition):
            # Unknown names are skipped rather than treated as errors.
            logger.debug('undefined artifact definition: {0:s}'.format(name))
            continue
        logger.debug('building find spec from artifact definition: {0:s}'.format(name))
        artifact_find_specs = self._BuildFindSpecsFromArtifact(definition, environment_variables)
        find_specs.extend(artifact_find_specs)
    # Partition the resulting specs by searcher type for later use.
    for find_spec in find_specs:
        if isinstance(find_spec, file_system_searcher.FindSpec):
            self.file_system_find_specs.append(find_spec)
        elif isinstance(find_spec, registry_searcher.FindSpec):
            self.registry_find_specs.append(find_spec)
        else:
            logger.warning('Unsupported find specification type: {0:s}'.format(type(find_spec)))
Args:
artifact_filter_names (list[str]): names of artifact definitions that are
used for filtering file system and Windows Registry key paths.
environment_variables (Optional[list[EnvironmentVariableArtifact]]):
environment variables. | codesearchnet |
def factored_joint_mvn(distributions):
    """Combine MultivariateNormals into a factored joint distribution.

    The joint distribution concatenates independent samples from the given
    multivariate normals: its mean is the concatenation of the component
    means, and its scale is block-diagonal in the component scale
    operators.

    Args:
        distributions: Python `iterable` of MultivariateNormal distribution
            instances. These must be broadcastable to a consistent batch
            shape, but may have different event shapes.

    Returns:
        joint_distribution: An instance of
            `tfd.MultivariateNormalLinearOperator` representing the joint
            distribution constructed by concatenating an independent
            sample from each input distribution.
    """
    graph_parents = [tensor for distribution in distributions for tensor in distribution._graph_parents]
    with tf.compat.v1.name_scope('factored_joint_mvn', values=graph_parents):
        dtype = tf.debugging.assert_same_float_dtype(distributions)
        # Broadcast each component mean to the common batch shape before
        # concatenating along the event dimension.
        broadcast_ones = tf.ones(broadcast_batch_shape(distributions), dtype=dtype)[(..., tf.newaxis)]
        return MultivariateNormalLinearOperator(loc=tf.concat([(mvn.mean() * broadcast_ones) for mvn in distributions], axis=(- 1)), scale=tfl.LinearOperatorBlockDiag([mvn.scale for mvn in distributions], is_square=True))
Given a list of multivariate normal distributions
`dist[i] = Normal(loc[i], scale[i])`, construct the joint
distribution given by concatenating independent samples from these
distributions. This is multivariate normal with mean vector given by the
concatenation of the component mean vectors, and block-diagonal covariance
matrix in which the blocks are the component covariances.
Note that for computational efficiency, multivariate normals are represented
by a 'scale' (factored covariance) linear operator rather than the full
covariance matrix.
Args:
distributions: Python `iterable` of MultivariateNormal distribution
instances (e.g., `tfd.MultivariateNormalDiag`,
`tfd.MultivariateNormalTriL`, etc.). These must be broadcastable to a
consistent batch shape, but may have different event shapes
(i.e., defined over spaces of different dimension).
Returns:
joint_distribution: An instance of `tfd.MultivariateNormalLinearOperator`
representing the joint distribution constructed by concatenating
an independent sample from each input distributions. | codesearchnet |
def __frontend_limit_descriptor(self, api_info):
    """Builds a frontend limit descriptor from API info.

    Args:
        api_info: An _ApiInfo object.

    Returns:
        A dictionary with frontend limit information, or None when the API
        defines no frontend limits.
    """
    if api_info.frontend_limits is None:
        return None
    descriptor = {}
    # Map snake_case attribute names onto their camelCase descriptor keys,
    # copying only the values that are actually set.
    for propname, descname in (('unregistered_user_qps', 'unregisteredUserQps'),
                               ('unregistered_qps', 'unregisteredQps'),
                               ('unregistered_daily', 'unregisteredDaily')):
        if getattr(api_info.frontend_limits, propname) is not None:
            descriptor[descname] = getattr(api_info.frontend_limits, propname)
    rules = self.__frontend_limit_rules_descriptor(api_info)
    if rules:
        descriptor['rules'] = rules
    return descriptor
Args:
api_info: An _ApiInfo object.
Returns:
A dictionary with frontend limit information. | juraj-google-style |
def _process_string_token(self, token, start_row, start_col):
    """Identify and check a string token from the token stream.

    Args:
        token: the token to check.
        start_row: the line on which the token was found.
        start_col: the column on which the token was found.
    """
    # Skip any string prefix characters to find the opening quote.
    for (i, char) in enumerate(token):
        if (char in QUOTES):
            break
    norm_quote = token[i:]
    # Triple-quoted strings are recorded for a separate check and skipped.
    if ((len(norm_quote) >= 3) and (norm_quote[:3] in TRIPLE_QUOTE_OPTS.values())):
        self._tokenized_triple_quotes[start_row] = (token, norm_quote[:3], start_row, start_col)
        return
    preferred_quote = SMART_QUOTE_OPTS.get(self.config.string_quote)
    if (self.config.string_quote in SMART_CONFIG_OPTS):
        # "Smart" modes prefer the alternative quote when the string body
        # contains the preferred quote but not the alternative, which
        # avoids escaping.
        other_quote = next((q for q in QUOTES if (q != preferred_quote)))
        if ((preferred_quote in token[(i + 1):(- 1)]) and (other_quote not in token[(i + 1):(- 1)])):
            preferred_quote = other_quote
    if (norm_quote[0] != preferred_quote):
        self._invalid_string_quote(quote=norm_quote[0], row=start_row, correct_quote=preferred_quote, col=start_col)
from the token stream.
Args:
token: the token to check.
start_row: the line on which the token was found.
start_col: the column on which the token was found. | codesearchnet |
def list_values(hive, key=None, use_32bit_registry=False, include_default=True):
    r'''
    Enumerates the values in a registry key or hive.

    Args:
        hive (str): The name of the hive. Can be one of HKEY_LOCAL_MACHINE
            (HKLM), HKEY_CURRENT_USER (HKCU), HKEY_USER (HKU),
            HKEY_CLASSES_ROOT (HKCR) or HKEY_CURRENT_CONFIG (HKCC).
        key (str): The key (looks like a path) to the value name. If a key
            is not passed, the values under the hive will be returned.
        use_32bit_registry (bool): Accesses the 32bit portion of the
            registry on 64 bit installations. Ignored on 32bit machines.
        include_default (bool): Toggle whether to include the '(Default)'
            value.

    Returns:
        list: A list of values under the hive or key.

    CLI Example:

    .. code-block:: bash

        salt '*' reg.list_values HKLM 'SYSTEM\\CurrentControlSet\\Services\\Tcpip'
    '''
    return __utils__['reg.list_values'](hive=hive, key=key, use_32bit_registry=use_32bit_registry, include_default=include_default)
Enumerates the values in a registry key or hive.
Args:
hive (str):
The name of the hive. Can be one of the following:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
- HKEY_CLASSES_ROOT or HKCR
- HKEY_CURRENT_CONFIG or HKCC
key (str):
The key (looks like a path) to the value name. If a key is not
passed, the values under the hive will be returned.
use_32bit_registry (bool):
Accesses the 32bit portion of the registry on 64 bit installations.
On 32bit machines this is ignored.
include_default (bool):
Toggle whether to include the '(Default)' value.
Returns:
list: A list of values under the hive or key.
CLI Example:
.. code-block:: bash
salt '*' reg.list_values HKLM 'SYSTEM\\CurrentControlSet\\Services\\Tcpip' | codesearchnet |
def prepare_message(self, message):
    """Prepares the message before sending it out.

    The channel path is recorded in the message metadata, then every
    registered handler is given a chance to process the outgoing message.

    Args:
        message (message.Message): the message to prepare.

    Returns:
        message.Message: the same message, after handler processing.
    """
    message.meta.update(path=self.path)
    for outgoing_handler in self.handlers:
        outgoing_handler.outgoing(message, self)
    return message
return message | Prepares the message before sending it out
Returns:
- message.Message: the message | codesearchnet |
def fn(x: tuple[int, ...]):
    """Identity test function.

    Args:
        x: The input tuple.

    Returns:
        The input, unchanged.
    """
    return x
Args:
x: The input
Returns:
The output | github-repos |
def learn(self, grad_arr, fix_opt_flag=False):
    """Update this Discriminator by ascending its stochastic gradient.

    Args:
        grad_arr: `np.ndarray` of gradients.
        fix_opt_flag: If `False`, no optimization in this model will be
            done.

    Returns:
        `np.ndarray` of delta or gradients.
    """
    delta_arr = super().learn(grad_arr, fix_opt_flag)
    # When the channel flag is set, only the first channel of the delta
    # array is returned.
    if self.__add_channel_flag is True:
        return delta_arr[:, 0]
    else:
        return delta_arr
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients. | juraj-google-style |
def prepare_amazon_algorithm_estimator(estimator, inputs, mini_batch_size=None):
if isinstance(inputs, list):
for record in inputs:
if (isinstance(record, amazon_estimator.RecordSet) and (record.channel == 'train')):
estimator.feature_dim = record.feature_dim
break
elif isinstance(inputs, amazon_estimator.RecordSet):
estimator.feature_dim = inputs.feature_dim
else:
raise TypeError('Training data must be represented in RecordSet or list of RecordSets')
estimator.mini_batch_size = mini_batch_size | Set up amazon algorithm estimator, adding the required `feature_dim` hyperparameter from training data.
Args:
estimator (sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase):
An estimator for a built-in Amazon algorithm to get information from and update.
inputs: The training data.
* (sagemaker.amazon.amazon_estimator.RecordSet) - A collection of
Amazon :class:~`Record` objects serialized and stored in S3.
For use with an estimator for an Amazon algorithm.
* (list[sagemaker.amazon.amazon_estimator.RecordSet]) - A list of
:class:~`sagemaker.amazon.amazon_estimator.RecordSet` objects, where each instance is
a different channel of training data. | codesearchnet |
def handle_callback(self, callback_id, ret_value, rpc_func_name): | Creates a callback handler for the asynchronous RPC.
Args:
callback_id: str, the callback ID for creating a callback handler object.
ret_value: any, the result field of the RPC response.
rpc_func_name: str, the name of the snippet function executed on the
server.
Returns:
The callback handler object. | github-repos |
def __init__(self, field, lower_bound, upper_bound):
super(BetweenClause, self).__init__(field, lower_bound, upper_bound)
self.field = field
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.validate() | Construct an expression that is true when the field value is within the given bounds.
Args:
field: LocalField Expression, denoting the field in consideration
lower_bound: lower bound constraint for given field
upper_bound: upper bound constraint for given field
Returns:
a new BetweenClause object | juraj-google-style |
def add_all_exchange_reactions(model, compartment, allow_duplicates=False):
all_reactions = {}
if (not allow_duplicates):
for rxnid in model.database.reactions:
rx = model.database.get_reaction(rxnid)
all_reactions[rx] = rxnid
added = set()
added_compounds = set()
initial_compounds = set(model.compounds)
reactions = set(model.database.reactions)
for model_compound in initial_compounds:
compound = model_compound.in_compartment(compartment)
if (compound in added_compounds):
continue
rxnid_ex = create_exchange_id(reactions, compound)
reaction_ex = Reaction(Direction.Both, {compound: (- 1)})
if (reaction_ex not in all_reactions):
model.database.set_reaction(rxnid_ex, reaction_ex)
reactions.add(rxnid_ex)
else:
rxnid_ex = all_reactions[reaction_ex]
if (not model.has_reaction(rxnid_ex)):
added.add(rxnid_ex)
model.add_reaction(rxnid_ex)
added_compounds.add(compound)
return added | Add all exchange reactions to database and to model.
Args:
model: :class:`psamm.metabolicmodel.MetabolicModel`. | codesearchnet |
def list(self, **kwargs):
request = Request('GET', '/v3/accounts')
response = self.ctx.request(request)
if (response.content_type is None):
return response
if (not response.content_type.startswith('application/json')):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
if (str(response.status) == '200'):
if (jbody.get('accounts') is not None):
parsed_body['accounts'] = [self.ctx.account.AccountProperties.from_dict(d, self.ctx) for d in jbody.get('accounts')]
elif (str(response.status) == '401'):
if (jbody.get('errorCode') is not None):
parsed_body['errorCode'] = jbody.get('errorCode')
if (jbody.get('errorMessage') is not None):
parsed_body['errorMessage'] = jbody.get('errorMessage')
elif (str(response.status) == '405'):
if (jbody.get('errorCode') is not None):
parsed_body['errorCode'] = jbody.get('errorCode')
if (jbody.get('errorMessage') is not None):
parsed_body['errorMessage'] = jbody.get('errorMessage')
else:
parsed_body = jbody
response.body = parsed_body
return response | Get a list of all Accounts authorized for the provided token.
Args:
Returns:
v20.response.Response containing the results from submitting the
request | codesearchnet |
def add_help_text(parent, filepath, prefix='!'):
import tkinter as tk
import tkinter.ttk as ttk
help_contents = get_help_data(filepath)
text = tk.Text(parent, wrap='word', font=('Helvetica', 10))
text.grid(row=0, column=0, sticky='W E N S')
text.tag_config('heading', font=('Helvetica', 14))
text.tag_config('command', font=('Courier', 10))
text.tag_config('param', font=('Courier', 10))
text.tag_config('description')
scrollbar = ttk.Scrollbar(parent, orient='vertical', command=text.yview)
scrollbar.grid(column=1, row=0, sticky='N S')
text['yscrollcommand'] = scrollbar.set
for d in help_contents:
text.insert('end', d, 'heading')
text.insert('end', '\n')
if ('commands' in d.lower()):
for c in help_contents[d]:
if ('name' not in c):
continue
command = (prefix + c['name'])
text.insert('end', command, ('command', 'description'))
if ('params' in c):
for param in c['params']:
text.insert('end', ' [{}]'.format(param), ('param', 'description'))
text.insert('end', ': ')
if ('description' in c):
text.insert('end', c['description'], 'description')
text.insert('end', '\n')
text.insert('end', '\n')
else:
text.insert('end', help_contents[d], 'description')
text.insert('end', '\n\n')
text.config(state=tk.DISABLED) | Load help text from a file and adds it to the parent
Args:
parent: A tk or ttk object
filepath (str): The file to load help text from
prefix (str): The prefix to use for commands | codesearchnet |
def match_not_exists(self, field, new_group=False):
return self.exclude_field(field, '*', new_group=new_group) | Require a field to not exist in the results.
Matches will not have ``field`` present.
Arguments:
field (str): The field to check.
The field must be namespaced according to Elasticsearch rules
using the dot syntax.
For example, ``"mdf.source_name"`` is the ``source_name`` field
of the ``mdf`` dictionary.
new_group (bool): If ``True``, will separate the term into a new parenthetical group.
If ``False``, will not.
**Default:** ``False``.
Returns:
SearchHelper: Self | codesearchnet |
def next_event_type(self):
type_ = self._libinput.libinput_next_event_type(self._li)
if (type_ == 0):
return None
else:
return EventType(type_) | Return the type of the next event in the internal queue.
This method does not pop the event off the queue and the next call
to :attr:`events` returns that event.
Returns:
~libinput.constant.EventType: The event type of the next available
event or :obj:`None` if no event is available. | codesearchnet |
def chrome_tracing_object_transfer_dump(self, filename=None):
client_id_to_address = {}
for client_info in ray.global_state.client_table():
client_id_to_address[client_info['ClientID']] = '{}:{}'.format(client_info['NodeManagerAddress'], client_info['ObjectManagerPort'])
all_events = []
for (key, items) in self.profile_table().items():
if (items[0]['component_type'] != 'object_manager'):
continue
for event in items:
if (event['event_type'] == 'transfer_send'):
(object_id, remote_client_id, _, _) = event['extra_data']
elif (event['event_type'] == 'transfer_receive'):
(object_id, remote_client_id, _, _) = event['extra_data']
elif (event['event_type'] == 'receive_pull_request'):
(object_id, remote_client_id) = event['extra_data']
else:
assert False, 'This should be unreachable.'
object_id_int = int(object_id[:2], 16)
color = self._chrome_tracing_colors[(object_id_int % len(self._chrome_tracing_colors))]
new_event = {'cat': event['event_type'], 'name': event['event_type'], 'pid': client_id_to_address[key], 'tid': client_id_to_address[remote_client_id], 'ts': self._seconds_to_microseconds(event['start_time']), 'dur': self._seconds_to_microseconds((event['end_time'] - event['start_time'])), 'ph': 'X', 'cname': color, 'args': event['extra_data']}
all_events.append(new_event)
if (event['event_type'] == 'transfer_send'):
additional_event = new_event.copy()
additional_event['cname'] = 'black'
all_events.append(additional_event)
elif (event['event_type'] == 'transfer_receive'):
additional_event = new_event.copy()
additional_event['cname'] = 'grey'
all_events.append(additional_event)
else:
pass
if (filename is not None):
with open(filename, 'w') as outfile:
json.dump(all_events, outfile)
else:
return all_events | Return a list of transfer events that can viewed as a timeline.
To view this information as a timeline, simply dump it as a json file
by passing in "filename" or using using json.dump, and then load go to
chrome://tracing in the Chrome web browser and load the dumped file.
Make sure to enable "Flow events" in the "View Options" menu.
Args:
filename: If a filename is provided, the timeline is dumped to that
file.
Returns:
If filename is not provided, this returns a list of profiling
events. Each profile event is a dictionary. | codesearchnet |
def to_json(self):
return {'xblock_id': six.text_type(self.xblock_id), 'messages': [message.to_json() for message in self.messages], 'empty': self.empty} | Convert to a json-serializable representation.
Returns:
dict: A dict representation that is json-serializable. | codesearchnet |
def read_dftbp(filename):
infile = open(filename, 'r')
lines = infile.readlines()
for ss in lines:
if ss.strip().startswith('
lines.remove(ss)
natoms = int(lines[0].split()[0])
symbols = lines[1].split()
if (lines[0].split()[1].lower() == 'f'):
is_scaled = True
scale_pos = 1
scale_latvecs = dftbpToBohr
else:
is_scaled = False
scale_pos = dftbpToBohr
scale_latvecs = dftbpToBohr
positions = []
expaned_symbols = []
for ii in range(2, (natoms + 2)):
lsplit = lines[ii].split()
expaned_symbols.append(symbols[(int(lsplit[1]) - 1)])
positions.append([(float(ss) * scale_pos) for ss in lsplit[2:5]])
origin = [float(ss) for ss in lines[(natoms + 2)].split()]
cell = []
for ii in range((natoms + 3), (natoms + 6)):
lsplit = lines[ii].split()
cell.append([(float(ss) * scale_latvecs) for ss in lsplit[:3]])
cell = np.array(cell)
if is_scaled:
atoms = Atoms(symbols=expaned_symbols, cell=cell, scaled_positions=positions)
else:
atoms = Atoms(symbols=expaned_symbols, cell=cell, positions=positions)
return atoms | Reads DFTB+ structure files in gen format.
Args:
filename: name of the gen-file to be read
Returns:
atoms: an object of the phonopy.Atoms class, representing the structure
found in filename | codesearchnet |
def get_course_runs_from_program(program):
course_runs = set()
for course in program.get("courses", []):
for run in course.get("course_runs", []):
if "key" in run and run["key"]:
course_runs.add(run["key"])
return course_runs | Return course runs from program data.
Arguments:
program(dict): Program data from Course Catalog API
Returns:
set: course runs in given program | juraj-google-style |
def put(self, filename):
from . import LocalFile
target = get_target_path(filename, self.source)
with self.open('rb') as infile, open(target, 'wb') as outfile:
shutil.copyfileobj(infile, outfile)
return LocalFile(target) | Write the file to the given path
Args:
filename(str): path to write this file to
Returns:
LocalFile: reference to the copy of the file stored at ``filename`` | juraj-google-style |
def headers(self, headers=None, **kw):
headers = (kw if kw else headers)
self._request.headers = headers
self.add_matcher(matcher('HeadersMatcher', headers)) | Defines a dictionary of arguments.
Header keys are case insensitive.
Arguments:
headers (dict): headers to match.
**headers (dict): headers to match as variadic keyword arguments.
Returns:
self: current Mock instance. | codesearchnet |
def generate_visualizations(methods, data, true_labels, base_dir = 'visualizations',
figsize=(18,10), **scatter_options):
plt.figure(figsize=figsize)
for method in methods:
preproc= method[0]
if isinstance(preproc, Preprocess):
preprocessed, ll = preproc.run(data)
output_names = preproc.output_names
else:
p1 = data
output_names = ['']
for p in preproc:
p1, ll = p.run(p1)
p1 = p1[0]
output_names[0] = output_names[0] + p.output_names[0]
preprocessed = [p1]
for r, name in zip(preprocessed, output_names):
print(name)
if r.shape[0]==2:
r_dim_red = r
else:
if sparse.issparse(r) and r.shape[0] > 100:
name = 'tsvd_' + name
tsvd = TruncatedSVD(50)
r_dim_red = tsvd.fit_transform(r.T)
try:
tsne = TSNE(2)
r_dim_red = tsne.fit_transform(r_dim_red).T
name = 'tsne_' + name
except:
tsvd2 = TruncatedSVD(2)
r_dim_red = tsvd2.fit_transform(r_dim_red).T
else:
name = 'tsne_' + name
tsne = TSNE(2)
r_dim_red = tsne.fit_transform(r.T).T
if isinstance(method[1], list):
for clustering_method in method[1]:
try:
cluster_labels = clustering_method.run(r)
except:
print('clustering failed')
continue
output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
else:
clustering_method = method[1]
try:
cluster_labels = clustering_method.run(r)
except:
print('clustering failed')
continue
output_path = base_dir + '/{0}_{1}_labels.png'.format(name, clustering_method.name)
visualize_dim_red(r_dim_red, cluster_labels, output_path, **scatter_options)
output_path = base_dir + '/{0}_true_labels.png'.format(name)
visualize_dim_red(r_dim_red, true_labels, output_path, **scatter_options) | Generates visualization scatters for all the methods.
Args:
methods: follows same format as run_experiments. List of tuples.
data: genes x cells
true_labels: array of integers
base_dir: base directory to save all the plots
figsize: tuple of ints representing size of figure
scatter_options: options for plt.scatter | juraj-google-style |
def get(self, element):
if self['link']['item'][element]['class'] == 'dataset':
return Dataset.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'collection':
return Collection.read(self['link']['item'][element]['href'])
elif self['link']['item'][element]['class'] == 'dimension':
return Dimension.read(self['link']['item'][element]['href'])
else:
raise ValueError(
"Class not allowed. Please use dataset, collection or "
"dimension'") | Gets ith element of a collection in an object of the corresponding \
class.
Args:
output(string): can accept 'jsonstat' or 'dataframe_list'
Returns:
Serialized JSONstat or a list of Pandas Dataframes,depending on \
the 'output' parameter. | juraj-google-style |
def xldate_as_datetime(xldate, datemode=0, option='to_datetime'):
if (option == 'to_float'):
d = ((xldate - 25589) * 86400.0)
else:
try:
d = (datetime.datetime(1899, 12, 30) + datetime.timedelta(days=(xldate + (1462 * datemode))))
if (option == 'to_string'):
date_format = '%Y-%m-%d %H:%M:%S'
d = d.strftime(date_format)
except TypeError:
logging.info(f'The date is not of correct type [{xldate}]')
d = xldate
return d | Converts a xls date stamp to a more sensible format.
Args:
xldate (str): date stamp in Excel format.
datemode (int): 0 for 1900-based, 1 for 1904-based.
option (str): option in ("to_datetime", "to_float", "to_string"),
return value
Returns:
datetime (datetime object, float, or string). | codesearchnet |
def remove_handler(self, handler: Handler, group: int = 0):
if isinstance(handler, DisconnectHandler):
self.disconnect_handler = None
else:
self.dispatcher.remove_handler(handler, group) | Removes a previously-added update handler.
Make sure to provide the right group that the handler was added in. You can use
the return value of the :meth:`add_handler` method, a tuple of (handler, group), and
pass it directly.
Args:
handler (``Handler``):
The handler to be removed.
group (``int``, *optional*):
The group identifier, defaults to 0. | juraj-google-style |
def set_license(self, license, **kwargs):
data = {'license': license}
return self.http_post('/license', post_data=data, **kwargs) | Add a new license.
Args:
license (str): The license string
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabPostError: If the server cannot perform the request
Returns:
dict: The new license information | juraj-google-style |
def ParseOptions(cls, options, config_object, category=None, names=None):
for (helper_name, helper_class) in cls._helper_classes.items():
if ((category and (helper_class.CATEGORY != category)) or (names and (helper_name not in names))):
continue
try:
helper_class.ParseOptions(options, config_object)
except errors.BadConfigObject:
pass | Parses and validates arguments using the appropriate helpers.
Args:
options (argparse.Namespace): parser options.
config_object (object): object to be configured by an argument helper.
category (Optional[str]): category of helpers to apply to
the group, such as storage, output, where None will apply the
arguments to all helpers. The category can be used to add arguments
to a specific group of registered helpers.
names (Optional[list[str]]): names of argument helpers to apply,
where None will apply the arguments to all helpers. | codesearchnet |
def sample(self, features):
(logits, losses) = self(features)
if self._target_modality_is_real:
return (logits, logits, losses)
if (self.hparams.sampling_method == 'argmax'):
samples = tf.argmax(logits, axis=(- 1))
else:
assert (self.hparams.sampling_method == 'random')
def multinomial_squeeze(logits, temperature=1.0):
logits_shape = common_layers.shape_list(logits)
reshaped_logits = (tf.reshape(logits, [(- 1), logits_shape[(- 1)]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices, logits_shape[:(- 1)])
return choices
samples = multinomial_squeeze(logits, self.hparams.sampling_temp)
return (samples, logits, losses) | Run the model and extract samples.
Args:
features: an map of string to `Tensor`.
Returns:
samples: an integer `Tensor`.
logits: a list of `Tensor`s, one per datashard.
losses: a dictionary: {loss-name (string): floating point `Scalar`}. | codesearchnet |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.