| content (string, lengths 22-815k) | id (int64, 0-4.91M) |
|---|---|
def set_default_styles(svg_element):
"""
    sets fill, stroke and stroke-width to defaults so that the element is at
    least visibly displayed
    :param svg_element: the SVG element to style
    :return: None
"""
assert svg_element is not None
svg_element.set_fill(NONE)
svg_element.set_stroke(RED)
svg_element.set_stroke_width("1")
| 17,600
|
def float_or_none(string):
""" Returns float number iff string represents one, else return None. TESTS OK 2020-10-24. """
try:
return float(string)
except (ValueError, TypeError):
return None
| 17,601
|
def move_release_to_another_collection_folder(user: UserWithUserTokenBasedAuthentication,
username: str,
source_folder_id: int,
destination_folder_id: int,
release_id: int,
instance_id: int
) -> requests.models.Response:
"""
    Move an instance of a release to another folder.
User Authentication needed.
Parameters:
user: user object (required)
username: string (required)
-> The username of the collection you are trying to retrieve.
source_folder_id: number (required)
-> The ID of the source folder.
destination_folder_id: number (required)
-> The ID of the destination folder.
release_id: number (required)
-> The ID of the release you are modifying.
instance_id: number (required)
-> The ID of the instance.
"""
url = f"{USERS_URL}/{username}/collection/folders/{source_folder_id}/releases/{release_id}/instances/{instance_id}"
params = user.params
headers = user.headers
data = {"folder_id": destination_folder_id}
return requests.post(url, headers=headers, params=params, json=data)
| 17,602
|
def test_badbase_error():
"""Test that an error is raised if base is invalid."""
C = classifier.BinaryClassifier(np.zeros(2), 1)
with pytest.raises(TypeError):
core.global_entropy(C, 4)
| 17,603
|
def load_region_maps(region_file):
"""Extracts creates a map from PHI region id to a continuous region id."""
region_ids = [] # Used mainly for eval
region_ids_inv = {} # Used in data loader
region_names_inv = {} # Used in eval
for l in region_file.read().strip().split('\n'):
tok_name_id, _ = l.strip().split(';') # second field is frequency, unused
region_name, region_id = tok_name_id.split('_')
region_name = region_name.strip()
region_id = int(region_id)
# Ignore unknown regions:
if ((region_name == 'Unknown Provenances' and region_id == 884) or
(region_name == 'unspecified subregion' and region_id == 885) or
(region_name == 'unspecified subregion' and region_id == 1439)):
continue
region_ids.append(region_id)
region_ids_inv[region_id] = len(region_ids_inv)
region_names_inv[len(region_names_inv)] = region_name
return {
'ids': region_ids,
'ids_inv': region_ids_inv,
'names_inv': region_names_inv
}
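
# Usage sketch for load_region_maps; the "<name>_<id>;<frequency>" line format is
# inferred from the parsing above, and the sample values are hypothetical.
import io

maps = load_region_maps(io.StringIO("Attica_10;532\nCrete_24;118\n"))
# maps['ids'] == [10, 24]; maps['ids_inv'] == {10: 0, 24: 1}
# maps['names_inv'] == {0: 'Attica', 1: 'Crete'}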
| 17,604
|
def _find_nearest_idx(a, a0):
"""Element idx in nd array `a` closest to the scalar value `a0`."""
if isinstance(a, list):
a = np.array(a)
idx = np.abs(a - a0).argmin()
return idx
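
# Quick example (numpy as np is assumed, as in the function itself):
assert _find_nearest_idx([0.0, 0.5, 1.0], 0.7) == 1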
| 17,605
|
def build_argument_parser():
"""
Builds the argument parser
:return: the argument parser
:rtype: ArgumentParser
"""
opts = ArgumentParser()
opts.add_argument(dest='filename', help='Filename or release name to guess', nargs='*')
naming_opts = opts.add_argument_group("Naming")
naming_opts.add_argument('-t', '--type', dest='type', default=None,
help='The suggested file type: movie, episode. If undefined, type will be guessed.')
naming_opts.add_argument('-n', '--name-only', dest='name_only', action='store_true', default=None,
help='Parse files as name only, considering "/" and "\\" like other separators.')
naming_opts.add_argument('-Y', '--date-year-first', action='store_true', dest='date_year_first', default=None,
help='If short date is found, consider the first digits as the year.')
naming_opts.add_argument('-D', '--date-day-first', action='store_true', dest='date_day_first', default=None,
help='If short date is found, consider the second digits as the day.')
naming_opts.add_argument('-L', '--allowed-languages', action='append', dest='allowed_languages', default=None,
help='Allowed language (can be used multiple times)')
naming_opts.add_argument('-C', '--allowed-countries', action='append', dest='allowed_countries', default=None,
help='Allowed country (can be used multiple times)')
naming_opts.add_argument('-E', '--episode-prefer-number', action='store_true', dest='episode_prefer_number',
default=None,
help='Guess "serie.213.avi" as the episode 213. Without this option, '
'it will be guessed as season 2, episode 13')
naming_opts.add_argument('-T', '--expected-title', action='append', dest='expected_title', default=None,
help='Expected title to parse (can be used multiple times)')
naming_opts.add_argument('-G', '--expected-group', action='append', dest='expected_group', default=None,
help='Expected release group (can be used multiple times)')
input_opts = opts.add_argument_group("Input")
input_opts.add_argument('-f', '--input-file', dest='input_file', default=None,
help='Read filenames from an input text file. File should use UTF-8 charset.')
output_opts = opts.add_argument_group("Output")
output_opts.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=None,
help='Display debug output')
output_opts.add_argument('-P', '--show-property', dest='show_property', default=None,
help='Display the value of a single property (title, series, video_codec, year, ...)')
output_opts.add_argument('-a', '--advanced', dest='advanced', action='store_true', default=None,
help='Display advanced information for filename guesses, as json output')
output_opts.add_argument('-s', '--single-value', dest='single_value', action='store_true', default=None,
help='Keep only first value found for each property')
output_opts.add_argument('-l', '--enforce-list', dest='enforce_list', action='store_true', default=None,
help='Wrap each found value in a list even when property has a single value')
output_opts.add_argument('-j', '--json', dest='json', action='store_true', default=None,
help='Display information for filename guesses as json output')
output_opts.add_argument('-y', '--yaml', dest='yaml', action='store_true', default=None,
help='Display information for filename guesses as yaml output')
conf_opts = opts.add_argument_group("Configuration")
conf_opts.add_argument('-c', '--config', dest='config', action='append', default=None,
                           help='Filepath to the configuration file. The configuration contains the same options as '
                                'the command line options, but option names have "-" characters replaced with "_". '
                                'If not defined, guessit tries to read a default configuration file at '
'~/.guessit/options.(json|yml|yaml) and ~/.config/guessit/options.(json|yml|yaml). '
'Set to "false" to disable default configuration file loading.')
conf_opts.add_argument('--no-embedded-config', dest='no_embedded_config', action='store_true',
default=None,
help='Disable default configuration.')
information_opts = opts.add_argument_group("Information")
information_opts.add_argument('-p', '--properties', dest='properties', action='store_true', default=None,
help='Display properties that can be guessed.')
information_opts.add_argument('-V', '--values', dest='values', action='store_true', default=None,
help='Display property values that can be guessed.')
information_opts.add_argument('--version', dest='version', action='store_true', default=None,
help='Display the guessit version.')
return opts
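
# Usage sketch; the filename and flags below are arbitrary illustrations.
parser = build_argument_parser()
options = parser.parse_args(['Show.Name.S02E13.720p.mkv', '--json'])
# options.filename == ['Show.Name.S02E13.720p.mkv'], options.json is True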
| 17,606
|
def does_user_have_product(product, username):
"""Return True/False if a user has the specified product."""
try:
instance = adobe_api.AdobeAPIObject(username)
except adobe_api.AdobeAPINoUserException:
return False
return instance.has_product(product)
| 17,607
|
def items_for_result(cl, result, form):
"""
Generate the actual list of data.
"""
def link_in_col(is_first, field_name, cl):
if cl.list_display_links is None:
return False
if is_first and not cl.list_display_links:
return True
return field_name in cl.list_display_links
first = True
pk = cl.lookup_opts.pk.attname
for field_index, field_name in enumerate(cl.list_display):
empty_value_display = cl.model_admin.get_empty_value_display()
row_classes = ['field-%s' % _coerce_field_name(field_name, field_index)]
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(attr, 'empty_value_display', empty_value_display)
if f is None or f.auto_created:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
boolean = getattr(attr, 'boolean', False)
result_repr = display_for_value(value, empty_value_display, boolean)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f.remote_field, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f, empty_value_display)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_classes.append('nowrap')
elif isinstance(f, (models.BooleanField)):
row_classes.append('checkmark-td')
if value:
row_classes.append('positive')
else:
row_classes.append('negative')
elif isinstance(f, models.FileField):
row_classes.append('file-td')
row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
# If list_display_links not defined, add the link tag to the first field
if link_in_col(first, field_name, cl):
table_tag = 'td'
first = False
# Display link to the result's change_view if the url exists, else
# display just the result's representation.
try:
url = cl.url_for_result(result)
except NoReverseMatch:
link_or_text = result_repr
else:
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
link_or_text = result_repr
# format_html(
# '<a href="{}"{}>{}</a>',
# url,
# format_html(
# ' data-popup-opener="{}"', value
# ) if cl.is_popup else '',
# result_repr)
yield format_html('<{}{}>{}</{}>', table_tag, row_class, link_or_text, table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(str(bf.errors) + str(bf))
yield format_html('<td{}>{}</td>', row_class, result_repr)
info = (result._meta.app_label, result._meta.model_name)
admin_url = reverse('admin:%s_%s_change' % info, args=(result.pk,))
            yield format_html('<td><a href="{}"></a></td>', admin_url)
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{}</td>', form[cl.model._meta.pk.name])
| 17,608
|
def system_to_ntp_time(timestamp):
"""Convert a system time to a NTP time.
Parameters:
timestamp -- timestamp in system time
Returns:
corresponding NTP time
"""
return timestamp + NTP_DELTA
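
# Usage sketch; NTP_DELTA (defined elsewhere in this module) is conventionally
# 2208988800 s, the offset between the NTP epoch (1900) and the Unix epoch (1970).
import time

ntp_now = system_to_ntp_time(time.time())
# subtracting NTP_DELTA again recovers the original system timestamp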
| 17,609
|
def bcewithlogits_loss(weight=None, size_average=None, reduce=None, reduction='mean', pos_weight=None):
"""Creates a criterion that combines a `Sigmoid` layer and the `BCELoss` in one single
class
Arguments:
        weight (Tensor, optional) : A manual rescaling weight given to the loss of each batch element.
size_average (bool, optional) : By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple
elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False.
(default: True)
reduce (bool, optional) : By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is
False, returns a loss per batch element instead and ignores size_average.
(default: True)
reduction (string, optional) : Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
(default: 'mean')
pos_weight (Tensor, optional) : a weight of positive examples. Must be a vector with length equal to the number of classes.
Returns:
BCEWithLogitsLoss
"""
return nn.BCEWithLogitsLoss(weight, size_average, reduce, reduction, pos_weight)
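
# Usage sketch (torch is assumed available, as the module already relies on torch.nn):
import torch

criterion = bcewithlogits_loss(reduction='mean')
logits = torch.randn(4, 3)               # raw scores, sigmoid is applied inside the loss
targets = torch.empty(4, 3).random_(2)   # binary targets in {0, 1}
loss = criterion(logits, targets)        # scalar tensor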
| 17,610
|
def transition_matrix(
adata: AnnData,
vkey: str = "velocity",
backward: bool = False,
weight_connectivities: Optional[float] = None,
sigma_corr: Optional[float] = None,
scale_by_variances: bool = False,
var_key: Optional[str] = "velocity_graph_uncertainties",
var_min: float = 0.1,
use_negative_cosines: bool = True,
self_transitions: bool = False,
perc: Optional[float] = None,
threshold: Optional[float] = None,
density_normalize: bool = True,
) -> KernelExpression:
"""
Compute a transition matrix based on a combination of RNA Velocity and transcriptomic similarity.
To learn more about the way in which the transition matrices are computed, see
:class:`cellrank.tl.kernels.VelocityKernel` for the velocity-based transition matrix and
:class:`cellrank.tl.kernels.ConnectivityKernel` for the transcriptomic-similarity-based transition matrix.
Params
------
adata: :class:`anndata.AnnData`
Annotated data object.
vkey
Key from :paramref:`adata` `.layers` to access the velocities.
backward
Direction of the process.
weight_connectivities
Weight given to transcriptomic similarities as opposed to velocities. Must be in `[0, 1]`.
use_negative_cosines
Whether to use correlations with cells that have an angle > 90 degree with :math:`v_i`.
sigma_corr
Scaling parameter for the softmax. Larger values will lead to a more concentrated distribution (more peaked).
Default is to use 1 / median_velocity_correlation.
scale_by_variances
Use velocity variances to scale the softmax.
var_key
        Key from `adata.uns` to access velocity variances.
var_min
Variances are clipped to this value at the lower end.
self_transitions
        Assigns elements to the diagonal of the velocity graph based on a confidence measure.
perc
Quantile of the distribution of exponentiated velocity correlations. This is used as a threshold to set
smaller values to zero.
threshold
Set a threshold to remove exponentiated velocity correlations smaller than :paramref:`threshold`.
density_normalize
Whether to use density correction when computing the transition probabilities.
Density correction is done as by [Haghverdi16]_.
Returns
-------
:class:`cellrank.tl.KernelExpression`
A kernel expression object.
"""
# initialise the velocity kernel and compute transition matrix
vk = VelocityKernel(
adata,
backward=backward,
vkey=vkey,
use_negative_cosines=use_negative_cosines,
var_key=var_key,
)
vk.compute_transition_matrix(
sigma_corr=sigma_corr,
scale_by_variances=scale_by_variances,
var_min=var_min,
self_transitions=self_transitions,
perc=perc,
threshold=threshold,
density_normalize=density_normalize,
)
if weight_connectivities is not None:
if 0 < weight_connectivities < 1:
logg.info(
f"Using a connectivity kernel with weight `{weight_connectivities}`"
)
ck = ConnectivityKernel(adata, backward=backward).compute_transition_matrix(
density_normalize=density_normalize
)
final = (1 - weight_connectivities) * vk + weight_connectivities * ck
elif weight_connectivities == 0:
final = vk
elif weight_connectivities == 1:
final = ConnectivityKernel(
adata, backward=backward
).compute_transition_matrix(density_normalize=density_normalize)
else:
raise ValueError(
f"The parameter `weight_connectivities` must be in range `[0, 1]`, found `{weight_connectivities}`."
)
else:
final = vk
final.write_to_adata()
return final
| 17,611
|
def bcftools_ann_predict_csv():
"""Open the bcftools.csv file with predictions annotation."""
with open(BCFTOOLS_ANN_PREDICT_CSV) as f:
yield f
| 17,612
|
def create_access_token(
subject: Union[str, Any], expires_delta: timedelta = None, is_superuser: bool = False
) -> str:
"""
    generate JWT token
    :param subject: subject to store in the token
    :param expires_delta: expiry time delta
    :param is_superuser: if True, issue a token without an expiry claim
    :return: encoded token
"""
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE)
to_encode = {"exp": expire, "sub": str(subject)}
# superuser token can always access
if is_superuser:
to_encode.pop("exp")
encoded_jwt = jwt.encode(
to_encode, settings.SECRET_KEY, algorithm=settings.TOKEN_ALGORITHMS
)
return encoded_jwt
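
# Usage sketch; `settings` and `jwt` are the module-level objects referenced above,
# so the decode call below assumes a PyJWT/jose-style API with the same secret.
from datetime import timedelta

token = create_access_token(subject=42, expires_delta=timedelta(minutes=5))
payload = jwt.decode(token, settings.SECRET_KEY, algorithms=[settings.TOKEN_ALGORITHMS])
# payload["sub"] == "42" and payload["exp"] is roughly five minutes from now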
| 17,613
|
def transparency(wavelength):
"""Returns the sky transparency in [0, 1] for wavelength in [m]"""
wavelength = wavelength / 10**-9
idx = numpy.argmin(numpy.abs(
data_transparency['wavelength'] * 1000 - wavelength))
return data_transparency['fraction'][idx]
| 17,614
|
def define_color_dict_for_file_extensions(df, create_map):
"""
Defines a color mapping for file extensions. If create_map is true, it
creates a new map; otherwise it reads an existing file.
:param df (DataFrame): A DataFrame object.
:param create_map (boolean): True to create a new map; False to read one.
"""
global COLORDICT_EXTENSIONS
path = parser.get('visualization', 'color_map_extensions')
if create_map:
all_extensions = \
df['file'].str.split('.').str[1].unique().tolist()
all_extensions.append('others')
colors = []
for i in range(len(all_extensions)):
colors.append('#{:06X}'.format(randint(0, 0xFFFFFF)))
for extension, color in zip(all_extensions, colors):
COLORDICT_EXTENSIONS[extension] = color
_write_color_map(COLORDICT_EXTENSIONS, path)
else:
COLORDICT_EXTENSIONS = _read_color_map(path)
| 17,615
|
def invert_indices(indices: Iterable[int], size: int) -> Iterator[int]:
"""
Returns the indices that are not in the given indices, up to the given size.
:param indices: The indices to invert.
:param size: The exclusive maximum index.
    :return: An iterator over the inverted indices.
"""
index_set = set(indices)
for i in range(size):
if i not in index_set:
yield i
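
# Example: the complement of {1, 3} within range(5).
assert list(invert_indices([1, 3], 5)) == [0, 2, 4]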
| 17,616
|
def split_fortran_files(source_dir, subroutines=None):
"""Split each file in `source_dir` into separate files per subroutine.
Parameters
----------
source_dir : str
Full path to directory in which sources to be split are located.
subroutines : list of str, optional
Subroutines to split. (Default: all)
Returns
-------
fnames : list of str
List of file names (not including any path) that were created
in `source_dir`.
Notes
-----
This function is useful for code that can't be compiled with g77 because of
    type casting errors, but which does work with gfortran.
Created files are named: ``original_name + '_subr_i' + '.f'``, with ``i``
starting at zero and ending at ``num_subroutines_in_file - 1``.
"""
if subroutines is not None:
subroutines = [x.lower() for x in subroutines]
def split_file(fname):
with open(fname, 'rb') as f:
lines = f.readlines()
subs = []
need_split_next = True
# find lines with SUBROUTINE statements
for ix, line in enumerate(lines):
            m = re.match(b'^\\s+subroutine\\s+([a-z0-9_]+)\\s*\\(', line, re.I)
if m and line[0] not in b'Cc!*':
if subroutines is not None:
subr_name = m.group(1).decode('ascii').lower()
subr_wanted = (subr_name in subroutines)
else:
subr_wanted = True
if subr_wanted or need_split_next:
need_split_next = subr_wanted
subs.append(ix)
# check if no split needed
if len(subs) <= 1:
return [fname]
# write out one file per subroutine
new_fnames = []
num_files = len(subs)
for nfile in range(num_files):
new_fname = fname[:-2] + '_subr_' + str(nfile) + '.f'
new_fnames.append(new_fname)
with open(new_fname, 'wb') as fn:
if nfile + 1 == num_files:
fn.writelines(lines[subs[nfile]:])
else:
fn.writelines(lines[subs[nfile]:subs[nfile+1]])
return new_fnames
exclude_pattern = re.compile('_subr_[0-9]')
source_fnames = [f for f in glob.glob(os.path.join(source_dir, '*.f'))
if not exclude_pattern.search(os.path.basename(f))]
fnames = []
for source_fname in source_fnames:
created_files = split_file(source_fname)
if created_files is not None:
for cfile in created_files:
fnames.append(os.path.basename(cfile))
return fnames
| 17,617
|
def readcsv(self):
"""
Load the whole csv file and make bar charts for language prevalence and program type.
        Code for making the bar charts came from here:
https://stackoverflow.com/questions/19198920/using-counter-in-python-to-build-histogram
"""
# Load the whole csv file.
df = pd.read_csv('exchange programs.csv', sep=',')
print(" " + 'Waseda University -- exchange programs\n')
a = df.values
# Take languages as labels and then make a bar chart.
labels, values = zip(*Counter(a[:,1]).items())
indexes = np.arange(len(labels))
width = 1
plt.bar(indexes, values, width = 0.7)
plt.xticks(indexes + width * 0.1, labels)
plt.show()
print(" " + "The number of exchange programs by language")
# Take program types as labels and then make a bar chart.
labels, values = zip(*Counter(a[:,2]).items())
indexes = np.arange(len(labels))
width = 1
plt.bar(indexes, values, width = 0.5)
plt.xticks(indexes + width * 0.04, labels)
plt.show()
print(" "+ "The number of exchange programs by program type")
| 17,618
|
def selection_error_message():
"""Displays error message if permutations or combinatons checkbuttons state is not active"""
if permutations_checkbox_value.get() == 0 and combinations_checkbox_value.get() == 0 and repeat_checkbox_value.get() == 0 or 1:
messagebox.showerror('User Error', 'Please select Combinations or Permutations!')
| 17,619
|
def parse_all_moves(moves_string):
""" Parse a move string """
moves = []
if not moves_string:
raise ValueError("No Moves Given")
moves_strings = moves_string.split(" ")
for move_string in moves_strings:
move = CubeMove.parse(move_string)
moves.append(move)
return moves
| 17,620
|
def merge_samples(samples, nchannels, weight_table=None):
"""
    Merges multiple samples into one
:param samples: the samples, must have the same sample rate and channel count
:param nchannels: the number of channels
:param weight_table: adds a specific weight to each sample when merging the sound
:return: the merged sample
"""
zipped = itertools.zip_longest(*samples, fillvalue=(0 for _ in range(nchannels)))
mapped = map(lambda x:
(__weighted_avg(itertools.islice(itertools.chain(*x), c, len(samples), nchannels), weight_table,
len(samples)) for c in range(nchannels)),
zipped)
return mapped
| 17,621
|
def update_presence():
"""Update presence in Discord
:returns: TODO
"""
activity = base_activity
filename = get_filename()
extension = get_extension()
if filename:
activity['details'] = f"Editing {filename}"
activity['assets']['large_text'] = f"Editing a .{get_extension()} file"
if extension:
activity['assets']['large_image'] = extension
try:
rpc_obj.set_activity(activity)
except BrokenPipeError:
# Connection to Discord is lost
pass
except NameError:
# Discord is not running
pass
| 17,622
|
def get_spam_info(msg: Message, max_score=None) -> (bool, str):
"""parse SpamAssassin header to detect whether a message is classified as spam.
Return (is spam, spam status detail)
The header format is
```X-Spam-Status: No, score=-0.1 required=5.0 tests=DKIM_SIGNED,DKIM_VALID,
DKIM_VALID_AU,RCVD_IN_DNSWL_BLOCKED,RCVD_IN_MSPIKE_H2,SPF_PASS,
URIBL_BLOCKED autolearn=unavailable autolearn_force=no version=3.4.2```
"""
spamassassin_status = msg["X-Spam-Status"]
if not spamassassin_status:
return False, ""
return get_spam_from_header(spamassassin_status, max_score=max_score)
| 17,623
|
def _check(isamAppliance, id=None):
"""
    Check whether a user with the given id exists.
    :param isamAppliance:
    :param id: the user id to look for
    :return: True if the id exists, otherwise False
"""
ret_obj = get_all(isamAppliance)
    if id is not None:
for users in ret_obj['data']:
if users['id'] == id:
return True
return False
| 17,624
|
def plot_week_timeseries(time, value, normalise=True,
label=None, h=0.85, value2=None,
label2=None, daynames=None,
xfmt="%1.0f", ax=None):
"""
Shows a timeseries dispatched by days as bars.
@param time dates
@param value values to display as bars.
@param normalise normalise data before showing it
@param label label of the series
    @param value2 second series to show as a line
@param label2 label of the second series
@param daynames names to use for week day names (default is English)
@param xfmt format number of the X axis
@param ax existing axis
@return axis
.. plot::
import datetime
import matplotlib.pyplot as plt
from mlinsights.timeseries.datasets import artificial_data
from mlinsights.timeseries.agg import aggregate_timeseries
from mlinsights.timeseries.plotting import plot_week_timeseries
dt1 = datetime.datetime(2019, 8, 1)
dt2 = datetime.datetime(2019, 9, 1)
data = artificial_data(dt1, dt2, minutes=15)
print(data.head())
agg = aggregate_timeseries(data, per='week')
plot_week_timeseries(
agg['weektime'], agg['y'], label="y",
value2=agg['y']/2, label2="y/2", normalise=False)
plt.show()
"""
if time.shape[0] != value.shape[0]:
raise AssertionError("Dimension mismatch") # pragma: no cover
def coor(ti):
days = ti.days
x = days
y = ti.seconds
return x, y
max_value = value.max()
if value2 is not None:
max_value = max(max_value, value2.max())
value2 = value2 / max_value
value = value / max_value
input_maxy = 1.
if ax is None:
ax = plt.gca()
# bars
delta = None
maxx, maxy = None, None
first = True
for i in range(time.shape[0]):
ti = time[i]
if i < time.shape[0] - 1:
ti1 = time[i + 1]
delta = (ti1 - ti) if delta is None else min(delta, ti1 - ti)
if delta == 0:
raise RuntimeError( # pragma: no cover
"The timeseries contains duplicated time values.")
else:
ti1 = ti + delta
x1, y1 = coor(ti)
x2, y2 = coor(ti1)
if y2 < y1:
x2, y2 = coor(ti + delta)
y2 = y1 + (y2 - y1) * h
if first and label:
ax.plot([x1, x1 + value[i] * 0.8], [y1, y1],
'b', alpha=0.5, label=label)
first = False
if maxx is None:
maxx = (x1, x1 + input_maxy)
maxy = (y1, y2)
else:
maxx = (min(x1, maxx[0]), # pylint: disable=E1136
max(x1 + input_maxy, maxx[1])) # pylint: disable=E1136
maxy = (min(y1, maxy[0]), # pylint: disable=E1136
max(y2, maxy[1])) # pylint: disable=E1136
rect = patches.Rectangle((x1, y1), value[i] * h, y2 - y1,
linewidth=1, edgecolor=None,
facecolor='b', fill=True,
alpha=0.5)
ax.add_patch(rect)
# days border
xticks = []
if daynames is None:
daynames = list(calendar.day_name)
maxx = [(maxx[0] // 7) * 7, maxx[1]]
new_ymin = maxy[0] - (maxy[1] * 0.025 + maxy[0] * 0.975 - maxy[0])
for i in range(int(maxx[0]), int(maxx[1] + 0.1)):
x1i = maxx[0] + input_maxy * i
x2i = x1i + input_maxy
xticks.append(x1i)
ax.plot([x1i, x1i + input_maxy], [new_ymin, new_ymin], 'k', alpha=0.5)
ax.plot([x1i, x1i + input_maxy], [maxy[1], maxy[1]], 'k', alpha=0.5)
ax.plot([x1i, x1i], [maxy[0], maxy[1]], 'k', alpha=0.5)
ax.plot([x2i, x2i], [maxy[0], maxy[1]], 'k', alpha=0.5)
ax.text(x1i, new_ymin, daynames[i])
# invert y axis
ax.invert_yaxis()
# change y labels
nby = len(ax.get_yticklabels())
ys = ax.get_yticks()
ylabels = []
for i in range(nby):
dh = ys[i]
dt = datetime.timedelta(seconds=dh)
tx = "%dh%02d" % (dt.seconds // 3600,
60 * (dt.seconds / 3600 - dt.seconds // 3600))
ylabels.append(tx)
ax.set_yticklabels(ylabels)
# change x labels
xs = ax.get_xticks()
xticks = []
xlabels = []
for i in range(0, len(xs) - 1):
if xs[i] < 0:
continue
dx = xs[i] - int(xs[i] / input_maxy) * input_maxy
xlabels.append(dx if normalise else (dx * max_value))
xticks.append(xs[i])
dx = (xs[i] + xs[i + 1]) / 2
dx = dx - int(dx / input_maxy) * input_maxy
xlabels.append(dx if normalise else (dx * max_value))
xticks.append((xs[i] + xs[i + 1]) / 2)
if len(xticks) < len(xlabels):
xticks.append(xs[-1])
ax.set_xticks(xticks)
ax.set_xticklabels(
[xfmt % x for x in xlabels] if xfmt else xlabels)
ax.tick_params(axis='x', rotation=30)
# value2
if value2 is not None:
value = value2.copy()
if normalise:
value = value / max_value
first = True
xs = []
ys = []
for i in range(time.shape[0]):
ti = time[i]
if i < time.shape[0] - 1:
ti1 = time[i + 1]
else:
ti1 = ti + delta
x1, y1 = coor(ti)
x2, y2 = coor(ti1)
if y2 < y1:
x2, y2 = coor(ti + delta)
y2 = y1 + (y2 - y1) * h
x2 = x1 + value[i] * h
if len(ys) > 0 and y2 < ys[-1]:
if first and label2 is not None:
ax.plot(xs, ys, color='orange', linewidth=2, label=label2)
first = False
else:
ax.plot(xs, ys, color='orange', linewidth=2)
xs, ys = [], []
xs.append(x2)
ys.append((y1 + y2) / 2)
if len(xs) > 0:
ax.plot(xs, ys, color='orange', linewidth=2)
return ax
| 17,625
|
def plot_boxes_on_image(image, boxes, color=(0,255,255), thickness=2):
"""
Plot the boxes onto the image.
For the boxes a center, size representation is expected: [cx, cy, w, h].
:param image: The image onto which to draw.
    :param boxes: The boxes which shall be plotted.
    :param color: Colour of the rectangles as a 3-tuple (default (0, 255, 255)).
    :param thickness: Line thickness of the rectangles (default 2).
    :return: An image with the boxes overlaid on the image.
"""
for box in boxes:
start_point = tuple([int(x) for x in box[:2] - box[2:] // 2])
end_point = tuple([int(x) for x in box[:2] + box[2:] // 2])
image = cv2.rectangle(image, start_point, end_point, color, thickness)
return image
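
# Usage sketch (numpy and cv2 are assumed, as the function already uses cv2):
import numpy as np

canvas = np.zeros((100, 100, 3), dtype=np.uint8)
boxes = np.array([[50, 50, 20, 10]])   # one box: center (50, 50), width 20, height 10
annotated = plot_boxes_on_image(canvas, boxes)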
| 17,626
|
def get_text(markup: str) -> str:
"""Remove html tags, URLs and spaces using regexp"""
text = re.sub(r"<.*?>", "", markup)
url_pattern = r"(http|ftp)s?://(?:[a-zA-Z]|[0-9]|[$-_@.&#+]|[!*\(\),]|\
(?:%[0-9a-fA-F][0-9a-fA-F]))+"
text = re.sub(url_pattern, "", text)
text = re.sub(r"\s+", " ", text)
return text.strip()
def preprocess_token(token: Token) -> str:
"""Remove grave accents and return lemmatized token lower case"""
result = remplace_accents(token.lemma_.strip().lower())
return result
def is_token_allowed(token: Token) -> bool:
"""No Stop words, No Punctuations or len token >= 3"""
# Avoid token: inmiscuyéndose lemma_ "inmiscuir el"
if (
not token
or token.is_space
or token.is_stop
or token.is_punct
or len(token) < 3
or " " in token.lemma_.strip()
):
return False
return True
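
# Usage sketch for get_text; preprocess_token/is_token_allowed additionally need
# spaCy `Token` objects and a `remplace_accents` helper defined elsewhere.
assert get_text("<p>Ver https://example.com ahora</p>") == "Ver ahora"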
| 17,627
|
def intercept_channel(channel, *interceptors):
"""Intercepts a channel through a set of interceptors.
This is an EXPERIMENTAL API.
Args:
channel: A Channel.
interceptors: Zero or more objects of type
UnaryUnaryClientInterceptor,
UnaryStreamClientInterceptor,
StreamUnaryClientInterceptor, or
StreamStreamClientInterceptor.
Interceptors are given control in the order they are listed.
Returns:
A Channel that intercepts each invocation via the provided interceptors.
Raises:
TypeError: If interceptor does not derive from any of
UnaryUnaryClientInterceptor,
UnaryStreamClientInterceptor,
StreamUnaryClientInterceptor, or
StreamStreamClientInterceptor.
"""
from grpc import _interceptor # pylint: disable=cyclic-import
return _interceptor.intercept_channel(channel, *interceptors)
| 17,628
|
def create_static_ip(compute, project, region, name):
"""Create global static IP
:param compute: GCE compute resource object using googleapiclient.discovery
:param project: string, GCE Project Id
:param region: string, GCE region
:param name: string, Static IP name
:return: Operation information
:rtype: dict
"""
return compute.addresses().insert(project=project, region=region, body={
'name': name,
}).execute()
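
# Usage sketch (assumes google-api-python-client and application default credentials;
# the project, region and name values are hypothetical):
from googleapiclient import discovery

compute = discovery.build('compute', 'v1')
operation = create_static_ip(compute, project='my-project', region='us-central1', name='my-static-ip')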
| 17,629
|
def stream_trec07p(dataset_path):
"""2007 TREC’s Spam Track dataset.
The data contains 75,419 chronologically ordered items, i.e. 3 months of emails delivered
to a particular server in 2007. Spam messages represent 66.6% of the dataset.
    The goal is to predict whether an email is spam or not.
Parsed features are: sender, recipients, date, subject, body.
Parameters:
dataset_path (str): The directory where the data is stored.
Yields:
tuple: 5 features (`sender`, `recipients`, `date`, `subject`, `body`) and `y` the target.
References:
1. `TREC 2007 Spam Track Overview <https://trec.nist.gov/pubs/trec16/papers/SPAM.OVERVIEW16.pdf>`_
"""
warnings.filterwarnings('ignore', category=UserWarning, module='bs4')
with open(f'{dataset_path}/full/index') as full_index:
for row in full_index:
label, filepath = row.split()
ix = filepath.split('.')[-1]
with open(f'{dataset_path}/data/inmail.{ix}', 'rb') as email_file:
message = email.message_from_binary_file(email_file)
yield (
message['from'],
message['to'],
message['date'],
message['subject'],
parse_body(message),
label
)
| 17,630
|
def ctc_loss(encoder_outputs, labels, frame_lens, label_lens, reduction, device):
"""
All sorts of stupid restrictions from documentation:
In order to use CuDNN, the following must be satisfied:
1. targets must be in concatenated format,
2. all input_lengths must be T.
3. blank=0
    4. target_lengths <= 256,
5. the integer arguments must be of dtype torch.int32.
"""
assert (frame_lens[1:] - frame_lens[:-1] >= 0).all() # assert in increasing len
# req (5)
labels, frame_lens, label_lens = transform_data(lambda data: torch.tensor(data, dtype=torch.int32).to(device),
labels, frame_lens, label_lens)
# req (4)
skipped_indices, working_indices = filter_data_on_len(label_lens, max_len=256)
if len(skipped_indices) > 0:
print('some labels too long, unable to compute CTC...')
if len(working_indices) == 0:
print('skipping entire batch')
return None
print('skipping indices in batch: ' + str(skipped_indices))
working_indices = torch.LongTensor(working_indices).to(device)
(encoder_outputs, labels, frame_lens,
label_lens) = transform_data(lambda data: data.index_select(0, working_indices),
encoder_outputs, labels, frame_lens, label_lens)
# frame_lens 1, 1, 2, 3, 3, 3, 4
# frame_len[1:] 1, 2, 3, 3, 3, 4
# frame_lebs[:-1] 1, 1, 2, 3, 3, 3
# diff 0, 1, 1, 0, 0, 1
# nonzero_idx 1, 2, 5
# change_points 2, 3, 6
change_points = (frame_lens[1:] - frame_lens[:-1]).nonzero().squeeze(dim=-1) + 1
change_points = torch.cat([change_points, torch.LongTensor([len(frame_lens)]).to(device)]) # add last portion
# req 2
prev_change_point = 0
total_loss = 0
count = 0
global_encoder_outputs, global_labels, global_frame_lens, global_label_lens = encoder_outputs, labels, frame_lens, label_lens
for change_point in change_points:
# we call this a minibatch
minibatch_size = len(frame_lens)
(encoder_outputs, labels, frame_lens,
label_lens) = transform_data(lambda data: data[prev_change_point:change_point],
global_encoder_outputs, global_labels, global_frame_lens, global_label_lens)
# req 3; moves up so that we leave idx=0 to blank
labels = labels + 1
# req 1
concat_labels = torch.cat([label[:label_len] for label, label_len in zip(labels, label_lens)])
loss = F.ctc_loss(encoder_outputs.transpose(0, 1).cpu(), concat_labels.cpu(), frame_lens.cpu(), label_lens.cpu(), blank=0, reduction=reduction)
if torch.isinf(loss):
print('inf CTC loss occurred...')
skipped_indices, working_indices = ctc_fallback(encoder_outputs, labels, frame_lens, label_lens, 0)
if len(working_indices) == 0:
print('skipping the entire minibatch')
continue
print('skipping indices in minibatch: ' + str(skipped_indices))
working_indices = torch.LongTensor(working_indices).to(device)
(encoder_outputs, labels, frame_lens,
label_lens) = transform_data(lambda data: data.index_select(0, working_indices),
encoder_outputs, labels, frame_lens, label_lens)
concat_labels = torch.cat([label[:label_len] for label, label_len in zip(labels, label_lens)])
loss = F.ctc_loss(encoder_outputs.transpose(0, 1).cpu(), concat_labels.cpu(), frame_lens.cpu(), label_lens.cpu(), blank=0, reduction=reduction)
minibatch_size = len(working_indices)
if reduction == 'mean':
loss *= minibatch_size
count += minibatch_size
total_loss += loss
prev_change_point = change_point
if total_loss == 0:
# all data points failed
return None
return total_loss / count if reduction == 'mean' else total_loss
| 17,631
|
def expand_parameters(host, params):
"""Expand parameters in hostname.
Examples:
* "target{N}" => "target1"
* "{host}.{domain} => "host01.example.com"
"""
pattern = r"\{(.*?)\}"
def repl(match):
param_name = match.group(1)
return params[param_name]
return re.sub(pattern, repl, host)
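
# The docstring examples, made concrete (parameter values must be strings):
assert expand_parameters("target{N}", {"N": "1"}) == "target1"
assert expand_parameters("{host}.{domain}", {"host": "host01", "domain": "example.com"}) == "host01.example.com"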
| 17,632
|
def p_term(p):
"""term : factor snp_check_precedence_and_create_quadruple_for_op term1"""
| 17,633
|
def search_s1(saturation, size, startTime):
"""
First stage for sequential adsorption.
Returns list of circles, current saturation, list of times and list
of saturations.
Keyword arguments:
size -- radius of single circle
saturation -- max saturation
startTime -- start time of algorithm
"""
D = size*2
rC = size*5
com_sat = 0
N = 0
ntimeList = []
satList = []
circles = [plt.Circle((np.random.rand(),np.random.rand()), size)]
while(com_sat < saturation and N <= 1000):
N += 1
newX = np.random.rand()
newY = np.random.rand()
neighborList = neighbors(newX, newY, circles, rC)
if len(neighborList) != 0:
for e in neighborList:
circleX = circles[e].get_center()[0]
circleY = circles[e].get_center()[1]
if (math.sqrt((newX - circleX)**2 + (newY - circleY)**2) < D or
math.sqrt((newX - circleX-V)**2 + (newY - circleY)**2) < D or
math.sqrt((newX - circleX+V)**2 + (newY - circleY)**2) < D or
math.sqrt((newX - circleX)**2 + (newY - circleY-V)**2) < D or
math.sqrt((newX - circleX)**2 + (newY - circleY+V)**2) < D or
math.sqrt((newX - circleX+V)**2 + (newY - circleY+V)**2) < D or
math.sqrt((newX - circleX-V)**2 + (newY - circleY+V)**2) < D or
math.sqrt((newX - circleX+V)**2 + (newY - circleY-V)**2) < D or
math.sqrt((newX - circleX-V)**2 + (newY - circleY-V)**2) < D):
collision = 1
break
else:
collision = 0
if (collision == 0):
circles.append(plt.Circle((newX, newY), size))
com_sat = math.pi * size**2 * len(circles) * 100
ntimeList.append(time.time() - startTime)
satList.append(com_sat)
N = 0
else:
circles.append(plt.Circle((newX, newY), size))
return circles, com_sat, satList, ntimeList
| 17,634
|
def draw_labeled_bounding_boxes(img, labeled_frame, num_objects):
"""
Starting from labeled regions, draw enclosing rectangles in the original color frame.
"""
# Iterate through all detected cars
for car_number in range(1, num_objects + 1):
# Find pixels with each car_number label value
rows, cols = np.where(labeled_frame == car_number)
# Find minimum enclosing rectangle
x_min, y_min = np.min(cols), np.min(rows)
x_max, y_max = np.max(cols), np.max(rows)
cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=(255, 0, 0), thickness=6)
return img
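
# Usage sketch: a labeled frame is assumed to come from a connected-component
# labeling step, e.g. scipy.ndimage.label (an assumption, not part of this module).
import numpy as np
from scipy.ndimage import label

heatmap = np.zeros((120, 160), dtype=np.uint8)
heatmap[40:80, 60:100] = 1                      # one detected region
labeled_frame, num_objects = label(heatmap)
frame = np.zeros((120, 160, 3), dtype=np.uint8)
frame = draw_labeled_bounding_boxes(frame, labeled_frame, num_objects)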
| 17,635
|
def add_scalebar(
x_units=None,
y_units=None,
anchor=(0.98, 0.02),
x_size=None,
y_size=None,
y_label_space=0.02,
x_label_space=-0.02,
bar_space=0.06,
x_on_left=True,
linewidth=3,
remove_frame=True,
omit_x=False,
omit_y=False,
round=True,
usetex=True,
ax=None,
):
"""
Automagically add a set of x and y scalebars to a matplotlib plot
Inputs:
x_units: str or None
y_units: str or None
anchor: tuple of floats
-- bottom right of the bbox (in axis coordinates)
x_size: float or None
-- Manually set size of x scalebar (or None for automatic sizing)
y_size: float or None
-- Manually set size of y scalebar (or None for automatic sizing)
    x_label_space, y_label_space: floats
        -- amount to offset labels from their respective scalebars (in axis units)
    bar_space: float
        -- amount to separate bars from each other (in axis units)
    linewidth: numeric
        -- thickness of the scalebars
    remove_frame: bool (default True)
        -- remove the bounding box, axis ticks, etc.
omit_x/omit_y: bool (default False)
-- skip drawing the x/y scalebar
round: bool (default True)
-- round units to the nearest integer
ax: matplotlib.axes object
-- manually specify the axes object to which the scalebar should be added
"""
# Basic input processing.
if ax is None:
ax = plt.gca()
if x_units is None:
x_units = ''
if y_units is None:
y_units = ''
# Do y scalebar.
if not omit_y:
if y_size is None:
y_span = ax.get_yticks()[:2]
y_length = y_span[1] - y_span[0]
y_span_ax = ax.transLimits.transform(np.array([[0, 0], y_span]).T)[
:, 1
]
else:
y_length = y_size
y_span_ax = ax.transLimits.transform(
np.array([[0, 0], [0, y_size]])
)[:, 1]
y_length_ax = y_span_ax[1] - y_span_ax[0]
if round:
y_length = int(np.round(y_length))
# y-scalebar label
if y_label_space <= 0:
horizontalalignment = 'left'
else:
horizontalalignment = 'right'
if usetex:
y_label_text = '${}${}'.format(y_length, y_units)
else:
y_label_text = '{}{}'.format(y_length, y_units)
ax.text(
anchor[0] - y_label_space,
anchor[1] + y_length_ax / 2 + bar_space,
y_label_text,
verticalalignment='center',
horizontalalignment=horizontalalignment,
size='small',
transform=ax.transAxes,
)
# y scalebar
ax.plot(
[anchor[0], anchor[0]],
[anchor[1] + bar_space, anchor[1] + y_length_ax + bar_space],
'k-',
linewidth=linewidth,
clip_on=False,
transform=ax.transAxes,
)
# Do x scalebar.
if not omit_x:
if x_size is None:
x_span = ax.get_xticks()[:2]
x_length = x_span[1] - x_span[0]
x_span_ax = ax.transLimits.transform(np.array([x_span, [0, 0]]).T)[
:, 0
]
else:
x_length = x_size
x_span_ax = ax.transLimits.transform(
np.array([[0, 0], [x_size, 0]])
)[:, 0]
x_length_ax = x_span_ax[1] - x_span_ax[0]
if round:
x_length = int(np.round(x_length))
# x-scalebar label
if x_label_space <= 0:
verticalalignment = 'top'
else:
verticalalignment = 'bottom'
if x_on_left:
Xx_text_coord = anchor[0] - x_length_ax / 2 - bar_space
Xx_bar_coords = [
anchor[0] - x_length_ax - bar_space,
anchor[0] - bar_space,
]
else:
Xx_text_coord = anchor[0] + x_length_ax / 2 + bar_space
Xx_bar_coords = [
anchor[0] + x_length_ax + bar_space,
anchor[0] + bar_space,
]
if usetex:
x_label_text = '${}${}'.format(x_length, x_units)
else:
x_label_text = '{}{}'.format(x_length, x_units)
ax.text(
Xx_text_coord,
anchor[1] + x_label_space,
x_label_text,
verticalalignment=verticalalignment,
horizontalalignment='center',
size='small',
transform=ax.transAxes,
)
# x scalebar
ax.plot(
Xx_bar_coords,
[anchor[1], anchor[1]],
'k-',
linewidth=linewidth,
clip_on=False,
transform=ax.transAxes,
)
if remove_frame:
ax.axis('off')
| 17,636
|
def test_config_slack_handler_default_error(monkeypatch):
"""Tests setting the slack handler channel to 'default' raised error.
"""
monkeypatch.setenv('LOGGING_CONFIG_FILE', 'logging_config_slack_default.json')
config_dir = os.path.dirname(__file__)
with pytest.raises(ValueError):
_ = SlackLogger('__main__', config_dir=config_dir)
| 17,637
|
def retweet_fun():
"""
Retweets if there are any particular hastags.
"""
retweet_fav_post(api)
| 17,638
|
def get_checks(cur, table):
"""
Gets CHECK constraints for a table, adding them to the table.
:param cur: A cursor to execute commands on.
:param table: The table to get CHECKs for.
"""
cur.execute(f"""select oid, conname, consrc, pg_get_constraintdef(oid)
from pg_constraint
where conrelid = {table.oid}
and contype = 'c'
order by conname;""")
for row in cur:
table.add_check(Check(row[0], table, row[1], row[2], row[3]))
| 17,639
|
def sigint_handler(*args):
"""Handler for the SIGINT signal."""
pg.exit()
exit(0)
| 17,640
|
def _skip_comments_and_whitespace(lines, idx):
###############################################################################
"""
Starting at idx, return next valid idx of lines that contains real data
"""
if (idx == len(lines)):
return idx
comment_re = re.compile(r'^[#!]')
lines_slice = lines[idx:]
for line in lines_slice:
line = line.strip()
if (comment_re.match(line) is not None or line == ""):
idx += 1
else:
return idx
return idx
| 17,641
|
def load_scalar_attributes(mapper, state, attribute_names):
"""initiate a column-based attribute refresh operation."""
# assert mapper is _state_mapper(state)
session = state.session
if not session:
raise orm_exc.DetachedInstanceError(
"Instance %s is not bound to a Session; "
"attribute refresh operation cannot proceed" %
(state_str(state)))
has_key = bool(state.key)
result = False
# in the case of inheritance, particularly concrete and abstract
# concrete inheritance, the class manager might have some keys
# of attributes on the superclass that we didn't actually map.
# These could be mapped as "concrete, dont load" or could be completely
    # excluded from the mapping and we know nothing about them. Filter them
# here to prevent them from coming through.
if attribute_names:
attribute_names = attribute_names.intersection(mapper.attrs.keys())
if mapper.inherits and not mapper.concrete:
# because we are using Core to produce a select() that we
# pass to the Query, we aren't calling setup() for mapped
# attributes; in 1.0 this means deferred attrs won't get loaded
# by default
statement = mapper._optimized_get_statement(state, attribute_names)
if statement is not None:
result = load_on_ident(
session.query(mapper).
options(
strategy_options.Load(mapper).undefer("*")
).from_statement(statement),
None,
only_load_props=attribute_names,
refresh_state=state
)
if result is False:
if has_key:
identity_key = state.key
else:
# this codepath is rare - only valid when inside a flush, and the
# object is becoming persistent but hasn't yet been assigned
# an identity_key.
# check here to ensure we have the attrs we need.
pk_attrs = [mapper._columntoproperty[col].key
for col in mapper.primary_key]
if state.expired_attributes.intersection(pk_attrs):
raise sa_exc.InvalidRequestError(
"Instance %s cannot be refreshed - it's not "
" persistent and does not "
"contain a full primary key." % state_str(state))
identity_key = mapper._identity_key_from_state(state)
if (_none_set.issubset(identity_key) and
not mapper.allow_partial_pks) or \
_none_set.issuperset(identity_key):
util.warn_limited(
"Instance %s to be refreshed doesn't "
"contain a full primary key - can't be refreshed "
"(and shouldn't be expired, either).",
state_str(state))
return
result = load_on_ident(
session.query(mapper),
identity_key,
refresh_state=state,
only_load_props=attribute_names)
# if instance is pending, a refresh operation
# may not complete (even if PK attributes are assigned)
if has_key and result is None:
raise orm_exc.ObjectDeletedError(state)
| 17,642
|
def named_payload(name, parser_fn):
"""Wraps a parser result in a dictionary under given name."""
return lambda obj: {name: parser_fn(obj)}
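
# Example: wrap an int parser so its result is keyed under a name.
parse_count = named_payload("count", int)
assert parse_count("42") == {"count": 42}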
| 17,643
|
def action_train_protocol(sentence, Train=True):
"""function to train the action prediction model"""
if Train:
training_data = []
with open('datasets/action_dataset.json') as data_file:
data = json.load(data_file)
for line in data:
#fetching training data
training_data.append(line)
action_train(training_data, 20000, 'bulb')
print(action_predict(sentence, 'bulb'))
| 17,644
|
def commonModeCSPAD2x2(evt, type, key, mask=None):
"""Subtraction of common mode using median value of masked pixels (left and right half of detector are treated separately).
Adds a record ``evt["analysis"]["cm_corrected - " + key]``.
Args:
:evt: The event variable
:type(str): The event type (e.g. photonPixelDetectors)
:key(str): The event key (e.g. CCD)
Kwargs:
:mask: Binary mask
:Authors:
Max F. Hantke (hantke@xray.bmc.uu.se)
Benedikt J. Daurer (benedikt@xray.bmc.uu.se)
"""
data = evt[type][key].data
dataCorrected = np.copy(data)
    # Use integer division so the half-width stays an int under Python 3.
    half = data.shape[1] // 2
    lData = data[:, :half]
    rData = data[:, half:]
    if mask is None:
        lMask = np.ones(shape=lData.shape, dtype="bool")
        rMask = np.ones(shape=rData.shape, dtype="bool")
    else:
        lMask = mask[:, :half] == False
        rMask = mask[:, half:] == False
    if lMask.sum() > 0:
        dataCorrected[:, :half] -= np.median(lData[lMask])
    if rMask.sum() > 0:
        dataCorrected[:, half:] -= np.median(rData[rMask])
add_record(evt["analysis"], "analysis", "cm_corrected - " + key, dataCorrected)
| 17,645
|
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
"""Gradient function for SoftmaxCrossEntropyWithLogits."""
# grad_loss is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# grad_grad is the backprop for softmax gradient.
#
# Second derivative is just softmax derivative w.r.t. logits.
softmax_grad = op.outputs[1]
grad = _BroadcastMul(grad_loss, softmax_grad)
def IsZero(g):
# Some introspection to check if the gradient is feeding zeros
if context.executing_eagerly():
# TODO(apassos) add an efficient way to detect eager zeros here.
return False
if g.op.type in ("ZerosLike", "Zeros"):
return True
const_fill_value = tensor_util.constant_value(g)
return const_fill_value is not None and (const_fill_value == 0).all()
logits = op.inputs[0]
if grad_grad is not None and not IsZero(grad_grad):
softmax = nn_ops.softmax(logits)
grad += ((grad_grad - array_ops.squeeze(
math_ops.matmul(
array_ops.expand_dims(grad_grad, 1),
array_ops.expand_dims(softmax, 2)),
axis=1)) * softmax)
return grad, _BroadcastMul(grad_loss, -nn_ops.log_softmax(logits))
| 17,646
|
def assign_coord_ip_addresses(topo: Topology) -> None:
"""Assigns IP addresses for communication between coordinator, other control services and ASes.
:param topo: Topology with a coordinator. No IP addresses must be assigned yet in the
coordinator's network.
"""
bridge = topo.coordinator.bridge
host_gen = bridge.valid_ip_iter()
topo.coordinator.reserve_ip_addresses(host_gen)
for service in topo.additional_services:
service.reserve_ip_addresses(host_gen)
for isd_as in topo.ases.keys():
bridge.assign_ip_address(isd_as, pref_ip=next(host_gen))
| 17,647
|
def prod_finished(job):
"""Check if prod stage is finished."""
try:
step = "prod" + str(job.doc.prod_replicates_done - 1)
except (KeyError, AttributeError):
step = "prod" + "0"
run_file = job.ws + "/run.{}".format(step)
if job.isfile("run.{}".format(step)):
with open(run_file) as myfile:
return "Program ended" in myfile.read()
else:
return False
| 17,648
|
def PrimaryCaps(layer_input, name, dim_capsule, channels, kernel_size=9, strides=2, padding='valid'):
""" PrimaryCaps layer can be seen as a convolutional layer with a different
activation function (squashing)
:param layer_input
:param name
:param dim_capsule
:param channels
:param kernel_size
"""
assert channels % dim_capsule == 0, "Invalid size of channels and dim_capsule"
    # I.e. each primary capsule contains 8 convolutional units with a 9x9 kernel and a stride of 2.
num_filters = channels * dim_capsule
conv_layer = layers.Conv2D(
name=name,
filters=num_filters,
kernel_size=kernel_size,
strides=strides,
        activation=None,  # We apply squashing later, therefore no activation function is needed here
padding=padding)(layer_input)
    # In total PrimaryCapsules has [32x6x6] capsule outputs (each output is an 8D vector) and each
    # capsule in the [6x6] grid shares its weights with the others
# See https://keras.io/layers/core/#reshape
reshaped_conv = layers.Reshape(target_shape=(-1, dim_capsule))(conv_layer)
# Now lets apply the squashing function
return layers.Lambda(squashing)(reshaped_conv)
| 17,649
|
async def create_log_file(nats_handler, shared_storage, logger):
"""
Creates CSV for logger outputs
Args:
nats_handler (NatsHandler): NatsHandler used to interact with NATS
shared_storage (dict): Dictionary to persist memory across callbacks
logger (JSONLogger): Logger that can be used to log info, error, etc.
"""
if shared_storage["log_path"][-1] == "/":
shared_storage["log_path"] = shared_storage["log_path"] + f"log-{datetime.utcnow().isoformat()}.csv"
with open(shared_storage["log_path"], "a+") as f:
writer = csv.writer(f)
writer.writerow(["sender_id", "time_sent", "message"])
elif shared_storage["log_path"][-4:] == ".csv":
return
else:
raise ValueError(f"Invalid logging file path {shared_storage['log_path']} is neither folder nor csv")
| 17,650
|
def get_font(args):
"""
Gets a font.
:param args: Arguments (ttf and ttfsize).
:return: Font.
"""
try:
return ImageFont.truetype(args.ttf, args.ttfsize)
    except Exception:
        # Fall back to PIL's built-in default font if the TTF cannot be loaded.
        return ImageFont.load_default()
| 17,651
|
def zeros(shape):
"""
Creates and returns a new array with the given shape which is filled with zeros.
"""
mat = empty(shape)
return fill(mat, 0.0)
| 17,652
|
def unpack_puzzle_input(dir_file: str) -> tuple[list, list]:
"""
Args:
dir_file (str): location of .txt file to pull data from
Returns:
bingo numbers and bingo cards in list format
"""
with open(dir_file, "r") as file:
content = file.read().splitlines()
bingo_numbers = [int(i) for i in content[0].split(",")]
bingo_cards = []
for index in range(2, len(content)):
if content[index-1] == '':
bingo_cards.append([[int(i) for i in content[index].split()]])
elif content[index] != '':
bingo_cards[-1].append([int(i) for i in content[index].split()])
return bingo_numbers, bingo_cards
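
# Usage sketch; the file layout (draw numbers, then blank-line-separated boards) is
# inferred from the parsing above. A 3x3 board is used just for brevity.
from tempfile import NamedTemporaryFile

sample = "7,4,9\n\n22 13 17\n 8  2 23\n21  9 14\n"
with NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write(sample)
numbers, cards = unpack_puzzle_input(tmp.name)
# numbers == [7, 4, 9]; cards == [[[22, 13, 17], [8, 2, 23], [21, 9, 14]]]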
| 17,653
|
def blank_response():
"""Fixture that constructs a response with a blank body."""
return build_response(data="")
| 17,654
|
def dense_image_warp(image:torch.Tensor, flow:torch.Tensor) -> torch.Tensor:
"""Image warping using per-pixel flow vectors.
See [1] for the original reference (Note that the tensor shape is different, etc.).
[1] https://www.tensorflow.org/addons/api_docs/python/tfa/image/dense_image_warp
Parameters
----------
image : torch.Tensor [shape=(batch, channels, height, width)]
flow : torch.Tensor [shape=(batch, 2, height, width)]
Returns
-------
warped_image : torch.Tensor [shape=(batch, channels, height, width)]
"""
batch_size, channels, height, width = image.shape
# The flow is defined on the image grid. Turn the flow into a list of query
# points in the grid space.
y_range = torch.arange(0., height, device=image.device, requires_grad=False)
x_range = torch.arange(0., width, device=image.device, requires_grad=False)
y_grid, x_grid = torch.meshgrid(y_range, x_range)
stacked_grid = torch.stack((y_grid, x_grid), dim=0) # shape=(2, height, width)
batched_grid = stacked_grid.unsqueeze(0) # shape=(1, 2, height, width)
query_points_on_grid = batched_grid - flow # shape=(batch_size, 2, height, width)
query_points_flattened = einops.rearrange(query_points_on_grid, 'b x h w -> b (h w) x') # shape=(batch_size, height * width, 2)
# Compute values at the query points, then reshape the result back to the
# image grid.
interpolated = interpolate_bilinear(image, query_points_flattened) # shape=(batch_size, channels, n_queries)
interpolated = einops.rearrange(interpolated, 'b c (h w) -> b c h w', h=height, w=width)
return interpolated
| 17,655
|
def test_registration_of_action_on_pointer_of_pointer(workers):
"""
Ensure actions along a chain of pointers are registered as expected.
"""
bob = workers["bob"]
alice = workers["alice"]
tensor = torch.tensor([1, 2, 3, 4.0])
ptr = tensor.send(bob)
ptr = ptr.send(alice)
ptr_action = ptr + ptr
assert len(alice._tensors) == 2
assert len(bob._tensors) == 2
| 17,656
|
def validateRange(rangeStr : str) -> bool:
"""Validates the range argument"""
# type cast and compare
try:
# get range indices
ranges = rangeStr.split(",", 1)
rangeFrom = 0 if ranges[0] == "" else int(ranges[0])
rangeTo = 0 if ranges[1] == "" else int(ranges[1])
# check first if both ranges are not set
# using the -r , hack
if ranges == ["", ""]:
return False
# check if any of the range param is set
# and do testing per side
# if either range start/end is set and is <= 0:
if (ranges[0] != "" and rangeFrom < 0) or\
(ranges[1] != "" and rangeTo < 0):
return False
elif (ranges[0] != "") and (ranges[1] != ""):
# if both are set, do conditions here
# if from == to or from > to or from,to <=0, fail
if (rangeFrom == rangeTo) or\
(rangeFrom > rangeTo) or\
((rangeFrom <= 0) or (rangeTo <= 0)):
return False
except (ValueError, IndexError, AttributeError):
return False
return True
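
# Behaviour sketch derived from the checks above:
assert validateRange("1,5") is True    # valid from/to pair
assert validateRange(",") is False     # neither side set
assert validateRange("5,1") is False   # from greater than to
assert validateRange("0,3") is False   # non-positive bound when both are set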
| 17,657
|
def update_log_ip_dict_per_ingress_egress_point(flow_ingress_asn, flow_ip, origin_asn, ip_prefix, country_code, flow_bytes, flow_packets, d_ipsrc_level_analysis_perpoint):
"""
Account for unique IPAddresses, BGP prefixes, origin_asn per ingress/egress points.
:param flow_ingress_asn:
:param flow_ip:
:param origin_asn:
:param ip_prefix:
:param d_ipsrc_level_analysis_perpoint:
:return: dict of dict {'1234': {('10.10.10.1', 23456, '10.0.0.0/8'): [1]},
'5678': {('181.3.50.1', 98765, '181.3.50.0/20'): [1]}, ...}
"""
k = (flow_ip, origin_asn, ip_prefix, country_code)
values = [1, flow_bytes, flow_packets]
flow_ingress_asn = frozenset(flow_ingress_asn)
if flow_ingress_asn not in d_ipsrc_level_analysis_perpoint.keys():
d_ipsrc_level_analysis_perpoint[flow_ingress_asn] = dict()
d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = values
else:
if k not in d_ipsrc_level_analysis_perpoint[flow_ingress_asn]:
d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = values
else:
            # materialize the map object so repeated aggregation keeps working under Python 3
            d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = list(map(add, d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k], values))
return d_ipsrc_level_analysis_perpoint
| 17,658
|
def _config_from_file(configfile):
"""Return a dict containing all of the config values found in the given
configfile.
"""
conf = {}
# set from config if possible
if configfile:
with open(configfile, 'r') as fp:
            config_yaml = yaml.safe_load(fp)  # safe_load avoids constructing arbitrary Python objects
conf = config_yaml
# in the config yaml, 'years' is a map of years to styles; in the config
# dict used in this module, 'year_styles' is that map and 'years' is
# simply a list of the years to graph
conf['year_styles'] = conf.pop('years', {})
conf['years'] = list(conf['year_styles'].keys())
return conf
| 17,659
|
def error(bot, context, wut):
"""Log Errors caused by Updates."""
print(wut)
logger.warning('Update "%s" caused error "%s"', bot, context.error)
| 17,660
|
def odr_planar_fit(points, rand_3_estimate=False):
"""
Fit a plane to 3d points.
Orthogonal distance regression is performed using the odrpack.
Parameters
----------
points : list of [x, y, z] points
rand_3_estimate : bool, optional
First estimation of the plane using 3 random points from the input points list.
Default is False which implies a regular least square fit for the first estimation.
Returns
-------
ndarray
"""
def f_3(beta, xyz):
""" implicit definition of the plane"""
return beta[0] * xyz[0] + beta[1] * xyz[1] + beta[2] * xyz[2] + beta[3]
    # Coordinates of the 3D points
x = points[:, 0]
y = points[:, 1]
z = points[:, 2]
# x = np.r_[9, 35, -13, 10, 23, 0]
# y = np.r_[34, 10, 6, -14, 27, -10]
# z = np.r_[100, 101, 101, 100, 101, 101]
if rand_3_estimate:
# initial guess for parameters
# select 3 random points
i = np.random.choice(len(x), size=3, replace=False)
# Form the 3 points
r_point_1 = np.r_[x[i[0]], y[i[0]], z[i[0]]]
r_point_2 = np.r_[x[i[1]], y[i[1]], z[i[1]]]
r_point_3 = np.r_[x[i[2]], y[i[2]], z[i[2]]]
# Two vectors on the plane
v_1 = r_point_1 - r_point_2
v_2 = r_point_1 - r_point_3
# normal to the 3-point-plane
u_1 = np.cross(v_1, v_2)
# Construct the first estimation, beta0
        # constant term of the implicit form: d = -n . p for a plane through p with normal n
        d_0 = -(u_1[0] * r_point_1[0] + u_1[1] * r_point_1[1] + u_1[2] * r_point_1[2])
beta0 = np.r_[u_1[0], u_1[1], u_1[2], d_0]
else:
beta0 = lstsq_planar_fit(points)
# Create the data object for the odr. The equation is given in the implicit form 'a*x + b*y + c*z + d = 0' and
# beta=[a, b, c, d] (beta is the vector to be fitted). The positional argument y=1 means that the dimensionality
# of the fitting is 1.
lsc_data = odr.Data(np.row_stack([x, y, z]), y=1)
# Create the odr model
lsc_model = odr.Model(f_3, implicit=True)
# Create the odr object based on the data, the model and the first estimation vector.
lsc_odr = odr.ODR(lsc_data, lsc_model, beta0)
# run the regression.
lsc_out = lsc_odr.run()
return lsc_out.beta / lsc_out.beta[3]
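# Illustrative usage sketch (not from the original module): fit the plane
# z = 2x + 3y + 5, i.e. 2x + 3y - z + 5 = 0, to lightly perturbed samples.
# rand_3_estimate=True avoids the dependency on lstsq_planar_fit; numpy (np)
# and scipy.odr (odr) are assumed to be imported as the function requires.
import numpy as np

rng = np.random.default_rng(0)
xy = rng.uniform(-10, 10, size=(200, 2))
z = 2 * xy[:, 0] + 3 * xy[:, 1] + 5 + rng.normal(0, 0.01, 200)
pts = np.column_stack([xy, z])
beta = odr_planar_fit(pts, rand_3_estimate=True)
a, b, c, _d = beta              # scaled so the constant term _d equals 1
print(-a / c, -b / c)           # expected to be close to 2 and 3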
| 17,661
|
def adjust_learning_rate_lrstep(epoch, opt):
"""Sets the learning rate to the initial LR decayed by decay rate every steep step"""
steps = np.sum(epoch > np.asarray(opt.lr_decay_epochs))
if steps > 0:
new_lr = opt.lr_init * (opt.lr_decay_rate ** steps)
return new_lr
return opt.lr_init
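# Illustrative usage sketch: `opt` only needs lr_init, lr_decay_rate and
# lr_decay_epochs, so a SimpleNamespace stands in for the argparse options.
from types import SimpleNamespace

_opt = SimpleNamespace(lr_init=0.1, lr_decay_rate=0.1, lr_decay_epochs=[30, 60, 90])
assert adjust_learning_rate_lrstep(10, _opt) == 0.1                      # no decay yet
assert abs(adjust_learning_rate_lrstep(61, _opt) - 0.001) < 1e-12        # two decay steps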
| 17,662
|
def set_sock_quickack(sock):
"""
    Enable TCP_QUICKACK (where available) and TCP_NODELAY on the socket.
    E.g.
::
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)
"""
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
try:
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, 1)
except AttributeError:
pass
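# Illustrative usage sketch: TCP_QUICKACK only exists on Linux, which is the
# case the AttributeError guard above covers; elsewhere only TCP_NODELAY is set.
import socket

_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
set_sock_quickack(_sock)
_sock.close()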
| 17,663
|
def subplot(n, m, k):
"""
Create a subplot command
Example::
import numpy as np
x = np.linspace(-5, 5, 1000)
figure(1)
subplot(2, 1, 1)
plot(x, np.sin(x), "r+")
subplot(2, 1, 2)
plot(x, np.cos(x), "g-")
show()
"""
global _current_axes
    lig = (k - 1) // m  # integer division for the row index (plain / returns a float in Python 3)
col = (k - 1) % m
fig = gcf()
axe = fig.get_axes(lig, col)
_current_axes = axe
return axe
| 17,664
|
def _create_gdcm_image(src: bytes, **kwargs: Any) -> "gdcm.Image":
"""Return a gdcm.Image from the `src`.
Parameters
----------
src : bytes
The raw image frame data to be encoded.
**kwargs
Required parameters:
* `rows`: int
* `columns`: int
* `samples_per_pixel`: int
* `number_of_frames`: int
* `bits_allocated`: int
* `bits_stored`: int
* `pixel_representation`: int
* `photometric_interpretation`: str
Returns
-------
gdcm.Image
An Image containing the `src` as a single uncompressed frame.
"""
rows = kwargs['rows']
columns = kwargs['columns']
samples_per_pixel = kwargs['samples_per_pixel']
number_of_frames = kwargs['number_of_frames']
pixel_representation = kwargs['pixel_representation']
bits_allocated = kwargs['bits_allocated']
bits_stored = kwargs['bits_stored']
photometric_interpretation = kwargs['photometric_interpretation']
pi = gdcm.PhotometricInterpretation.GetPIType(
photometric_interpretation
)
# GDCM's null photometric interpretation gets used for invalid values
if pi == gdcm.PhotometricInterpretation.PI_END:
raise ValueError(
"An error occurred with the 'gdcm' plugin: invalid photometric "
f"interpretation '{photometric_interpretation}'"
)
# `src` uses little-endian byte ordering
ts = gdcm.TransferSyntax.ImplicitVRLittleEndian
image = gdcm.Image()
image.SetNumberOfDimensions(2)
image.SetDimensions((columns, rows, 1))
image.SetPhotometricInterpretation(
gdcm.PhotometricInterpretation(pi)
)
image.SetTransferSyntax(gdcm.TransferSyntax(ts))
pixel_format = gdcm.PixelFormat(
samples_per_pixel,
bits_allocated,
bits_stored,
bits_stored - 1,
pixel_representation
)
image.SetPixelFormat(pixel_format)
if samples_per_pixel > 1:
# Default `src` is planar configuration 0 (i.e. R1 G1 B1 R2 G2 B2)
image.SetPlanarConfiguration(0)
# Add the Pixel Data element and set the value to `src`
elem = gdcm.DataElement(gdcm.Tag(0x7FE0, 0x0010))
elem.SetByteStringValue(src)
image.SetDataElement(elem)
return cast("gdcm.Image", image)
| 17,665
|
def parallel_upload_chunks(vol, files, bin_paths, chunk_size, num_workers):
"""Push tif images as chunks in CloudVolume object in Parallel
Arguments:
vol {cloudvolume.CloudVolume} -- volume that will contain image data
files {list} -- strings of tif image filepaths
bin_paths {list} -- binary paths to tif files
chunk_size {list} -- 3 ints for original tif image dimensions
num_workers {int} -- max number of concurrently running jobs
"""
tiff_jobs = int(num_workers / 2) if num_workers == cpu_count() else num_workers
with tqdm_joblib(tqdm(desc="Load tiffs", total=len(files))) as progress_bar:
tiffs = Parallel(
tiff_jobs, timeout=1800, backend="multiprocessing", verbose=50
)(delayed(tf.imread)(i) for i in files)
with tqdm_joblib(tqdm(desc="Load ranges", total=len(bin_paths))) as progress_bar:
ranges = Parallel(
tiff_jobs, timeout=1800, backend="multiprocessing", verbose=50
)(delayed(get_data_ranges)(i, chunk_size) for i in bin_paths)
print("loaded tiffs and bin paths")
vol_ = CloudVolume(vol.layer_cloudpath, parallel=False, mip=vol.mip)
with tqdm_joblib(tqdm(desc="Upload chunks", total=len(ranges))) as progress_bar:
Parallel(tiff_jobs, timeout=1800, backend="multiprocessing", verbose=50)(
delayed(upload_chunk)(vol_, r, i) for r, i in zip(ranges, tiffs)
)
| 17,666
|
def _4_graphlet_contains_3star(adj_mat):
"""Check if a given graphlet of size 4 contains a 3-star"""
return (4 in [a.sum() for a in adj_mat])
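# Illustrative usage sketch: the `4 in row-sums` test implies the graphlet
# adjacency matrices carry ones on the diagonal (an assumption inferred from
# the check above), so the centre of a 3-star (degree 3) sums to 4.
import numpy as np

_star = np.array([[1, 1, 1, 1],
                  [1, 1, 0, 0],
                  [1, 0, 1, 0],
                  [1, 0, 0, 1]])
_path = np.array([[1, 1, 0, 0],
                  [1, 1, 1, 0],
                  [0, 1, 1, 1],
                  [0, 0, 1, 1]])
assert _4_graphlet_contains_3star(_star) is True
assert _4_graphlet_contains_3star(_path) is False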
| 17,667
|
def _one_day(args):
"""Prompts for index file update
`_need_to_index` has already filtered jpg preferred over other
formats, that is, if there is a duplicate name, it will list the
jpg, not the arw, pcd, etc. in the index.
Args:
args (list): files to index
"""
def _extract_jpg(image):
for e, s in (
# You can view arw files by modifying the camera type:
# exiftool -sonymodelid="ILCE-7M2" -ext ARW
# but better to extract the jpg preview and not modify the
# camera type
('arw', ['exiftool', '-b', '-PreviewImage', image]),
# Suffix [5] produces an image 3072 by 2048 ("16 Base")
('pcd', ['convert', image + '[5]']),
):
if not image.endswith('.' + e):
continue
p = re.sub(f'\\.{e}$', '.jpg', image)
if e == 'pcd':
s.append(p)
i = subprocess.check_output(s)
with open(p, 'wb') as f:
f.write(i)
return p
return image
if not args:
return
cwd = os.getcwd()
simple_msg = None
for a in args:
img = os.path.basename(a)
d = os.path.dirname(a)
if d:
os.chdir(d)
if not os.path.exists(img):
continue
preview = _extract_jpg(img)
if simple_msg:
msg = simple_msg
else:
if common.MOVIE_SUFFIX.search(img):
subprocess.check_call(['open', '-a', 'QuickTime Player.app', img])
else:
subprocess.check_call(['open', '-a', 'Preview.app', preview])
msg = input(a + ': ')
if not msg:
status = False
break
if msg == '?':
simple_msg = msg
if os.path.exists(img):
if msg == '!':
os.remove(img)
if preview != img:
os.remove(preview)
print(a + ': removed')
else:
with open('index.txt', 'a') as f:
f.write(preview + ' ' + msg + '\n')
else:
print(a + ': does not exist')
if d:
os.chdir(cwd)
try:
os.remove('index.txt~')
except Exception:
pass
return
| 17,668
|
def af4_path() -> Path:
"""Return the abspath of Go bio-target-rna-fusion binary. Builds the binary if necessary"""
global AF4_PATH
if not AF4_PATH:
af4_label = "//go/src/github.com/grailbio/bio/cmd/bio-fusion"
build([af4_label])
AF4_PATH = go_executable(af4_label)
return AF4_PATH
| 17,669
|
def get_sentence_content(sentence_token):
    """Extract the sentence string from the tokens present in the sentence
    Args:
        sentence_token (tuple): contains the length of the sentence and the list of all tokens in the sentence
    Returns:
        str: sentence string
    """
    return ''.join(word.text for word in sentence_token[1])
| 17,670
|
def timestamp_error(item: SensorItem):
"""this function fixes an error when timestamp is logged in a smaller unit then seconds."""
if float(item.timestamp) / 3600 * 24 * 356 > 2019 and \
"." not in str(item.timestamp) and \
len(str(item.timestamp)) > 10:
# this bug has fixed in 2018
# 1471117570183 -> 1471117570.183
item.timestamp = item.timestamp[:10] + "." + item.timestamp[10:]
item.timestamp = float(item.timestamp)
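# Illustrative usage sketch: SensorItem is assumed to expose a mutable string
# `timestamp` attribute, so a SimpleNamespace stands in for it here.
from types import SimpleNamespace

_item = SimpleNamespace(timestamp="1471117570183")   # milliseconds, no decimal point
timestamp_error(_item)
assert _item.timestamp == 1471117570.183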
| 17,671
|
def _parse_filename(filename):
"""Parse meta-information from given filename.
Parameters
----------
filename : str
A Market 1501 image filename.
Returns
-------
(int, int, str, str) | NoneType
Returns a tuple with the following entries:
* Unique ID of the individual in the image
* Index of the camera which has observed the individual
* Filename without extension
* File extension
Returns None if the given filename is not a valid filename.
"""
filename_base, ext = os.path.splitext(filename)
if '.' in filename_base:
# Some images have double filename extensions.
filename_base, ext = os.path.splitext(filename_base)
if ext != ".jpg":
return None
person_id, cam_seq, frame_idx, detection_idx = filename_base.split('_')
return int(person_id), int(cam_seq[1]), filename_base, ext
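# Illustrative usage sketch: a typical Market-1501 filename such as
# '0002_c1s1_000451_03.jpg' parses into (person id, camera index, base name, extension).
assert _parse_filename("0002_c1s1_000451_03.jpg") == (2, 1, "0002_c1s1_000451_03", ".jpg")
assert _parse_filename("0002_c1s1_000451_03.png") is None   # non-jpg files are rejected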
| 17,672
|
def sequence_generator(data, look_back = 50):
"""\
Description:
------------
Input data for LSTM: Convert to user trajectory (maximum length: look back)
"""
train,test, valid = [],[],[]
unique_users = set(data[:,0])
items_per_user = {int(user):[0 for i in range(look_back)] for user in unique_users}
for (idx,row) in enumerate(data):
user,item,time = int(row[0]),int(row[1]),row[2]
items_per_user[user] = items_per_user[user][1:]+[item+1]
current_items = items_per_user[user]
if row[3]==0:
train.append([current_items[:-1],current_items[-1]])
elif row[3]==2:
test.append([current_items[:-1],current_items[-1]])
else:
valid.append([current_items[:-1],current_items[-1]])
    return train, test, valid
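# Illustrative usage sketch: rows are (user, item, timestamp, split_flag) with
# split_flag 0 = train, 2 = test, anything else = validation; histories are
# left-padded with zeros up to `look_back` and item ids are shifted by +1.
# Assumes the three-way train/test/validation return above.
import numpy as np

_data = np.array([
    [0, 10, 1.0, 0],
    [0, 11, 2.0, 0],
    [0, 12, 3.0, 2],
])
_train, _test, _valid = sequence_generator(_data, look_back=4)
assert _train[0] == [[0, 0, 0], 11]      # empty history, first item (10 + 1)
assert _test[0] == [[0, 11, 12], 13]     # history of shifted items, target 12 + 1
assert _valid == []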
| 17,673
|
def prefit_clf__svm(gamma: float = 0.001) -> base.ClassifierMixin:
"""Returns an unfitted SVM classifier object.
:param gamma: ...
:return:
"""
return svm.SVC(gamma=gamma)
| 17,674
|
def option_not_exist_msg(option_name, existing_options):
""" Someone is referencing an option that is not available in the current package
options
"""
result = ["'options.%s' doesn't exist" % option_name]
result.append("Possible options are %s" % existing_options or "none")
return "\n".join(result)
| 17,675
|
def ConverterCommand(options):
"""Convert the default probe statements to project specific statements."""
probe_statement_path = hwid_utils.GetProbeStatementPath(options.project)
converted_results_obj = converter.ConvertToProbeStatement(
options.database, probe_statement_path)
converted_results_data = json_utils.DumpStr(converted_results_obj,
pretty=True)
if options.output_file == '-':
Output(converted_results_data)
else:
file_utils.WriteFile(options.output_file, converted_results_data)
if options.output_checksum_file:
checksum = hashlib.sha1(converted_results_data.encode('utf-8')).hexdigest()
file_utils.WriteFile(options.output_checksum_file, checksum)
| 17,676
|
def elslib_CylinderD2(*args):
"""
:param U:
:type U: float
:param V:
:type V: float
:param Pos:
:type Pos: gp_Ax3
:param Radius:
:type Radius: float
:param P:
:type P: gp_Pnt
:param Vu:
:type Vu: gp_Vec
:param Vv:
:type Vv: gp_Vec
:param Vuu:
:type Vuu: gp_Vec
:param Vvv:
:type Vvv: gp_Vec
:param Vuv:
:type Vuv: gp_Vec
:rtype: void
"""
return _ElSLib.elslib_CylinderD2(*args)
| 17,677
|
def cmorization(in_dir, out_dir, cfg, _):
"""Cmorization func call."""
cmor_table = cfg['cmor_table']
glob_attrs = cfg['attributes']
# run the cmorization
for var, vals in cfg['variables'].items():
in_files = collect_files(in_dir, var, cfg)
logger.info("CMORizing var %s from input set %s", var, vals['name'])
raw_info = cfg['variables'][var]
raw_info.update({
'var': var,
'reference_year': cfg['custom']['reference_year'],
})
glob_attrs['mip'] = vals['mip']
extract_variable(in_files, out_dir, glob_attrs, raw_info, cmor_table)
| 17,678
|
def main() -> None:
"""Interactive use of task scheduler implementation."""
try:
scheduler = data_input()
result_output(scheduler=scheduler)
except KeyboardInterrupt:
pass
| 17,679
|
def get_version_message(version: str):
"""Get the message for the zygrader version from the changelog"""
changelog = load_changelog()
msg = [f"zygrader version {version}", ""]
version_index = 0
for line in changelog:
if line == version:
version_index = changelog.index(line) + 1
line = changelog[version_index]
while line:
msg.append(line)
version_index += 1
line = changelog[version_index]
return msg
| 17,680
|
def handle_right(left_entry_box, right_entry_box, mqtt_sender):
"""
Tells the robot to move using the speeds in the given entry boxes,
but using the negative of the speed in the right entry box.
:type left_entry_box: ttk.Entry
:type right_entry_box: ttk.Entry
:type mqtt_sender: com.MqttClient
"""
print('right', left_entry_box.get(), right_entry_box.get())
left = int(left_entry_box.get())
right = -int(right_entry_box.get())
mqtt_sender.send_message('movement', [str(left), str(right)])
| 17,681
|
def get_arity(p, b_addr):
"""
Retrieves the arity by inspecting a funciton call
:param p: angr project
:param b_addr: basic block address
:return: arity of the function
"""
return len(get_ord_arguments_call(p, b_addr))
| 17,682
|
def draw_des3_plot():
"""
This function is to draw the plot of DES 3.
"""
objects = ('Singapore', 'Uruguay', 'Chile', 'Belgium', 'Denmark', 'Qatar', 'Portugal', 'Canada', 'Spain', 'Ireland')
y_pos = np.arange(len(objects))
performance = [71, 69, 68, 66, 65, 65, 64, 63, 63, 62]
plt.xkcd()
fig = plt.figure(figsize=(9, 6), dpi=35)
    fig.suptitle('Share of people fully vaccinated by country')
ax = fig.add_subplot(111)
ax.barh(y_pos, performance, align='center', alpha=0.5)
plt.yticks(y_pos, objects)
ax.set_xlabel('Share of people fully vaccinated')
return fig
| 17,683
|
def main():
"""
This function will use various utility functions, classes, and methods to determine
the location of two Resistance members: Rey and Chewbacca. Nothing is returned from
this function, however a file <updated_information.json> is produced.
Parameters:
None
Returns:
None
"""
pass
| 17,684
|
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id")
| 17,685
|
def gather_from_processes(chunk, split_sizes, displacements, comm=MPI.COMM_WORLD):
"""Gather data chunks on rank zero
:param chunk: Data chunks, living on ranks 0, 1, ..., comm.size-1
:type chunk: np.ndarray
    :param split_sizes: Chunk lengths on individual ranks
:type split_sizes: np.ndarray
:param displacements: Chunk displacements (compare scatter_to_processes)
:type displacements: np.ndarray
:return: Dataset gathered again, living on rank 0
    :rtype: np.ndarray
Inspired by: https://stackoverflow.com/a/36082684
Licensed under the Academic Free License version 3.0
"""
comm.Barrier()
total_length = np.array(chunk.shape[0])
gathered = np.empty((comm.allreduce(total_length), chunk.shape[1]), dtype=chunk.dtype)
comm.Gatherv(chunk, [gathered, split_sizes, displacements, MPI.DOUBLE], root=0)
return gathered
| 17,686
|
def pluginExists(name):
"""pluginExists(name) -> True if found, or False if not.
This function is the same as load(), but only checks for the existence of a plugin rather than loading it.
If there is no slash in the name then the pluginPath() is searched for it. If there is a slash then the name is used directly as a
filename, if it does not start with a slash the name is relative to the directory containing any plugin being currently loaded.
If no filename extension is provided, it will try appending '.so' (or whatever your OS dynamic library extension is) and finding
nothing will also try to append '.tcl' and '.py'.
@param name: Plugin name or filename.
@return: True if found, or False if not.
"""
pass
| 17,687
|
def test_deep_nested_env_no_merge_databag():
"""merge flag is honored"""
data = api.get_env_databag(root_dir, False, "dev/special")
logger.debug(data)
assert data.get("new_value") is None
assert data.get("common_value") is None
assert data.get("special_value") == "special"
assert api.get_output_databag_filename() == os.path.join(
root_dir,
constants.ENV_DIR,
"dev/special/output_databag.yaml"
)
| 17,688
|
async def publish_file_as_upload(
background_tasks: BackgroundTasks, file_data: UploadFile = File(...)
) -> tp.Union[IpfsPublishResponse, GenericResponse]:
"""
Publish file to IPFS using local node (if enabled by config) and / or pin to Pinata pinning cloud (if enabled by config).
File is accepted as an UploadFile (multipart form data)
"""
try:
        # temporary fix using on-disk caching; needs to be reworked to avoid writing data to disk
cache_dir = "cache"
if not os.path.exists(cache_dir):
os.mkdir(cache_dir)
path = f"{cache_dir}/{file_data.filename}"
with open(path, "wb") as f:
f.write(file_data.file.read())
cid, uri = await publish_file(Path(path), background_tasks)
message = f"File {file_data.filename} published"
logger.info(message)
return IpfsPublishResponse(status=status.HTTP_200_OK, details=message, ipfs_cid=cid, ipfs_link=uri)
except Exception as e:
message = f"An error occurred while publishing file to IPFS: {e}"
logger.error(message)
return GenericResponse(status=status.HTTP_500_INTERNAL_SERVER_ERROR, details=message)
| 17,689
|
def edit_collab() :
"""
Endpoint to edit a specified collaboration's member variables. This endpoint requires the requesting user to be an
authenticated user to properly function.
Request Body Parameters:
id: string, JSON, required
owner: string, JSON, optional
size: int, JSON, optional
members: array of strings, JSON, optional
date: int, JSON, optional
duration: int, JSON, optional
        location: string, JSON, optional
status: bool, JSON, optional
title: string, JSON, optional
description: string, JSON, optional
classes: array of strings, JSON, optional
skills: array of strings, JSON, optional
applicants: array of strings, JSON, optional
This endpoint queries the database for the specified collaboration. If the collaboration is found, other variables
included, if any, are updated. If the search fails, an appropriate error message is returned.
"""
data = request.get_json()
collab_id = data['id']
record = collabDB.find({'_id' : ObjectId(collab_id)})
if record is None:
return json.dumps({'error': "No collaborations to update matched id", 'code': 996})
else:
try:
            # The original code repeated one update_one block per field; the loop
            # below performs the same per-field updates driven by a field -> type map.
            field_types = {
                'owner': str,
                'size': int,
                'members': list,
                'date': int,
                'duration': int,
                'location': str,
                'status': bool,
                'title': str,
                'description': str,
                'classes': list,
                'skills': list,
                'applicants': list,
            }
            for field, expected_type in field_types.items():
                if field in data and isinstance(data[field], expected_type):
                    record = collabDB.update_one(
                        {"_id": ObjectId(collab_id)},
                        {"$set": {field: data[field]}}
                    )
if record.modified_count > 0:
return json.dumps({'success': True})
else:
return json.dumps({'success': True})
except Exception as e:
print(e)
return json.dumps({'error': "Error while trying to update existing doc.", 'code': 997})
| 17,690
|
def create_lkas_ui(packer, main_on, enabled, steer_alert, defog, ahbc, ahbcramping, config, noipma, stats, persipma, dasdsply, x30, daschime, lines):
"""Creates a CAN message for the Ford Steer Ui."""
values = {
"PersIndexIpma_D_Actl": persipma,
"DasStats_D_Dsply": dasdsply,
"Set_Me_X30": x30,
"Lines_Hud": lines,
"Hands_Warning_W_Chime": steer_alert,
"CamraDefog_B_Req": defog,
"AhbHiBeam_D_Rq": ahbc,
"AhbcRampingV_D_Rq": ahbcramping,
"FeatConfigIpmaActl": config,
"FeatNoIpmaActl": noipma,
"CamraStats_D_Dsply": stats,
"DasWarn_D_Dsply": daschime,
}
return packer.make_can_msg("Lane_Keep_Assist_Ui", 0, values)
| 17,691
|
def unique(seq, key=identity):
""" Return only unique elements of a sequence
>>> tuple(unique((1, 2, 3)))
(1, 2, 3)
>>> tuple(unique((1, 2, 1, 3)))
(1, 2, 3)
Uniqueness can be defined by key keyword
>>> tuple(unique(['cat', 'mouse', 'dog', 'hen'], key=len))
('cat', 'mouse')
"""
seen = set()
for item in seq:
tag = key(item)
if tag not in seen:
seen.add(tag)
yield item
| 17,692
|
def test_validation(celery_app):
"""Task and shared task decorators must check required `Injector`
attributes."""
with pytest.raises(DependencyError) as exc_info:
@contrib.task
class Container1(Injector):
name = "foo.bar.baz"
run = lambda: None # noqa: E731
message = str(exc_info.value)
assert message == "'Container1' can not resolve attribute 'app'"
with pytest.raises(DependencyError) as exc_info:
@contrib.task
class Container2(Injector):
app = celery_app
run = lambda: None # noqa: E731
message = str(exc_info.value)
assert message == "'Container2' can not resolve attribute 'name'"
with pytest.raises(DependencyError) as exc_info:
@contrib.task
class Container3(Injector):
app = celery_app
name = "foo.bar.baz"
message = str(exc_info.value)
assert message == "'Container3' can not resolve attribute 'run'"
with pytest.raises(DependencyError) as exc_info:
@contrib.shared_task
class Container4(Injector):
run = lambda: None # noqa: E731
message = str(exc_info.value)
assert message == "'Container4' can not resolve attribute 'name'"
with pytest.raises(DependencyError) as exc_info:
@contrib.shared_task
class Container5(Injector):
name = "foo.bar.baz"
message = str(exc_info.value)
assert message == "'Container5' can not resolve attribute 'run'"
| 17,693
|
def _contextual_loss(x, y, reduction='mean'):
"""Contextual loss
"""
loss = -torch.log(_contextual_similarity(x, y))
if reduction == 'mean':
loss = loss.mean()
return loss
| 17,694
|
def _actual_center(pos, angle):
"""
Calculate the position of the geometric center of the agent
The value of self.cur_pos is the center of rotation.
"""
dir_vec = get_dir_vec(angle)
return pos + (CAMERA_FORWARD_DIST - (ROBOT_LENGTH / 2)) * dir_vec
| 17,695
|
def load_one_batch_mnist(batch_size=64, shuffle=True):
"""Return a single batch (inputs, labels) of MNIST data."""
dataloader = get_mnist_dataloder(batch_size, shuffle)
X, y = next(iter(dataloader))
return X, y
| 17,696
|
def replace_math_functions(input_string):
""" FIXME: Temporarily replace std:: invocations of math functions with non-std:: versions to prevent linker errors
NOTE: This can lead to correctness issues when running tests, since the correct version of the math function (exp/expf) might not get called.
Plan is to remove this function once HIP supports std:: math function calls inside device code
"""
output_string = input_string
output_string = re.sub("std::exp\(", "::exp(", output_string)
output_string = re.sub("std::log\(", "::log(", output_string)
output_string = re.sub("std::pow\(", "::pow(", output_string)
return output_string
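# Illustrative usage sketch: only std::exp/std::log/std::pow call sites are rewritten.
_src = "float y = std::exp(x) + std::pow(x, 2.0f) + std::sqrt(x);"
assert replace_math_functions(_src) == "float y = ::exp(x) + ::pow(x, 2.0f) + std::sqrt(x);"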
| 17,697
|
def __validate_id_key(self, key, value):
"""Shorthand function to validate ID keys."""
test_object = db.get_object_as_dict_by_id(value)
if not test_object:
raise TypeError("No object with the ID given in the key '" + key + "' was found. (" + value + ")")
elif self.id_key_types[key] != "any" and not test_object['object_type'] == self.id_key_types[key]:
raise TypeError("The object given in the key '" + key + "' does not have the correct object type. (is " + test_object['object_type'] + ", should be " + self.id_key_types[key] + ")")
| 17,698
|
def KmeansInterCompare(k, data, nbTests):
"""Réalisation d'un nombre donné de classification Kmeans.
Le meilleur résultat selon le critère d'inertie inter-groupe est affiché"""
KmeansResults = []
for i in range(0, nbTests):
KmeansResults.append(Kmeans(k, data))
    # maximise the between-group inertia, i.e. favour separation between groups
best_kmeans = 0
for i in range(1, nbTests):
if inerInter(KmeansResults[best_kmeans][0], KmeansResults[best_kmeans][1]) < inerInter(KmeansResults[i][0], KmeansResults[i][1]):
best_kmeans = i
return KmeansResults[best_kmeans]
| 17,699
|