| content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def cherry_pick_cli(
ctx, dry_run, pr_remote, abort, status, push, config_path, commit_sha1, branches
):
"""cherry-pick COMMIT_SHA1 into target BRANCHES."""
click.echo("\U0001F40D \U0001F352 \u26CF")
chosen_config_path, config = load_config(config_path)
try:
cherry_picker = CherryPicker(
pr_remote,
commit_sha1,
branches,
dry_run=dry_run,
push=push,
config=config,
chosen_config_path=chosen_config_path,
)
except InvalidRepoException:
click.echo(f"You're not inside a {config['repo']} repo right now! \U0001F645")
sys.exit(-1)
except ValueError as exc:
ctx.fail(exc)
if abort is not None:
if abort:
cherry_picker.abort_cherry_pick()
else:
cherry_picker.continue_cherry_pick()
elif status:
click.echo(cherry_picker.status())
else:
try:
cherry_picker.backport()
except BranchCheckoutException:
sys.exit(-1)
except CherryPickException:
sys.exit(-1)
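
# A sketch of how this Click command is typically invoked from the shell, based
# only on the options visible above (the commit SHA and branch names are
# placeholders):
#
#   cherry_picker --dry-run <commit_sha1> 3.10 3.9
#   cherry_picker --abort        # abandon an in-progress backport
#   cherry_picker --continue     # resume after resolving conflicts
#   cherry_picker --status       # show the current backport state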
| 5,336,500
|
def category(category: str) -> List[str]:
"""Get list of emojis in the given category"""
emoji_url = f"https://emojipedia.org/{category}"
page = requests.get(emoji_url)
soup = BeautifulSoup(page.content, 'lxml')
symbols: List[str]
try:
ul = soup.find('ul', class_="emoji-list")
spans = ul.find_all('span', class_='emoji')
symbols = [span.get_text() for span in spans]
    except AttributeError:
        # the category page may not contain an emoji list
        symbols = []
return symbols
| 5,336,501
|
def calc_cumulative_bin_metrics(
labels: np.ndarray,
probability_predictions: np.ndarray,
number_bins: int = 10,
decimal_points: Optional[int] = 4) -> pd.DataFrame:
"""Calculates performance metrics for cumulative bins of the predictions.
Args:
labels: An array of true binary labels represented by 1.0 and 0.0.
probability_predictions: An array of predicted probabilities between 0.0 and
1.0.
number_bins: Number of cumulative bins that we want to divide the ranked
predictions into. Default is 10 bins such that the 1st bin contains the
highest 10% of the predictions, 2nd bin contains the highest 20% of the
predictions and so on.
decimal_points: Number of decimal points to use when outputting the
calculated performance metrics.
Returns:
    bin_metrics: The following metrics calculated for each cumulative bin.
      cumulative_bin_number: Bin number starting from 1.
      bin_size: Total number of instances in the bin.
      bin_size_proportion: Proportion of instances in the bin out of all the
        instances in the labels.
      positive_instances: Number of positive instances in the bin.
      precision: Proportion of positive instances out of all the instances
        in the bin.
      coverage (recall): Proportion of positive instances in the bin out of
        all the positive instances in the labels.
      prop_label_positives: Proportion of positive instances in the labels.
      precision_uplift: Uplift of the bin's precision compared to the
        precision of a random prediction (prop_label_positives).
"""
utils.assert_label_values_are_valid(labels)
utils.assert_prediction_values_are_valid(probability_predictions)
utils.assert_label_and_prediction_length_match(labels,
probability_predictions)
# Separate the probability_predictions into bins.
label_predictions = pd.DataFrame(
list(zip(labels, probability_predictions)),
columns=['label', 'prediction'])
label_predictions = label_predictions.sort_values(
by='prediction', ascending=False)
number_total_instances = label_predictions.shape[0]
equal_bin_size = number_total_instances / number_bins
number_total_positive_instances = label_predictions[
label_predictions['label'] > 0].shape[0]
prop_label_positives = round(
number_total_positive_instances / number_total_instances, decimal_points)
cumulative_bin_metrics_list = list()
for i in range(1, (number_bins + 1)):
current_bin_size = round(equal_bin_size * i)
bin_size_proportion = round(current_bin_size / number_total_instances,
decimal_points)
bin_instances = label_predictions.head(current_bin_size)
number_bin_positive_instances = bin_instances[
bin_instances['label'] > 0].shape[0]
bin_precision = round(number_bin_positive_instances / current_bin_size,
decimal_points)
bin_recall = round(
number_bin_positive_instances / number_total_positive_instances,
decimal_points)
bin_precision_uplift = round(bin_precision / prop_label_positives,
decimal_points)
cumulative_bin_metrics_list.append(
(i, current_bin_size, bin_size_proportion,
number_bin_positive_instances, bin_precision, bin_recall,
prop_label_positives, bin_precision_uplift))
return pd.DataFrame(
cumulative_bin_metrics_list,
columns=[
'cumulative_bin_number', 'bin_size', 'bin_size_proportion',
'positive_instances', 'precision', 'coverage (recall)',
'prop_label_positives', 'precision_uplift'
])
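
# Hedged usage sketch: assumes calc_cumulative_bin_metrics and the `utils`
# validation helpers it calls are importable in the current scope.
import numpy as np

labels = np.array([1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0])
predictions = np.array([0.95, 0.90, 0.80, 0.70, 0.60, 0.50, 0.40, 0.30, 0.20, 0.10])
bin_metrics = calc_cumulative_bin_metrics(labels, predictions, number_bins=5)
# Each row describes one cumulative bin; e.g. the first row covers the top 20%
# of ranked predictions, the last row covers all instances.
print(bin_metrics[['cumulative_bin_number', 'precision', 'coverage (recall)']])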
| 5,336,502
|
def rootbeta_cdf(x, alpha, beta_, a, b, bounds=(), root=2.):
"""
    Calculates the cumulative distribution function of the root-beta distribution, i.e.::
        F(z; alpha, beta) = I_z(alpha, beta)
    where ``z=(x**(1/root)-a**(1/root))/(b**(1/root)-a**(1/root))`` and ``I_z(alpha, beta)`` is the regularized incomplete beta function.
Parameters
----------
x : float or array_like, shape (n,)
Realization.
alpha : float
Shape parameter 1.
beta_ : float
Shape parameter 2.
a : float
Minimum.
b : float
Maximum.
bounds : tuple
Tuple of minimum and maximum attainable realizations
root : float
Root.
Returns
-------
p : float or array_like, shape (n,)
Probability.
"""
_chk_root_mmm_inp(a, b)
if not bounds:
bounds = (a, b)
_chk_beta_inp(alpha, beta_)
_chk_dist_inp(x, bounds)
return beta_cdf(sqrt(x, root), alpha, beta_, sqrt(a, root), sqrt(b, root))
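
# Reference sketch of the same transform written directly against
# scipy.stats.beta. It assumes ``sqrt(x, root)`` above is the generalized root
# x**(1/root) and that ``beta_cdf(y, alpha, beta_, lo, hi)`` rescales y to
# (y - lo) / (hi - lo) before evaluating the regularized incomplete beta
# function. This is an illustration, not the library's implementation.
import numpy as np
from scipy.stats import beta as _scipy_beta

def _rootbeta_cdf_sketch(x, alpha, beta_, a, b, root=2.0):
    z = (np.power(x, 1.0 / root) - a ** (1.0 / root)) / (b ** (1.0 / root) - a ** (1.0 / root))
    return _scipy_beta.cdf(z, alpha, beta_)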
| 5,336,503
|
def get_scores(treatment, outcome, prediction, p, scoring_range=(0,1), plot_type='all'):
"""Calculate AUC scoring metrics.
Parameters
----------
treatment : array-like
outcome : array-like
prediction : array-like
p : array-like
Treatment policy (probability of treatment for each row).
    scoring_range : 2-tuple
        Fractional range over which the frost score is calculated. The first
        element must be less than the second, and both must lie between 0 and 1
        (inclusive).
    plot_type : str
        Which curves to score: 'qini', 'cgains', 'aqini', or 'all'.
Returns
-------
scores : dict
A dictionary containing the following values. Each is also appended
with `_cgains` and `_aqini` for the corresponding values for the
cumulative gains curve and adjusted qini curve, respectively.
q1: Traditional Q score normalized by the theoretical
maximal qini. Note the theoretical max here goes up with a slope of 2.
q2: Traditional Q score normalized by the practical maximal qini. This
curve increases with a slope of 1.
Q: Area between qini curve and random selection line. This is named
after the notation in Radcliffe & Surry 2011, but note that they
normalize their curves differently.
Q_max: Maximal possible qini score, which is used for normalization
of qini to get frost score. Only obtainable by overfitting.
Q_practical_max: Practical maximal qini score, if you are not
overfitting. This assumes that all (outcome, treatment) = (1,1) were
persuadables, but that there are also an equal number of persuadables
in the control group. This is the best possible scenario, but likely
assumes too few "sure things".
overall_lift: The lift expected from random application of treatment.
"""
treatment = _ensure_array(treatment)
outcome = _ensure_array(outcome)
prediction = _ensure_array(prediction)
p = _ensure_array(p)
Nt1o1, Nt0o1, Nt1o0, Nt0o0 = _get_counts(treatment, outcome, p)
Nt1, Nt0, N = _get_tc_counts(Nt1o1, Nt0o1, Nt1o0, Nt0o0)
def riemann(x, y):
avgy = [(a+b)/2 for (a,b) in zip(y[:-1], y[1:])]
dx = [b-a for (a,b) in zip(x[:-1], x[1:])]
return sum([a*b for (a,b) in zip(dx, avgy)])
qini_riemann = riemann(*_maximal_qini_curve(_get_overfit_counts, Nt1o1, Nt0o1, Nt1o0, Nt0o0))
practical_qini_riemann = riemann(*_maximal_qini_curve(_get_no_sure_thing_counts, Nt1o1, Nt0o1, Nt1o0, Nt0o0))
overall_lift = (Nt1o1/Nt1-Nt0o1/Nt0)
qini_max = qini_riemann - 0.5*overall_lift
practical_qini_max = practical_qini_riemann - 0.5*overall_lift
    # The predicted Qini curve.
    # First, reorder the outcomes by descending prediction score.
    # The transformed outcome y (a rough transformed-outcome estimate) is +1 for
    # treated responders, -1 for control responders, and 0 otherwise, so after
    # sorting we can still tell the two groups apart.
y = (2*treatment - 1)*outcome
def sortbyprediction(vec):
list2 = list(zip(prediction,vec))
        # Sort by the prediction value (tup[0]); without an explicit key the sort
        # would compare the second element on ties, which is not what we want.
        list2.sort(key=lambda tup: tup[0], reverse=True)
# Extract `y`, sorted by prediction.
_, vec_ordered = zip(*list2)
return vec_ordered
y_ordered = sortbyprediction(y)
tr_ordered = sortbyprediction(treatment)
p_ordered = sortbyprediction(p)
def auc(method='qini'):
# Calculate the area.
uplift_last = 0
nt1o1 = 0
nt0o1 = 0
nt1 = EPS
nt0 = EPS
pred_riemann = 0
uplifts = []
for i in range(round(scoring_range[0]*len(treatment)), round(scoring_range[1]*len(treatment))):
if y_ordered[i] > 0:
nt1o1 += 0.5*(1/p_ordered[i])
elif y_ordered[i] < 0:
nt0o1 += 0.5*(1/(1-p_ordered[i]))
if tr_ordered[i] == 1:
nt1 += 0.5*(1/p_ordered[i])
else:
nt0 += 0.5*(1/(1-p_ordered[i]))
if method=='qini':
uplift_next = nt1o1/Nt1-nt0o1/Nt0
elif method=='cgains':
uplift_next = (nt1o1/nt1-nt0o1/nt0)*(nt1+nt0)/N
elif method=='aqini':
uplift_next = nt1o1/Nt1-nt0o1*nt1/(nt0*Nt1 + EPS)
uplifts.append(uplift_next)
# each point corresponds to an x delta of 1/N
pred_riemann += 1/2*(uplift_next+uplift_last)/N
uplift_last = uplift_next
AUC = pred_riemann - 0.5*overall_lift*(scoring_range[1]**2 - scoring_range[0]**2)
maxgain = np.amax(uplifts)
return AUC, maxgain
# Dictionary to store all scores.
scores = {}
# Raw max scores.
scores['Q_max'] = qini_max
scores['overall_lift'] = overall_lift
scores['Q_practical_max'] = practical_qini_max
if (plot_type=='qini') or (plot_type=='all'):
# Qini curve scores.
scores['Q_qini'], scores['max_qini'] = auc(method='qini')
scores['q1_qini'] = scores['Q_qini']/scores['Q_max']
scores['q2_qini'] = scores['Q_qini']/scores['Q_practical_max']
if (plot_type=='cgains') or (plot_type=='all'):
# Scores for cumulative gains curve.
scores['Q_cgains'], scores['max_cgains'] = auc(method='cgains')
scores['q1_cgains'] = scores['Q_cgains']/scores['Q_max']
scores['q2_cgains'] = scores['Q_cgains']/scores['Q_practical_max']
if (plot_type=='aqini') or (plot_type=='all'):
# Scores for adjusted qini curve.
scores['Q_aqini'], scores['max_aqini'] = auc(method='aqini')
scores['q1_aqini'] = scores['Q_aqini']/scores['Q_max']
scores['q2_aqini'] = scores['Q_aqini']/scores['Q_practical_max']
return scores
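
# Hedged usage sketch: assumes get_scores and the private helpers it relies on
# (_ensure_array, _get_counts, _maximal_qini_curve, EPS, ...) are importable
# from the module that defines them.
import numpy as np

rng = np.random.default_rng(0)
n = 1000
treatment = rng.integers(0, 2, n)                    # 50/50 treatment assignment
outcome = rng.binomial(1, 0.1 + 0.05 * treatment)    # treatment lifts response rate
prediction = rng.random(n)                           # stand-in uplift scores
p = np.full(n, 0.5)                                  # treatment policy (propensity)

scores = get_scores(treatment, outcome, prediction, p, plot_type='aqini')
print(scores['q1_aqini'], scores['q2_aqini'])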
| 5,336,504
|
def get_arima_nemo_pipeline():
""" Function return complex pipeline with the following structure
arima \
linear
nemo |
"""
node_arima = PrimaryNode('arima')
node_nemo = PrimaryNode('exog_ts')
node_final = SecondaryNode('linear', nodes_from=[node_arima, node_nemo])
pipeline = Pipeline(node_final)
return pipeline
| 5,336,505
|
def conditional_entropy(x,
y,
nan_strategy=REPLACE,
nan_replace_value=DEFAULT_REPLACE_VALUE):
"""
Calculates the conditional entropy of x given y: S(x|y)
Wikipedia: https://en.wikipedia.org/wiki/Conditional_entropy
**Returns:** float
Parameters
----------
x : list / NumPy ndarray / Pandas Series
A sequence of measurements
y : list / NumPy ndarray / Pandas Series
A sequence of measurements
nan_strategy : string, default = 'replace'
How to handle missing values: can be either 'drop' to remove samples
with missing values, or 'replace' to replace all missing values with
the nan_replace_value. Missing values are None and np.nan.
nan_replace_value : any, default = 0.0
The value used to replace missing values with. Only applicable when
nan_strategy is set to 'replace'.
"""
if nan_strategy == REPLACE:
x, y = replace_nan_with_value(x, y, nan_replace_value)
elif nan_strategy == DROP:
x, y = remove_incomplete_samples(x, y)
y_counter = Counter(y)
xy_counter = Counter(list(zip(x, y)))
total_occurrences = sum(y_counter.values())
entropy = 0.0
for xy in xy_counter.keys():
p_xy = xy_counter[xy] / total_occurrences
p_y = y_counter[xy[1]] / total_occurrences
entropy += p_xy * math.log(p_y / p_xy)
return entropy
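
# Self-contained sanity check of the same formula on toy data (no missing
# values, so the nan_strategy branches are not exercised): when x and y are
# independent and x is uniform over two values, S(x|y) = H(x) = ln(2).
import math
from collections import Counter

x = ['a', 'a', 'b', 'b']
y = [0, 1, 0, 1]
xy_counter = Counter(zip(x, y))
y_counter = Counter(y)
total = sum(y_counter.values())
s_xy = sum(
    (n / total) * math.log((y_counter[k[1]] / total) / (n / total))
    for k, n in xy_counter.items()
)
print(round(s_xy, 4))  # 0.6931, i.e. ln(2)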
| 5,336,506
|
def peaks_in_time(dat, troughs=False):
"""Find indices of peaks or troughs in data.
Parameters
----------
dat : ndarray (dtype='float')
vector with the data
troughs : bool
if True, will return indices of troughs instead of peaks
Returns
-------
    ndarray of int
indices of peaks (or troughs) in dat
Note
----
This function does not deal well with flat signal; when the signal is not
    increasing, it is assumed to be decreasing. As a result, this function
finds troughs where the signal begins to increase after either decreasing
or remaining constant
"""
diff_dat = diff(dat)
increasing = zeros(len(diff_dat))
increasing[diff_dat > 0] = 1 # mask for all points where dat is increasing
flipping = diff(increasing) # peaks are -1, troughs are 1, the rest is zero
target = -1 if not troughs else 1
return where(flipping == target)[0] + 1
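
# Hedged usage sketch; assumes the numpy names used above (diff, zeros, where)
# are imported, e.g. ``from numpy import diff, zeros, where``.
import numpy as np

dat = np.array([0., 1., 2., 1., 0., 1., 3., 1.])
print(peaks_in_time(dat))                 # [2 6]  -> local maxima
print(peaks_in_time(dat, troughs=True))   # [4]    -> local minimum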
| 5,336,507
|
def read_submod_def(line):
"""Attempt to read SUBMODULE definition line"""
submod_match = SUBMOD_REGEX.match(line)
if submod_match is None:
return None
else:
parent_name = None
name = None
trailing_line = line[submod_match.end(0):].split('!')[0]
trailing_line = trailing_line.strip()
parent_match = WORD_REGEX.match(trailing_line)
if parent_match is not None:
parent_name = parent_match.group(0).lower()
if len(trailing_line) > parent_match.end(0)+1:
trailing_line = trailing_line[parent_match.end(0)+1:].strip()
else:
trailing_line = ''
#
name_match = WORD_REGEX.match(trailing_line)
if name_match is not None:
name = name_match.group(0).lower()
return 'smod', SMOD_info(name, parent_name)
| 5,336,508
|
def predict(model, images, labels=None):
"""Predict.
Parameters
----------
model : tf.keras.Model
Model used to predict labels.
images : List(np.ndarray)
Images to classify.
labels : List(str)
Labels to return.
"""
    if isinstance(images, list):
images = tf.stack(images)
predictions = model(images)
predictions = tf.math.argmax(predictions, axis=1)
if labels is not None:
predictions = [labels[pred] for pred in predictions]
return predictions
| 5,336,509
|
def process_span_file(doc, filename):
"""Reads event annotation from filename, and add to doc
:type filename: str
:type doc: nlplingo.text.text_theory.Document
<Event type="CloseAccount">
CloseAccount 0 230
anchor 181 187
CloseAccount/Source 165 170
CloseAccount/Source 171 175
CloseAccount/Source 176 180
CloseAccount/Target 191 198
CloseAccount/Target 207 214
CloseAccount/Target 215 229
</Event>
"""
lines = []
""":type: list[str]"""
with codecs.open(filename, 'r', encoding='utf-8') as f:
for line in f:
lines.append(line.strip())
i = 0
while i < len(lines):
line = lines[i]
if line.startswith('<Event type='):
event_type = re.search(r' type="(.*?)"', line).group(1)
event_id = '{}.e-{}'.format(doc.docid, len(doc.events))
event = Event(event_id, event_type)
i += 1
line = lines[i]
while not line.startswith('</Event>'):
tokens = line.split()
info = tokens[0]
offset = IntPair(int(tokens[1]), int(tokens[2]))
if info == event_type or info == 'anchor' or '/' in info:
text = doc.get_text(offset.first, offset.second)
if text is None or text == '':
logger.warning('skipping annotation span {} {}-{} (doc length: {}, #sentences:{})'.format(doc.docid, offset.first, offset.second, doc.text_length(), len(doc.sentences)))
else:
# sometimes, the UI captures an extra trailing space. Check for that and adjust ending offset
if text[-1] == ' ':
text = text[0:-1]
offset.second = offset.second - 1
if info == event_type: # this is an event span
id = '{}.s-{}'.format(event_id, len(event.event_spans))
event.add_event_span(EventSpan(id, offset, text, event_type))
elif info == 'anchor': # anchor span
id = '{}.t-{}'.format(event_id, len(event.anchors))
#print('Spannotator, adding ANCHOR with text "{}"'.format(text))
newtext, newoffset = remove_trailing_periods(text, offset)
if text != newtext:
print('- revising anchor, text=[%s] offset=(%d,%d) newtext=[%s] newoffset=(%d,%d)' % (text, offset.first, offset.second, newtext, newoffset.first, newoffset.second))
event.add_anchor(Anchor(id, newoffset, newtext, event_type))
elif '/' in info: # argument span
em_id = 'm-{}-{}'.format(offset.first, offset.second)
newtext, newoffset = remove_trailing_periods(text, offset)
if text != newtext:
print('- revising argument, text=[%s] offset=(%d,%d) newtext=[%s] newoffset=(%d,%d)' % (text, offset.first, offset.second, newtext, newoffset.first, newoffset.second))
em = EntityMention(em_id, newoffset, newtext, 'dummy')
# we just use a dummy em first, for creating the EventArgument (notice that this em is not added to the doc)
# later, when we annotate sentence, we will find an actual EntityMention that is backed by tokens
# and use that to back the EventArgument
# Ref: text_theory.annotate_sentence_with_events()
arg_role = info[info.index('/') + 1:]
arg_id = '{}.t-{}'.format(event_id, len(event.arguments))
event.add_argument(EventArgument(arg_id, em, arg_role))
i += 1
line = lines[i]
doc.add_event(event)
i += 1
return doc
| 5,336,510
|
def versions(output, libraries):
"""
(DEPRECATED) List installed dependencies. This is a wrapper for
`pip freeze` and will be removed in wq.core 2.0.
"""
click.echo(
"Warning: wq versions is now an alias for pip freeze",
err=True,
)
print_versions(output, libraries)
| 5,336,511
|
def main(debug, env_file, no_env_files):
"""
    Assumes there is a .env file in the root of the package; this is for simple development.
    To use .env files as in production, use the --env-file argument to specify a path.
    To force the application to discard all .env files, use the --no-env-files flag.
"""
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(
level=log_level,
format="%(asctime)s,%(msecs)d %(levelname)s: %(message)s",
datefmt="%H:%M:%S",
)
try:
uvloop.install()
config = Config(env_file, no_env_files=no_env_files)
asyncio.run(async_main(config), debug=True)
except KeyboardInterrupt:
pass
| 5,336,512
|
def mask_frame_around_position(
frame: np.ndarray,
position: Tuple[float, float],
radius: float = 5,
) -> np.ndarray:
"""
Create a circular mask with the given ``radius`` at the given
    position and set the frame outside this mask to zero. This is
    sometimes required for the ``Gaussian2D``-based photometry methods
    to prevent the Gaussian from fitting parts of the data that are
    far from the target ``position``.
Args:
frame: A 2D numpy array of shape `(x_size, y_size)` containing
the data on which to run the aperture photometry.
position: A tuple `(x, y)` specifying the position at which to
estimate the flux. The position should be in astropy /
photutils coordinates.
radius: The radius of the mask; this should approximately match
the size of a planet signal.
Returns:
A masked version of the given ``frame`` on which we can perform
photometry based on fitting a 2D Gaussian to the data.
"""
# Define shortcuts
frame_size = (frame.shape[0], frame.shape[1])
masked_frame = np.array(np.copy(frame))
# Get circle mask; flip the position because numpy convention
circle_mask = get_circle_mask(
mask_size=frame_size, radius=radius, center=position[::-1]
)
# Apply the mask
masked_frame[~circle_mask] = 0
return masked_frame
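
# Minimal self-contained illustration of the same masking idea. The real
# get_circle_mask comes from the surrounding package; the inline mask below is
# only a stand-in for demonstration.
import numpy as np

frame = np.random.normal(size=(51, 51))
x0, y0, radius = 30.0, 20.0, 5.0
yy, xx = np.ogrid[:frame.shape[0], :frame.shape[1]]
circle_mask = (yy - y0) ** 2 + (xx - x0) ** 2 <= radius ** 2  # note: row = y, column = x
masked_frame = np.where(circle_mask, frame, 0.0)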
| 5,336,513
|
def label(Z, n):
"""Correctly label clusters in unsorted dendrogram."""
uf = LinkageUnionFind(n)
for i in range(n - 1):
x, y = int(Z[i, 0]), int(Z[i, 1])
x_root, y_root = uf.find(x), uf.find(y)
if x_root < y_root:
Z[i, 0], Z[i, 1] = x_root, y_root
else:
Z[i, 0], Z[i, 1] = y_root, x_root
Z[i, 3] = uf.merge(x_root, y_root)
| 5,336,514
|
def apogeeid_digit(arr):
"""
NAME:
apogeeid_digit
PURPOSE:
        Extract digits from an APOGEE ID, because it is too painful to deal with full APOGEE IDs in h5py
INPUT:
arr (ndarray): apogee_id
OUTPUT:
apogee_id with digits only (ndarray)
HISTORY:
2017-Oct-26 - Written - Henry Leung (University of Toronto)
"""
import numpy as np
if isinstance(arr, np.ndarray) or isinstance(arr, list):
arr_copy = np.array(arr) # make a copy
for i in range(arr_copy.shape[0]):
arr_copy[i] = str(''.join(filter(str.isdigit, arr_copy[i])))
return arr_copy
else:
return str(''.join(filter(str.isdigit, arr)))
| 5,336,515
|
def transform_user_weekly_artist_chart(chart):
"""Converts lastfm api weekly artist chart data into neo4j friendly
weekly artist chart data
Args:
chart (dict): lastfm api weekly artist chart
Returns:
list - neo4j friendly artist data
"""
chart = chart['weeklyartistchart']
artists = []
for artist in chart['artist']:
artists.append(transform_artist(artist))
return artists
| 5,336,516
|
def plotter(fdict):
""" Go """
pgconn = get_dbconn('isuag')
ctx = get_autoplot_context(fdict, get_description())
threshold = 50
threshold_c = temperature(threshold, 'F').value('C')
hours1 = ctx['hours1']
hours2 = ctx['hours2']
station = ctx['station']
oldstation = XREF[station]
df = read_sql("""
with obs as (
select valid, c300, lag(c300) OVER (ORDER by valid ASC) from hourly
where station = %s),
agg1 as (
select valid,
case when c300 > %s and lag < %s then 1
when c300 < %s and lag > %s then -1
else 0 end as t from obs),
agg2 as (
SELECT valid, t from agg1 where t != 0),
agg3 as (
select valid, lead(valid) OVER (ORDER by valid ASC),
t from agg2),
agg4 as (
select extract(year from valid) as yr, valid, lead,
rank() OVER (PARTITION by extract(year from valid) ORDER by valid ASC)
from agg3 where t = 1
and (lead - valid) >= '%s hours'::interval),
agg5 as (
select extract(year from valid) as yr, valid, lead
from agg3 where t = -1)
select f.yr, f.valid as fup, f.lead as flead, d.valid as dup,
d.lead as dlead from agg4 f JOIN agg5 d ON (f.yr = d.yr)
where f.rank = 1 and d.valid > f.valid
ORDER by fup ASC
""", pgconn, params=(oldstation,
threshold, threshold, threshold, threshold, hours1),
index_col=None)
if df.empty:
raise NoDataFound("No Data Found")
df2 = read_sql("""
with obs as (
select valid, tsoil_c_avg,
lag(tsoil_c_avg) OVER (ORDER by valid ASC) from sm_hourly
where station = %s),
agg1 as (
select valid,
case when tsoil_c_avg > %s and lag < %s then 1
when tsoil_c_avg < %s and lag > %s then -1
else 0 end as t from obs),
agg2 as (
SELECT valid, t from agg1 where t != 0),
agg3 as (
select valid, lead(valid) OVER (ORDER by valid ASC),
t from agg2),
agg4 as (
select extract(year from valid) as yr, valid, lead,
rank() OVER (PARTITION by extract(year from valid) ORDER by valid ASC)
from agg3 where t = 1
and (lead - valid) >= '%s hours'::interval),
agg5 as (
select extract(year from valid) as yr, valid, lead
from agg3 where t = -1)
select f.yr, f.valid as fup, f.lead as flead, d.valid as dup,
d.lead as dlead from agg4 f JOIN agg5 d ON (f.yr = d.yr)
where f.rank = 1 and d.valid > f.valid
ORDER by fup ASC
""", pgconn, params=(station,
threshold_c, threshold_c, threshold_c, threshold_c,
hours1),
index_col=None)
if df2.empty:
raise NoDataFound("No Data Found")
(fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
d2000 = utc(2000, 1, 1, 6)
for d in [df, df2]:
for _, row in d.iterrows():
if row['dlead'] is None:
continue
f0 = (row['fup'].replace(year=2000) - d2000).total_seconds()
f1 = (row['flead'].replace(year=2000) - d2000).total_seconds()
d0 = (row['dup'].replace(year=2000) - d2000).total_seconds()
d1 = (row['dlead'].replace(year=2000) - d2000).total_seconds()
if d1 < d0:
continue
ax.barh(row['fup'].year, (f1-f0), left=f0, facecolor='r',
align='center', edgecolor='r')
color = 'lightblue' if (d1 - d0) < (hours2 * 3600) else 'b'
ax.barh(row['fup'].year, (d1-d0), left=d0, facecolor=color,
align='center', edgecolor=color)
xticks = []
xticklabels = []
for i in range(1, 13):
d2 = d2000.replace(month=i)
xticks.append((d2 - d2000).total_seconds())
xticklabels.append(d2.strftime("%-d %b"))
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
ax.set_xlim(xticks[2], xticks[6])
ax.grid(True)
nt = NetworkTable("ISUSM")
nt2 = NetworkTable("ISUAG", only_online=False)
ab = nt.sts[station]['archive_begin']
if ab is None:
raise NoDataFound("Unknown station metadata.")
ax.set_title(("[%s] %s 4 Inch Soil Temps\n[%s] %s used for pre-%s dates"
) % (station, nt.sts[station]['name'], oldstation,
nt2.sts[oldstation]['name'],
ab.year))
ax.set_ylim(df['yr'].min() - 1, df2['yr'].max() + 1)
p0 = plt.Rectangle((0, 0), 1, 1, fc="r")
p1 = plt.Rectangle((0, 0), 1, 1, fc="lightblue")
p2 = plt.Rectangle((0, 0), 1, 1, fc="b")
ax.legend((p0, p1, p2), (
'First Period Above %s for %s+ Hours' % (threshold, hours1),
'Below %s for 1+ Hours' % (threshold, ),
'Below %s for %s+ Hours' % (threshold, hours2)),
ncol=2, fontsize=11, loc=(0., -0.2))
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width,
box.height * 0.9])
return fig, df
| 5,336,517
|
def config_namespace(config_file=None, auto_find=False,
verify=True, **cfg_options):
"""
Return configuration options as a Namespace.
.. code:: python
reusables.config_namespace(os.path.join("test", "data",
"test_config.ini"))
# <Namespace: {'General': {'example': 'A regul...>
:param config_file: path or paths to the files location
:param auto_find: look for a config type file at this location or below
:param verify: make sure the file exists before trying to read
:param cfg_options: options to pass to the parser
:return: Namespace of the config files
"""
return ConfigNamespace(**config_dict(config_file, auto_find,
verify, **cfg_options))
| 5,336,518
|
def extract_peaks(peaks, sequences, signals, controls=None, chroms=None,
in_window=2114, out_window=1000, max_jitter=128, min_counts=None,
max_counts=None, verbose=False):
"""Extract sequences and signals at coordinates from a peak file.
This function will take in genome-wide sequences, signals, and optionally
controls, and extract the values of each at the coordinates specified in
the peak file and return them as tensors.
Signals and controls are both lists with the length of the list, n_s
and n_c respectively, being the middle dimension of the returned
    tensors. Specifically, the returned tensors are of size
    (len(peaks), n_s/n_c, (out_window/in_window)+max_jitter*2).
The values for sequences, signals, and controls, can either be filepaths
or dictionaries of numpy arrays or a mix of the two. When a filepath is
passed in it is loaded using pyfaidx or pyBigWig respectively.
Parameters
----------
peaks: str or pandas.DataFrame
Either the path to a bed file or a pandas DataFrame object containing
three columns: the chromosome, the start, and the end, of each peak.
sequences: str or dictionary
Either the path to a fasta file to read from or a dictionary where the
        keys are the unique set of chromosomes and the values are one-hot
encoded sequences as numpy arrays or memory maps.
signals: list of strs or list of dictionaries
A list of filepaths to bigwig files, where each filepath will be read
using pyBigWig, or a list of dictionaries where the keys are the same
set of unique chromosomes and the values are numpy arrays or memory
maps.
controls: list of strs or list of dictionaries or None, optional
A list of filepaths to bigwig files, where each filepath will be read
using pyBigWig, or a list of dictionaries where the keys are the same
set of unique chromosomes and the values are numpy arrays or memory
maps. If None, no control tensor is returned. Default is None.
chroms: list or None, optional
        A set of chromosomes to extract peaks from. Peaks in other chromosomes
in the peak file are ignored. If None, all peaks are used. Default is
None.
in_window: int, optional
The input window size. Default is 2114.
out_window: int, optional
The output window size. Default is 1000.
max_jitter: int, optional
The maximum amount of jitter to add, in either direction, to the
midpoints that are passed in. Default is 128.
min_counts: float or None, optional
The minimum number of counts, summed across the length of each example
and across all tasks, needed to be kept. If None, no minimum. Default
is None.
max_counts: float or None, optional
The maximum number of counts, summed across the length of each example
and across all tasks, needed to be kept. If None, no maximum. Default
is None.
verbose: bool, optional
Whether to display a progress bar while loading. Default is False.
Returns
-------
seqs: torch.tensor, shape=(n, 4, in_window+2*max_jitter)
The extracted sequences in the same order as the peaks in the peak
file after optional filtering by chromosome.
signals: torch.tensor, shape=(n, len(signals), out_window+2*max_jitter)
The extracted signals where the first dimension is in the same order
as peaks in the peak file after optional filtering by chromosome and
the second dimension is in the same order as the list of signal files.
controls: torch.tensor, shape=(n, len(controls), out_window+2*max_jitter)
The extracted controls where the first dimension is in the same order
as peaks in the peak file after optional filtering by chromosome and
the second dimension is in the same order as the list of control files.
If no control files are given, this is not returned.
"""
seqs, signals_, controls_ = [], [], []
in_width, out_width = in_window // 2, out_window // 2
# Load the sequences
if isinstance(sequences, str):
sequences = pyfaidx.Fasta(sequences)
# Load the peaks or rename the columns to be consistent
names = ['chrom', 'start', 'end']
if isinstance(peaks, str):
peaks = pandas.read_csv(peaks, sep="\t", usecols=(0, 1, 2),
header=None, index_col=False, names=names)
else:
peaks = peaks.copy()
peaks.columns = names
if chroms is not None:
peaks = peaks[numpy.isin(peaks['chrom'], chroms)]
# Load the signal and optional control tracks if filenames are given
for i, signal in enumerate(signals):
if isinstance(signal, str):
signals[i] = pyBigWig.open(signal, "r")
if controls is not None:
for i, control in enumerate(controls):
if isinstance(control, str):
controls[i] = pyBigWig.open(control, "r")
desc = "Loading Peaks"
d = not verbose
for chrom, start, end in tqdm(peaks.values, disable=d, desc=desc):
mid = start + (end - start) // 2
start = mid - out_width - max_jitter
end = mid + out_width + max_jitter
# Extract the signal from each of the signal files
signals_.append([])
for signal in signals:
if isinstance(signal, dict):
signal_ = signal[chrom][start:end]
else:
signal_ = signal.values(chrom, start, end, numpy=True)
signal_ = numpy.nan_to_num(signal_)
signals_[-1].append(signal_)
# For the sequences and controls extract a window the size of the input
start = mid - in_width - max_jitter
end = mid + in_width + max_jitter
# Extract the controls from each of the control files
if controls is not None:
controls_.append([])
for control in controls:
if isinstance(control, dict):
control_ = control[chrom][start:end]
else:
control_ = control.values(chrom, start, end, numpy=True)
control_ = numpy.nan_to_num(control_)
controls_[-1].append(control_)
# Extract the sequence
if isinstance(sequences, dict):
seq = sequences[chrom][start:end].T
else:
seq = one_hot_encode(sequences[chrom][start:end].seq.upper(),
alphabet=['A', 'C', 'G', 'T', 'N']).T
seqs.append(seq)
seqs = torch.tensor(numpy.array(seqs), dtype=torch.float32)
signals_ = torch.tensor(numpy.array(signals_), dtype=torch.float32)
idxs = torch.ones(signals_.shape[0], dtype=torch.bool)
if max_counts is not None:
idxs = (idxs) & (signals_.sum(dim=(1, 2)) < max_counts)
if min_counts is not None:
idxs = (idxs) & (signals_.sum(dim=(1, 2)) > min_counts)
if controls is not None:
controls_ = torch.tensor(numpy.array(controls_), dtype=torch.float32)
return seqs[idxs], signals_[idxs], controls_[idxs]
return seqs[idxs], signals_[idxs]
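
# Hedged usage sketch: the file names are placeholders, and the dependencies
# used above (pandas, numpy, torch, tqdm, pyfaidx, pyBigWig, one_hot_encode)
# are assumed to be imported in the defining module.
seqs, signals, controls = extract_peaks(
    peaks="peaks.bed",                     # chrom / start / end, tab-separated
    sequences="genome.fa",
    signals=["plus_strand.bw", "minus_strand.bw"],
    controls=["control_plus.bw", "control_minus.bw"],
    chroms=["chr1", "chr2"],
    in_window=2114,
    out_window=1000,
    max_jitter=128,
    verbose=True,
)
# seqs:     (n, 4, 2114 + 2*128)
# signals:  (n, 2, 1000 + 2*128)
# controls: (n, 2, 1000 + 2*128)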
| 5,336,519
|
def findfiles(which, where='.'):
"""Returns list of filenames from `where` path matched by 'which'
shell pattern. Matching is case-insensitive.
# findfiles('*.ogg')
"""
# TODO: recursive param with walk() filtering
rule = re.compile(fnmatch.translate(which), re.IGNORECASE)
fn_names = [name for name in os.listdir(where) if rule.match(name)]
return [os.path.join(where, f) for f in fn_names]
| 5,336,520
|
def map_feature(value, f_type):
""" Builds the Tensorflow feature for the given feature information """
if f_type == np.dtype('object'):
return bytes_feature(value)
elif f_type == np.dtype('int'):
return int64_feature(value)
elif f_type == np.dtype('float'):
return float64_feature(value)
elif f_type == np.dtype('bool'):
return int64_feature(value.astype('int'))
else:
raise ValueError('Do not know how to store value {} with type {}'
.format(value, f_type))
| 5,336,521
|
def is_text_area(input):
"""
    Template tag to check if the input widget is a textarea
    :param input: Input field
    :return: True if it is a textarea, False otherwise
"""
return input.field.widget.__class__.__name__ == "Textarea"
| 5,336,522
|
def print_album_list(album_list):
"""Print album list and return the album name choice.
If return is all then all photos on page will be download."""
for i in range(len(album_list)):
print("{}. {} ({} photo(s))".format(
i + 1, album_list[i]['name'], album_list[i]['count']))
choice = raw_input("Please enter your choice (0 for all): ")
return int(choice) - 1
| 5,336,523
|
def dprepb_imaging(vis_input):
"""The DPrepB/C imaging pipeline for visibility data.
Args:
vis_input (array): array of ARL visibility data and parameters.
Returns:
restored: clean image.
"""
# Load the Input Data
# ------------------------------------------------------
vis1 = vis_input[0]
vis2 = vis_input[1]
channel = vis_input[2]
stations = vis_input[3]
lofar_stat_pos = vis_input[4]
APPLY_IONO = vis_input[5]
APPLY_BEAM = vis_input[6]
MAKE_PLOTS = vis_input[7]
UV_CUTOFF = vis_input[8]
PIXELS_PER_BEAM = vis_input[9]
POLDEF = vis_input[10]
RESULTS_DIR = vis_input[11]
FORCE_RESOLUTION = vis_input[12]
ionRM1 = vis_input[13]
times1 = vis_input[14]
time_indices1 = vis_input[15]
ionRM2 = vis_input[16]
times2 = vis_input[17]
time_indices2 = vis_input[18]
twod_imaging = vis_input[19]
npixel_advice = vis_input[20]
cell_advice = vis_input[21]
# Make a results directory on the worker:
os.makedirs(RESULTS_DIR, exist_ok=True)
# Redirect stdout, as Dask cannot print on workers
# ------------------------------------------------------
sys.stdout = open('%s/dask-log.txt' % (RESULTS_DIR), 'w')
# Prepare Measurement Set
# ------------------------------------------------------
# Combine MSSS snapshots:
vis = append_visibility(vis1, vis2)
# Apply a uv-distance cut to the data:
vis = uv_cut(vis, UV_CUTOFF)
# Make some basic plots:
if MAKE_PLOTS:
uv_cov(vis)
uv_dist(vis)
# Imaging and Deconvolution
# ------------------------------------------------------
# Convert from XX/XY/YX/YY to I/Q/U/V:
vis = convert_to_stokes(vis, POLDEF)
# Image I, Q, U, V, per channel:
if twod_imaging:
dirty, psf = image_2d(vis, npixel_advice, cell_advice, channel, RESULTS_DIR)
else:
dirty, psf = wstack(vis, npixel_advice, cell_advice, channel, RESULTS_DIR)
# Deconvolve (using complex Hogbom clean):
comp, residual = deconvolve_cube_complex(dirty, psf, niter=100, threshold=0.001, \
fracthresh=0.001, window_shape='', gain=0.1, \
algorithm='hogbom-complex')
# Convert resolution (FWHM in arcmin) to a psfwidth (standard deviation in pixels):
clean_res = (((FORCE_RESOLUTION/2.35482004503)/60.0)*np.pi/180.0)/cell_advice
# Create the restored image:
restored = restore_cube(comp, psf, residual, psfwidth=clean_res)
# Save to disk:
export_image_to_fits(restored, '%s/imaging_clean_WStack-%s.fits'
% (RESULTS_DIR, channel))
return restored
| 5,336,524
|
def test_change_tone(test_file, tone):
"""
Test the tone changing function.
"""
# change audio file tone
change_tone(infile=test_file, tone=tone)
# check result
fname = "%s_augmented_%s_toned.wav" % (test_file.split(".wav")[0], str(tone))
time.sleep(5)
assert_file_exists(fname)
| 5,336,525
|
def request_mult(n=100, size=1024):
"""Method sends multiple TCP requests
Args:
n (int): count of requests
        size (int): payload size in bytes
Returns:
void
"""
for i in range(n):
c = socket()
try:
c.connect(('127.0.0.1', 22))
c.sendall(('0' * size).encode('utf-8'))
except error:
pass
finally:
c.close()
| 5,336,526
|
def procure_data(args):
"""Load branches from specified file as needed to calculate
all fit and cut expressions. Then apply cuts and binning, and
return only the processed fit data."""
# look up list of all branches in the specified root file
# determine four-digit number of DRS board used
# apply shorthand (a1 -> area_xxxx_1, etc.)
root_file = args.run if os.sep in args.run else ROOT_FILE.format(args.run)
branches_all = fileio.get_keys(root_file)
# find all channels present by matching noise_*
channels = {_.rpartition("_")[2] for _ in branches_all if _.startswith("noise_")}
# any matching channels: fill replacements using templates in SHORTHAND
if channels:
test_branch = next(_ for _ in branches_all if _.startswith("noise_"))
board = test_branch.partition('_')[2].partition('_')[0]
replacements = {}
for pre,post in SHORTHAND.items():
if "{ch}" in pre:
replacements.update(
{pre.format(ch=_):post.format(board=board,ch=_) for _ in channels}
)
else:
replacements.update(
{pre:post.format(board=board,ch=next(_ for _ in channels))}
)
# no matching branches found: don't apply shorthand
else:
replacements = {}
# set of branches needed to evaluate all fits, cuts, defs, and xfs
branches_needed = set()
# compile expressions and update branches_needed
fn_fits = []
for fit in args.fits:
# skip fits where the first entry is None. This happens when
# the positional argument is not specified, so handling this
# case lets us supply all fits via --fit if desired.
if fit[0] is None:
continue
fn = expr.check_and_compile(replace_names(fit[0], replacements))
fn_fits.append(fn)
branches_needed |= fn.kwargnames
# copy at this point to capture branches needed for fit expressions
branches_needed_fit = branches_needed.copy()
fn_cuts = []
for cut in args.cuts:
fn = expr.check_and_compile(replace_names(cut[0], replacements))
fn_cuts.append(fn)
branches_needed |= fn.kwargnames
# copy branches_needed at this point to capture which are needed
# explicitly for fits and cuts
branches_fit_and_cut = branches_needed.copy()
fn_defs = []
for def_ in args.defs:
fn = expr.check_and_compile(replace_names(def_[1], replacements))
fn_defs.append(fn)
branches_needed |= fn.kwargnames
fn_xfs = []
for xf in args.xfs:
raise Exception("xfs not implemented yet")
fn = expr.check_and_compile(replace_names(xf[1], replacements))
fn_xfs.append(fn)
branches_needed |= fn.kwargnames
# load branches from specified root file, allowing for missing
# branches. missing branches must be generated by one of the
# defs or xfs included.
branches = fileio.load_branches(root_file, branches_needed - set(BRANCHES_CONSTRUCT))
# initialize the branch manager instance with the resulting branches
bm = data.BranchManager(branches, export_copies=False, import_copies=False)
# construct branches if needed
if "entry" in branches_needed:
bm.bud(data.bud_entry)
# apply scaler rectification
if args.rectify_scalers:
if any(_.startswith("scaler_") for _ in bm.keys):
bm.bud(data.rectify_scaler(), overwrite=True)
# apply timestamp fix and localization
if any(_.startswith("timestamp_") for _ in bm.keys):
bm.bud(data.fix_monotonic_timestamp(), overwrite=True)
if args.localize_timestamps:
bm.bud(data.localize_timestamp(), overwrite=True)
# process defs and xfs to create new branches
# todo: current implementation is slightly inefficient. defs and xfs
# are evaluated before applying any cuts, resulting in excess
# computation in the case where cuts do not depend on defs or xfs.
# an implementation which applies each cut as soon as it is able to,
# and prioritizes defs and xfs which enable cuts, would be faster.
fn_defs_remain = [True for _ in fn_defs]
fn_xfs_remain = [True for _ in fn_xfs]
n_remaining = len(fn_defs) + len(fn_xfs)
while n_remaining:
for i,remain in enumerate(fn_defs_remain):
if remain and fn_defs[i].kwargnames.issubset(bm.keys):
this_name = args.defs[i][0]
this_fn = fn_defs[i]
bm.bud(
lambda man:{this_name:this_fn(**{_:man[_] for _ in this_fn.kwargnames})}
)
fn_defs_remain[i] = False
# # xfs not implemented yet
# for i,remain in enumerate(fn_xfs_remain):
# if remain and fn_xfs[i].kwargnames.issubset(bm.keys):
# bm.bud()
# fn_xfs_remain[i] = False
# if we have all branches needed for fits and cuts, there's
# no need to keep evaluating defs and xfs
if branches_fit_and_cut.issubset(bm.keys):
break
# check to see if progress has been made
# if not, then it never will, and we have to exit
n_remaining_now = sum(fn_defs_remain) + sum(fn_xfs_remain)
if n_remaining_now == n_remaining:
print("could not evaluate all definititions and transformations")
print("missing one or more variables for completion")
sys.exit(1)
n_remaining = n_remaining_now
# wrapper functions to capture loop variable values
# if we don't use these, the overwritten value of fn and other
# variables used in the loop will change, and the change will affect
# the function calls to calculate masks
def mask_bool(fn):
mask = lambda man:fn(**{_:man[_] for _ in fn.kwargnames})
return mask
def mask_range(fn,lo,hi):
mask = lambda man:data.inrange(fn(**{_:man[_] for _ in fn.kwargnames}),lo,hi)
return mask
# process cuts
masks = []
for icut,fn in enumerate(fn_cuts):
this_cut = args.cuts[icut]
# no bounds specified: boolean expression
if (this_cut[1] is None) and (this_cut[2] is None):
masks.append(mask_bool(fn))
# at least one bound specified: lo<expression<hi
else:
masks.append(mask_range(fn,this_cut[1],this_cut[2]))
# apply cuts
if masks:
data_fit_raw = bm.mask(
data.mask_all(*masks),
branches_needed_fit,
apply_mask = False,
)
else:
data_fit_raw = {_:bm[_] for _ in branches_needed_fit}
# data_fit_raw are all the branches that show up in the expression
    # for at least one fit. To get the fit data, we still have to
# evaluate the expressions.
fit_data = []
for fn in fn_fits:
fit_data.append(fn(**{_:data_fit_raw[_] for _ in fn.kwargnames}))
# get counts and edges by binning data_fit_raw
fit_counts = []
fit_edges = []
for i,fit in enumerate(args.fits):
this_data = fit_data[i]
# determine bin edges
lo = this_data.min() if fit[1] in [None,-np.inf] else fit[1]
hi = this_data.max() if fit[2] in [None, np.inf] else fit[2]
if fit[3]:
nbins = fit[3]
else:
this_ndata = data.inrange(this_data,lo,hi,True,True).sum()
nbins = data.bin_count_from_ndata(this_ndata)
if fit[4].startswith("li"):
this_edges = data.edges_lin(lo,hi,nbins)
elif fit[4].startswith("lo"):
if lo<=0:
lo = this_data[this_data>0].min()
this_edges = data.edges_log(lo,hi,nbins)
elif fit[4].startswith("s"):
this_edges = data.edges_symlog(lo,hi,nbins)
# calculate histogram counts and append
this_counts, _ = np.histogram(this_data, this_edges)
fit_counts.append(this_counts)
fit_edges.append(this_edges)
return fit_counts, fit_edges
| 5,336,527
|
def after_file_name(file_to_open):
"""
    Given a file name, return it as:
    [file_to_open root]_prep.[file_to_open ending]
Parameters
----------
file_to_open : string
Name of the input file.
Returns
--------
after_file : string
Full path to the (new) file.
Examples
---------
>>> from preparenovonix.novonix_io import after_file_name
>>> after_file_name('example_data/example_data.csv')
"""
# Extract the path and file name
dirname, fname = os.path.split(os.path.abspath(file_to_open))
root = fname.split(".")[0]
ending = fname.split(".")[1]
fname = root + "_prep." + ending
after_file = os.path.join(dirname, fname)
return after_file
| 5,336,528
|
def display_image(img):
""" Show an image with matplotlib:
Args:
        img: Image as numpy array (H,W,3)
"""
plt.figure()
plt.imshow(img)
plt.axis('off')
plt.show()
| 5,336,529
|
def read_bool(data):
"""
Read 1 byte of data as `bool` type.
Parameters
----------
data : io.BufferedReader
File open to read in binary mode
Returns
-------
bool
True or False
"""
s_type = "=%s" % get_type("bool")
return struct.unpack(s_type, data.read(1))[0]
| 5,336,530
|
def sectorize(position):
""" Returns a tuple representing the sector for the given `position`.
Parameters
----------
position : tuple of len 3
Returns
-------
sector : tuple of len 3
"""
x, y, z = normalize(position)
x, y, z = x // GameSettings.SECTOR_SIZE, y // GameSettings.SECTOR_SIZE, z // GameSettings.SECTOR_SIZE
return (x, 0, z)
| 5,336,531
|
def add_random_phase_shift(hkl, phases, fshifts=None):
"""
Introduce a random phase shift, at most one unit cell length along each axis.
Parameters
----------
hkl : numpy.ndarray, shape (n_refls, 3)
Miller indices
phases : numpy.ndarray, shape (n_refls,)
phase values in degrees, ordered as hkl
fshifts : numpy.ndarray, shape (3,), optional
fractional shifts along (a,b,c) to apply; if None, apply random shifts
Returns
-------
shifted_phases : numpy.ndarray, shape (n_refls,)
phase values in degrees, ordered as hkl
fshifts : numpy.ndarray, shape (3,)
fractional shifts applied along (a,b,c)
"""
if fshifts is None:
fshifts = np.array([random.random() for i in range(3)])
shifted_phases = wrap_phases(phases - 360 * np.dot(hkl, fshifts).ravel())
return shifted_phases, fshifts
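
# Small numeric check of the shift formula (wrap_phases is assumed to wrap
# values back into a 360-degree range):
import numpy as np

hkl = np.array([[1, 0, 0], [0, 2, 0]])
phases = np.array([10.0, 45.0])
fshifts = np.array([0.25, 0.1, 0.0])
# Unwrapped shifted phases: 10 - 360*0.25 = -80 and 45 - 360*0.2 = -27
print(phases - 360 * np.dot(hkl, fshifts).ravel())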
| 5,336,532
|
def hierarchical_dataset(root, opt, select_data="/", data_type="label", mode="train"):
"""select_data='/' contains all sub-directory of root directory"""
dataset_list = []
dataset_log = f"dataset_root: {root}\t dataset: {select_data[0]}"
print(dataset_log)
dataset_log += "\n"
for dirpath, dirnames, filenames in os.walk(root + "/"):
if not dirnames:
select_flag = False
for selected_d in select_data:
if selected_d in dirpath:
select_flag = True
break
if select_flag:
if data_type == "label":
dataset = LmdbDataset(dirpath, opt, mode=mode)
else:
dataset = LmdbDataset_unlabel(dirpath, opt)
sub_dataset_log = f"sub-directory:\t/{os.path.relpath(dirpath, root)}\t num samples: {len(dataset)}"
print(sub_dataset_log)
dataset_log += f"{sub_dataset_log}\n"
dataset_list.append(dataset)
concatenated_dataset = ConcatDataset(dataset_list)
return concatenated_dataset, dataset_log
| 5,336,533
|
def main() -> None:
"""Run main entrypoint."""
# Parse command line arguments
get_args()
# Ensure environment tokens are present
try:
SLACK_TOKEN = os.environ["PAGEY_SLACK_TOKEN"]
except KeyError:
print("Error, env variable 'PAGEY_SLACK_TOKEN' not set", file=sys.stderr)
sys.exit(1)
try:
PD_TOKEN = os.environ["PAGEY_PD_TOKEN"]
except KeyError:
print("Error, env variable 'PAGEY_PD_TOKEN' not set", file=sys.stderr)
sys.exit(1)
# Initialize Pagerduty module
pagerduty = PageyPD(PD_TOKEN)
def commandCallback(command: str) -> str:
"""This is a callback function for Slack to evaluate response based on given command.
Args:
command (str): the command/message after the bot mention (e.g.: @pagey <command>).
Returns:
str: The reply to be sent to Slack.
"""
# [Command: oncall] Get Pagerduty schedules
if command.startswith("oncall"):
schedules = pagerduty.get_schedules()
response = ""
for team, users in schedules.items():
response += f"*{team}*\n"
# Sort by escalation level
users.sort(key=lambda s: s["level"])
for user in users:
if int(user["level"]) == 1:
response += (
f"* [lvl: *{user['level']}* -> {user['until']}] *{user['name']}*\n"
)
else:
response += (
f"* [lvl: *{user['level']}* -> {user['until']}] {user['name']}\n"
)
response += "\n"
return response
# [Command: info] Report some info
if command.startswith("info"):
return f"{DEF_NAME} ({DEF_VERSION}) - {DEF_DESC}\nFind me here: {DEF_GITHUB}\n"
return "Available commands: " + ", ".join(COMMANDS)
# Connect to Slack (RTM mode)
slack = PageySlack(SLACK_TOKEN, commandCallback)
if not slack.connect():
print("Connection to Slack failed. Exception traceback printed above.", file=sys.stderr)
sys.exit(1)
print("Pagey connected to Slack and running!")
slack.run()
| 5,336,534
|
def save_vistrail_bundle_to_zip_xml(save_bundle, filename, vt_save_dir=None, version=None):
"""save_vistrail_bundle_to_zip_xml(save_bundle: SaveBundle, filename: str,
vt_save_dir: str, version: str)
-> (save_bundle: SaveBundle, vt_save_dir: str)
save_bundle: a SaveBundle object containing vistrail data to save
filename: filename to save to
vt_save_dir: directory storing any previous files
Generates a zip compressed version of vistrail.
It raises an Exception if there was an error.
"""
if save_bundle.vistrail is None:
raise VistrailsDBException('save_vistrail_bundle_to_zip_xml failed, '
'bundle does not contain a vistrail')
if not vt_save_dir:
vt_save_dir = tempfile.mkdtemp(prefix='vt_save')
# abstractions are saved in the root of the zip file
# abstraction_dir = os.path.join(vt_save_dir, 'abstractions')
#thumbnails and mashups have their own folder
thumbnail_dir = os.path.join(vt_save_dir, 'thumbs')
mashup_dir = os.path.join(vt_save_dir, 'mashups')
# Save Vistrail
xml_fname = os.path.join(vt_save_dir, 'vistrail')
save_vistrail_to_xml(save_bundle.vistrail, xml_fname, version)
# Save Log
if save_bundle.vistrail.db_log_filename is not None:
xml_fname = os.path.join(vt_save_dir, 'log')
if save_bundle.vistrail.db_log_filename != xml_fname:
shutil.copyfile(save_bundle.vistrail.db_log_filename, xml_fname)
save_bundle.vistrail.db_log_filename = xml_fname
if save_bundle.log is not None:
xml_fname = os.path.join(vt_save_dir, 'log')
save_log_to_xml(save_bundle.log, xml_fname, version, True)
save_bundle.vistrail.db_log_filename = xml_fname
# Save Abstractions
saved_abstractions = []
for obj in save_bundle.abstractions:
if isinstance(obj, basestring):
# FIXME we should have an abstraction directory here instead
# of the abstraction_ prefix...
if not os.path.basename(obj).startswith('abstraction_'):
obj_fname = 'abstraction_' + os.path.basename(obj)
else:
obj_fname = os.path.basename(obj)
# xml_fname = os.path.join(abstraction_dir, obj_fname)
xml_fname = os.path.join(vt_save_dir, obj_fname)
saved_abstractions.append(xml_fname)
# if not os.path.exists(abstraction_dir):
# os.mkdir(abstraction_dir)
# print "obj:", obj
# print "xml_fname:", xml_fname
if obj != xml_fname:
# print 'copying %s -> %s' % (obj, xml_fname)
try:
shutil.copyfile(obj, xml_fname)
except Exception, e:
saved_abstractions.pop()
debug.critical('copying %s -> %s failed: %s' % \
(obj, xml_fname, str(e)))
else:
raise VistrailsDBException('save_vistrail_bundle_to_zip_xml failed, '
'abstraction list entry must be a filename')
# Save Thumbnails
saved_thumbnails = []
for obj in save_bundle.thumbnails:
if isinstance(obj, basestring):
obj_fname = os.path.basename(obj)
png_fname = os.path.join(thumbnail_dir, obj_fname)
saved_thumbnails.append(png_fname)
if not os.path.exists(thumbnail_dir):
os.mkdir(thumbnail_dir)
try:
shutil.copyfile(obj, png_fname)
except shutil.Error, e:
#files are the same no need to show warning
saved_thumbnails.pop()
except IOError, e2:
saved_thumbnails.pop()
debug.warning('copying thumbnail %s -> %s failed: %s' % \
(obj, png_fname, str(e2)))
else:
raise VistrailsDBException('save_vistrail_bundle_to_zip_xml failed, '
'thumbnail list entry must be a filename')
# Save Mashups
saved_mashups = []
#print " mashups:"
if len(save_bundle.mashups) > 0 and not os.path.exists(mashup_dir):
os.mkdir(mashup_dir)
for obj in save_bundle.mashups:
#print " ", obj
try:
xml_fname = os.path.join(mashup_dir, str(obj.id))
save_mashuptrail_to_xml(obj, xml_fname)
saved_mashups.append(obj)
except Exception, e:
raise VistrailsDBException('save_vistrail_bundle_to_zip_xml failed, '
'when saving mashup: %s'%str(e))
# call package hooks
# it will fail if package manager has not been constructed yet
try:
from vistrails.core.packagemanager import get_package_manager
pm = get_package_manager()
for package in pm.enabled_package_list():
package.saveVistrailFileHook(save_bundle.vistrail, vt_save_dir)
except Exception, e:
debug.warning("Could not call package hooks", str(e))
tmp_zip_dir = tempfile.mkdtemp(prefix='vt_zip')
tmp_zip_file = os.path.join(tmp_zip_dir, "vt.zip")
z = zipfile.ZipFile(tmp_zip_file, 'w')
try:
with Chdir(vt_save_dir):
# zip current directory
for root, dirs, files in os.walk('.'):
for f in files:
z.write(os.path.join(root, f))
z.close()
shutil.copyfile(tmp_zip_file, filename)
finally:
os.unlink(tmp_zip_file)
os.rmdir(tmp_zip_dir)
save_bundle = SaveBundle(save_bundle.bundle_type, save_bundle.vistrail,
save_bundle.log, thumbnails=saved_thumbnails,
abstractions=saved_abstractions,
mashups=saved_mashups)
return (save_bundle, vt_save_dir)
| 5,336,535
|
def parse_cluster_file(filename):
"""
    Parse the output of the CD-HIT clustering and return a dictionary of clusters.
In order to parse the list of cluster and sequences, we have to parse the CD-HIT
output file. Following solution is adapted from a small wrapper script
([source code on Github](https://github.com/Y-Lammers/CD-HIT-Filter/blob/master/CD-HIT-Filter.py),
author: Youri Lammers).
"""
# parse through the .clstr file and create a dictionary
# with the sequences per cluster
# open the cluster file and set the output dictionary
cluster_file, cluster_dic = open(filename), {}
# parse through the cluster file and store the cluster name + sequences in the dictionary
    # This is a generator expression which groups lines together based on whether the
# line starts with a ">".
cluster_groups = (x[1] for x in itertools.groupby(cluster_file, key=lambda line: line[0] == '>'))
# Now we get alternate groups of cluster name and sequence list.
for cluster in cluster_groups:
# Note: next(cluster) retrieves the first line of the cluster i (>cluster name)
name = next(cluster).strip()
name = re.sub(' ', '_', name[1:])
# Note: next(cluster_groups) retrieves the next cluster i+1 containing the sequences
# the cluster is itself an iterator (every line)
seqs = [seq.split('>')[1].split('...') for seq in next(cluster_groups)]
# Write a boolean value True if sequence is the reference sequence from the cluster
seqs = [[seq[0], (True if seq[1] == ' *\n' else False)] for seq in seqs]
cluster_dic[name] = seqs
# return the cluster dictionary
return cluster_dic
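
# Sketch of the expected CD-HIT ``.clstr`` layout and the resulting dictionary
# (sequence names and sizes below are made up for illustration):
#
#   >Cluster 0
#   0   151nt, >seq_A... *
#   1   149nt, >seq_B... at +/98.00%
#   >Cluster 1
#   0   140nt, >seq_C... *
#
# parse_cluster_file("example.clstr") would then return something like:
#   {'Cluster_0': [['seq_A', True], ['seq_B', False]],
#    'Cluster_1': [['seq_C', True]]}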
| 5,336,536
|
def app(par=None):
"""
Return the Miniweb object instance.
:param par: Dictionary with configuration parameters. (optional parameter)
:return: Miniweb object instance.
"""
return Miniweb.get_instance(par)
| 5,336,537
|
def openTopics():
"""Opens topics file
:return: list of topics
"""
topicsFile = 'topics'
with open(topicsFile) as f:
topics = f.read().split()
return topics
| 5,336,538
|
def fix_behaviour_widget_render_forced_renderer(utils):
"""
Restore the behaviour where the "renderer" parameter of Widget.render() may not be supported by subclasses.
"""
from django.forms.boundfield import BoundField
original_as_widget = BoundField.as_widget
def as_widget(self, widget=None, attrs=None, only_initial=False):
widget = widget or self.field.widget
from django.utils.inspect import func_supports_parameter, func_accepts_kwargs
if not (
func_supports_parameter(widget.render, "renderer")
or func_accepts_kwargs(widget.render)
):
original_widget_render = widget.render
utils.emit_warning(
"Add the `renderer` argument to the render() method of %s. "
"It will be mandatory in Django 2.1." % widget.__class__,
RemovedInDjango21Warning,
stacklevel=2,
)
def instance_render(name, value, attrs=None, renderer=None):
del renderer # restore non-mandatory support for this parameter
return original_widget_render(name=name, value=value, attrs=attrs)
utils.inject_callable(
widget, "render", instance_render
) # beware, function stored in INSTANCE
return original_as_widget(
self, widget=widget, attrs=attrs, only_initial=only_initial
)
utils.inject_callable(BoundField, "as_widget", as_widget)
| 5,336,539
|
def split_blocks(blocks:List[Block], ncells_per_block:int,direction:Direction=None):
"""Split blocks is used to divide an array of blocks based on number of cells per block. This code maintains the greatest common denominator of the parent block. Number of cells per block is simply an estimate of how many you want. The actual number will change to meet the greatest common denominator (GCD). GCD of 4 means multigrid of 3 e.g. grid/4 (coarse), 2 (fine), and 1 (finest). If a direction is not specified then for each block the longest index either i,j, or k is used.
Wisdom from Dave Rigby:
For example, for radial equilibrium we must integrate across the span. Some codes (GlennHT used to) would want a single block across the entire span. In that case you would want some additional control.
Another example might be if you would like a block to include the entire boundary layer. In that case you might introduce an aspect ratio control.
Args:
blocks (List[Block]): List of blocks
ncells_per_block (int): number of cells desired per block
        direction (Direction): direction to split the blocks in, one of Direction.i, Direction.j, or Direction.k. Defaults to None, in which case the direction is chosen per block based on whichever of IMAX, JMAX, or KMAX is largest.
Returns:
Blocks (List[Block]): list of blocks split in the specified direction
"""
direction_to_use = direction # store the user input variable
new_blocks = list()
for block_indx in range(len(blocks)):
block = blocks[block_indx]
total_cells = block.IMAX*block.JMAX*block.KMAX
if direction==None:
            indx = np.argmax(np.array([block.IMAX,block.JMAX,block.KMAX])) # pick the longest index direction, as described in the docstring
if indx == 0:
direction_to_use=Direction.i
elif indx == 1:
direction_to_use=Direction.j
elif indx == 2:
direction_to_use=Direction.k
if total_cells>ncells_per_block:
            # Use the greatest common divisor to maintain multi-grid: if the entire block is
            # divisible by 4, we want to maintain that for all of the splits!
            greatest_common_divisor = gcd(block.IMAX-1, gcd(block.JMAX-1, block.KMAX-1)) # maximum number of partitions we can make for this block
if direction_to_use == Direction.i:
# In order to get close to the number of cells per block, we need to control how many steps of the greatest_common_divisor to advance so for example if you have a multigrid mesh that has gcd of 16 (fine) => 8 (coarse) => 4 (coarser) => 2 (coarsest) and you want 400K cells per block then JMAX*KMAX*gcd*some_factor has to be close to 400K cells
denominator = block.JMAX*block.KMAX
step_size = __step_search(total_cells,greatest_common_divisor,ncells_per_block,denominator,direction='backward')
if step_size==-1:
step_size = __step_search(total_cells,greatest_common_divisor,ncells_per_block,denominator,direction='forward')
if step_size==-1:
                        raise ValueError('no valid step size found, do you have multi-block? gcd > 1')
# step_size-1 is the IMAX of the sub_blocks e.g. 0 to 92 this shows IMAX=93, (93-1) % 4 = 0 (good)
iprev = 0
for i in range(step_size,block.IMAX,step_size):
if (i+1) > block.IMAX:
break
X = block.X[iprev:i+1,:,:] # New X, Y, Z splits
Y = block.Y[iprev:i+1,:,:] # This indexes to iprev:i so if iprev=2 and i = 10 it will go from 2 to 9
Z = block.Z[iprev:i+1,:,:]
iprev=i # Blocks have to share the same face, Pick the previous face
new_blocks.append(Block(X,Y,Z))
# Check for remainder
if i+1 < block.IMAX:
# Add remainder to last block
X = block.X[i:,:,:] # New X, Y, Z splits
Y = block.Y[i:,:,:]
Z = block.Z[i:,:,:]
new_blocks.append(Block(X,Y,Z))
elif direction_to_use == Direction.j:
denominator = block.IMAX*block.KMAX
step_size = __step_search(total_cells,greatest_common_divisor,ncells_per_block,denominator,direction='backward')
if step_size==-1:
step_size = __step_search(total_cells,greatest_common_divisor,ncells_per_block,denominator,direction='forward')
if step_size==-1:
                        raise ValueError('no valid step size found, do you have multi-block? gcd > 1')
jprev = 0
for j in range(step_size,block.JMAX,step_size):
                    if (j+1) > block.JMAX:
break
                    X = block.X[:,jprev:j+1,:] # New X, Y, Z splits
                    Y = block.Y[:,jprev:j+1,:]
                    Z = block.Z[:,jprev:j+1,:]
jprev=j
new_blocks.append(Block(X,Y,Z))
# Check for remainder
if j+1 < block.JMAX:
# Add remainder to last block
X = block.X[:,j:,:] # New X, Y, Z splits
Y = block.Y[:,j:,:]
Z = block.Z[:,j:,:]
new_blocks.append(Block(X,Y,Z))
else:
denominator = block.IMAX*block.JMAX
step_size = __step_search(total_cells,greatest_common_divisor,ncells_per_block,denominator,direction='backward')
if step_size==-1:
step_size = __step_search(total_cells,greatest_common_divisor,ncells_per_block,denominator,direction='forward')
if step_size==-1:
                        raise ValueError('no valid step size found, do you have multi-block? gcd > 1')
kprev = 0
for k in range(step_size,block.KMAX,step_size):
if (k+1) > block.KMAX:
break
X = block.X[:,:,kprev:k+1] # New X, Y, Z splits
Y = block.Y[:,:,kprev:k+1]
Z = block.Z[:,:,kprev:k+1]
kprev=k
new_blocks.append(Block(X,Y,Z))
# Check for remainder
if k+1 < block.KMAX:
# Add remainder to last block
X = block.X[:,:,k:] # New X, Y, Z splits
Y = block.Y[:,:,k:]
Z = block.Z[:,:,k:]
new_blocks.append(Block(X,Y,Z)) # replace it
return new_blocks
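# Usage sketch (added, hypothetical grid sizes): build one structured block from
# meshgrid coordinates and ask split_blocks for roughly 50k-cell pieces along i.
# `Block` and `Direction` are the classes split_blocks above already uses; the
# sizes are chosen so that gcd(IMAX-1, JMAX-1, KMAX-1) > 1, which the step search
# relies on.
def _example_split_blocks():
    xs = np.linspace(0.0, 1.0, 129)
    ys = np.linspace(0.0, 1.0, 65)
    zs = np.linspace(0.0, 1.0, 33)
    X, Y, Z = np.meshgrid(xs, ys, zs, indexing='ij')
    parent = Block(X, Y, Z)  # 129x65x33 vertices, i.e. 128*64*32 = 262144 cells
    pieces = split_blocks([parent], ncells_per_block=50000, direction=Direction.i)
    return [(b.IMAX, b.JMAX, b.KMAX) for b in pieces]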
| 5,336,540
|
def get_entry_details(db_path, entry_id):
"""Get all information about an entry in database.
Args:
db_path: path to database file
entry_id: string
Return:
out: dictionary
"""
s = connect_database(db_path)
# find entry
try:
sim = s.query(Main).filter(Main.entry_id == entry_id).one()
except NoResultFound:
print("No entry found with entry_id {} in {}.".format(entry_id, db_path))
return {}
# details from main table
out = sim.__dict__
# groups
out["groups"] = [g.name for g in sim.groups]
# tags
out["tags"] = [t.name for t in sim.keywords if t.value == None]
# keywords
out["keywords"] = {k.name: k.value for k in sim.keywords if k.value != None}
# meta data
meta = {}
for meta_group in sim.meta.all():
meta[meta_group.name] = {m.name: m.value for m in meta_group.entries}
out["meta"] = meta
s.close()
# clean up output
    out.pop("_sa_instance_state", None)
return out
| 5,336,541
|
def pose_interp(poses, timestamps_in, timestamps_out, r_interp='slerp'):
"""
:param poses: N x 7, (t,q)
:param timestamps: (N,)
:param t: (K,)
:return: (K,)
"""
# assert t_interp in ['linear', 'spline']
assert r_interp in ['slerp', 'squad']
assert len(poses)>1
assert len(poses) == len(timestamps_in)
input_ts = poses[:,:3]
input_rs= poses[:,3:] #quaternions
timestamps_in = np.array(timestamps_in)
#sort the inputs
inds = np.argsort(timestamps_in)
poses = poses[inds]
timestamps_in = timestamps_in[inds]
if r_interp == 'squad':
input_rs_ = quaternion.from_float_array(input_rs)
output_rs = quaternion.squad( input_rs, timestamps_in, timestamps_out)
output_rs = quaternion.as_float_array(output_rs)
elif r_interp == 'slerp':
output_rs = []
for t in timestamps_out:
input_rs_ = quaternion.from_float_array(input_rs)
idx = bisect.bisect_left(timestamps_in)
output_r = quaternion.slerp(input_rs_[idx],input_rs_[idx+1], timestamps_in[idx], timestamps_in[idx+1],t )
output_r = quaternion.as_float_array(output_r)
output_rs.append(output_r)
output_ts = []
for t in timestamps_out:
idx = bisect_left.bisect_left(timestamps_in)
if idx>=len(timestamps_in)-1:
idx -= 1
t1 = timestamps_in[idx]
t2 = timestamps_in[idx+1]
output_t = ((t-t1)*input_ts[idx+1] + (t2-t) *input_ts[idx]) / (t2-t1)
output_ts.append(output_t)
output_ts =np.concatenate(output_ts, axis=0 )
output_rs =np.concatenate(output_rs, axis=0 )
new_pose = np.concatenate([output_ts, output_rs], axis=1)
return new_pose
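# Usage sketch (added): interpolate between two keyframe poses, slerp for the
# rotation and linear interpolation for the translation. Assumes the
# numpy-quaternion package that pose_interp relies on, with quaternions stored
# in (w, x, y, z) order.
def _example_pose_interp():
    poses = np.array([
        [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],  # identity rotation at the origin
        [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],  # 180 deg about z, shifted along x
    ])
    return pose_interp(poses, [0.0, 1.0], [0.25, 0.5, 0.75], r_interp='slerp')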
| 5,336,542
|
def checklist_saved_action(report_id):
"""
View saved report
"""
report = Report.query.filter_by(id=report_id).first()
return render_template(
'checklist_saved.html',
uid=str(report.id),
save_date=datetime.now(),
report=report,
title='Отчет | %s' % TITLE
)
| 5,336,543
|
def get_sensor_data():
"""Collect sensor data."""
log.debug("get_sensor_data() called")
get_seneye_data()
get_atlas_data()
| 5,336,544
|
def train(train_loader, model, criterion, optimizer, epoch):
"""
One epoch's training.
:param train_loader: DataLoader for training data
:param model: model
:param criterion: content loss function (Mean Squared-Error loss)
:param optimizer: optimizer
:param epoch: epoch number
"""
model.train() # training mode enables batch normalization
batch_time = AverageMeter() # forward prop. + back prop. time
data_time = AverageMeter() # data loading time
losses = AverageMeter() # loss
start = time.time()
# Batches
for i, (lr_imgs, hr_imgs) in enumerate(train_loader):
data_time.update(time.time() - start)
# Move to default device
lr_imgs = lr_imgs.to(device) # (batch_size (N), 3, 24, 24), imagenet-normed
hr_imgs = hr_imgs.to(device) # (batch_size (N), 3, 96, 96), in [-1, 1]
# Forward prop.
sr_imgs = model(lr_imgs) # (N, 3, 96, 96), in [-1, 1]
# Loss
loss = criterion(sr_imgs, hr_imgs) # scalar
# Backward prop.
optimizer.zero_grad()
loss.backward()
# Clip gradients, if necessary
if grad_clip is not None:
clip_gradient(optimizer, grad_clip)
# Update model
optimizer.step()
# Keep track of loss
losses.update(loss.item(), lr_imgs.size(0))
# Keep track of batch time
batch_time.update(time.time() - start)
# Reset start time
start = time.time()
# Print status
if i % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]----'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})----'
'Data Time {data_time.val:.3f} ({data_time.avg:.3f})----'
'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(epoch, i, len(train_loader),
batch_time=batch_time,
data_time=data_time, loss=losses))
del lr_imgs, hr_imgs, sr_imgs
| 5,336,545
|
def annotate_plot(fig, domain, outcome, metric):
"""Adds x/y labels and suptitles."""
fig.supxlabel('Epoch')
fig.supylabel(METRIC_FULL_NAME[metric], x=0)
fig.suptitle(f'Continual Learning model comparison \n'
f'Outcome: {outcome} | Domain Increment: {domain}', y=1.1)
| 5,336,546
|
def test_default_transfer_syntaxes():
"""Test that the default transfer syntaxes are correct."""
assert len(DEFAULT_TRANSFER_SYNTAXES) == 4
assert "1.2.840.10008.1.2" in DEFAULT_TRANSFER_SYNTAXES
assert "1.2.840.10008.1.2.1" in DEFAULT_TRANSFER_SYNTAXES
assert "1.2.840.10008.1.2.1.99" in DEFAULT_TRANSFER_SYNTAXES
assert "1.2.840.10008.1.2.2" in DEFAULT_TRANSFER_SYNTAXES
| 5,336,547
|
def plot_histogram(
args: SharedArgs,
vector: torch.Tensor,
step: int,
prefix: str = "train",
cols: int = 3,
rows: int = 6,
bins: int = 30,
):
"""Plot a histogram over the batch"""
vector = torch.flatten(vector, start_dim=1).detach().cpu()
vector_np = vector.numpy()
matplotlib.use("Agg")
fig, plots = plt.subplots(figsize=(8, 12), ncols=cols, nrows=rows)
# fig.suptitle("Xi histogram")
for j in range(rows):
for i in range(cols):
_ = plots[j][i].hist(vector_np[:, j * cols + i], bins=np.linspace(-15, 15, bins))
fig.tight_layout()
log_dict = {
f"{prefix}_histogram": fig,
f"{prefix}_xi_min": vector_np.min(),
f"{prefix}_xi_max": vector_np.max(),
f"{prefix}_xi_nans": float(bool(np.isnan(vector_np).any())),
f"{prefix}_xi_tensor": vector,
}
wandb_log(args, log_dict, step=step)
| 5,336,548
|
def trilinear_memory_efficient(a, b, d, use_activation=False):
"""W1a + W2b + aW3b."""
n = tf.shape(a)[0]
len_a = tf.shape(a)[1]
len_b = tf.shape(b)[1]
w1 = tf.get_variable('w1', shape=[d, 1], dtype=tf.float32)
w2 = tf.get_variable('w2', shape=[d, 1], dtype=tf.float32)
w3 = tf.get_variable('w3', shape=[1, 1, d], dtype=tf.float32)
a_reshape = tf.reshape(a, [-1, d]) # [bs*len_a, d]
b_reshape = tf.reshape(b, [-1, d]) # [bs*len_b, d]
part_1 = tf.reshape(tf.matmul(a_reshape, w1), [n, len_a]) # [bs, len_a]
part_1 = tf.tile(tf.expand_dims(part_1, 2),
[1, 1, len_b]) # [bs, len_a, len_b]
part_2 = tf.reshape(tf.matmul(b_reshape, w2), [n, len_b]) # [bs, len_b]
part_2 = tf.tile(tf.expand_dims(part_2, 1),
[1, len_a, 1]) # [bs, len_a, len_b]
a_w3 = a * w3 # [bs, len_a, d]
part_3 = tf.matmul(a_w3, tf.transpose(b, perm=[0, 2, 1])) # [bs,len_a,len_b]
## return the unnormalized logits matrix : [bs,len_a,len_b]
if use_activation:
return tf.nn.relu(part_1 + part_2 + part_3)
return part_1 + part_2 + part_3
| 5,336,549
|
def private_questions_get_unique_code(assignment_id: str):
"""
Get all questions for the given assignment.
    :param assignment_id: id of the assignment to look up
    :return: success response with the assignment name, its questions, and the
        number of assigned questions
"""
# Try to find assignment
assignment: Assignment = Assignment.query.filter(
Assignment.id == assignment_id
).first()
# Verify that the assignment exists
req_assert(assignment is not None, message='assignment does not exist')
# Assert that the assignment is within the course context
assert_course_context(assignment)
assigned_question_count = AssignedStudentQuestion.query.filter(
AssignedStudentQuestion.assignment_id == assignment.id
).count()
return success_response({
'assignment_name': assignment.name,
'questions': get_all_questions(assignment),
'questions_assigned': assigned_question_count > 0,
'assigned_question_count': assigned_question_count,
})
| 5,336,550
|
def make_ngram(tokenised_corpus, n_gram=2, threshold=10):
"""Extract bigrams from tokenised corpus
Args:
tokenised_corpus (list): List of tokenised corpus
n_gram (int): maximum length of n-grams. Defaults to 2 (bigrams)
threshold (int): min number of n-gram occurrences before inclusion
Returns:
ngrammed_corpus (list)
"""
tokenised = tokenised_corpus.copy()
t = 1
# Loops while the ngram length less / equal than our target
while t < n_gram:
phrases = models.Phrases(tokenised, threshold=threshold)
bigram = models.phrases.Phraser(phrases)
tokenised = bigram[tokenised]
t += 1
return list(tokenised)
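# Usage sketch (added): frequent collocations such as "machine learning" are
# merged into single "machine_learning" tokens once they clear the gensim
# Phrases threshold. The toy corpus, repetition factor, and threshold below are
# illustrative only.
def _example_make_ngram():
    corpus = [["machine", "learning", "is", "fun"],
              ["machine", "learning", "models"],
              ["deep", "machine", "learning"]] * 10
    return make_ngram(corpus, n_gram=2, threshold=1)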
| 5,336,551
|
def bw_estimate(samples):
"""Computes Abraham's bandwidth heuristic."""
sigma = np.std(samples)
cand = ((4 * sigma**5.0) / (3.0 * len(samples)))**(1.0 / 5.0)
if cand < 1e-7:
return 1.0
return cand
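# Worked example (added): for 1000 standard-normal samples (sigma close to 1)
# the heuristic gives roughly (4 / (3 * 1000)) ** 0.2, i.e. about 0.27.
def _example_bw_estimate():
    rng = np.random.default_rng(0)
    return bw_estimate(rng.standard_normal(1000))  # ~0.27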
| 5,336,552
|
def process_coins():
"""calculate the amount of money paid based on the coins entered"""
number_of_quarters = int(input("How many quarters? "))
number_of_dimes = int(input("How many dimes? "))
number_of_nickels = int(input("How many nickels? "))
number_of_pennies = int(input("How many pennies? "))
quarters = number_of_quarters * 0.25
dimes = number_of_dimes * 0.10
nickels = number_of_nickels * 0.05
pennies = number_of_pennies * 0.01
total_inserted = quarters + dimes + nickels + pennies
return total_inserted
| 5,336,553
|
def predict_Seq2Seq(training_args, name, model, dataset, tokenizer, data_collator, compute_metrics):
"""
"""
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics
)
predict_results = trainer.predict(
dataset,
max_length=128,
num_beams=4,
)
predictions = np.where(predict_results.predictions != -100, predict_results.predictions, tokenizer.pad_token_id)
metrics = predict_results.metrics
print(metrics)
predictions = tokenizer.batch_decode(
sequences=predictions,
skip_special_tokens=True,
clean_up_tokenization_spaces=True
)
final = postprocess(predictions, dataset)
predictions = [pred.strip() for pred in final]
output_prediction_file = "./predict/" + name + "generated_predictions"
with open(output_prediction_file, "w") as writer:
writer.write("\n".join(predictions))
return
| 5,336,554
|
def test_expand_single_quoted_symbol():
"""TEST 1.12: Quoting is a shorthand syntax for calling the `quote` form.
Examples:
'foo -> (quote foo)
'(foo bar) -> (quote (foo bar))
"""
assert_equals(["foo", ["quote", "nil"]], parse("(foo 'nil)"))
| 5,336,555
|
def getLastSegyTraceHeader(SH,THN='cdp',data='none', bheadSize = 3600, endian='>'): # added by A Squelch
"""
getLastSegyTraceHeader(SH,TraceHeaderName)
"""
bps=getBytePerSample(SH)
if (data=='none'):
data = open(SH["filename"]).read()
# SET PARAMETERS THAT DEFINE THE LOCATION OF THE LAST HEADER
# AND THE TRACE NUMBER KEY FIELD
THpos=STH_def[THN]["pos"]
THformat=STH_def[THN]["type"]
ntraces=SH["ntraces"]
pos=THpos+bheadSize+(SH["ns"]*bps+240)*(ntraces-1);
txt="getLastSegyTraceHeader : Reading last trace header " + THN + " " + str(pos)
printverbose(txt,20);
thv,index = getValue(data,pos,THformat,endian,1)
txt="getLastSegyTraceHeader : " + THN + "=" + str(thv)
printverbose(txt,30);
return thv
| 5,336,556
|
def get_data_url(data_type):
"""Gets the latest url from the kff's github data repo for the given data type
    data_type: string value representing which url to get from the github api; must be 'pct_total', 'pct_share', or 'pct_population'
"""
data_types_to_strings = {
'pct_total': 'Percent of Total Population that has Received a COVID-19 Vaccine by RaceEthnicity',
'pct_share': 'COVID19 Vaccinations by RE',
'pct_population': 'Distribution of Vaccinations, Cases, Deaths',
}
df = gcs_to_bq_util.load_json_as_df_from_web_based_on_key(BASE_GITHUB_API_URL, "tree")
df = df.loc[df['path'].str.contains(data_types_to_strings[data_type])]
urls = df.loc[df['path'] == df['path'].max()].url
if len(urls) != 1:
raise ValueError("Found %d urls, should have only found 1" % len(urls))
return urls.values[0]
| 5,336,557
|
def kl(p, q):
"""Kullback-Leibler divergence D(P || Q) for discrete distributions
Parameters
----------
p, q : array-like, dtype=float, shape=n
Discrete probability distributions.
"""
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
return np.sum(np.where(p != 0, p * np.log(p / q), 0))
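# Worked example (added): KL divergence between a fair coin and a 90/10 coin,
# 0.5*ln(0.5/0.9) + 0.5*ln(0.5/0.1) ≈ 0.5108 nats.
def _example_kl():
    return kl([0.5, 0.5], [0.9, 0.1])  # ≈ 0.5108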
| 5,336,558
|
def get_loaders(opt):
""" Make dataloaders for train and validation sets
"""
# train loader
opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
# opt.std = get_std()
if opt.no_mean_norm and not opt.std_norm:
norm_method = transforms.Normalize([0, 0, 0], [1, 1, 1])
elif not opt.std_norm:
norm_method = transforms.Normalize(opt.mean, [1, 1, 1])
else:
norm_method = transforms.Normalize(opt.mean, opt.std)
spatial_transform = transforms.Compose([
# crop_method,
transforms.Scale((opt.sample_size, opt.sample_size)),
#grayscale
# transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
norm_method
])
temporal_transform = None #TemporalRandomCrop(16)
target_transform = ClassLabel()
training_data = get_training_set(opt, spatial_transform,
temporal_transform, target_transform)
train_loader = torch.utils.data.DataLoader(
training_data,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=True)
# validation loader
validation_data = get_validation_set(opt, spatial_transform,
temporal_transform, target_transform)
val_loader = torch.utils.data.DataLoader(
validation_data,
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.num_workers,
pin_memory=True)
return train_loader, val_loader
| 5,336,559
|
def trapezoidal(f, a, b, n):
"""Trapezoidal integration via iteration."""
h = (b-a)/float(n)
I = f(a) + f(b)
for k in xrange(1, n, 1):
x = a + k*h
I += 2*f(x)
I *= h/2
return I
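# Worked example (added): the composite trapezoid rule on f(x) = x**2 over [0, 1]
# approaches the exact value 1/3 with an error of order h**2. Note the function
# above uses xrange, so it targets Python 2 as written.
def _example_trapezoidal():
    return trapezoidal(lambda x: x ** 2, 0.0, 1.0, 1000)  # ~0.3333335 vs exact 1/3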
| 5,336,560
|
def writetree(tree, sent, key, fmt, comment=None, morphology=None,
sentid=False):
"""Convert a tree to a string representation in the given treebank format.
:param tree: should have indices as terminals
:param sent: contains the words corresponding to the indices in ``tree``
:param key: an identifier for this tree; part of the output with some
formats or when ``sentid`` is True.
:param fmt: Formats are ``bracket``, ``discbracket``, Negra's ``export``
format, and ``alpino`` XML format, as well unlabeled dependency
conversion into ``mst`` or ``conll`` format (requires head rules).
The formats ``tokens`` and ``wordpos`` are to strip away tree structure
and leave only lines with space-separated tokens or ``token/POS``.
When using ``bracket``, make sure tree is canonicalized.
:param comment: optionally, a string that will go in the format's comment
field (supported by ``export`` and ``alpino``), or at the end of the
line preceded by a tab (``discbracket``); ignored by other formats.
Should be a single line.
:param sentid: for line-based formats, prefix output by ``key|``.
Lemmas, functions, and morphology information will be empty unless nodes
contain a 'source' attribute with such information."""
if fmt == 'bracket':
result = writebrackettree(tree, sent)
# if comment:
# result = '# %s\n%s\n' % (comment, result.rstrip('\n'))
elif fmt == 'discbracket':
result = writediscbrackettree(tree, sent)
if comment:
result = '%s\t%s\n' % (result.rstrip('\n'), comment)
elif fmt == 'tokens':
result = '%s\n' % ' '.join(sent)
elif fmt == 'wordpos':
result = '%s\n' % ' '.join('%s/%s' % (word, pos) for word, (_, pos)
in zip(sent, sorted(tree.pos())))
elif fmt == 'export':
result = writeexporttree(tree, sent, key, comment, morphology)
elif fmt == 'alpino':
result = writealpinotree(tree, sent, key, comment)
elif fmt in ('conll', 'mst'):
result = writedependencies(tree, sent, fmt)
else:
raise ValueError('unrecognized format: %r' % fmt)
if sentid and fmt in ('tokens', 'wordpos', 'bracket', 'discbracket'):
return '%s|%s' % (key, result)
return result
| 5,336,561
|
def xor_string(hash1, hash2, hash_size):
"""Encrypt/Decrypt function used for password encryption in
authentication, using a simple XOR.
Args:
hash1 (str): The first hash.
hash2 (str): The second hash.
Returns:
str: A string with the xor applied.
"""
xored = [h1 ^ h2 for (h1, h2) in zip(hash1, hash2)]
return struct.pack("{0}B".format(hash_size), *xored)
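# Usage sketch (added, Python 3 bytes): XOR two equal-length byte strings;
# applying the same XOR a second time recovers the original, which is what makes
# the password scramble reversible. Assumes the `struct` import used above.
def _example_xor_string():
    h1 = b"\x10\x20\x30\x40"
    h2 = b"\xaa\xbb\xcc\xdd"
    scrambled = xor_string(h1, h2, 4)    # b'\xba\x9b\xfc\x9d'
    return xor_string(scrambled, h2, 4)  # == h1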
| 5,336,562
|
def test_gas_generic(dev, apdev):
"""Generic GAS query"""
bssid = apdev[0]['bssid']
params = hs20_ap_params()
params['hessid'] = bssid
hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].scan()
req = dev[0].request("GAS_REQUEST " + bssid + " 00 000102000101")
if "FAIL" in req:
raise Exception("GAS query request rejected")
ev = dev[0].wait_event(["GAS-RESPONSE-INFO"], timeout=10)
if ev is None:
raise Exception("GAS query timed out")
get_gas_response(dev[0], bssid, ev)
| 5,336,563
|
def test_prod_create(
testing_opentrons_emulation_configuration: OpentronsEmulationConfiguration,
prod_create_virtual_machine_cmd: List[str],
) -> None:
"""Confirm that prod virtual-machine is created correctly."""
cmds = (
TopLevelParser(testing_opentrons_emulation_configuration)
.parse(prod_create_virtual_machine_cmd)
.get_commands()
)
assert cmds == EXPECTED_PROD_CREATE
| 5,336,564
|
def create_secret_key(string):
"""
:param string: A string that will be returned as a md5 hash/hexdigest.
:return: the hexdigest (hash) of the string.
"""
h = md5()
h.update(string.encode('utf-8'))
return h.hexdigest()
| 5,336,565
|
def _maybe_echo_status_and_message(request, servicer_context):
"""Sets the response context code and details if the request asks for them"""
if request.HasField("response_status"):
servicer_context.set_code(request.response_status.code)
servicer_context.set_details(request.response_status.message)
| 5,336,566
|
def decode_password(base64_string: str) -> str:
"""
Decode a base64 encoded string.
Args:
base64_string: str
The base64 encoded string.
Returns:
str
The decoded string.
"""
base64_bytes = base64_string.encode("ascii")
sample_string_bytes = base64.b64decode(base64_bytes)
return sample_string_bytes.decode("ascii")
| 5,336,567
|
def test_consistency(hr_name, sample):
"""
Check that the result of loading a *_hr.dat file and converting it
back to that format creates a result that is consistent with the
original file.
"""
hr_file = sample(hr_name)
model = tbmodels.Model.from_wannier_files(hr_file=hr_file, occ=28, sparse=True)
lines_new = model.to_hr().split("\n")
with open(hr_file) as f:
lines_old = [line.rstrip(" \r\n") for line in f.readlines()]
assert len(lines_new) == len(lines_old)
for l_new, l_old in zip(lines_new[1:], lines_old[1:]):
assert l_new.replace("-0.00000000000000", " 0.00000000000000") == l_old.replace(
"-0.00000000000000", " 0.00000000000000"
)
| 5,336,568
|
def specific_temperature(battery):
"""
@brief generate list of temperature values based on battery object
@param battery : battery object with module voltages
"""
global temperature_values
temperature_values = [(int(module.temperatures_cel[0] * 1000),
int(module.temperatures_cel[1] * 1000))
for module in battery.modules]
| 5,336,569
|
def _generate_overpass_api(endpoint=None):
""" Create and initialise the Overpass API object.
Passing the endpoint argument will override the default
endpoint URL.
"""
# Create API object with default settings
api = overpass.API()
# Change endpoint if desired
if endpoint is not None:
api.endpoint = endpoint
return api
| 5,336,570
|
def train(train_loader, model,
proxies, criterion, optimizer, epoch, scheduler):
"""Training loop for one epoch"""
batch_time = AverageMeter()
data_time = AverageMeter()
val_loss = AverageMeter()
val_acc = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (x, y) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if len(x.shape) == 5:
batch_size, nviews = x.shape[0], x.shape[1]
x = x.view(batch_size * nviews, 3, 224, 224)
if len(y) == args.batch_size:
if args.cuda:
x = x.cuda()
y = y.cuda()
x = Variable(x)
# embed
x_emb = model(x)
loss, acc = criterion(x_emb, y, proxies)
val_loss.update(to_numpy(loss), x.size(0))
val_acc.update(acc, x.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
txt = ('Epoch [%d] (Time %.2f Data %.2f):\t'
'Loss %.4f\t Acc %.4f' %
(epoch, batch_time.avg * i, data_time.avg * i,
val_loss.avg, val_acc.avg * 100.))
print(txt)
write_logs(txt)
| 5,336,571
|
def to_pascal_case(value):
"""
Converts the value string to PascalCase.
:param value: The value that needs to be converted.
:type value: str
:return: The value in PascalCase.
:rtype: str
"""
return "".join(character for character in value.title() if not character.isspace())
| 5,336,572
|
def drop(n: int, it: Iterable[Any]) -> List[Any]:
"""
Return a list of N elements drop from the iterable object
Args:
n: Number to drop from the top
it: Iterable object
Examples:
>>> fpsm.drop(3, [1, 2, 3, 4, 5])
[4, 5]
"""
return list(it)[n:]
| 5,336,573
|
def kill_services():
"""On 10.12, both the locationd and cfprefsd services like to not respect
preference changes so we force them to reload."""
proc = subprocess.Popen(['/usr/bin/killall', '-9', 'cfprefsd'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc = subprocess.Popen(['/usr/bin/killall', '-9', 'locationd'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
| 5,336,574
|
def _ci_configure_impl(repository_ctx):
"""This repository rule tells other rules whether we're running in CI.
Other rules can use this knowledge to make decisions about enforcing certain
things on buildkite while relaxing restrictions during local development.
"""
running_in_ci = repository_ctx.os.environ.get("FRC971_RUNNING_IN_CI", "0") == "1"
repository_ctx.file("ci.bzl", """\
RUNNING_IN_CI = {}
""".format(running_in_ci))
repository_ctx.file("BUILD", "")
| 5,336,575
|
def test_bool_int_value_info(tests_path, json_filename):
"""
Check consistency of boolean and integer info in JSON parameter files.
"""
path = os.path.join(tests_path, '..', json_filename)
with open(path, 'r') as pfile:
pdict = json.load(pfile)
maxint = np.iinfo(np.int16).max
for param in sorted(pdict.keys()):
# find param type based on value
val = pdict[param]['value']
while isinstance(val, list):
val = val[0]
valstr = str(val)
val_is_boolean = valstr in ('True', 'False')
val_is_integer = (not bool('.' in valstr or abs(val) > maxint) and
not val_is_boolean)
# check that val_is_integer is consistent with integer type
integer_type = pdict[param]['value_type'] == 'integer'
if val_is_integer != integer_type:
msg = 'param,value_type,valstr= {} {} {}'
msg = msg.format(str(param),
pdict[param]['value_type'],
valstr)
assert msg == 'ERROR: integer_value param has non-integer value'
# check that val_is_boolean is consistent with boolean_value
boolean_type = pdict[param]['value_type'] == 'boolean'
if val_is_boolean != boolean_type:
msg = 'param,value_type,valstr= {} {} {}'
msg = msg.format(str(param),
pdict[param]['value_type'],
valstr)
assert msg == 'ERROR: boolean_value param has non-boolean value'
| 5,336,576
|
def generate_classification_style_dataset(classification='multiclass'):
"""
Dummy data to test models
"""
x_data = np.array([
[1,1,1,0,0,0],
[1,0,1,0,0,0],
[1,1,1,0,0,0],
[0,0,1,1,1,0],
[0,0,1,1,0,0],
[0,0,1,1,1,0]])
if classification=='multiclass':
y_data = np.array([
[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0]])
elif classification=='binary':
y_data = np.array([
[1],
[1],
[1],
[-1],
[-1],
[-1]])
else:
raise Exception("Only binary or multiclass classification supported")
print("Returning classification style dataset")
return x_data, y_data
| 5,336,577
|
def c2_get_platform_current_status_display(reference_designator):
"""
Get C2 platform Current Status tab contents, return current_status_display.
Was: #status = _c2_get_instrument_driver_status(instrument['reference_designator'])
"""
start = dt.datetime.now()
timing = False
contents = []
platform_info = {}
platform_deployment = _get_platform(reference_designator)
if platform_deployment:
platform_code = "-".join([platform_deployment['mooring_code'], platform_deployment['platform_code'] ])
# Get instruments for this platform
instruments, oinstruments = _get_instruments(platform_code)
for instrument in instruments:
istart = dt.datetime.now()
row = {}
if not instrument['display_name']:
row['display_name'] = instrument['reference_designator']
else:
row['display_name'] = instrument['display_name']
row['reference_designator'] = instrument['reference_designator']
# Get instrument operational status based on instrument driver and agent status
status = _get_instrument_operational_status(instrument['reference_designator'])
row['operational_status'] = status
platform_info[instrument['reference_designator']] = row
if timing:
iend = dt.datetime.now()
iexecution_time = str(iend-istart)
message = '\t debug --- Execution time: %s ' % iexecution_time
print '\n', message
# Create list of dictionaries representing row(s) for 'data' (ordered by reference_designator)
# 'data' == rows for initial grid ('Current Status')
for instrument_reference_designator in oinstruments:
if instrument_reference_designator in platform_info:
contents.append(platform_info[instrument_reference_designator])
if timing:
end = dt.datetime.now()
execution_time = str(end-start)
message = '\t debug --- Total Execution time: %s ' % execution_time
print '\n', message
return jsonify(current_status_display=contents)
| 5,336,578
|
def getSupportedDatatypes():
"""
Gets the datatypes that are supported by the framework
Returns:
a list of strings of supported datatypes
"""
return router.getSupportedDatatypes()
| 5,336,579
|
def run_stacking(named_data, subjects_data, cv=10, alphas=None,
train_sizes=None, n_jobs=None):
"""Run stacking.
Parameters
----------
named_data : list(tuple(str, pandas.DataFrame))
List of tuples (name, data) with name and corresponding features
to be used for predictions by linear models.
subjects_data : pandas.DataFrame
Information about subjects from CamCAN dataset.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
alphas : numpy.ndarray
Values for parameter alpha to be tested. Default is
np.logspace(start=-3, stop=1, num=50, base=10.0).
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually have to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
n_jobs : int or None, optional (default=None)
The number of CPUs to use to do the computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
"""
if alphas is None:
alphas = np.logspace(-3, 5, 100)
if train_sizes is None:
train_sizes = np.linspace(.1, 1.0, 5)
rnd_state = 42
names = []
combined_data = []
# extract data and estimator names
for name, data in named_data:
names.append(name)
combined_data.append(data)
data = pd.concat(combined_data, axis=1, join='inner')
feature_col_lens = tuple(d.shape[1] for d in combined_data)
estimators = []
subjects = data.index.values
# prepare first-level estimators for stacking
for i_data, _ in enumerate(named_data):
feature_transformers = []
ft_begin = 0
ft_end = 0
# prepare input information for ColumnTransformer
for i_ct, (name, col_len) in enumerate(zip(names, feature_col_lens)):
trans_name = ('pass_' if i_data == i_ct else 'drop_') + name
transformer = 'passthrough' if i_data == i_ct else 'drop'
ft_end = ft_end + col_len
trans_slice = slice(ft_begin, ft_end)
ft_begin = ft_begin + col_len
feature_transformers.append((trans_name, transformer, trans_slice))
est_name = 'reg_' + named_data[i_data][0]
est_pipeline = make_pipeline(
ColumnTransformer(feature_transformers),
StandardScaler(), RidgeCV(alphas))
estimators.append((est_name, est_pipeline))
final_estimator = RandomForestRegressor(n_estimators=100,
random_state=rnd_state,
oob_score=True)
reg = StackingRegressor(estimators=estimators,
final_estimator=final_estimator, cv=cv,
random_state=rnd_state, n_jobs=n_jobs)
y = subjects_data.loc[subjects].age.values
X = data.values
cv = check_cv(cv)
mae = cross_val_score(reg, X, y, scoring='neg_mean_absolute_error',
cv=cv, n_jobs=n_jobs)
r2 = cross_val_score(reg, X, y, scoring='r2', cv=cv, n_jobs=n_jobs)
y_pred = cross_val_predict(reg, X, y, cv=cv, n_jobs=n_jobs)
train_sizes, train_scores, test_scores = \
learning_curve(reg, X, y, cv=cv, train_sizes=train_sizes,
scoring='neg_mean_absolute_error', n_jobs=n_jobs)
fold = _get_fold_indices(cv, X, y)
df_pred = pd.DataFrame(dict(y=y_pred, fold=fold), index=subjects,
dtype=float)
return df_pred, mae, r2, train_sizes, train_scores, test_scores
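# Usage sketch (added, synthetic data): two feature blocks indexed by subject are
# stacked to predict age. The modality names, subject ids, and random features
# below are hypothetical; only the interface of run_stacking is exercised, and
# the pandas/numpy imports this module already uses are assumed.
def _example_run_stacking():
    rng = np.random.RandomState(0)
    subjects = ['sub-%03d' % i for i in range(60)]
    feats_a = pd.DataFrame(rng.randn(60, 5), index=subjects)
    feats_b = pd.DataFrame(rng.randn(60, 3), index=subjects)
    subjects_data = pd.DataFrame({'age': rng.uniform(20, 80, 60)}, index=subjects)
    return run_stacking([('mri', feats_a), ('meg', feats_b)], subjects_data,
                        cv=3, n_jobs=1)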
| 5,336,580
|
def add_random_shadow(img, w_low=0.6, w_high=0.85):
"""
    Overlays the supplied image with a random shadow polygon.
    The weight range (i.e. darkness) of the shadow can be configured via the interval [w_low, w_high)
"""
cols, rows = (img.shape[0], img.shape[1])
top_y = np.random.random_sample() * rows
bottom_y = np.random.random_sample() * rows
bottom_y_right = bottom_y + np.random.random_sample() * (rows - bottom_y)
top_y_right = top_y + np.random.random_sample() * (rows - top_y)
if np.random.random_sample() <= 0.5:
bottom_y_right = bottom_y - np.random.random_sample() * (bottom_y)
top_y_right = top_y - np.random.random_sample() * (top_y)
poly = np.asarray([[[top_y, 0], [bottom_y, cols], [bottom_y_right, cols], [top_y_right, 0]]], dtype=np.int32)
mask_weight = np.random.uniform(w_low, w_high)
origin_weight = 1 - mask_weight
mask = np.copy(img).astype(np.int32)
cv2.fillPoly(mask, poly, (0, 0, 0))
# masked_image = cv2.bitwise_and(img, mask)
return cv2.addWeighted(img.astype(np.int32), origin_weight, mask, mask_weight, 0).astype(np.uint8)
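# Usage sketch (added): darken a random polygon on a dummy road image; the output
# keeps the shape and uint8 dtype of the input. Assumes the cv2/numpy imports
# used by add_random_shadow above.
def _example_add_random_shadow():
    img = np.full((160, 320, 3), 200, dtype=np.uint8)
    shaded = add_random_shadow(img, w_low=0.6, w_high=0.85)
    return shaded.shape, shaded.dtype  # (160, 320, 3), uint8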
| 5,336,581
|
def csv_args(value):
"""Parse a CSV string into a Python list of strings.
Used in command line parsing."""
return map(str, value.split(","))
| 5,336,582
|
def sync_dashboards(app=None):
"""Import, overwrite fixtures from `[app]/fixtures`"""
if not cint(frappe.db.get_single_value('System Settings', 'setup_complete')):
return
if app:
apps = [app]
else:
apps = frappe.get_installed_apps()
for app_name in apps:
print("Updating Dashboard for {app}".format(app=app_name))
for module_name in frappe.local.app_modules.get(app_name) or []:
frappe.flags.in_import = True
make_records_in_module(app_name, module_name)
frappe.flags.in_import = False
| 5,336,583
|
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used to build the dictionary passed into execute
"""
return (HAWQMASTER_PORT, HAWQSTANDBY_ADDRESS)
| 5,336,584
|
def projl1_epigraph(center):
"""
Project center=proxq.true_center onto the l1 epigraph. The bound term is
center[0], the coef term is center[1:]
The l1 epigraph is the collection of points $(u,v): \|v\|_1 \leq u$
np.fabs(coef).sum() <= bound.
"""
norm = center[0]
coef = center[1:]
sorted_coefs = np.sort(np.fabs(coef))
n = sorted_coefs.shape[0]
csum = sorted_coefs.sum()
for i, c in enumerate(sorted_coefs):
csum -= c
if csum - (n - i - 1) * c <= norm + c:
# this will terminate as long as norm >= 0
# when it terminates, we know that the solution is between
# sorted_coefs[i-1] and sorted_coefs[i]
# we set the cumulative sum back to the value at i-1
csum += c
idx = i-1
break
if i == n-1: # if it hasn't terminated early, then even soft-thresholding at the largest value was insufficent, answer is 0
return np.zeros_like(center)
# the solution is such that csum - (n-idx-1)*x = norm+x
thold = (csum - norm) / (n-idx)
result = np.zeros_like(center)
result[0] = norm + thold
result[1:] = st(coef, thold)
return result
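# Usage sketch (added): project a point onto the l1 epigraph and check the
# constraint ||v||_1 <= u, where u is the first entry of the result. Assumes the
# soft-thresholding helper `st` used by projl1_epigraph above.
def _example_projl1_epigraph():
    center = np.array([0.5, 2.0, -1.0, 0.25])
    proj = projl1_epigraph(center)
    return proj, np.fabs(proj[1:]).sum() <= proj[0] + 1e-12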
| 5,336,585
|
def crypto_command(text):
""" <ticker> -- Returns current value of a cryptocurrency """
try:
encoded = quote_plus(text)
request = requests.get(API_URL.format(encoded))
request.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
return "Could not get value: {}".format(e)
data = request.json()
if "error" in data:
return "{}.".format(data['error'])
updated_time = datetime.fromtimestamp(data['timestamp'])
if (datetime.today() - updated_time).days > 2:
# the API retains data for old ticker names that are no longer updated
# in these cases we just return a "not found" message
return "Currency not found."
change = float(data['change'])
if change > 0:
change_str = "\x033{}%\x0f".format(change)
elif change < 0:
change_str = "\x035{}%\x0f".format(change)
else:
change_str = "{}%".format(change)
return "{} // \x0307${:,.2f}\x0f USD - {:,.7f} BTC // {} change".format(data['symbol'].upper(),
float(data['price']['usd']),
float(data['price']['btc']),
change_str)
| 5,336,586
|
def ListELB(region, node_types=None):
"""Print load balancer configuration in this region. If 'node_types' is not None, only return the corresponding
load balancers.
"""
elbs = GetLoadBalancers(region, node_types)
for l in elbs:
zone_count = {z:0 for z in l.availability_zones}
instances = ListInstances(region, instances=[i.id for i in l.instances])
instances_dict = {i.id: i for i in instances}
unknown = 0
for i in instances:
if i.placement in zone_count.keys():
zone_count[i.placement] += 1
else:
unknown += 1
zone_str = 'zones: ' + ' '.join(['%s[%d]' % (k, v) for k, v in zone_count.iteritems()])
if unknown > 0:
zone_str += ' unknown[%d]' % unknown
print '%s: %s' % (l.name, zone_str)
states = l.get_instance_health()
for s in states:
print ' %-16s %-20s %-30s' % (s.instance_id, instances_dict[s.instance_id].placement, s.state)
| 5,336,587
|
def byol_loss_multi_views_func(p: torch.Tensor, z: torch.Tensor,p1: torch.Tensor, z1: torch.Tensor, simplified: bool = True) -> torch.Tensor:
"""Computes BYOL's loss given batch of predicted features p and projected momentum features z.
Args:
p, p1 (torch.Tensor): NxD Tensor containing predicted features from view 1
z, z1 (torch.Tensor): NxD Tensor containing projected momentum features from view 2
simplified (bool): faster computation, but with same result. Defaults to True.
Returns:
torch.Tensor: BYOL's loss.
"""
if simplified:
loss = F.cosine_similarity(p, z.detach(), dim=-1).mean() + F.cosine_similarity(p1, z1.detach(), dim=-1).mean()
return 2 - 2 * loss
p = F.normalize(p, dim=-1)
z = F.normalize(z, dim=-1)
p1 = F.normalize(p1, dim=-1)
z1 = F.normalize(z1, dim=-1)
return 2 - 2 * ((p * z.detach()).sum(dim=1).mean() +(p1 * z1.detach()).sum(dim=1).mean())
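# Usage sketch (added): on random features the simplified and full code paths of
# the loss agree up to floating-point error, since cosine similarity equals the
# dot product of the L2-normalized vectors.
def _example_byol_loss():
    p, z = torch.randn(8, 128), torch.randn(8, 128)
    p1, z1 = torch.randn(8, 128), torch.randn(8, 128)
    fast = byol_loss_multi_views_func(p, z, p1, z1, simplified=True)
    slow = byol_loss_multi_views_func(p, z, p1, z1, simplified=False)
    return torch.allclose(fast, slow, atol=1e-5)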
| 5,336,588
|
def a_test_model(n_classes=2):
"""
recover model and test data from disk, and test the model
"""
images_test, labels_test, data_num_test = load_test_data_full()
model = load_model(BASE_PATH + 'models/Inception_hemorrhage_model.hdf5')
adam_optimizer = keras.optimizers.Adam(
lr=0.0001,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.0,
amsgrad=False)
model.compile(optimizer=adam_optimizer, loss='binary_crossentropy', metrics=['accuracy'])
# score the test data
test_data_generator = generate_testing_from_hdf5(TEST_INDICES, batch_size=BATCH_SIZE)
scores = model.evaluate_generator(test_data_generator, steps=N_STEPS_PER_EPOCH_TEST)
# refresh the data generator and generate predictions
    test_data_generator = generate_testing_from_hdf5(TEST_INDICES, batch_size=BATCH_SIZE)
predictions = model.predict_generator(test_data_generator, steps=N_STEPS_PER_EPOCH_TEST)
classes = np.argmax(predictions, axis=1)
pred_ground_truth = np.column_stack((predictions, classes, labels_test))
pred_ground_truth = pd.DataFrame(
pred_ground_truth,
columns=[
'Proba Neg',
'Proba Pos',
'Class Proba',
'Neg Label',
'Pos Label'])
# Compute ROC curve and ROC area for each class
fpr, tpr, thresholds = roc_curve(
pred_ground_truth['Class Proba'],
pred_ground_truth['Pos Label'])
roc_auc = auc(fpr, tpr)
accuracy, precision, recall, f1_score, cm = vol_inception_utils.calc_metrics(
pred_ground_truth['Pos Label'],
pred_ground_truth['Class Proba'])
np.savetxt(BASE_PATH + 'results/confusion_matrix.csv', (cm), delimiter=',')
return pred_ground_truth, accuracy, precision, recall, f1_score, cm, fpr, tpr, thresholds, roc_auc
| 5,336,589
|
def generate_synchronous_trajectory(initial_state):
"""
Simulate the network starting from a given initial state in the synchronous strategy
:param initial_state: initial state of the network
:return: a trajectory in matrix from, where each row denotes a state
"""
trajectory = [initial_state]
state_index_set = {state_to_index(initial_state)} # if a state reoccurs, an attractor or fixed point is
# reached, stop.
s = initial_state
while True:
new_s = update(s) # synchronous
new_s_index = state_to_index(new_s)
if new_s_index in state_index_set:
break
trajectory.append(new_s)
state_index_set.add(new_s_index)
s = new_s
return np.array(trajectory)
| 5,336,590
|
def _add_remove_peaks(axis, add_peak):
"""
Gives options to add, edit, or remove peaks and peak markers on the figure.
Parameters
----------
axis : plt.Axes
The axis to add or remove peaks from. Contains all of the
peaks information within axis.lines. Each line for a peak
has a label corresponding to '-PEAK-peak_name'.
add_peak : bool or None
If True, will give window to add a peak; if False, will give
window to remove peaks; if None, will give window to edit
peaks.
"""
remove_peak = False
validations = {'line': {'floats': [], 'user_inputs': [], 'constraints': []},
'marker': {'floats': [], 'user_inputs': [], 'constraints': []}}
peaks = {}
non_peaks = {}
for i, line in enumerate(axis.lines):
if line.get_label().startswith('-PEAK-'):
key = ''.join(line.get_label().split('-PEAK-'))
if key not in peaks:
peaks[key] = {'peaks': [], 'annotations': []}
peaks[key]['peaks'].append(line)
elif not all(np.isnan(line.get_xdata())):
non_peaks[i] = line
for annotation in axis.texts:
if annotation.get_text() in peaks:
peaks[annotation.get_text()]['annotations'].append(annotation)
if add_peak:
window_text = 'Add Peak'
non_peak_labels = [
f'Line #{key + 1} ({line.get_label()})' for key, line in non_peaks.items()
]
inner_layout = [
[sg.Text('Peak Label:'),
sg.Input(key='label', size=(10, 1))],
[sg.Check('Place label above each marker', key='show_label')],
[sg.Text('Defining Axis:'),
sg.Combo(('x', 'y'), key='defining_axis', default_value='x',
size=(5, 1), readonly=True)],
[sg.Text('Peak Positions (separate\nmultiple entries with a comma):'),
sg.Input(key='positions', size=(10, 1))],
[sg.Text('Select all lines to add peaks to')],
[sg.Listbox(non_peak_labels, select_mode='multiple',
size=(30, 5), key='peak_listbox')],
[sg.Text('Peak Label Type:'),
sg.Radio('Marker', 'label_type', default=True, key='radio_marker',
enable_events=True),
sg.Radio('Line', 'label_type', key='radio_line', enable_events=True)],
[sg.TabGroup([[
sg.Tab('Options', [
[sg.Text('Face Color:'),
sg.Combo(COLORS, default_value=COLORS[1], size=(9, 1),
key='face_color_', readonly=True),
sg.Input(key='face_chooser_', enable_events=True, visible=False),
sg.ColorChooserButton('..', target='face_chooser_')],
[sg.Text('Edge Color:'),
sg.Combo(COLORS, default_value=COLORS[1], size=(9, 1),
key='edge_color_', readonly=True),
sg.Input(key='edge_chooser_', enable_events=True, visible=False),
sg.ColorChooserButton('..', target='edge_chooser_')],
[sg.Text('Edge Line Width:'),
sg.Input(plt.rcParams['lines.markeredgewidth'],
key='edge_width', size=(4, 1))],
[sg.Text('Style:'),
sg.Combo(MARKERS, default_value=MARKERS[1],
key='marker_style', size=(13, 1))],
[sg.Text('Size:'),
sg.Input(plt.rcParams['lines.markersize'],
key='marker_size', size=(4, 1))]
], key='tab_marker'),
sg.Tab('Options', [
[sg.Text('Color:'),
sg.Combo(COLORS, default_value=COLORS[1], size=(9, 1),
key='line_color_', readonly=True),
sg.Input(key='line_chooser_', enable_events=True, visible=False),
sg.ColorChooserButton('..', target='line_chooser_')],
[sg.Text('Style:'),
sg.Combo(list(LINE_MAPPING), readonly=True, size=(10, 1),
default_value=list(LINE_MAPPING)[1], key='line_style')],
[sg.Text('Line Width:'),
sg.Input(plt.rcParams['lines.linewidth'], key='line_size',
size=(4, 1))]
], visible=False, key='tab_line')
]], tab_background_color=sg.theme_background_color(), key='tab')]
]
for key in ('line', 'marker'):
validations[key]['user_inputs'].extend([
['label', 'Peak Label', utils.string_to_unicode, False, None],
['positions', 'Peak Positions', float]
])
validations[key]['floats'].append(
[f'{key}_size', f'{key} size']
)
validations[key]['constraints'].append(
[f'{key}_size', f'{key} size', '> 0']
)
validations['marker']['user_inputs'].append(
['marker_style', 'marker style', utils.string_to_unicode, True, None]
)
validations['marker']['floats'].append(['edge_width', 'edge line width'])
validations['marker']['constraints'].append(
['edge_width', 'edge line width', '> 0']
)
elif add_peak is None:
window_text = 'Edit Peaks'
column_layout = []
for i, peak in enumerate(peaks):
column_layout.extend([
[sg.Text(f'Peak #{i + 1}', relief='ridge', justification='center')],
[sg.Text('Peak Label:'),
sg.Input(utils.stringify_backslash(peak),
key=f'label_{i}', size=(10, 1))],
[sg.Text('Positions:')]
])
for j, line in enumerate(peaks[peak]['peaks']):
column_layout.append([
sg.Text(f' Position #{j + 1}: '),
sg.Check('Delete?', key=f'delete_peak_{i}_{j}')
])
for k, data in enumerate(line.get_xydata()):
column_layout.append([
sg.Text(f' X{k + 1}:'),
sg.Input(data[0], size=(8, 1), key=f'x_{i}_{j}_{k}'),
sg.Text(f'Y{k + 1}:'),
sg.Input(data[1], size=(8, 1), key=f'y_{i}_{j}_{k}')
])
validations['marker']['floats'].extend([
[f'x_{i}_{j}_{k}', f'Position #{j + 1}, X{k + 1} for peak #{i + 1}'],
[f'y_{i}_{j}_{k}', f'Position #{j + 1}, Y{k + 1} for peak #{i + 1}']
])
validations['marker']['user_inputs'].append(
[f'label_{i}', f'Peak Label {i + 1}', utils.string_to_unicode, False, None]
)
if peaks[peak]['peaks'][0].get_xdata().size > 1: # a line
for style, linestyle in LINE_MAPPING.items():
if linestyle == line.get_linestyle():
break
else: # in case no break
style = line.get_linestyle()
column_layout.extend([
[sg.Text('Color:'),
sg.Combo(COLORS, default_value=line.get_color(), size=(9, 1),
key=f'line_color_{i}', readonly=True),
sg.Input(key=f'line_chooser_{i}', enable_events=True, visible=False),
sg.ColorChooserButton('..', target=f'line_chooser_{i}')],
[sg.Text('Style:'),
sg.Combo(list(LINE_MAPPING), readonly=True, size=(10, 1),
default_value=style, key=f'line_style_{i}')],
[sg.Text('Line Width:'),
sg.Input(line.get_linewidth(), key=f'line_size_{i}',
size=(5, 1))]
])
validations['marker']['floats'].append(
[f'line_size_{i}', f'line width for peak #{i + 1}']
)
validations['marker']['constraints'].append(
[f'line_size_{i}', f'line width for peak #{i + 1}', '> 0']
)
else: # a marker
marker = utils.stringify_backslash(line.get_marker())
for j, mark in enumerate(MARKERS):
if mark[0] == marker:
marker = MARKERS[j]
break
column_layout.extend([
[sg.Text('Face Color:'),
sg.Combo(COLORS, default_value=line.get_markerfacecolor(),
size=(9, 1), key=f'face_color_{i}', readonly=True),
sg.Input(key=f'face_chooser_{i}', enable_events=True, visible=False),
sg.ColorChooserButton('..', target=f'face_chooser_{i}')],
[sg.Text('Edge Color:'),
sg.Combo(COLORS, default_value=line.get_markeredgecolor(),
size=(9, 1), key=f'edge_color_{i}', readonly=True),
sg.Input(key=f'edge_chooser_{i}', enable_events=True, visible=False),
sg.ColorChooserButton('..', target=f'edge_chooser_{i}')],
[sg.Text('Edge Line Width:'),
sg.Input(line.get_markeredgewidth(),
key=f'edge_width_{i}', size=(4, 1))],
[sg.Text('Style:'),
sg.Combo(MARKERS, default_value=marker,
key=f'marker_style_{i}', size=(13, 1))],
[sg.Text('Size:'),
sg.Input(line.get_markersize(),
key=f'marker_size_{i}', size=(4, 1))],
])
validations['marker']['floats'].extend([
[f'marker_size_{i}', f'marker size for peak #{i + 1}'],
[f'edge_width_{i}', f'edge line width for peak #{i + 1}']
])
validations['marker']['constraints'].extend([
[f'marker_size_{i}', f'marker size for peak #{i + 1}', '> 0'],
[f'edge_width_{i}', f'edge line width for peak #{i + 1}', '> 0']
])
validations['marker']['user_inputs'].append(
[f'marker_style_{i}', f'marker style for peak #{i + 1}',
utils.string_to_unicode, True, None]
)
inner_layout = [
[sg.Column(column_layout, #size=(None, 400),
scrollable=True, vertical_scroll_only=True)]
]
else:
remove_peak = True
window_text = 'Remove Peaks'
labels = {}
for peak in peaks:
labels[utils.stringify_backslash(peak)] = peak
inner_layout = [
[sg.Text('All markers and text for selected peaks will be deleted!\n')],
[sg.Listbox(list(labels.keys()), select_mode='multiple', size=(20, 5),
key='peak_listbox')]
]
layout = [
*inner_layout,
[sg.Text('')],
[sg.Button('Back'),
sg.Button('Submit', bind_return_key=True, button_color=utils.PROCEED_COLOR)]
]
window = sg.Window(window_text, layout, finalize=True, icon=utils._LOGO)
window.TKroot.grab_set()
while True:
event, values = window.read()
if event in (sg.WIN_CLOSED, 'Back'):
add_peak = False
remove_peak = False
break
elif event.startswith('radio'):
if values['radio_marker']:
window['tab_marker'].update(visible=True)
window['tab_marker'].select()
window['tab_line'].update(visible=False)
else:
window['tab_line'].update(visible=True)
window['tab_line'].select()
window['tab_marker'].update(visible=False)
# color chooser button
elif 'chooser' in event:
if values[event] != 'None':
property_type = event.split('_')[0]
index = event.split('_')[-1]
window[f'{property_type}_color_{index}'].update(value=values[event])
elif event == 'Submit':
window.TKroot.grab_release()
close = True
if add_peak:
if values['radio_marker']:
close = utils.validate_inputs(values, **validations['marker'])
else:
close = utils.validate_inputs(values, **validations['line'])
if close:
if values['label'] in peaks:
close = False
sg.popup(
'The selected peak label is already a peak.\n',
title='Error', icon=utils._LOGO
)
elif not values['peak_listbox']:
close = False
sg.popup(
'Please select a line on which to add peak markers.\n',
title='Error', icon=utils._LOGO
)
elif add_peak is None:
close = utils.validate_inputs(values, **validations['marker'])
if close:
labels = [
values[label] for label in values if label.startswith('label')
]
if len(labels) != len(set(labels)):
close = False
sg.popup(
'There cannot be repeated peak labels.\n',
title='Error', icon=utils._LOGO
)
else:
close = values['peak_listbox']
if not close:
sg.popup('Please select a peak to delete.\n', title='Error', icon=utils._LOGO)
if not close:
window.TKroot.grab_set()
else:
break
window.close()
del window
if add_peak:
# main designates defining axis, secondary designates non-defining axis
positions = values['positions']
secondary_limits = getattr(
axis, f'get_{"xy".replace(values["defining_axis"], "")}lim')()
offset = 0.05 * (secondary_limits[1] - secondary_limits[0])
plot_data = {'x': [], 'y': []}
for peak in values['peak_listbox']:
line = non_peaks[int(peak.split(' ')[1].replace('#', '')) - 1]
main_data = getattr(line, f'get_{values["defining_axis"]}data')()
secondary_data = getattr(
line, f'get_{"xy".replace(values["defining_axis"], "")}data')()
for position in positions:
plot_data[values['defining_axis']].append(
(position, position) if values['radio_line'] else (position,)
)
if values['radio_marker']:
data_point = (secondary_data[np.abs(main_data - position).argmin()] + offset,)
else:
min_secondary = secondary_data.min()
max_secondary = secondary_data.max()
data_point = (min_secondary - offset, max_secondary + offset)
plot_data['xy'.replace(values['defining_axis'], '')].append(data_point)
for data in zip(plot_data['x'], plot_data['y']):
axis.plot(
*data,
label='-PEAK-' + values['label'],
marker=values['marker_style'].split(' ')[0] if values['radio_marker'] else 'None',
markersize=values['marker_size'] if values['radio_marker'] else None,
markerfacecolor=values['face_color_'] if values['radio_marker'] else 'None',
markeredgecolor=values['edge_color_'] if values['radio_marker'] else 'None',
                markeredgewidth=values['edge_width'] if values['radio_marker'] else None,
color=values['line_color_'] if values['radio_line'] else 'None',
linewidth=values['line_size'] if values['radio_line'] else None,
linestyle=LINE_MAPPING[values['line_style']] if values['radio_line'] else ''
)
if values['show_label']:
annotation_position = (
data[0][-1] + offset if values['defining_axis'] == 'y' else data[0][-1],
data[1][-1] + offset if values['defining_axis'] == 'x' else data[1][-1]
)
axis.annotate(
values['label'],
xy=annotation_position,
rotation=90 if values['defining_axis'] == 'x' else 0,
horizontalalignment='center' if values['defining_axis'] == 'x' else 'left',
verticalalignment='center' if values['defining_axis'] == 'y' else 'baseline',
annotation_clip=False,
in_layout=False,
)
elif add_peak is None:
for i, key in enumerate(peaks):
for annotation in peaks[key]['annotations']:
annotation.update({'text': values[f'label_{i}']})
deleted_peaks = []
for j, line in enumerate(peaks[key]['peaks']):
if values[f'delete_peak_{i}_{j}']:
deleted_peaks.append(line)
else:
line.update({
'xdata': [values[entry] for entry in values if entry.startswith(f'x_{i}_{j}_')],
'ydata': [values[entry] for entry in values if entry.startswith(f'y_{i}_{j}_')],
'label': '-PEAK-' + values[f'label_{i}'],
'marker': values.get(f'marker_style_{i}', 'None').split(' ')[0],
'markerfacecolor': values.get(f'face_color_{i}', 'None'),
'markeredgecolor': values.get(f'edge_color_{i}', 'None'),
'markeredgewidth': values.get(f'edge_width_{i}', 0),
'markersize': values.get(f'marker_size_{i}', 0),
'linestyle': LINE_MAPPING[values.get(f'line_style_{i}', 'None')],
'linewidth': values.get(f'line_size_{i}', 0),
'color': values.get(f'line_color_{i}', 'None'),
})
for line in deleted_peaks:
line.remove()
elif remove_peak:
for entry in values['peak_listbox']:
for line in peaks[labels[entry]]['peaks']:
line.remove()
for annotation in peaks[labels[entry]]['annotations']:
annotation.remove()
| 5,336,591
|
def arg_names(level=2):
"""Try to determine names of the variables given as arguments to the caller
of the caller. This works only for trivial function invocations. Otherwise
either results may be corrupted or exception will be raised.
level: 0 is current frame, 1 is the caller, 2 is caller of the caller
"""
try:
caller_frame_info = inspect.stack()[level]
caller_context = caller_frame_info.code_context
code = dedent(''.join(caller_context))
tree = ast.parse(code, '', 'eval')
always_assert(isinstance(tree.body, ast.Call))
args = tree.body.args
names = [astunparse.unparse(arg).strip() for arg in args]
return names
    except Exception:
raise Exception('Cannot determine arg names') from None
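# Usage sketch (added): arg_names recovers the textual argument expressions of a
# trivial, single-line call one frame up; the call site must be a bare call
# expression for the eval-mode parse above to succeed. Exact formatting of the
# returned names depends on astunparse.
def _show(*args):
    print(arg_names(level=2))
def _example_arg_names():
    foo, bar = 1, 2
    _show(foo, bar)  # prints ['foo', 'bar']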
| 5,336,592
|
def check_cuda():
""" Check Cuda for Linux or Windows """
if OS_VERSION[0] == "Linux":
check_cuda_linux()
elif OS_VERSION[0] == "Windows":
check_cuda_windows()
| 5,336,593
|
def test_good_values(capsys, threads):
"""Test for valid values."""
config_expected = dict(
flac_bin='/bin/bash',
lame_bin='/bin/bash',
ignore_art=False,
ignore_lyrics=False,
threads=threads,
flac_dir='/tmp',
mp3_dir='/tmp',
quiet=False,
)
argv = ['/tmp/', '/tmp', '--flac-bin-path=/bin/bash', '--lame-bin-path=/bin/bash']
cli_config_settings = parse_n_check(docopt(convert_music__doc__, argv=argv))
assert config_expected == cli_config_settings
stdout_actual, stderr_actual = capsys.readouterr()
stdout_expected = ""
stderr_expected = ""
assert stdout_expected == stdout_actual
assert stderr_expected == stderr_actual
| 5,336,594
|
def test_expose_header_return_header_true(client, monkeypatch, mock_uuid):
"""
Tests that it does return the Access-Control-Allow-Origin when EXPOSE_HEADER is set to True
and RETURN_HEADER is True
"""
from django_guid.config import settings as guid_settings
monkeypatch.setattr(guid_settings, 'EXPOSE_HEADER', True)
response = client.get('/api')
assert response.get('Access-Control-Expose-Headers')
| 5,336,595
|
def render_template(path, ctx):
"""Render a Jinja2 template"""
with path.open() as f:
content = f.read()
tmpl = jinja2.Template(content)
return html_minify(tmpl.render(**ctx))
| 5,336,596
|
def define_visualizations():
"""Unfortunately, these mappings are defined in the database, when they probably
should be defined in code. This routine pre-populates the database with the expected
visualizations."""
norm = HtmlVisualizationFormat()
norm.slug = "norm"
norm.button_title = "normalized"
norm.title = "Normalized Text"
analytic = HtmlVisualizationFormat()
analytic.slug = "analytic"
analytic.button_title = "analytic"
analytic.title = "Analytic Visualization"
dipl = HtmlVisualizationFormat()
dipl.slug = "dipl"
dipl.button_title = "diplomatic"
dipl.title = "Diplomatic Edition"
sahidica = HtmlVisualizationFormat()
sahidica.slug = "sahidica"
sahidica.button_title = "chapter"
sahidica.title = "Sahidica Chapter View"
for vis in [norm, analytic, dipl, sahidica]:
try:
HtmlVisualizationFormat.objects.get(slug__exact=vis.slug)
except HtmlVisualizationFormat.DoesNotExist:
vis.save()
| 5,336,597
|
def linux_compute_tile_singlecore(optimsoc_buildroot):
"""
Module-scoped fixture: build a Linux image for a single-core compute tile
"""
# Get the buildroot base directory from the optimsoc_buildroot() fixture.
# Note that this directory is cached between pytest runs. Make sure the
# commands executed as part of this test can deal with that and rebuild
# artifacts as needed.
src_optimsoc_buildroot = optimsoc_buildroot.join('optimsoc-buildroot')
src_buildroot = optimsoc_buildroot.join('buildroot')
config = 'optimsoc_computetile_singlecore_defconfig'
# buildroot doesn't like our OpTiMSoC compiler being in the path. Error is:
# ---
# You seem to have the current working directory in your
# LD_LIBRARY_PATH environment variable. This doesn't work.
# support/dependencies/dependencies.mk:21: recipe for target 'dependencies' failed
# ---
env = dict(os.environ, LD_LIBRARY_PATH='', PATH='/bin:/usr/bin:/usr/local/bin')
cmd = ['make',
'-C', str(src_buildroot),
'BR2_EXTERNAL='+str(src_optimsoc_buildroot),
config]
subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT,
universal_newlines=True)
cmd = ['make',
'-C', str(src_buildroot)]
env = dict(os.environ, LD_LIBRARY_PATH='')
subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT,
universal_newlines=True)
linux_img = src_buildroot.join('output/images/vmlinux')
return linux_img
| 5,336,598
|
def reverse( sequence ):
"""Return the reverse of any sequence
"""
return sequence[::-1]
| 5,336,599
|