content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def export_result(fname, num_trans, candidate_rules, minsup, minconf, itemset_sups, rule_confs, processing_time):
    """Write the discovered association rules to a file named "Rules".

    Args:
        fname: name of the input data file (echoed in the report header).
        num_trans: number of rows/transactions in the original data set.
        candidate_rules: iterable of (antecedent, consequent) rule pairs; each
            side is an iterable of (attribute, value) tuples, and the union of
            both sides must be a valid key of ``itemset_sups``.
        minsup: minimum support threshold used during mining.
        minconf: minimum confidence threshold used during mining.
        itemset_sups: mapping itemset -> support value.
        rule_confs: mapping rule -> confidence value.
        processing_time: elapsed mining time, in seconds.

    Side effects: creates/overwrites "Rules" in the current working directory
    and prints a status summary to stdout.
    """
    try:
        # `with` guarantees the file is closed even if a write fails
        # (replaces the original manual try/finally + close).
        with open("Rules", "w") as f:
            print("Summary:", file=f)
            print("---------------------------------------------------", file=f)
            print("Data filename: " + fname, file=f)
            print("Total rows in the original set: " + str(num_trans), file=f)
            print("Total rules discovered: " + str(len(candidate_rules)), file=f)
            print("The selected measures: Support=%.2f Confidence=%.2f" % (minsup, minconf), file=f)
            print("Total processing time: %.3f seconds" % processing_time, file=f)
            print("---------------------------------------------------", end="", file=f)
            print("\nDiscovered Rules:\n", end="", file=f)
            for i, candidate_rule in enumerate(candidate_rules, 1):
                antecedent, consequent = candidate_rule[0], candidate_rule[1]
                support = itemset_sups[antecedent | consequent]
                confidence = rule_confs[candidate_rule]
                print("\nRule #%d: (Support=%.2f, Confidence=%.2f)\n{ " % (i, support, confidence), end="", file=f)
                for item in antecedent:
                    print(item[0] + "=" + item[1] + " ", end="", file=f)
                print("}\n ---> { ", end="", file=f)
                for item in consequent:
                    print(item[0] + "=" + item[1] + " ", end="", file=f)
                print("} ", file=f)
    except IOError:
        print("\nERROR: Unable to create \"Rules\" file.")
    else:
        print("\nApriori algorithm finished.")
        print("Total processing time: %.3f seconds." % processing_time)
        if candidate_rules:
            print("Association rules saved in the file \"Rules.\"\n")
        else:
            print("WARNING: 0 association rules were discovered. \"Rules\" file is empty.\n")
def children_shape_ranks(rank, n):
    """
    Return the partition of leaves associated
    with the children of the tree of rank `rank`, and
    the ranks of each child tree.

    :param rank: rank of the tree shape within the enumeration of shapes
        over `n` leaves.
    :param n: total number of leaves.
    :return: tuple ``(part, child_ranks)`` where ``part`` is the leaf
        partition and ``child_ranks`` holds one shape rank per child.
    :raises ValueError: if `rank` exceeds the total number of shapes for `n`.
    """
    # Locate the partition whose block of shapes contains `rank`: walk the
    # partitions of n, subtracting each block's size until rank falls inside.
    part = []
    for prev_part in partitions(n):
        num_trees_with_part = num_tree_pairings(prev_part)
        if rank < num_trees_with_part:
            part = prev_part
            break
        rank -= num_trees_with_part
    else:
        # No partition matched: only legal for the trivial one-leaf tree,
        # where `part` stays empty.
        if n != 1:
            raise ValueError("Rank is out of bounds.")
    # Children with equal leaf counts are grouped together; for each group,
    # the remaining rank is split mixed-radix style (// and %= against the
    # pairing count of the *remaining* children) into a combination-with-
    # replacement rank for that group's shapes.
    grouped_part = group_partition(part)
    child_ranks = []
    next_child = 0
    for g in grouped_part:
        next_child += len(g)
        k = g[0]
        # TODO precompute vector up front
        rest_children = part[next_child:]
        rest_num_pairings = num_tree_pairings(rest_children)
        shapes_comb_rank = rank // rest_num_pairings
        g_shape_ranks = Combination.with_replacement_unrank(
            shapes_comb_rank, num_shapes(k), len(g)
        )
        child_ranks += g_shape_ranks
        rank %= rest_num_pairings
    return part, child_ranks
def url_mapper(url, package):
    """
    Map the package.json "url" field onto the package's download URLs.

    The "url" field is a redirection to a package download URL published
    somewhere other than the public npm registry; when present, it is
    recorded as a download URL. Returns the (possibly updated) package.
    """
    if not url:
        return package
    package.download_urls.append(url)
    return package
def electricPotential(n, V_SD_grid, V_G_grid):
    """
    Compute the electric potential of the QDot.

    :param n: the number of electrons in the dot
    :param V_SD_grid: the 2d array of source-drain voltage values
    :param V_G_grid: the 2d array of gate voltage values
    :return: The Electric Potential for adding the nth electron to the dot
    """
    # Arbitrary random formula used to widen the diamonds as more
    # electrons are added.
    E_N = E_C * ((n ** 2 - (n - 1) ** 2) / n * 5 + random() / 9 * n)
    coupling = (E_C / e) * (C_S * V_SD_grid + C_G * V_G_grid)
    return (n - N_0 - 1 / 2) * E_C - coupling + E_N
def mettre_a_jour_uids(nom_fichier, organisateurs, uids):
    """Update the UID,EMAIL CSV file from the organisers dictionary.

    Any organiser whose UID is not yet present in ``uids`` is added to the
    mapping; the CSV file is rewritten only when at least one new UID was
    discovered. Returns the (possibly updated) ``uids`` mapping.
    """
    ajouts = False
    for infos in organisateurs.values():
        uid = infos["id_organisateur"]
        if uid not in uids:
            uids[uid] = infos["email_organisateur"]
            ajouts = True
    if ajouts:
        with open(nom_fichier, "w", encoding="utf-8") as fichier:
            fichier.writelines(
                "{:s},{:s}\n".format(uid, email) for uid, email in uids.items()
            )
    return uids
def _spectrogram(signal, dB=True, log_prefix=20, log_reference=1,
                 yscale='linear', unit=None,
                 window='hann', window_length=1024, window_overlap_fct=0.5,
                 cmap=mpl.cm.get_cmap(name='magma'), ax=None):
    """Plot the magnitude spectrum versus time.
    See pyfar.line.spectogram for more information.

    `signal` must be a Signal instance; when `dB` is True the magnitude is
    scaled as ``log_prefix * log10(|S| / log_reference)``. `unit` selects the
    x-axis time unit ('samples', or auto-detected when None). The `window*`
    parameters configure the underlying STFT. Only the first channel of a
    multi-channel signal is plotted (a warning is issued).

    Returns the matplotlib axes and the (possibly dB-scaled) spectrogram.
    """
    # check input
    if not isinstance(signal, Signal):
        raise TypeError('Input data has to be of type: Signal.')
    _check_time_unit(unit)
    _check_axis_scale(yscale, 'y')
    if window_length > signal.n_samples:
        raise ValueError("window_length exceeds signal length")
    if np.prod(signal.cshape) > 1:
        warnings.warn(("Using only the first channel of "
                       f"{np.prod(signal.cshape)}-channel signal."))
    # take only the first channel of time data
    first_channel = tuple(np.zeros(len(signal.cshape), dtype='int'))
    # get spectrogram
    frequencies, times, spectrogram = dsp.spectrogram(
        signal[first_channel], window, window_length, window_overlap_fct)
    # get magnitude data in dB
    if dB:
        # eps keeps log10 finite for silent (zero-magnitude) bins
        eps = np.finfo(float).eps
        spectrogram = log_prefix*np.log10(
            np.abs(spectrogram) / log_reference + eps)
    # auto detect the time unit
    if unit is None:
        unit = _time_auto_unit(times[..., -1])
    # set the unit
    if unit == 'samples':
        times *= signal.sampling_rate
    else:
        factor, unit = _deal_time_units(unit)
        times = times * factor
    # plot the data
    _, ax = _prepare_plot(ax)
    ax.pcolormesh(times, frequencies, spectrogram, cmap=cmap,
                  shading='gouraud')
    # Adjust axes:
    ax.set_ylabel('Frequency in Hz')
    ax.set_xlabel(f'Time in {unit}')
    ax.set_xlim((times[0], times[-1]))
    ax.set_ylim((max(20, frequencies[1]), signal.sampling_rate/2))
    # color limits
    if dB:
        # Locate the QuadMesh drawn by pcolormesh above to set its limits:
        # 100 dB of dynamic range, anchored 10 dB above the peak value.
        for PCM in ax.get_children():
            if type(PCM) == mpl.collections.QuadMesh:
                break
        ymax = np.nanmax(spectrogram)
        ymin = ymax - 90
        ymax = ymax + 10
        PCM.set_clim(ymin, ymax)
    if yscale == 'log':
        ax.set_yscale('symlog')
        ax.yaxis.set_major_locator(LogLocatorITAToolbox())
        ax.yaxis.set_major_formatter(LogFormatterITAToolbox())
    ax.grid(ls='dotted', color='white')
    plt.tight_layout()
    return ax, spectrogram
def _merge_cwlinputs(items_by_key, input_order, parallel):
    """Merge multiple cwl records and inputs, handling multiple data items.
    Special cases:
    - Single record but multiple variables (merging arrayed jobs). Assign lists
      of variables to the record.

    :param items_by_key: mapping of input key (string, or tuple of nested key
        parts for "var" inputs) to collected values
    :param input_order: ordered mapping of input key -> "var" or "record"
    :param parallel: parallelization mode string (e.g. "multi-combined",
        "batch...", "multi-parallel")
    :return: list of merged output dictionaries, one per data item
    """
    items_by_key = _maybe_nest_bare_single(items_by_key, parallel)
    if parallel == "multi-combined":
        items_by_key, input_order = _concat_records(items_by_key, input_order)
    # Count how many data items each variable/record input carries; a clean
    # merge requires a single consistent count.
    var_items = set([_item_count(items_by_key[tuple(k.split("__"))])
                     for (k, t) in input_order.items() if t == "var"])
    rec_items = set([_item_count(items_by_key[k]) for (k, t) in input_order.items() if t == "record"])
    if var_items:
        num_items = var_items
        # Counts of {1, n} can indicate single nested inputs mixed with
        # arrayed ones; re-check after normalizing those.
        if len(num_items) == 2 and 1 in num_items:
            num_items.remove(1)
            items_by_key_test = _check_for_single_nested(num_items.pop(), items_by_key, input_order)
            var_items = set([_item_count(items_by_key_test[tuple(k.split("__"))])
                             for (k, t) in input_order.items() if t == "var"])
            num_items = var_items
        assert len(num_items) == 1, "Non-consistent variable data counts in CWL input:\n%s" % \
            (pprint.pformat(items_by_key))
        items_by_key, num_items = _nest_vars_in_rec(var_items, rec_items, input_order, items_by_key, parallel)
    else:
        num_items = rec_items
    assert len(num_items) == 1, "Non-consistent record data counts in CWL input:\n%s" % \
        (pprint.pformat(items_by_key))
    target_items = num_items.pop()
    # One output dictionary per data item; inputs are folded in below.
    out = [{} for _ in range(target_items)]
    for (cwl_key, cwl_type) in input_order.items():
        if cwl_type == "var":
            cwl_key = tuple(cwl_key.split("__"))
        cur_vals = items_by_key[cwl_key]
        if _is_nested_single(cur_vals, target_items):
            cur_vals = [[x] for x in cur_vals[0]]
        for i, cur_val in enumerate(cur_vals):
            if isinstance(cwl_key, (list, tuple)):
                # nested batches with records
                if (parallel.startswith(("batch", "multi-parallel")) and
                        isinstance(out[i], (list, tuple))):
                    for j in range(len(out[i])):
                        out[i][j] = _update_nested(list(cwl_key), cur_val, out[i][j], allow_overwriting=True)
                else:
                    out[i] = _update_nested(list(cwl_key), cur_val, out[i], allow_overwriting=True)
            elif out[i] == {}:
                out[i] = cur_val
            else:
                # Handle single non-batched records
                if isinstance(cur_val, (list, tuple)) and len(cur_val) == 1:
                    cur_val = cur_val[0]
                assert isinstance(cur_val, dict), (cwl_key, cur_val)
                for k, v in cur_val.items():
                    out[i] = _update_nested([k], v, out[i], allow_overwriting=True)
    return out
def polar_distance(x1, x2):
    """
    Given two arrays of angles x1 and x2, pair the entries that are closest
    (on the circle) and return the pairing as an index matrix:
    x1[index[:, 0]] is matched to x2[index[:, 1]].

    Parameters
    ----------
    x1:
        vector 1
    x2:
        vector 2

    Returns
    -------
    d:
        mean absolute circular difference over the matched pairs
    index:
        the permutation matrix (rows of [x1 index, x2 index]); a flat
        pair when one of the inputs has a single element
    """
    x1 = np.reshape(x1, (1, -1), order="F")
    x2 = np.reshape(x2, (1, -1), order="F")
    N1 = x1.size
    N2 = x2.size
    # Circular distance between every pair: entry [i, j] = dist(x2[i], x1[j]).
    diffmat = np.arccos(np.cos(x1 - np.reshape(x2, (-1, 1), order="F")))
    min_N1_N2 = np.min([N1, N2])
    index = np.zeros((min_N1_N2, 2), dtype=int)
    if min_N1_N2 > 1:
        # Greedy matching: repeatedly take the globally closest remaining
        # pair, then invalidate its row and column.
        for k in range(min_N1_N2):
            d2 = np.min(diffmat, axis=0)
            index2 = np.argmin(diffmat, axis=0)
            index1 = np.argmin(d2)
            index2 = index2[index1]
            index[k, :] = [index1, index2]
            diffmat[index2, :] = float("inf")
            diffmat[:, index1] = float("inf")
        d = np.mean(np.arccos(np.cos(x1[:, index[:, 0]] - x2[:, index[:, 1]])))
    else:
        d = np.min(diffmat)
        index = np.argmin(diffmat)
        if N1 == 1:
            # Fixed: the singleton's own index is 0 (was 1, a leftover from
            # the 1-based MATLAB original, inconsistent with the 0-based
            # indices produced by the multi-element branch above).
            index = np.array([0, index])
        else:
            index = np.array([index, 0])
    return d, index
def _calc_data_point_locations(num_points, x_values=None):
"""Returns the x-axis location for each of the data points to start at.
Note: A numpy array is returned so that the overloaded "+" operator can be
used on the array.
The x-axis locations are scaled by x_values if it is provided, or else the
x-axis locations are evenly spaced. In either case, the x-axis locations
will always be in the range [1, num_points].
"""
if x_values is None:
# Evenly space the x-axis locations.
x_locs = np.arange(1, num_points + 1)
else:
if len(x_values) != num_points:
raise ValueError("The number of x-axis values must match the "
"number of data points.")
# Scale to the range [1, num_points]. Taken from
# http://www.heatonresearch.com/wiki/Range_Normalization
x_min = min(x_values)
x_max = max(x_values)
x_range = x_max - x_min
n_range = num_points - 1
x_locs = np.array([(((x_val - x_min) * n_range) / float(x_range)) + 1
for x_val in x_values])
return x_locs | 5,326,808 |
def _update_config(args, config_dict):
    """Overlay parsed command-line arguments onto the config dict.

    Settings present on the command line take precedence over the values
    already stored in ``config_dict``.

    Args:
        args (ArgumentParser): [in] parsed argument namespace
        config_dict (dict): [in/out] config dict

    Returns: None
    Raises: NULL
    """
    if 'trainer' in config_dict:
        trainer_cfg = config_dict['trainer']
        _update_cuda_setting(args.device, trainer_cfg)
        trainer_overrides = (
            ('output_path', args.save_path),
            ('load_checkpoint', args.checkpoint),
            ('load_parameters', args.parameters),
        )
        for key, value in trainer_overrides:
            if value is not None:
                trainer_cfg[key] = value
    if 'predictor' in config_dict:
        predictor_cfg = config_dict['predictor']
        _update_cuda_setting(args.device, predictor_cfg)
        predictor_overrides = (
            ('inference_model_path', args.infer_model),
            ('save_predict_file', args.save_path),
        )
        for key, value in predictor_overrides:
            if value is not None:
                predictor_cfg[key] = value
def is_base(base_pattern, str):
    """
    base_pattern is a compiled python3 regex.
    str is a string object.

    Return True if the string matches base_pattern at its start, False
    otherwise. (Previously this returned the Match object or None despite
    documenting True/False.)
    """
    # NOTE: the parameter name shadows the builtin `str`; kept unchanged for
    # backward compatibility with existing callers.
    # match(s) is equivalent to match(s, 0, len(s)).
    return base_pattern.match(str) is not None
def stations_by_river(stations):
    """Group MonitoringStation objects by the river they are on.

    Given a list of MonitoringStation objects (stations), returns a
    dictionary mapping each river name to the list of stations found on
    that river, without duplicate stations.
    """
    rivers = {}
    for station in stations:
        stations_on_river = rivers.setdefault(station.river, [])
        # Skip stations we have already recorded for this river.
        if station not in stations_on_river:
            stations_on_river.append(station)
    return rivers
def dQ_dY(time):
    """Derivative of transformation matrix for nutation/presession with regards to the Y coordinate of CIP in GCRS

    Applies the product rule to Q = R3(-E) @ R2(-d) @ R3(E) @ R3(s): each
    term below differentiates exactly one rotation factor, scaled by the
    corresponding partial (dE/dY, dd/dY, ds/dY) via the chain rule.
    """
    # Rotation matrices
    R3_E = R3(E(time))
    R3_s = R3(s(time))
    R2_md = R2(-d(time))
    R3_mE = R3(-E(time))
    dR3_s = dR3(s(time))
    dR3_E = dR3(E(time))
    dR3_mE = dR3(-E(time))
    dR2_md = dR2(-d(time))
    # Negative signs on the first two terms come from differentiating the
    # negated angles (-E and -d) in those factors.
    return (
        dR3_mE @ R2_md @ R3_E @ R3_s * (-dE_dY(time))
        + R3_mE @ dR2_md @ R3_E @ R3_s * (-dd_dY(time))
        + R3_mE @ R2_md @ dR3_E @ R3_s * (dE_dY(time))
        + R3_mE @ R2_md @ R3_E @ dR3_s * (ds_dY(time))
    )
def calculate(x: int, y: int = 1, operation: str = None) -> int:
    """Calculates the sum (or difference) of two numbers.

    Parameters:
        `x` : int
            The first number
        `y` : int, optional
            The second number (default is `1`)
        `operation`: str, optional
            Pass "subtract" to perform subtraction (default is `None`)

    Returns:
        int
    """
    return x - y if operation == "subtract" else x + y
def test_client_get_payment_related_invalid_id(client, endpoint, errorstr):
    """An invalid formatted object ID should raise an error.

    Parametrized over `endpoint` (a client sub-resource attribute name) and
    the matching `errorstr`: fetching child "invalid" of payment tr_12345
    must raise IdentifierError with a message matching `errorstr`.
    """
    with pytest.raises(IdentifierError, match=errorstr):
        getattr(client, endpoint).with_parent_id("tr_12345").get("invalid")
def get_power_state(instance):
    """Return the power state of the received instance.

    :param instance: nova.objects.instance.Instance
    :return: nova.compute.power_state
    """
    vm_info = manage.VBoxManage.show_vm_info(instance)
    power_state = vm_info.get(constants.VM_POWER_STATE)
    return power_state
def toBoolean(val, default=True):
    """Convert strings from CSV to Python bool.

    An empty string defaults to True unless ``default`` is False, in which
    case it defaults to False. Matching is case-insensitive and ignores
    surrounding whitespace.

    Returns True/False for recognized values, and None for anything not in
    either vocabulary (preserving the historical fall-through behavior);
    callers should treat None as "unknown".
    """
    # Normalize once instead of recomputing for each membership test.
    normalized = str(val).strip().lower()
    true_items = {"true", "t", "yes", "y", "1", "on"}
    false_items = {"false", "f", "no", "n", "none", "0"}
    if default:
        true_items.add("")
    else:
        false_items.add("")
    if normalized in true_items:
        return True
    if normalized in false_items:
        return False
    return None
def cache_put(
    connection: 'Connection', cache: Union[str, int], key, value,
    key_hint=None, value_hint=None, binary=False, query_id=None,
) -> 'APIResult':
    """
    Puts a value with a given key to cache (overwriting existing value if any).

    :param connection: connection to Ignite server,
    :param cache: name or ID of the cache,
    :param key: key for the cache entry. Can be of any supported type,
    :param value: value for the key,
    :param key_hint: (optional) Ignite data type, for which the given key
     should be converted,
    :param value_hint: (optional) Ignite data type, for which the given value
     should be converted.
    :param binary: (optional) pass True to keep the value in binary form.
     False by default,
    :param query_id: (optional) a value generated by client and returned as-is
     in response.query_id. When the parameter is omitted, a random value
     is generated,
    :return: API result data object. Contains zero status if a value
     is written, non-zero status and an error description otherwise.
    """
    # Wire layout of OP_CACHE_PUT: cache id hash, flags byte, then the key
    # and value encoded as typed (or auto-detected) data objects.
    query_struct = Query(
        OP_CACHE_PUT,
        [
            ('hash_code', Int),
            ('flag', Byte),
            ('key', key_hint or AnyDataObject),
            ('value', value_hint or AnyDataObject),
        ],
        query_id=query_id,
    )
    return query_struct.perform(connection, {
        'hash_code': cache_id(cache),
        'flag': 1 if binary else 0,  # 1 = keep value in binary form
        'key': key,
        'value': value,
    })
def _process_rows(app, sheet_name, rows, names_map, lang=None):
    """
    Processes the rows of a worksheet of translations.

    This is the complement of get_bulk_app_sheets_by_name() and
    get_bulk_app_single_sheet_by_name(), from
    corehq/apps/translations/app_translations/download.py, which creates
    these worksheets and rows.

    :param app: The application being translated
    :param sheet_name: The tab name of the sheet being processed.
                       e.g. "menu1", "menu1_form1", or "Menus_and_forms"
    :param rows: The rows in the worksheet
    :param names_map: A map of sheet_name to module/form unique_id, used
                      to fetch a module/form even if it has been moved
                      since the worksheet was created
    :param lang: The language that the app is being translated into
    :return: A list of error messages or an empty list
    """
    # Nothing to process: no sheet selected or no rows in it.
    if not sheet_name or not rows:
        return []
    # Dispatch to the updater matching the sheet type; a missing module or
    # form becomes a user-facing error message rather than an exception.
    if is_modules_and_forms_sheet(sheet_name):
        updater = BulkAppTranslationModulesAndFormsUpdater(app, names_map, lang=lang)
        return updater.update(rows)
    if is_module_sheet(sheet_name):
        unique_id = names_map.get(sheet_name)
        try:
            updater = BulkAppTranslationModuleUpdater(app, sheet_name, unique_id, lang=lang)
        except ModuleNotFoundException:
            return [(
                messages.error,
                _('Invalid menu in row "%s", skipping row.') % sheet_name
            )]
        return updater.update(rows)
    if is_form_sheet(sheet_name):
        unique_id = names_map.get(sheet_name)
        try:
            updater = BulkAppTranslationFormUpdater(app, sheet_name, unique_id, lang=lang)
        except FormNotFoundException:
            return [(
                messages.error,
                _('Invalid form in row "%s", skipping row.') % sheet_name
            )]
        return updater.update(rows)
    # Unrecognized sheet name: report and skip.
    return [(
        messages.error,
        _('Did not recognize "%s", skipping row.') % sheet_name
    )]
def parse_line(line, metric):
    """Parse statistics from one line of an experiment log file.

    Returns a ``(label, values)`` tuple where the label prefix is
    "top-k", "bottom-k" or "ml" depending on the line's contents.
    """
    if "top-k" in line:
        prefix = "top-k"
    elif "bottom-k" in line:
        prefix = "bottom-k"
    else:
        prefix = "ml"
    return f"{prefix}.{metric}", parse_csv(line)
def lock(pth):
    """
    Acquire an svn lock on the file at ``pth``.

    Raises:
        FileNotFoundError: if ``pth`` does not exist. (Previously this was
            an ``assert``, which is silently stripped under ``python -O``;
            explicit validation always runs.)
    """
    if not exists(pth):
        raise FileNotFoundError("%s does not exist!" % pth)
    svnopen(['lock', pth])
def test_for_usb_ext4():
    """Test Purpose: Verify that the EXT4 filesystem support is enable in the kernel

    Args:
        None
    """
    command = "cat /boot/config-* | grep ^CONFIG_EXT4_FS="
    # grep exits 0 only when the CONFIG_EXT4_FS= line is present.
    status = subprocess.call(command + " > /dev/null 2>&1", shell=True)
    assert status == 0, "EXT4 support is disabled in the kernel"
def print_menu():
    """Print the main categories of animals as a numbered menu."""
    categories = {'1': 'Fish', '2': 'Birds', '3': 'Mammals', '4': 'Insects', '5': 'Exit'}
    print('Learn about Animal Migration Patterns!!\nChoose a category...')
    for number, label in categories.items():
        print(number + ". " + label)
def get_adjusted_pvalues(pvals: pd.Series, fdr_thresh: float = 0.05) \
        -> Tuple[pd.Series, float]:
    """
    Control the false discovery rate for a series of p-values.

    Accepts an unsorted series of p-values and an FDR threshold (1).
    Returns:
    1) the FDR associated with each p-value,
    2) the p-value cutoff for the given FDR.

    References:
    (1) Storey, J. D., & Tibshirani, R. (2003). Statistical significance
    for genomewide studies. Proceedings of the National Academy of Sciences,
    100(16), 9440-9445. https://doi.org/10.1073/pnas.1530509100
    """
    m = pvals.size
    # Sort ascending, remembering the permutation so we can undo it later.
    order = np.argsort(pvals)
    sorted_p = pvals.values[order]
    fdr_vals = np.nan * np.zeros(len(sorted_p), dtype=np.float64)
    crit_p = 0
    crossed = False
    # Walk from the largest p-value down, enforcing monotonicity of the
    # adjusted values along the way.
    fdr_vals[-1] = sorted_p[-1]
    for pos in range(m - 2, -1, -1):
        false_positives = m * sorted_p[pos]
        fdr = false_positives / (pos + 1)
        fdr_vals[pos] = min(fdr, fdr_vals[pos + 1])
        # The first (largest) p-value whose FDR clears the threshold
        # becomes the critical cutoff.
        if fdr <= fdr_thresh and not crossed:
            crit_p = sorted_p[pos]
            crossed = True
    # Undo the sort so results line up with the caller's index.
    inverse_order = np.argsort(order)
    adj_p = pd.Series(index=pvals.index, data=fdr_vals[inverse_order],
                      name='adjusted_pval')
    return adj_p, crit_p
def solve_nonogram(constraints):
    """Solve any kind of board of the game and return all possible
    solutions for it."""  # BTM
    solutions = [solve_easy_nonogram(constraints)]
    return solutions
def get_libs():
    """
    Get all of the libraries defined in lib/definitions.
    This is called automatically when the package is imported.

    Reads every JSON definition file under lib/definitions, downloads any
    listed package missing from lib/, creates package directories (with
    __init__.py files) for dotted module names, and import-checks each
    downloaded module.

    NOTE(review): this code is Python-2 era (``urllib2``, text-mode writes
    of downloaded data); the download path cannot run under Python 3 as
    written.

    :return: dict of counters: tried / downloaded / failed / exists
    """
    print(">> Checking for libraries to download..")
    definitions = os.listdir("lib/definitions")
    tried = 0
    downloaded = 0
    failed = 0
    exists = 0
    definitions.remove("__init__.py")
    for filename in definitions:
        if not filename.endswith(".json"):
            # NOTE(review): no `continue` here — the unknown file is still
            # opened and parsed below; likely unintended.
            print("Unknown definition file type: %s" % filename)
        try:
            fh = open("lib/definitions/%s" % filename, "r")
            tests = json.load(fh)
            packs = tests["packages"]
        except Exception as e:
            # NOTE(review): the format string has two placeholders but only
            # `e` is supplied — this line itself raises TypeError if reached.
            print("[ERROR] Unable to load definitions file %s - %s" % e)
        else:
            for pack in packs:
                if not os.path.exists("lib/%s" % pack["filename"]):
                    tried += 1
                    print(">> Downloading library: %s" % pack["name"])
                    print(" > Attribution: %s" % pack["attrib"])
                    if "." in pack["module"]:
                        # Dotted module: create the package directory chain
                        # and drop an __init__.py at each level.
                        folders = pack["module"].split(".")
                        folders.pop()
                        path = "lib/%s" % "/".join(folders)
                        try:
                            if not os.path.exists(path):
                                os.makedirs(path)
                                current_path = "lib/"
                                for folder in folders:
                                    current_path += (folder + "/")
                                    open("%s/__init__.py" % current_path, "w")
                        except Exception as e:
                            print("[ERROR] Unable to create path %s - %s" \
                                % (path, e))
                            continue
                    try:
                        rq = urllib2.urlopen(pack["url"])
                    except Exception as e:
                        print("[ERROR] %s" % e)
                        print("[ERROR] Please report this to the developers."
                              " Attempted URL: %s" % pack["url"])
                        print("")
                        failed += 1
                    else:
                        try:
                            fh = open("lib/%s" % pack["filename"], "w")
                            data = rq.read()
                            # Normalize Windows line endings before writing.
                            data = data.replace("\r\n", "\n")
                            fh.write(data)
                            fh.flush()
                            fh.close()
                        except Exception as e:
                            print("[ERROR] Unable to write file: %s" % e)
                            print("[ERROR] Do you have write access to this "
                                  "file?")
                            print("")
                            failed += 1
                        else:
                            # Sanity check: the downloaded module must import.
                            try:
                                importlib.import_module(
                                    "lib.%s" % pack["module"]
                                )
                            except Exception as e:
                                print("[ERROR] Unable to import module: %s"
                                      % e)
                                print("[ERROR] Please report this to the "
                                      "developers.")
                                print("")
                                failed += 1
                            else:
                                downloaded += 1
                else:
                    exists += 1
    if not tried:
        print(">> All libraries are present. Nothing to do.")
    else:
        print("")
        print(">> Done - %s failed / %s succeeded" % (failed, downloaded))
        print("")
    return {"tried": tried, "downloaded": downloaded,
            "failed": failed, "exists": exists}
def adjust_list_human_readable(entry_context_with_spaces, entry_context):
    """Copy the "ID" value into the human-readable dict so its key matches
    the table headers. A missing ID defaults to an empty string.
    """
    identifier = entry_context.get("ID", "")
    entry_context_with_spaces["ID"] = identifier
def downsample_data(data, scale_factor, order):
    """
    Downsample (or upsample) ``data`` by ``scale_factor`` using spline
    interpolation of the given ``order`` (0 = nearest neighbour).

    TODO: Scikit-image has a transform module that works better,
    this function should have the option to use either

    :param data: array-like input
    :param scale_factor: zoom factor (scalar, or one per axis)
    :param order: spline interpolation order passed to scipy
    :return: the resampled array
    """
    # scipy.ndimage.interpolation is a deprecated (and since removed)
    # namespace; zoom lives directly in scipy.ndimage.
    return scipy.ndimage.zoom(data, scale_factor, order=order, mode="constant")
def uwid(string):
    """Return the display width of a string."""
    if not PY3:
        # Python 2 hands us bytes; decode before measuring.
        string = string.decode('utf-8', 'ignore')
    total = 0
    for ch in string:
        total += utf_char_width(ch)
    return total
def check_double_quote(inpstring):
    """
    Check if some strings need a double quote (if some spaces are inside the
    string, it will need to be inside two double quotes).
    E.g.: --sfmt="TIFF (unstitched, 3D)"

    Input:
       inpstring: input string or list of strings
    Output:
       newstring = new string (or list of strings) corrected by quoting if
       necessary
    """
    # The list and scalar cases previously duplicated the same quoting
    # logic; both now share _quote_if_needed.
    if isinstance(inpstring, list):
        return [_quote_if_needed(item) for item in inpstring]
    return _quote_if_needed(inpstring)


def _quote_if_needed(value):
    """Quote a single string when it contains a space and is not already
    quoted; for "key=value" strings only the value part is quoted."""
    if value.find(" ") == -1:
        # No space: nothing to protect.
        return value
    if value.find('"') != -1:
        # Already contains a quote: assume the caller quoted it.
        return value
    eq = value.find("=")
    if eq != -1:
        # Quote only the right-hand side of key=value.
        return value[0 : eq + 1] + '"' + value[eq + 1 :] + '"'
    return '"' + value + '"'
def get_wheel_index_data(py_version, platform_version, url=torch_nightly_wheel_index, override_file=torch_nightly_wheel_index_override):
    """
    Collect nightly-wheel download URLs from the wheel index HTML.

    :param py_version: CPython tag to keep (matched against the wheel
        filename's python tag)
    :param platform_version: platform tag to keep
    :param url: wheel index page fetched when no override file exists
    :param override_file: local HTML file used instead of the network when
        present and non-empty
    :return: dict mapping package name -> {version: full download URL}
    """
    # Prefer the local override file (useful offline and in tests).
    if os.path.isfile(override_file) and os.stat(override_file).st_size:
        with open(override_file) as f:
            data = f.read()
    else:
        r = requests.get(url)
        r.raise_for_status()
        data = r.text
    soup = BeautifulSoup(data, 'html.parser')
    data = defaultdict(dict)
    for link in soup.find_all('a'):
        # Wheel filename anatomy: name-version-pytag-abitag-platform.whl
        pkg, version, py, py_m, platform = re.search("([a-z]*)-(.*)-(.*)-(.*)-(.*)\.whl", link.text).groups()
        version = urllib.parse.unquote(version)
        if py == py_version and platform == platform_version:
            # NOTE(review): os.path.join on a URL happens to work on POSIX
            # but urllib.parse.urljoin would be the safe choice — confirm.
            full_url = os.path.join(torch_wheel_nightly_base, link.text)
            data[pkg][version] = full_url
    return data
def get_linear_schedule_with_warmup(
    num_warmup_steps, num_training_steps, last_epoch=-1
):
    """
    Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0,
    after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.

    Args:
        num_warmup_steps (:obj:`int`):
            The number of steps for the warmup phase.
        num_training_steps (:obj:`int`):
            The total number of training steps.
        last_epoch (:obj:`int`, `optional`, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        function handle to create `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    def lr_lambda(current_step: int):
        # Warmup: ramp linearly from 0 up to 1 over the first
        # num_warmup_steps steps.
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # Decay: ramp linearly from 1 down to 0 at num_training_steps,
        # clamped at 0 afterwards.
        remaining = float(num_training_steps - current_step)
        decay_span = float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, remaining / decay_span)

    return partial(lr_scheduler.LambdaLR, lr_lambda=lr_lambda, last_epoch=last_epoch)
def search_image_targets_for_tag(trust_data: dict, image: "Image"):
    """
    Look up the digest for an `image` tag in the notary `trust_data`.

    Returns the hex-encoded sha256 digest for the image's tag, or None
    when the tag is not present in the trust data.
    """
    if image.tag not in trust_data:
        return None
    digest_b64 = trust_data[image.tag]["hashes"]["sha256"]
    return base64.b64decode(digest_b64).hex()
def cleanup():
    """Cleanup handler run on Ctrl+C: turn off every LED and release the
    PiFace board."""
    global ledc, pif, switch_listener
    switch_listener.deactivate()
    for led_index in range(8):
        pif.leds[led_index].turn_off()
    pif.deinit_board()
    print("")
    print("PiFace cleanup complete.")
def convert_gene_ids(geneList,target):
    """
    Takes a list of gene ids (int) and returns the target field for each.

    Normally, the database can be used; however this script is used, for
    example, if a species has not yet been entered in the db.

    :param geneList: list (or ndarray) of integer Entrez gene IDs
    :param target: 'taxid' or 'symbol' — which gene_info column to return
    :return: list with one entry per input gene (None where not found)
    :raises Exception: on an invalid target or a missing gene_info.db file
    """
    if target not in ['taxid','symbol']:
        raise Exception("Invalid target")
    geneInfoFile = os.path.join(os.path.split(os.path.abspath(__file__))[0],"gene_info.db")
    if not os.path.exists(geneInfoFile):
        raise Exception("ERROR: cannot find gene info file")
    if not isinstance(geneList, np.ndarray):
        geneList = np.array(geneList)
    toReturn = np.array([None for i in range(len(geneList))])
    found = 0
    # 'rU' mode and file.next() were Python-2-only (removed in Python 3);
    # universal newlines are the default in text mode now, and `with`
    # guarantees the file is closed.
    with open(geneInfoFile, 'r') as geneInfoFID:
        next(geneInfoFID)  # skip the header line
        for record in geneInfoFID:
            record = record.rstrip("\n")
            record = record.split("\t")
            # Skip comment lines and malformed rows; gene_info has 15
            # tab-separated columns (taxid, geneID, symbol, locus tag,
            # synonyms, dbXrefs, chromosome, map location, description,
            # type, nomenclature symbol/name/status, designations, date).
            if re.search(r"^\#", record[0]) or len(record) != 15:
                continue
            geneID = int(record[1])
            indx = np.where(geneList == geneID)[0]
            if len(indx) == 0:
                continue
            found += 1
            indx = indx[0]
            if target == 'taxid':
                toReturn[indx] = int(record[0])
            if target == 'symbol':
                toReturn[indx] = record[2]
    toReturn = toReturn.tolist()
    print("convert_gene_ids: %s/%s genes found."%(found,len(toReturn)))
    return toReturn
def calculate_intersection(a: "BoundingBox", b: "BoundingBox") -> int:
    """Calculate the intersection area of two bounding boxes.

    :param BoundingBox a: The first bounding box.
    :param BoundingBox b: The second Bounding box.
    :returns: The intersection area of ``a`` and ``b`` (0 when disjoint).
    :rtype: int
    """
    left = max(a.upper_left_corner.x, b.upper_left_corner.x)
    right = min(
        a.upper_left_corner.x + a.size.width, b.upper_left_corner.x + b.size.width
    )
    top = max(a.upper_left_corner.y, b.upper_left_corner.y)
    bottom = min(
        a.upper_left_corner.y + a.size.height, b.upper_left_corner.y + b.size.height
    )
    # Clamp each dimension separately. The previous single clamp on the
    # product was wrong for disjoint boxes: both spans are negative there,
    # so their product is spuriously positive.
    overlap_width = max(right - left, 0)
    overlap_height = max(bottom - top, 0)
    return overlap_width * overlap_height
def get_linux_ip(eth):
    """Return the IPv4 address bound to network interface ``eth`` (Linux only).

    Uses the SIOCGIFADDR ioctl (0x8915) on a throwaway UDP socket.
    :param eth: interface name, e.g. "eth0"
    :raises AssertionError: when not running on a POSIX system
    """
    assert os.name == 'posix', NotLinuxSystemError('不是Linux系统')
    import fcntl
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifname = eth[:15]
    if isinstance(ifname, str):
        # struct.pack('256s', ...) requires bytes on Python 3.
        ifname = ifname.encode()
    # SIOCGIFADDR fills a 256-byte ifreq buffer; the packed IPv4 address
    # occupies bytes 20..24, which must be sliced out *before* inet_ntoa
    # (inet_ntoa requires exactly 4 bytes — the original passed the whole
    # buffer and then sliced the dotted string, which cannot work).
    packed = fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', ifname))
    s.close()
    return socket.inet_ntoa(packed[20:24])
def callback_help(call: CallbackQuery):
    """
    Help statement detailed help

    Shows the detailed usage text for the dispatcher selected through an
    inline-keyboard callback.
    :param call: the Telegram callback query being answered
    :return:
    """
    try:
        # Skip the first 5 characters of call.data to get the dispatcher
        # index (presumably a "help:" prefix — confirm against the
        # keyboard builder).
        d_id = int(call.data[5:])
        dispatchers = get_manager().dispatchers
        show_usage_for(call.message, dispatchers[d_id])
    except Exception as e:
        # Any failure (bad index, missing dispatcher) is logged and
        # reported back to the user via the callback answer.
        logger.error(f'{call.id}:Unknown error!', e)
        logger.error(traceback.format_exc())
        bot.answer_callback_query(call.id, _("Unknown error!\n"+traceback.format_exc()))
def test_out_of_date_range(client: FlaskClient):
    """
    We have generated summaries for this product, but the date is out of the product's date range.

    The page must still render and report zero datasets for that year.
    """
    html = get_html(client, "/wofs_albers/2010")
    # The common error here is to say "No data: not yet generated" rather than "0 datasets"
    assert check_dataset_count(html, 0)
    assert "Historic Flood Mapping Water Observations from Space" in html.text
def build_nn_model(input_shape):
    """Generate NN model

    :param: input_shape (tuple): shape of the input
        (input_shape[0]: the intervals, input_shape[1]: the value of the
        mfcc for those intervals)
    :return model: NN model
    """
    l2 = keras.regularizers.l2
    # Input layer: flatten the multi-dimensional (intervals x mfcc) array
    # into a single vector.
    model_layers = [keras.layers.Flatten(input_shape=input_shape)]
    # Three fully-connected hidden layers, each L2-regularised and
    # followed by dropout to curb overfitting.
    for width in (512, 256, 64):
        model_layers.append(
            keras.layers.Dense(width, activation="relu",
                               kernel_regularizer=l2(0.001)))
        model_layers.append(keras.layers.Dropout(0.3))
    # Output layer. softmax: the class scores sum to 1; predicting picks
    # the neuron with the highest value.
    model_layers.append(keras.layers.Dense(10, activation="softmax"))
    return keras.Sequential(model_layers)
def main():
    """
    Main routine for this workload; spawn two proposers (one dying, one
    surviving) and NETWORK_SIZE - 2 replicas.
    """
    # a network is a dictionary of names => (host, port)
    # we first build a network; then we spawn proposers, and finally
    # spawn replicas
    network = {}
    # initialize the network; xrange was Python-2-only.
    for i in range(NETWORK_SIZE):
        name = "M" + str(i)
        network[name] = (HOST, START_PORT + i)
    # initialize the proposer processes: M0 dies mid-protocol, M1 survives.
    proposer = Process(target=dying_proposer_entrypoint, args=("M0", network))
    proposer.start()
    proposer = Process(target=surviving_proposer_entrypoint, args=("M1", network))
    proposer.start()
    # initialize all the replicas
    for name in network:
        # M0 and M1 are our proposers; we ignore them
        if name in ("M0", "M1"):
            continue
        replicas = Process(target=replicas_entrypoint, args=(name, network))
        replicas.start()
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """
    Save the given PyTorch object as a checkpoint.

    When ``is_best`` is true, additionally copy the saved file to
    'model_best.pth.tar'.

    Parameters
    ----------
    state : PyTorch object
        The object to save
    is_best : bool
        If true, create a copy of the saved object.
    filename : string, default = 'checkpoint.pth.tar'
        The name to give the saved object
    """
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
def window_sumsquare(
    window,
    n_frames,
    hop_length=512,
    win_length=None,
    n_fft=2048,
    dtype=np.float32,
    norm=None,
):
    """Compute the sum-square envelope of a window function at a given hop length.

    This estimates modulation effects induced by windowing observations in
    short-time Fourier transforms.

    Parameters
    ----------
    window : string, tuple, number, callable, or list-like
        Window specification, as in `get_window`
    n_frames : int > 0
        The number of analysis frames
    hop_length : int > 0
        The number of samples to advance between frames
    win_length : [optional]
        The length of the window function. By default, this matches ``n_fft``.
    n_fft : int > 0
        The length of each analysis frame.
    dtype : np.dtype
        The data type of the output
    norm : [optional]
        Normalization mode forwarded to ``util.normalize``.

    Returns
    -------
    wss : np.ndarray, shape=``(n_fft + hop_length * (n_frames - 1))``
        The sum-squared envelope of the window function
    """
    if win_length is None:
        win_length = n_fft
    # Total output length covers all hops plus one full frame.
    total_len = n_fft + hop_length * (n_frames - 1)
    envelope = np.zeros(total_len, dtype=dtype)
    # Squared, normalized window, centered inside an n_fft frame.
    win_sq = get_window(window, win_length)
    win_sq = util.pad_center(util.normalize(win_sq, norm=norm) ** 2, n_fft)
    # Accumulate the squared window at every hop position.
    __window_ss_fill(envelope, win_sq, n_frames, hop_length)
    return envelope
def main():
    """Parse the command-line options for the autobash tool and dispatch to ``run``."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--create_backup', action='store_true', dest='create_backup', default=False,
                        help='Create a new backup of terminator config and remove all previous autobash sections from it.')
    parser.add_argument('--append_backup', dest='project_details', nargs=3, default=[],
                        help=('Append project details to backup. Usage:\n'
                              '--append_backup PROJECT_NAME PROJECT_DIR PROJECT_CONDA_BASHRC'))
    parser.add_argument('--use_backup', action='store_true', dest='use_backup', default=False,
                        help='Copy backup in its current state as the new terminator config file.')
    args = parser.parse_args()
    run(args.create_backup, args.project_details, args.use_backup)
def getLatestCode(appDbConnStr: str) -> Optional[ICode]:
    """get latest created code from db

    Fetches the id of the most recently issued non-deleted code, then
    resolves it to a full code object via getCodeById.

    Args:
        appDbConnStr (str): app db connection string
    Returns:
        Optional[ICode]: code object, or None when the query fails, returns
        unexpected columns, or yields no rows
    """
    # Newest code_issue_time among non-deleted rows; highest id breaks ties.
    latestIdFetchsql = """
    select id
    from code_book.op_codes
    where code_issue_time=(select max(code_issue_time) from code_book.op_codes where is_deleted=0)
    and is_deleted=0
    order by id desc
    """
    # initialise code object
    code: Optional[ICode] = None
    colNames = []
    dbRows = []
    dbConn = None
    dbCur = None
    try:
        # get connection with raw data table
        dbConn = cx_Oracle.connect(appDbConnStr)
        # get cursor and execute fetch sql
        dbCur = dbConn.cursor()
        dbCur.execute(latestIdFetchsql)
        # column names come from the cursor description metadata
        colNames = [row[0] for row in dbCur.description]
        # fetch all rows
        dbRows = dbCur.fetchall()
    except Exception as err:
        # best-effort: log the DB error and fall through to the empty result
        dbRows = []
        print('Error while fetching latest code id from app db')
        print(err)
    finally:
        # closing database cursor and connection
        if dbCur is not None:
            dbCur.close()
        if dbConn is not None:
            dbConn.close()
    targetColumns = ["ID"]
    # Sanity check: every fetched column must be one of the expected ones.
    if (False in [(col in targetColumns) for col in colNames]):
        # all desired columns not fetched, hence return empty
        return None
    if len(dbRows) == 0:
        return None
    row = dbRows[0]
    # NOTE(review): ICode["id"] as an annotation assumes ICode supports
    # subscription at runtime - verify against the ICode definition.
    latestCodeId: ICode["id"] = row[colNames.index('ID')]
    # get latest code by id
    code = getCodeById(appDbConnStr=appDbConnStr, codeId=latestCodeId)
    return code
async def create_arrest_csv(data: list, FILE="./data/csv/arrest/arrest.csv"):
    """Write arrest records to a CSV file, preceded by a fixed header row.

    :param data: list of pre-formatted CSV line strings (each expected to
        end with a newline)
    :param FILE: destination path for the CSV file
    :return: None
    """
    header = "arrest_time, case_number, arrest_location, offense, arrestee_name, arrestee_birthday, city, state, zip, status, officer\n"
    # The context manager closes the file; the original additionally called
    # fp.close() on the already-closed handle, which was redundant.
    with open(FILE, 'w+') as fp:
        fp.write(header)
        fp.writelines(data)
def start_initialization_pd(update: Update, context: CallbackContext) -> str:
    """Conversation entry point: user tapped "Заполнить данные" ("fill in data").

    Re-renders the tapped message, sends an explanation about personal-data
    collection, asks for the last name, and moves the conversation to the
    LAST_NAME state.
    """
    u = User.get_user(update, context)
    current_text = update.effective_message.text
    # Re-set the same text; presumably this removes the inline keyboard from
    # the tapped message - TODO confirm.
    update.effective_message.edit_text(
        text=current_text
    )
    # Explain why personal data is being collected.
    context.bot.send_message(
        chat_id=u.user_id,
        text=static_text.ABOUT_FILLING_PERSONAL_DATA
    )
    # First question of the personal-data flow.
    update.effective_message.reply_text(
        text=static_text.ASK_LAST_NAME,
        parse_mode=ParseMode.HTML
    )
    # Next conversation state, handled by the conversation handler.
    return LAST_NAME
def rpm_comments(table=RPMComment, prefix='comment_', relationships=False):
    """Get filters for rpm comments.

    :param sqlalchemy.ext.declarative.api.declarativemeta table: database model
    :param string prefix: prefix of the name of the filter
    :param bool relationships: when true, also include filters for the
        user/comparison/difference foreign-key columns
    :return dict: dict of filters
    """
    filters = dict(
        **request_parser.equals(
            table.id,
            name=prefix + 'id',
            function=int,  # coerce query-string values to integers
        ),
    )
    if relationships:
        filters.update(dict(
            **request_parser.equals(table.id_user, name=prefix + 'id_user'),
            **request_parser.equals(
                table.id_comp,
                name=prefix + 'id_comp',
                function=int,
            ),
            **request_parser.equals(
                table.id_diff,
                name=prefix + 'id_diff',
                function=int,
            ),
        ))
    return filters
def split_line(line) -> list:
    """Split a dmp-file line on the ' |' delimiter and strip each field."""
    return list(map(str.strip, line.split(" |")))
def test_qguiapplication_functions():
    """Test functions mapping for QtGui.QGuiApplication.

    The compat layer is expected to expose the Qt5-style ``exec_`` alias.
    """
    assert QtGui.QGuiApplication.exec_ is not None
def translate(text):
    """Return *text* unchanged (no-op translation stub)."""
    return text
def stream_bytes(data, chunk_size=default_chunk_size):
    """Gets a buffered generator for streaming binary data.

    Wraps *data* in a :class:`BytesFileStream` and returns its
    :mimetype:`multipart/form-data` body generator together with the
    corresponding headers.

    Parameters
    ----------
    data : bytes
        The data bytes to stream
    chunk_size : int
        The maximum size of each stream chunk

    Returns
    -------
    (generator, dict)
    """
    file_stream = BytesFileStream(data, chunk_size=chunk_size)
    return file_stream.body(), file_stream.headers()
def helper(n, big):
    """Recursively find the largest decimal digit of ``n``.

    :param n: int, an integer number (sign is ignored)
    :param big: the largest digit seen so far
    :return: int, the final largest digit
    """
    n = abs(n)
    if n == 0:
        return big
    # check the last digit of a number
    big = max(big, n % 10)
    # Bug fix: the original recursed with true division (n / 10), which
    # produces floats in Python 3 and corrupts the digit extraction; integer
    # floor division keeps n an int all the way down.
    return helper(n // 10, big)
def test_phrase_search(query, output):
    """Makes sure searching for individual instances of a query works.

    Parametrized elsewhere: ``query`` is the phrase to search for and
    ``output`` the expected result of ``Corpus.search_occurrences``.
    """
    # Small fixed corpus of three documents.
    corpus = Corpus(
        ["The dog ran to the cat", "The dog ran to the other dog", "The cat sat"]
    )
    assert corpus.search_occurrences(query) == output
async def get_transfer_list(request: Request):
    """Return the transfer list for an authenticated user as a JSON response.

    Responds with a list containing either the Globus transfer status
    entries or an error entry when no Globus authorization is available.
    """
    transfer_status_list = []
    # Code for globus
    tokens = await globus.verify_globus_code(request)
    if tokens:
        globus_item_count = 10  # default number of items to fetch
        if 'globus_item_count' in request.query_params:
            # Bug fix: the value was read from path_params even though the
            # membership check above inspects query_params.
            globus_item_count = request.query_params['globus_item_count']
        transfer_client = await globus.get_transfer_client(request)
        transfer_response = await globus.get_transfer_globus_list(transfer_client, globus_item_count)
        transfer_status_list.append(transfer_response)
    else:
        error_response = {'globus' : 'No authorization available'}
        transfer_status_list.append(error_response)
    # TODO Other type of transfers
    transfer_status_json = jsonable_encoder(transfer_status_list)
    return JSONResponse(content=transfer_status_json, status_code=200)
def conv_out_shp(IR, IC, KR, KC, border_mode, subsample):
    """Return the (rows, cols) output shape of a 2-D convolution.

    ``IR``/``IC`` are the input dimensions, ``KR``/``KC`` the kernel
    dimensions, ``border_mode`` is ``'valid'`` or ``'full'``, and
    ``subsample`` is the (row, col) stride pair.
    """
    ssR, ssC = subsample

    def _ceildiv(num, den):
        # Ceiling division (denominator assumed positive).
        return -(-num // den)

    if border_mode == 'valid':
        rows, cols = IR - KR + 1, IC - KC + 1
    elif border_mode == 'full':
        rows, cols = IR + KR - 1, IC + KC - 1
    else:
        raise NotImplementedError(border_mode)
    return _ceildiv(rows, ssR), _ceildiv(cols, ssC)
def getPageNumber(ffile):
    """
    Extract the page number embedded in a file name.

    The number is the text between the final underscore and the first dot
    that follows it, e.g. ``scan_007.png`` -> ``'007'``.

    :param ffile: path-like or string file name
    :return: page number as a string
    """
    tail = str(ffile).rsplit('_', 1)[-1]
    return tail.partition('.')[0]
def colors_stepsort(r, g, b, repetitions=1):
    """
    Sort key for colors: hue-step sorting for more perceptually uniform colormaps.

    Returns a (hue_step, luminosity, value_step) tuple; on odd hue bands the
    value step and luminosity are inverted so adjacent bands alternate
    direction instead of producing a sawtooth.
    """
    # Perceived-luminosity approximation: weighted RGB under a square root.
    lum = np.sqrt(.241 * r + .691 * g + .068 * b)
    h, s, v = colorsys.rgb_to_hsv(r, g, b)
    h2 = int(h * repetitions)
    v2 = int(v * repetitions)
    # NOTE: the original also computed int(lum * repetitions) but never used
    # it; the unused local has been removed.
    if h2 % 2 == 1:
        # Reverse direction on every other hue band.
        v2 = repetitions - v2
        lum = repetitions - lum
    return (h2, lum, v2)
def _verify_monotonic(haystack):
"""Haystack items shall be non-descending. Dups are OK."""
prev = haystack[0]
for item in haystack:
assert item >= prev, item
prev = item | 5,326,858 |
def get_string_hash(string: str, algorithm_name: str):
    """Calculates the hash digest of a string.

    Args:
        string: str: The string to digest (encoded as UTF-8 before hashing).
        algorithm_name: str: The name of the algorithm to hash the string with.

    Returns:
        The hash digest as a hex string.
    """
    digest = _get_algorithm(algorithm_name)
    digest.update(string.encode('utf-8'))
    return digest.hexdigest()
def psd_error(times,rates,errors):
    """
    obtain errors for the best frequency estimate of the signal

    Bootstraps the light curve 1000 times: each iteration perturbs every
    nonzero rate by its Gaussian error, resamples the time axis, computes a
    Lomb-Scargle periodogram, and records the peak power and its frequency
    within the 8.2e-6..8.4e-6 Hz band. Returns (freqs_list, psds_list) of
    the per-iteration peaks.
    """
    """
    print(len(times),len(rates),len(errors))
    newdatachoice = np.random.choice(len(times),size=int(0.1*len(times)))
    newtimes = list(np.array([times[0]])) + list(np.array([times[-1]])) + list(times[np.array(list(set(newdatachoice)))])
    newrates = list(np.array([rates[0]])) + list(np.array([rates[-1]])) + list(rates[np.array(list(set(newdatachoice)))])
    newerrs = list(np.array([errors[0]])) + list(np.array([errors[-1]])) + list(errors[np.array(list(set(newdatachoice)))])
    times = newtimes
    rates = newrates
    errors = newerrs
    print(len(times),len(rates),len(errors))
    """
    freqs_list = []
    psds_list = []
    # 1000 bootstrap iterations
    for j in tqdm(range(1000)):
        new_rates = np.zeros(len(rates))
        # Perturb each nonzero rate by its reported Gaussian error; zero
        # rates (presumably gaps) stay at zero.
        for i in range(len(rates)):
            if rates[i] != 0:
                new_rates[i] = np.random.normal(loc=rates[i],scale=errors[i])
        trunc_times = times-times[0]
        # Bootstrap resample of sample indices; set() drops duplicate draws,
        # so each iteration actually uses a unique-index subsample.
        newchoice = np.random.choice(len(trunc_times),size=len(trunc_times))
        rand_times = trunc_times[np.array(list(set(newchoice)))]
        rand_rates = new_rates[np.array(list(set(newchoice)))]
        # lsp is a project helper - assumed to return the angular
        # frequencies, the power spectrum, and three significance levels.
        omega,psd,prob3,prob4,prob5 = lsp(rand_times,rand_rates)
        nu_reg = omega/(2.0*np.pi)
        freq = omega/(2*np.pi)
        # Peak power restricted to the band of interest (~8.3e-6 Hz).
        psds_list.append( np.max(psd[(freq>=8.2e-6)&(freq<=8.4e-6)]) )
        freqs_list.append( freq[psd==psds_list[-1]][0])
    #plt.figure()
    #plt.plot(freq,psd,'rx-')
    #plt.show()
    return freqs_list,psds_list
def solve(global_step):
    """add solver to losses

    Configures the learning rate schedule and optimizer, sums the collected
    model and regularization losses, applies gradients to the trainable
    variables, optionally appends batch-norm moving-average updates, and
    returns the grouped training op.
    """
    # learning rate (82783 is presumably the training-set size fed to the
    # decay schedule - TODO confirm)
    lr = _configure_learning_rate(82783, global_step)
    optimizer = _configure_optimizer(lr)
    tf.summary.scalar('learning_rate', lr)
    # compute and apply gradient
    losses = tf.get_collection(tf.GraphKeys.LOSSES)
    regular_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    regular_loss = tf.add_n(regular_losses)
    out_loss = tf.add_n(losses)
    total_loss = tf.add_n(losses + regular_losses)
    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('out_loss', out_loss)
    tf.summary.scalar('regular_loss', regular_loss)
    update_ops = []
    variables_to_train = _get_variables_to_train()
    # update_op = optimizer.minimize(total_loss)
    gradients = optimizer.compute_gradients(total_loss, var_list=variables_to_train)
    grad_updates = optimizer.apply_gradients(gradients,
            global_step=global_step)
    update_ops.append(grad_updates)
    # update moving mean and variance
    if FLAGS.update_bn:
        update_bns = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        update_bn = tf.group(*update_bns)
        update_ops.append(update_bn)
    return tf.group(*update_ops)
def get_features_from_policy(env, policy):
    """Represent policies with average feature vector.

    This only makes sense for linear reward functions, but it is only used for the
    HighwayDriving environment.

    Rolls out *policy* for N episodes, accumulating the per-step "gp_repr"
    feature vectors from the env info dict, and returns their sum averaged
    over episodes.
    """
    assert isinstance(env.unwrapped, HighwayDriving)
    assert isinstance(policy, FixedPolicy)
    N = 10  # number of rollout episodes to average over
    features = np.zeros(env.Ndim_repr)
    for i in range(N):
        obs = env.reset()
        done = False
        while not done:
            act = policy.get_action(obs)
            obs, reward, done, info = env.step(act)
            # feature contribution of this transition
            features += info["gp_repr"]
    features /= N
    return features
def get_files_from_folder(directory, extension=None):
    """Recursively collect absolute file paths under *directory*.

    When *extension* is given, only paths ending with it are returned.
    """
    # NOTE Can be replaced by glob for newer python versions
    found = []
    for root, _, names in os.walk(directory):
        found.extend(os.path.abspath(os.path.join(root, name)) for name in names)
    if extension is None:
        return found
    return [path for path in found if path.endswith(extension)]
def predicate(line):
    """
    Return ``True`` for lines that should be kept.

    A line is rejected when it contains a ``#`` anywhere, not only at the
    start (the previous docstring said "starting with", but the code has
    always matched any position).
    """
    return "#" not in line
def pad_slices(ctvol, max_slices):  # Done testing
    """For <ctvol> of shape (slices, side, side) pad the slices to shape
    max_slices for output of shape (max_slices, side, side).

    Padding is split evenly before/after the existing slices and filled
    with the volume's minimum value. Raises AssertionError when ctvol
    already has more than max_slices slices.
    """
    padding_needed = max_slices - ctvol.shape[0]
    # Bug fix: the message previously concatenated without a space
    # ("...by3"); an f-string keeps it readable.
    assert padding_needed >= 0, f'Image slices exceed max_slices by {-padding_needed}'
    if padding_needed > 0:
        before_padding = padding_needed // 2
        after_padding = padding_needed - before_padding
        ctvol = np.pad(ctvol, pad_width=((before_padding, after_padding), (0, 0), (0, 0)),
                       mode='constant', constant_values=np.amin(ctvol))
    assert ctvol.shape[0] == max_slices
    return ctvol
def average_img_from_dir(path_data_dir,filepat="*", \
                        parentaslabel=False,\
                        labels=[],\
                        sampling_rate=0.001,\
                        title="average image") :
    """
    create and visualize average image of given dataset
    dataset_path = path to dataset
    sampling_rate = sampling ratio to visualize value in [0,1]
    return
    average_image = average image of the given dataset

    NOTE(review): labels=[] is a mutable default argument; harmless here
    because it is only read, but worth fixing. Sampling is unseeded.
    """
    # Collect candidate files; when parentaslabel is set, keep only files
    # whose parent-directory label is in *labels*.
    data = [ f for lbl, f in gen_find(filepat,path_data_dir,parentaslabel,labels) \
            if not parentaslabel or ( parentaslabel and lbl in labels) ]
    num_elem = len(data)
    ds_size= int(num_elem * sampling_rate)
    print ("# sample : {} sampling_rate : {} # of data : {}".format(ds_size,sampling_rate,num_elem))
    sampled_data = random.sample(data, ds_size)
    # Seed the running average with the first sampled image; all others are
    # resized to its dimensions.
    img_avg = cv2.imread(sampled_data[0])
    h, w = img_avg.shape[:2]
    nSum = 1
    for i in sampled_data[1:] :
        imga = cv2.resize(cv2.imread(i),(w, h), interpolation = cv2.INTER_CUBIC)
        # incremental mean: avg_{n+1} = avg_n * n/(n+1) + img * 1/(n+1)
        weight_avg = float(nSum)/float(nSum+1)
        weight_a = float(1)/float(nSum+1)
        img_avg = cv2.addWeighted(img_avg,weight_avg,imga,weight_a,0)
        #print ("Weight_avg : {} + Weight_a : {} = Total Weight {} " \
        #    .format(weight_avg,weight_a,weight_avg+weight_a))
        nSum+=1
    # Scale the figure height to preserve the image aspect ratio.
    vr = 10 * h / w
    plt.figure(figsize=(10,vr))
    plt.title(title)
    plt.imshow(img_avg,interpolation='nearest', aspect='auto'),plt.show()
    return img_avg
def register_socketio(socketio_in : SocketIO) -> None:
    """Register the socket.io connection manager for room management.

    Stores the instance in the module-level ``socketio`` global so other
    functions in this module can use it.

    Args:
        socketio_in (SocketIO): The socket.io connection manager.
    """
    global socketio
    socketio = socketio_in
def get_size(positions):
    """Get the size of the bounding rectangle that embodies *positions*.

    Args:
        positions (dict of Dendrogram: np.array): xy coordinates per dendrogram

    Returns:
        Tuple of (width, height) of the bounding rectangle, where the top
        edge accounts for each dendrogram's own height above its y origin.
    """
    xy = np.array(list(positions.values()))
    # Highest point: a dendrogram's y origin plus its own height.
    top = max(dend.height + pos[1] for dend, pos in positions.items())
    width = xy[:, 0].max() - xy[:, 0].min()
    height = top - xy[:, 1].min()
    return width, height
def getContactInfo(dic):
    """Returns the Contact info for Chapters.

    The value is extracted as the text after the first ':' inside the second
    comma-separated field of ``dic["content"]["$t"]``, stripped of
    surrounding whitespace.

    dic -- Dictionary from the JSON with all values.
    """
    second_field = str(dic["content"]["$t"]).split(',')[1]
    return second_field.split(':')[1].strip()
def draw_pointcloud(x: torch.Tensor, x_mask: torch.Tensor, grid_on=True):
    """ Make point cloud image
    :param x: Tensor([B, N, 3])
    :param x_mask: Tensor([B, N]) - True marks points that are dropped
        before plotting (only ``p[~m]`` entries are drawn)
    :param grid_on: currently unused - TODO confirm intended behavior
    :return: Tensor([B, 3, H, W]) stacked per-batch renderings
    """
    tic = time.time()  # NOTE(review): timing value is never used
    figw, figh = 16., 12.
    W, H = 256, int(256 * figh / figw)
    imgs = list()
    for p, m in zip(x, x_mask):
        # keep only unmasked points, moved to CPU for matplotlib
        p = p[~m, :]
        p = p.cpu()
        fig = plt.figure(figsize=(figw, figh))
        ax = fig.gca(projection='3d')
        ax.set_facecolor('xkcd:steel')
        ax.w_xaxis.set_pane_color((0., 0., 0., 1.0))
        ax.w_yaxis.set_pane_color((0., 0., 0., 1.0))
        ax.w_zaxis.set_pane_color((0., 0., 0., 1.0))
        # axes permuted so -z is drawn along the first plot axis
        ax.scatter(-p[:, 2], p[:, 0], p[:, 1], color=(1, 1, 1), marker='o', s=100)
        fig.tight_layout()
        fig.canvas.draw()
        # rasterize the rendered figure into an RGB(A) byte buffer
        buf = fig.canvas.buffer_rgba()
        l, b, w, h = fig.bbox.bounds
        img = np.frombuffer(buf, np.uint8).copy()
        img.shape = int(h), int(w), 4
        img = img[:, :, 0:3]
        img = cv2.resize(img, dsize=(W, H), interpolation=cv2.INTER_CUBIC)  # [H, W, 3]
        imgs.append(torch.tensor(img).transpose(2, 0).transpose(2, 1))  # [3, H, W]
        plt.close(fig)
    return torch.stack(imgs, dim=0)
def semantic_dsm(word_list, keyed_vectors):
    """Calculate a semantic dissimilarity matrix.

    Looks up an embedding for every word and returns the pairwise cosine
    distances in condensed form, clipped to [0, 1].
    """
    embeddings = np.array([keyed_vectors.word_vec(w) for w in word_list])
    return np.clip(pdist(embeddings, metric="cosine"), 0, 1)
def prompt_id_num(message, length=ID_WIDTH):
    """ Asks the user to enter an identifier which is a numeric string.

    Re-prompts until the input has exactly *length* characters AND parses as
    an integer. (The original validated the two conditions sequentially, so
    a numeric value re-entered with the wrong length was accepted.)

    :param message: message to ask the input
    :param length: the length of the identifier
    :return: input
    """
    retry_message = (f"Entrée incorrecte. Veuillez renseigner"
                     f" un identifiant contenant {length} nombres: ")
    response = input(message)
    while True:
        if len(response) == length:
            try:
                int(response)
                return response
            except ValueError:
                pass
        response = input(retry_message)
def click_snr(wl, Spec):
    """Calculate snr in a specific range given by clicks on a plot

    NOTE(review): the click capture is not implemented yet; point1/point2
    currently fall back to the full wavelength range (see temp values).
    """
    plt.figure()
    plt.plot(wl, Spec)
    plt.show(block=True)
    # points from click
    # temp values until the click capture above is implemented
    point2 = np.max(wl)
    point1 = np.min(wl)
    # Boolean masks selecting the open interval (point1, point2); note the
    # strict comparisons drop the min/max samples themselves.
    map2 = wl < point2
    map1 = wl > point1
    wl_slice = wl[map1 * map2]  # NOTE(review): computed but unused
    Spec_slice = Spec[map1 * map2]
    # Calculate SNR on the slice
    SNR = snr(Spec_slice)
    return SNR
def plot_spectrogram(f, t, Sxx):
    """Render a spectrogram with matplotlib and display it.

    Arguments
    ----------
    f: the frequency output of the scipy.signal.spectrogram
    t: the time series output of the scipy.signal.spectrogram
    Sxx: the spectrogram output of scipy.signal.spectrogram

    Returns
    --------
    None

    Note: the function scipy.signal.spectrogram returns f, t, Sxx in that order
    """
    plt.pcolormesh(t, f, Sxx)
    plt.xlabel('Time [sec]')
    plt.ylabel('Frequency [Hz]')
    plt.show()
def rnn_cell_forward(xt, a_prev, parameters):
    """
    Run a single forward step of a vanilla RNN cell.

    Arguments:
    xt -- input data at timestep "t", numpy array of shape (n_x, m).
    a_prev -- hidden state at timestep "t-1", numpy array of shape (n_a, m)
    parameters -- python dictionary containing:
                        Wax -- input weight matrix, shape (n_a, n_x)
                        Waa -- hidden-state weight matrix, shape (n_a, n_a)
                        Wya -- hidden-to-output weight matrix, shape (n_y, n_a)
                        ba -- hidden bias, shape (n_a, 1)
                        by -- output bias, shape (n_y, 1)
    Returns:
    a_next -- next hidden state, shape (n_a, m)
    yt_pred -- prediction at timestep "t", shape (n_y, m)
    cache -- (a_next, a_prev, xt, parameters), for the backward pass
    """
    Wax, Waa, Wya = parameters["Wax"], parameters["Waa"], parameters["Wya"]
    ba, by = parameters["ba"], parameters["by"]
    # a_next = tanh(Waa . a_prev + Wax . xt + ba)
    a_next = np.tanh(np.dot(Waa, a_prev) + np.dot(Wax, xt) + ba)
    # yt_pred = softmax(Wya . a_next + by)
    yt_pred = softmax(np.dot(Wya, a_next) + by)
    return a_next, yt_pred, (a_next, a_prev, xt, parameters)
def load_random_batch(cfg, data_paths):
    """
    Loads a random batch (batch_size, image, masks, weights)

    Collects every image path under <data_path>/train/images and
    <data_path>/valid/images for all dataset roots, samples cfg.BATCH_SIZE
    of them without replacement, loads each image with its concatenated
    masks (and optionally pixel weights), stacks them into batch arrays and
    normalizes by cfg.RESCALE / cfg.RESCALE_MSK.

    Parameters:
    -----------
    cfg: contains cfg.BATCH_SIZE (plus RESCALE, RESCALE_MSK, WEIGHTING)
    data_paths: list containing strings
        paths to the folder where the images, masks and weights are in
    Returns:
    --------
    batch containing the images, masks (and optionally weights)
    """
    train_imgs_dirs = list()
    valid_imgs_dirs = list()
    test_imgs_dirs = list()
    for data_path in data_paths:
        train_imgs_dirs.append(os.path.join(data_path, 'train', 'images'))
        valid_imgs_dirs.append(os.path.join(data_path, 'valid', 'images'))
        #test_imgs_dirs.append(os.path.join(data_path, 'test', 'images'))
    imgs_list = []
    for train_imgs_dir in train_imgs_dirs:
        imgs_list.extend([os.path.join(train_imgs_dir, s) for s in os.listdir(train_imgs_dir)])
    for valid_imgs_dir in valid_imgs_dirs:
        imgs_list.extend([os.path.join(valid_imgs_dir, s) for s in os.listdir(valid_imgs_dir)])
    #for test_imgs_dir in test_imgs_dirs:
    #imgs_list.extend([os.path.join(test_imgs_dir, s) for s in os.listdir(test_imgs_dir)])
    # sort for a deterministic ordering before sampling
    imgs_list.sort()
    # print(imgs_list)
    n_imgs = len(imgs_list)
    # Sampling without replacement ("Ziehen ohne Zuruecklegen"):
    # takes cfg.BATCH_SIZE elements out of len(imgs_list)
    batch_list = random.sample(range(0, n_imgs), cfg.BATCH_SIZE)
    print("batch_list", batch_list)
    # rand_img = random.randrange(0, n_imgs)
    # print(rand_img)
    #img_path = imgs_list[rand_img]
    # Get the image paths of this batch
    batch_imgs_list = [imgs_list[i] for i in batch_list]
    # The first image will be batched with no other image
    first_img = True
    for img_path in batch_imgs_list:
        # Get the image
        print(img_path)
        img = load_image(img_path)
        # Get the corresponding masks
        msk_paths = find_msk_paths(cfg, img_path)
        print(msk_paths)
        msk = concat_msks(msk_paths)
        #print("mask shape", msk.shape)
        # Put it together
        if first_img:
            imgs = np.expand_dims(img, axis=0)
            msks = np.expand_dims(msk, axis=0)
            first_img = False
        else:
            imgs = batch_data(imgs, img, multichannel=True)
            msks = batch_data(msks, msk, multichannel=True)
    #print("masks shape", msks.shape)
    # Normalize it
    imgs = imgs / cfg.RESCALE
    msks = msks / cfg.RESCALE_MSK
    if cfg.WEIGHTING:
        # Same stacking pattern for the per-pixel weight maps, in the same
        # batch order as the images above.
        first_img = True
        for img_path in batch_imgs_list:
            wgt_paths = find_weight_paths(cfg, img_path)
            #print(wgt_paths)
            weight = concat_wgts(wgt_paths)
            # Put it together
            if first_img:
                wgts = np.expand_dims(weight, axis=0)
                first_img = False
            else:
                wgts = batch_data(wgts, weight, multichannel=True)
        # print(wgts.shape)
        #print("wgts min max shape", np.min(wgts), np.max(wgts), wgts.shape)
        return imgs, msks, wgts
    else:
        return imgs, msks
def test_stars_pickleable():
    """
    Verify that EPSFStars can be successfully
    pickled/unpickled for use multiprocessing
    """
    from multiprocessing.reduction import ForkingPickler
    # Doesn't need to actually contain anything useful
    stars = EPSFStars([1])
    # Round-trip through the multiprocessing pickler; this should not blow up
    ForkingPickler.loads(ForkingPickler.dumps(stars))
def test_memory_consumption_before_backward_hessian():
    """Check memory consumption during splitting.

    Builds the example network without splitting, then with splitting (and
    with an explicit split count of 10), comparing the memory reports: the
    footprint must be identical in all three configurations.
    """
    _ = example_sequence()
    print("No splitting")
    mem_stat1 = memory_report()
    _ = example_sequence_parallel()
    print("With splitting")
    mem_stat2 = memory_report()
    # splitting must not change the reported memory footprint
    assert mem_stat1 == mem_stat2
    _ = example_sequence_parallel(10)
    print("With splitting")
    mem_stat2 = memory_report()
    assert mem_stat1 == mem_stat2
def load_song(trainsize=5000, testsize=5000):
    """ The million song dataset

    Not a good dataset for feature selection or regression.
    Standard linear regression performs only a little bit better than a random vector.
    Additional complex models, such as interesting kernels, are needed
    to improve performance.

    Returns a (trainset, testset) split of the requested sizes; a reduced
    CSV is loaded when fewer than 5000 rows are requested in total.
    """
    if trainsize + testsize < 5000:
        filename = 'datasets/YearPredictionMSD_small.csv'
    else:
        filename = 'datasets/YearPredictionMSD.csv'
    dataset = loadcsv(filename)
    # outputfirst=True: the target column comes first in the CSV
    trainset, testset = splitdataset(dataset,trainsize, testsize,outputfirst=True)
    return trainset,testset
def convert_node(node_data: NodeData):
    """
    Convenience method for converting NodeData to a packed TLV message.

    :param core.emulator.data.NodeData node_data: node data to convert
    :return: packed node message
    """
    node = node_data.node
    # services are serialized as one pipe-separated string of names
    services = None
    if node.services is not None:
        services = "|".join([x.name for x in node.services])
    # distributed emulation server name, when the node lives on one
    server = None
    if node.server is not None:
        server = node.server.name
    tlv_data = structutils.pack_values(
        coreapi.CoreNodeTlv,
        [
            (NodeTlvs.NUMBER, node.id),
            (NodeTlvs.TYPE, node.apitype.value),
            (NodeTlvs.NAME, node.name),
            (NodeTlvs.MODEL, node.type),
            (NodeTlvs.EMULATION_SERVER, server),
            # pixel coordinates are truncated to ints
            (NodeTlvs.X_POSITION, int(node.position.x)),
            (NodeTlvs.Y_POSITION, int(node.position.y)),
            (NodeTlvs.CANVAS, node.canvas),
            (NodeTlvs.SERVICES, services),
            # geo coordinates are transmitted as strings
            (NodeTlvs.LATITUDE, str(node.position.lat)),
            (NodeTlvs.LONGITUDE, str(node.position.lon)),
            (NodeTlvs.ALTITUDE, str(node.position.alt)),
            (NodeTlvs.ICON, node.icon),
        ],
    )
    return coreapi.CoreNodeMessage.pack(node_data.message_type.value, tlv_data)
def parse_null_value(
    null_value_node: "NullValueNode", schema: "GraphQLSchema"
) -> None:
    """
    Returns the value of an AST null value node, which is always ``None``.

    :param null_value_node: AST null value node to treat
    :param schema: the GraphQLSchema instance linked to the engine
    :type null_value_node: NullValueNode
    :type schema: GraphQLSchema
    """
    # pylint: disable=unused-argument
    return None
def SetDataTypesFromColInfo(df, tblCI):
    """
    Use colinfo dictionaries to set newly-imported (CSV) DataFrame column types and Boolean Flag columns

    tblCI.dict_isflagcol maps column name -> truthy for 1/blank flag
    columns; tblCI.dict_types maps column name -> dtype string, where 'dt'
    means "parse as datetime". Returns the modified DataFrame.
    """
    for col in df.columns:
        #If col is a flag column (1/blank), convert to Boolean for memory and feather file size efficiency
        if (col in tblCI.dict_isflagcol):
            if (tblCI.dict_isflagcol[col]): df = pdutil.ConvertFlagColToBoolean(df, col)
        #If the column is in the data type dictionary, set its type using either .to_datetime() or .astype()
        if col in tblCI.dict_types:
            if tblCI.dict_types[col] == 'dt':
                df[col] = pd.to_datetime(df[col])
            else:
                df[col] = df[col].astype(tblCI.dict_types[col])
    return df
def cli_purge_rainbowagent_logs(host, port, password, rootpassword):
    """CLI wrapper: purge RainbowAgent logs on the target host.

    Args:
        host: target host; validated via check_host_option before use
        port: connection port
        password: admin password
        rootpassword: root password used by the purge operation
    """
    check_host_option(host)
    oxe_purge_rainbowagent_logs(host, port, password, rootpassword)
def run_and_wait(request, _):
    """Implementation of RunAndWait.

    Builds a ProcessRunner from the request, copies over only the popen/run
    options that were actually set on the protobuf, executes the process
    synchronously, and returns its result wrapped in a RunAndWaitResponse.
    """
    process_runner = new_process.ProcessRunner(request.executable_path,
                                               request.default_args)
    args = {}
    # Copy optional popen fields only when present on the message.
    protobuf_utils.get_protobuf_field(args, request.popen_args, 'bufsize')
    protobuf_utils.get_protobuf_field(args, request.popen_args, 'executable')
    protobuf_utils.get_protobuf_field(args, request.popen_args, 'shell')
    protobuf_utils.get_protobuf_field(args, request.popen_args, 'cwd')
    if request.popen_args.env_is_set:
        args['env'] = request.popen_args.env
    else:
        # distinguish "no env provided" from an empty env mapping
        args['env'] = None
    args['additional_args'] = request.additional_args
    # Optional run-time options, again copied only when set.
    protobuf_utils.get_protobuf_field(args, request, 'timeout')
    protobuf_utils.get_protobuf_field(args, request, 'terminate_before_kill')
    protobuf_utils.get_protobuf_field(args, request, 'terminate_wait_time')
    protobuf_utils.get_protobuf_field(args, request, 'input_data')
    protobuf_utils.get_protobuf_field(args, request, 'max_stdout_len')
    logs.log('Running command: %s' % process_runner.get_command())
    return untrusted_runner_pb2.RunAndWaitResponse(
        result=process_result_to_proto(process_runner.run_and_wait(**args)))
def import_dicom_series(path, files_start_with=None, files_end_with=None,
                        exclude_files_end_with=('.dat', '.txt', '.py', '.pyc', '.nii', '.gz')):
    """Rudimentary file to load dicom serie from a directory.

    Every file in *path* that passes the prefix/suffix filters is read as a
    DICOM slice; the slices are stacked, in directory-listing order, into a
    float32 array of shape (rows, cols, n_slices).
    """
    N = 0
    paths = []
    slices = []
    files = os.listdir(path)
    for file_name in files:
        # Filter by optional prefix, optional suffix, and exclusion suffixes.
        file_valid = True
        if files_start_with is not None:
            if not file_name.startswith(files_start_with):
                file_valid = False
        if files_end_with is not None:
            if not file_name.endswith(files_end_with):
                file_valid = False
        for s in exclude_files_end_with:
            if file_name.endswith(s):
                file_valid = False
        if file_valid:
            full_path = path + os.sep + file_name
            # read moco information from files
            paths.append(full_path)
            f = dicom.read_file(full_path)
            slice = f.pixel_array
            slices.append(slice)
            N += 1
            # NOTE(review): only the values from the last accepted file
            # survive the loop, and neither is used afterwards.
            instance_number = f.get(0x00200013).value
            creation_time = f.get(0x00080013).value
            # print "Instance number: ",instance_number
            # print "Creation time: ",creation_time
    # NOTE(review): raises IndexError when no file matched - confirm whether
    # an explicit error is wanted instead.
    array = numpy.zeros((slices[0].shape[0], slices[0].shape[1], N), dtype=numpy.float32)
    for i in range(N):
        slice = numpy.float32(slices[i]) # FIXME: handle other data types
        array[:, :, i] = slice
    # return occiput_from_array(array)
    return array
def quick_bench(iterations, size):
    """Convenience entry point to time different function samples.

    Discovers every module-level callable whose name starts with ``_bench``,
    binds *size* as its first argument, runs them all through a
    BenchmarkRunner for *iterations* iterations, and prints the report.
    """
    functions = {}
    for name, func in globals().items():
        if name.startswith("_bench"):
            # strip the leading underscore for the report label
            name = name[1:]
            functions[name] = partial(func, size)
    bench = BenchmarkRunner(functions, iterations=iterations)
    bench.run()
    print(bench.report())
def fight(player, enemy):
    """
    Resolve one round of combat between the user and their selected enemy.

    Returns a list [player, player_damage, enemy_damage, player_won,
    looted_gold, looted_power_crystals] for the view function to display.
    """
    def _roll_damage(base):
        # Random 80-100% of a base damage stat, rounded to an int.
        return round(base * (random.randrange(80, 101) / 100))

    player_hit = _roll_damage(player.damage)
    crystals_won = 0
    gold_won = 0
    if enemy.hp_max <= player_hit:
        # Enemy defeated: loot a random 75-100% of each resource and take
        # no damage this round.
        crystals_won = round(enemy.power_crystals * (random.randrange(75, 101) / 100))
        player.power_crystals += crystals_won
        gold_won = round(enemy.gold * (random.randrange(75, 101) / 100))
        player.gold += gold_won
        enemy_hit = 0
        victory = True
    else:
        victory = False
        # Enemy survives and strikes back.
        enemy_hit = _roll_damage(enemy.damage)
        player.hp_current -= enemy_hit
    return [player, player_hit, enemy_hit, victory, gold_won, crystals_won]
async def completed(trace_name='',
                    name='',
                    sleep_interval=0.05,
                    streams: List[torch.cuda.Stream] = None):
    """Async context manager that waits for work to complete on given CUDA
    streams.

    On exit, records an event on every stream and polls (sleeping
    *sleep_interval* between polls) until all events report done, so other
    coroutines can run while the GPU work drains. No-op without CUDA.
    """
    if not torch.cuda.is_available():
        yield
        return
    stream_before_context_switch = torch.cuda.current_stream()
    if not streams:
        streams = [stream_before_context_switch]
    else:
        # None entries fall back to the current stream
        streams = [s if s else stream_before_context_switch for s in streams]
    # One completion event per stream; timing enabled only when debugging.
    end_events = [
        torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
    ]
    if DEBUG_COMPLETED_TIME:
        start = torch.cuda.Event(enable_timing=True)
        stream_before_context_switch.record_event(start)
        cpu_start = time.monotonic()
    logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
    grad_enabled_before = torch.is_grad_enabled()
    try:
        yield
    finally:
        current_stream = torch.cuda.current_stream()
        # The wrapped body must not leave a different stream current.
        assert current_stream == stream_before_context_switch
        if DEBUG_COMPLETED_TIME:
            cpu_end = time.monotonic()
        # Mark the end of the submitted work on every stream.
        for i, stream in enumerate(streams):
            event = end_events[i]
            stream.record_event(event)
        grad_enabled_after = torch.is_grad_enabled()
        # observed change of torch.is_grad_enabled() during concurrent run of
        # async_test_bboxes code
        assert (grad_enabled_before == grad_enabled_after
                ), 'Unexpected is_grad_enabled() value change'
        are_done = [e.query() for e in end_events]
        logger.debug('%s %s completed: %s streams: %s', trace_name, name,
                     are_done, streams)
        # Poll until every event reports done, yielding to the event loop
        # between polls instead of blocking the thread.
        with torch.cuda.stream(stream_before_context_switch):
            while not all(are_done):
                await asyncio.sleep(sleep_interval)
                are_done = [e.query() for e in end_events]
                logger.debug(
                    '%s %s completed: %s streams: %s',
                    trace_name,
                    name,
                    are_done,
                    streams,
                )
        current_stream = torch.cuda.current_stream()
        assert current_stream == stream_before_context_switch
        if DEBUG_COMPLETED_TIME:
            cpu_time = (cpu_end - cpu_start) * 1000
            stream_times_ms = ''
            for i, stream in enumerate(streams):
                elapsed_time = start.elapsed_time(end_events[i])
                stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
            logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
                        stream_times_ms)
async def POST_Dataset(request):
    """Handler for POST /datasets.

    Creates a dataset metadata object from the JSON request body.  The body
    must supply the dataset id, a ``root`` group uuid, and ``type``/``shape``
    JSON; optional ``layout`` and ``creationProperties`` are stored verbatim.
    Returns a 201 response summarizing the newly created dataset.
    """
    log.request(request)
    app = request.app
    params = request.rel_url.query
    if not request.has_body:
        msg = "POST_Dataset with no body"
        log.error(msg)
        raise HTTPBadRequest(reason=msg)
    body = await request.json()
    log.info(f"POST_Dataset, body: {body}")
    # The bucket may arrive as a query param or in the body; param wins.
    if "bucket" in params:
        bucket = params["bucket"]
    elif "bucket" in body:
        # Bug fix: previously read params["bucket"] here, which failed when
        # the bucket was supplied only in the body.
        bucket = body["bucket"]
    else:
        bucket = None
    dset_id = get_obj_id(request, body=body)
    if not isValidUuid(dset_id, obj_class="dataset"):
        log.error(f"Unexpected dataset_id: {dset_id}")
        raise HTTPInternalServerError()
    # verify the id doesn't already exist
    obj_found = await check_metadata_obj(app, dset_id, bucket=bucket)
    if obj_found:
        log.error(f"Post with existing dset_id: {dset_id}")
        raise HTTPInternalServerError()
    if "root" not in body:
        msg = "POST_Dataset with no root"
        log.error(msg)
        raise HTTPInternalServerError()
    root_id = body["root"]
    try:
        validateUuid(root_id, "group")
    except ValueError:
        msg = "Invalid root_id: " + root_id
        log.error(msg)
        raise HTTPInternalServerError()
    if "type" not in body:
        msg = "POST_Dataset with no type"
        log.error(msg)
        raise HTTPInternalServerError()
    type_json = body["type"]
    if "shape" not in body:
        msg = "POST_Dataset with no shape"
        log.error(msg)
        raise HTTPInternalServerError()
    shape_json = body["shape"]
    layout = None
    if "layout" in body:
        layout = body["layout"]  # client specified chunk layout
    # ok - all set, create committed type obj
    now = int(time.time())
    log.debug(f"POST_dataset typejson: {type_json}, shapejson: {shape_json}")
    dset_json = {"id": dset_id, "root": root_id, "created": now,
                 "lastModified": now, "type": type_json, "shape": shape_json,
                 "attributes": {}}
    if "creationProperties" in body:
        dset_json["creationProperties"] = body["creationProperties"]
    if layout is not None:
        dset_json["layout"] = layout
    await save_metadata_obj(app, dset_id, dset_json, bucket=bucket, notify=True, flush=True)
    # Echo back the key fields of the stored object.
    resp_json = {}
    resp_json["id"] = dset_id
    resp_json["root"] = root_id
    resp_json["created"] = dset_json["created"]
    resp_json["type"] = type_json
    resp_json["shape"] = shape_json
    resp_json["lastModified"] = dset_json["lastModified"]
    resp_json["attributeCount"] = 0
    resp = json_response(resp_json, status=201)
    log.response(request, resp=resp)
    return resp
def draw_platform_family_by_age(id, title):
    """Draws how many users listened (or not) to a track on a given
    platform_family, broken down by user age.

    :param id: platform_family value to filter the global ``sample`` frame on
    :param title: plot title
    """
    sub_sample = sample[['platform_family', 'user_age', 'is_listened']]
    sub_sample = sub_sample[(sub_sample.platform_family == id)]
    ages = sub_sample['user_age'].unique()
    ages.sort()
    table = sub_sample.groupby(['platform_family', 'user_age', 'is_listened']).size()
    table = table.sort_index(level='is_listened')
    # .values replaces the deprecated .as_matrix() (removed in pandas 1.0).
    table_matrix = table.values
    plt.title(title)
    # Split at the actual number of distinct ages instead of a hard-coded 13
    # (fixes the bug noted previously).  After sorting by is_listened, the
    # first half of the counts is is_listened == 0, the second half == 1.
    # NOTE(review): assumes every age has both listened/not-listened groups;
    # otherwise the halves are unequal -- confirm with the full data set.
    n_ages = len(ages)
    plt.bar(ages, table_matrix[n_ages:], width=0.5, color='g', align='center')
    plt.bar(ages + 0.5, table_matrix[:n_ages], width=0.5, color='r', align='center')
    plt.legend(('Is Listened', 'Not Listened'), loc='upper right')
    plt.ylabel("Quantity");
    plt.xlabel("Ages");
    plt.show()
def sort_list_by_list(L1, L2):
    """Return the elements of ``L1`` reordered by the sort order of ``L2``.

    Pairs are compared on the ``L2`` value only, so ``L1`` elements never
    need to be comparable; the sort is stable for equal keys.
    """
    pairs = sorted(zip(L2, L1), key=lambda pair: pair[0])
    return [element for _, element in pairs]
def _to_protobuf_value(value: type_utils.PARAMETER_TYPES) -> struct_pb2.Value:
    """Creates a google.protobuf.struct_pb2.Value message out of a provide
    value.
    Args:
      value: The value to be converted to Value message.
    Returns:
      A google.protobuf.struct_pb2.Value message.
    Raises:
      ValueError if the given value is not one of the parameter types.
    """
    # bool must be tested before (int, float): bool is a subclass of int, so
    # the previous ordering routed True/False into number_value and made the
    # bool_value branch unreachable.
    if isinstance(value, bool):
        return struct_pb2.Value(bool_value=value)
    elif isinstance(value, str):
        return struct_pb2.Value(string_value=value)
    elif isinstance(value, (int, float)):
        return struct_pb2.Value(number_value=value)
    elif isinstance(value, dict):
        # Recursively convert each entry of the mapping.
        return struct_pb2.Value(
            struct_value=struct_pb2.Struct(
                fields={k: _to_protobuf_value(v) for k, v in value.items()}))
    elif isinstance(value, list):
        # Recursively convert each element of the sequence.
        return struct_pb2.Value(
            list_value=struct_pb2.ListValue(
                values=[_to_protobuf_value(v) for v in value]))
    else:
        raise ValueError('Value must be one of the following types: '
                         'str, int, float, bool, dict, and list. Got: '
                         f'"{value}" of type "{type(value)}".')
def setup(bot: Bot) -> None:
    """Extension entry point, invoked automatically when the bot loads this cog.

    Its only purpose is to construct the LemonFacts cog and register it with
    the supplied Bot instance.
    """
    cog = LemonFacts(bot)
    bot.add_cog(cog)
def remove_image_permissions(apps, schema_editor):
    """Reverse migration: drop the previously added image permissions.

    Removes the add/change/delete permissions for the wagtailimages Image
    model; the deletion cascades to any Group assignments.
    """
    ContentType = apps.get_model("contenttypes.ContentType")
    Permission = apps.get_model("auth.Permission")
    image_type = ContentType.objects.get(
        app_label="wagtailimages",
        model="image",
    )
    codenames = ("add_image", "change_image", "delete_image")
    # This cascades to Group
    Permission.objects.filter(
        content_type=image_type, codename__in=codenames
    ).delete()
def umap(adata, **kwargs) -> Union[Axes, List[Axes], None]:
    """\
    Scatter plot in UMAP basis.
    Parameters
    ----------
    {adata_color_etc}
    {edges_arrows}
    {scatter_bulk}
    {show_save_ax}
    Returns
    -------
    If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
    """
    # Thin wrapper: delegates to the generic `embedding` plotter with the
    # 'umap' basis.  NOTE(review): the {placeholder} fields in the docstring
    # are presumably substituted by a _doc_params-style decorator defined
    # elsewhere -- keep them intact.
    return embedding(adata, 'umap', **kwargs)
def _prepare_grid(times, time_step):
    """Builds the combined time grid used for path generation.

    Args:
      times: Rank-1 `Tensor` of increasing positive real values at which the
        path points are to be evaluated.
      time_step: Scalar real `Tensor`; maximal spacing of the uniform grid.

    Returns:
      Tuple `(all_times, mask)`.
      `all_times` is a 1-D real `Tensor` holding, in ascending order (possibly
      with duplicates), all points of `times` together with a uniform grid of
      step `time_step` over `[0, times[-1]]`.
      `mask` is a boolean 1-D `Tensor` of the same shape marking which entries
      of `all_times` came from `times`.
      Guarantees that `all_times[0] == 0` and `mask[0]` is False.
    """
    uniform_grid = tf.range(
        start=time_step, limit=times[-1], delta=time_step, dtype=times.dtype)
    zero = tf.constant([0], dtype=times.dtype)
    all_times = tf.concat([zero, times, uniform_grid], axis=0)
    # Only entries originating from `times` are flagged True.
    mask = tf.concat([
        tf.cast(zero, dtype=tf.bool),
        tf.ones_like(times, dtype=tf.bool),
        tf.zeros_like(uniform_grid, dtype=tf.bool),
    ], axis=0)
    # A stable sort keeps a deterministic order among duplicate time values.
    order = tf.argsort(all_times, stable=True)
    return tf.gather(all_times, order), tf.gather(mask, order)
def parse_res_list(res_list, file, *operator):
    """Batch-rename resources via an external editor, then process them.

    Takes a list of ``Resource`` instances and a temporary file name.  When
    ``file`` is given, the resource names are written to it, the file is
    opened in the system default program so the user can edit the names, the
    edited names are read back (one per line, in the original order), and each
    resource's ``operation`` method is invoked with ``*operator``.  When
    ``file`` is falsy, the rename step is skipped and ``operation`` is called
    directly.

    NOTE(review): ``os.startfile`` is Windows-only; assumes the edited file
    still has one line per resource -- confirm expected usage.
    """
    if file:
        with open(file, 'w', encoding='utf_8') as f:
            for res in res_list:
                f.write(str(res) + '\n')
        # Launch the default editor and block until the user confirms.
        os.startfile(file)
        input('修改完文件名后按回车继续。')
        with open(file, encoding='utf_8') as f:
            for res in res_list:
                # One edited name per line; strip only the trailing newline.
                res.name = f.readline().rstrip('\n')
                res.operation(*operator)
    else:
        for res in res_list:
            res.operation(*operator)
def delete_row_by_id(table, id_):
    """Delete the row with the given ID from ``table``.

    Looks the row up first (raising ``MissingRecordError`` if it does not
    exist) and then removes it via a ``DeleteQuery``.

    :param table: the table to be searched
    :param id_: the id of the record to delete
    """
    log.info(" Deleting row from %s with ID: %s", table.__tablename__, id_)
    target_row = get_row_by_id(table, id_)
    with DeleteQuery(table, target_row) as query:
        query.execute_query()
def run_setup(command):
    """Run ``python setup.py <command>`` from the project root.

    Activates the configured virtualenv first; if none is set up, prints
    instructions instead of running anything.

    :param command: the setup.py sub-command to execute (e.g. ``install``)
    :return: None
    """
    virtual_env = get_virtual_env()
    if virtual_env:
        # Run inside the virtualenv, from the project base directory.
        with virtualenv(virtual_env):
            with lcd(BASE_DIR):
                local('python setup.py {}'.format(str(command)))
    else:
        # Python 2 print statements -- this fabfile targets Python 2.
        print 'Virtual environment needs to be installed first'
        print 'Please run `fab install_virtualenv` first'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.