| content (stringlengths 22–815k) | id (int64 0–4.91M) |
|---|---|
def install_signal_handlers():
"""Sets up a the global terminator greenlet to:
* Set GLOBAL_SHUTDOWN on an interrupt signal (which should occur at
LUCI_CONTEXT['deadline']['soft_deadline'], OR if the build is canceled).
* Set GLOBAL_QUITQUITQUIT after LUCI_CONTEXT['deadline']['grace_period']-1
seconds after GLOBAL_SHUTDOWN.
Sets LUCI_CONTEXT['deadline'] for the duration of this contextmanager.
"""
d = sections_pb2.Deadline()
deadline_raw = luci_context.read('deadline')
if deadline_raw:
d = jsonpb.ParseDict(deadline_raw, d)
else:
# per LUCI_CONTEXT spec. missing deadline means presumed 30s grace period.
d.grace_period = 30
# now adjust deadline to reserve 1 second of grace_period for any processes
# the engine launches. This should give the engine sufficient time to killpg
# any stray process groups.
d.grace_period = max(d.grace_period - 1, 0)
# terminator_greenlet reacts to signal from parent, which occurs during
# cancellation or timeout.
def _terminator_greenlet():
GLOBAL_SHUTDOWN.wait()
gevent.wait([GLOBAL_QUITQUITQUIT], timeout=d.grace_period)
if not GLOBAL_QUITQUITQUIT.ready():
LOG.info('Setting GLOBAL_QUITQUITQUIT')
GLOBAL_QUITQUITQUIT.set()
else:
LOG.info('Engine quitting normally')
for pgroup in UNKILLED_PROC_GROUPS:
_kill_proc_group(pgroup)
terminator_greenlet = gevent.spawn(_terminator_greenlet)
def _set_shutdown(signum, _frame):
LOG.info('Got signal (%d), Setting GLOBAL_SHUTDOWN', signum)
GLOBAL_SHUTDOWN.set()
old_handlers = [
signal.signal(signum, _set_shutdown)
for signum in _INTERRUPT_SIGNALS
]
try:
with luci_context.write('deadline', jsonpb.MessageToDict(d)):
yield
finally:
for signum, old_handler in zip(_INTERRUPT_SIGNALS, old_handlers):
signal.signal(signum, old_handler)
# By this point we needn't have any mercy; All steps have returned so any
# dangling groups are fair game.
GLOBAL_SHUTDOWN.set()
GLOBAL_QUITQUITQUIT.set()
terminator_greenlet.get()
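# A minimal, self-contained sketch of the install/restore pattern used above
# (the real function additionally coordinates gevent events, process-group
# cleanup and LUCI_CONTEXT deadlines, which are omitted here; names below are
# illustrative):
import contextlib
import signal

@contextlib.contextmanager
def _toy_signal_scope(handler, signums=(signal.SIGINT, signal.SIGTERM)):
    old_handlers = [signal.signal(s, handler) for s in signums]
    try:
        yield
    finally:
        for s, old in zip(signums, old_handlers):
            signal.signal(s, old)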
| 13,400
|
def get_inputs():
"""
inputsdict contains {'Yte': Yte, 'Ytr': Ytr, 'Xtr': Xtr, 'Xte': Xte} where values are np.arrays.
The np.arrays are truncated so they split evenly into batches of size = batchsize.
"""
with open(os.path.join(dpath, dfile), 'rb') as f:
d_all = pickle.load(f)
return d_all
| 13,401
|
def _apply(input_bundle_name, output_bundle_name, pipeline_class_name, pipeline_args,
input_tags, output_tags, output_bundle_uuid=None, force=False):
"""Apply a pipeline to an input bundle, and save the results in an
output bundle.
Args:
input_bundle_name: The human name of the input bundle
output_bundle_name: The human name of the output bundle
pipeline_class_name: Name of the pipeline class to run
pipeline_args (list): Optional arguments to pass to the pipeline class
input_tags: Set of tags to find input bundle
output_tags: Set of tags to give the output bundle
output_bundle_uuid: A UUID specifying the version to save within
the output bundle; default `None`
force: If `True` force recomputation of all upstream pipe requirements
"""
_logger.debug("Applying '{}' to '{}' to get '{}'".format(pipeline_class_name, input_bundle_name, output_bundle_name))
apply_kwargs = {
'input_bundle': input_bundle_name,
'output_bundle': output_bundle_name,
'output_bundle_uuid': output_bundle_uuid,
'pipe_params': json.dumps(disdat.common.parse_params(pipeline_args)),
'pipe_cls': pipeline_class_name,
'input_tags': input_tags,
'output_tags': output_tags,
'force': force
}
p = Process(target=disdat.apply.apply, kwargs=apply_kwargs)
p.start()
p.join()
return p.exitcode == 0
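# Minimal sketch of the pattern above: run a callable in its own process and
# report success via the exit code (the disdat-specific kwargs are omitted;
# names below are illustrative):
from multiprocessing import Process

def _toy_worker(should_fail=False):
    if should_fail:
        raise RuntimeError("simulated pipeline failure")

def _toy_apply(should_fail=False):
    p = Process(target=_toy_worker, kwargs={'should_fail': should_fail})
    p.start()
    p.join()
    return p.exitcode == 0  # an unhandled exception gives a non-zero exit code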
| 13,402
|
def reviews(
app_id,
token=None,
pagination_delay=1,
review_size=100,
sort_by='most_relevant',
rating=0,
lang='en'):
"""Generator, gets all reviews.
Parameters
----------
app_id : str
App id/Package name.
token : str | None
For continuation of reviews, you must provide this token.
pagination_delay : int | float
Time between each scrape.
review_size : int
Number of reviews fetched per page (applies to pages after the first).
sort_by : str
Sorting option, available 'most_relevant', 'newest', 'rating'.
rating : int
Shows reviews by rating. Zero (0) means all ratings.
lang : str
Language of reviews.
Yields
------
list of dict
Raises
------
TypeError | ValueError
"""
validators.reviews(
app_id, token, pagination_delay, review_size, sort_by,
rating, lang
)
try:
token = token or -1
while token:
form_next_page = forms.reviews_next_page(
app_id, None if token == -1 else token,
review_size, SORT_TYPE[sort_by], rating
)
response = _do_post_next_reviews(form_next_page, lang)
results, token = parsers.reviews_next_page(response.text)
if results:
yield {
'reviews': results,
'next': {
'app_id': app_id,
'token': token,
'pagination_delay': pagination_delay,
'review_size': review_size,
'sort_by': sort_by,
'rating': rating,
'lang': lang,
}
}
time.sleep(pagination_delay)
except GeneratorExit:
return
except requests.exceptions.RequestException:
logging.exception('Unexpected end.')
except Exception:
logging.exception('Unexpected end.')
| 13,403
|
def arcColor(renderer, x, y, rad, start, end, color):
"""Draws an arc to the renderer with a given color.
The start and end of the arc are defined in units of degrees, with 0 being
the bottom of the arc circle and increasing counter-clockwise (e.g. 90 being
the rightmost point of the circle).
If the rendering color has any transparency, blending will be enabled.
Args:
renderer (:obj:`SDL_Renderer`): The renderer to draw on.
x (int): The X coordinate of the center of the circle.
y (int): The Y coordinate of the center of the circle.
rad (int): The radius (in pixels) of the circle.
start (int): The start of the arc (in degrees).
end (int): The end of the arc (in degrees).
color (int): The color to draw with as a 32-bit ``0xRRGGBBAA`` integer
(e.g. ``0xFF0000FF`` for solid red).
Returns:
int: 0 on success, or -1 on failure.
"""
return _funcs["arcColor"](renderer, x, y, rad, start, end, color)
| 13,404
|
def SMLSL(cpu_context: ProcessorContext, instruction: Instruction):
"""Signed multiply-subtract long (vector form)"""
logger.debug("%s instruction not currently implemented.", instruction.mnem)
| 13,405
|
def save_json(filepath, data_dict):
"""
Convert a dict to a JSON file, with indentation to improve readability.
Note: can be used to save a Keras model's JSON config.
The resulting file works with Keras models.model_from_json().
"""
# Accept a JSON string directly (e.g. the output of model.to_json())
if isinstance(data_dict, str):
data_dict = json.loads(data_dict)
with open(filepath, 'w') as f:
json.dump(data_dict, f, indent=4)
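# Usage sketch: both a plain dict and a JSON string (such as the output of a
# Keras model's to_json()) are accepted; the path is illustrative.
save_json('model_config.json', {'class_name': 'Sequential', 'config': []})
save_json('model_config.json', '{"class_name": "Sequential", "config": []}')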
| 13,406
|
def delete_original_and_processed_slice_from_storage(mapper: Mapper, connection: Connection, target: Slice) -> None:
"""Delete original and processed Slices from storage."""
original_slice = OriginalSlice.get(id=target.id)
original_slice.delete()
processed_slice = ProcessedSlice.get(id=target.id)
processed_slice.delete()
| 13,407
|
def test_auth_info():
"""
Test singleton AuthInfo
:return:
"""
auth_info = qradar_utils.AuthInfo.get_authInfo()
auth_info.create(host,
username=username,
password=password,
token=None,
cafile=cafile)
assert auth_info.api_url == "https://{}/api/".format(host)
assert auth_info.cafile == cafile
assert auth_info.qradar_token == None
assert auth_info.headers["Authorization"] == b"Basic " + base64.b64encode((username + ':' + password).encode("ascii"))
assert auth_info.headers["Accept"] == "application/json"
# use token to auth
auth_info.create(host,
username=None,
password=None,
token=token,
cafile=cafile)
assert auth_info.headers["SEC"] == token
| 13,408
|
def dos_element_spd(
folder,
element_spd_dict,
output='dos_element_spd.png',
fill=True,
alpha=0.3,
linewidth=1.5,
sigma=0.05,
energyaxis='x',
color_list=None,
legend=True,
total=True,
figsize=(4, 3),
erange=[-6, 6],
spin='up',
combination_method='add',
fontsize=7,
save=True,
):
"""
This function plots the element projected density of states of the s, p, and d orbitals.
Parameters:
folder (str): This is the folder that contains the VASP files
element_spd_dict (dict[str:str]): A dictionary that contains the individual atoms and the corresponding
orbitals to project onto. For example, if the user wants to project onto the s, p, d orbitals
of In and the p orbitals of As for an InAs structure then the dictionary would be {'In':'spd', 'As':'p'}
output (str): File name of the resulting plot.
fill (bool): Determines whether or not to fill underneath the plot
alpha (float): Alpha value for the fill
linewidth (float): Linewidth of lines
sigma (float): Standard deviation for gaussian filter
energyaxis (str): Determines the axis to plot the energy on ('x' or 'y')
color_list (list): List of colors that is the same length as the number of projections
in the plot.
legend (bool): Determines whether to draw the legend or not
total (bool): Determines whether to draw the total density of states or not
spin (str): Which spin direction to parse ('up' or 'down')
figsize (list / tuple): Desired size of the image in inches (width, height)
erange (list): Energy range for the DOS plot ([lower bound, upper bound])
combination_method (str): If spin == 'both', this determines if the spin up and spin down
densities are added or subtracted. ('add' or 'sub')
fontsize (float): Font size of the text in the figure.
save (bool): Determines whether to automatically save the figure or not. If not
the figure and axis are returned for further manipulation.
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing.
"""
dos = Dos(folder=folder, spin=spin, combination_method=combination_method)
fig = plt.figure(figsize=figsize, dpi=400)
ax = fig.add_subplot(111)
_figure_setup_dos(ax=ax, fontsize=fontsize, energyaxis=energyaxis)
dos.plot_element_spd(
ax=ax,
element_spd_dict=element_spd_dict,
fill=fill,
alpha=alpha,
linewidth=linewidth,
sigma=sigma,
energyaxis=energyaxis,
color_list=color_list,
legend=legend,
total=total,
erange=erange,
)
plt.tight_layout(pad=0.2)
if save:
plt.savefig(output)
else:
return fig, ax
| 13,409
|
def load(m, schema=UPLOAD_MANIFEST_SCHEMA):
""" Load and validate a manifest.
"""
manifest = yaml.load(m)
validate(
manifest, schema=schema,
)
return manifest
| 13,410
|
def transform_phenotype(inv_root, y, fam_indices, null_mean = None):
"""
Transform phenotype based on inverse square root of phenotypic covariance matrix.
If the null model included covariates, the fitted mean is removed rather than the overall mean
"""
# Mean normalise phenotype
if null_mean is None:
y = y - np.mean(y)
else:
y = y - null_mean
# Transform by family
for fam in fam_indices.keys():
famsize = fam_indices[fam].shape[0]
if famsize == 1:
y[fam_indices[fam]] = inv_root[1] * y[fam_indices[fam]]
else:
y[fam_indices[fam]] = inv_root[famsize].dot(y[fam_indices[fam]])
return y
| 13,411
|
def download_site_build(event_file: str, download_path: str = "build-site.tar.gz") -> int:
"""Will download the site bulid if this is a forked PR bulid.
Args:
event_file (str): event file from the workflow
download_path (str): destination path for the downloaded site build archive
Returns:
int: PR num of the build if relevant
"""
with open(event_file, 'r') as f:
github_event = json.load(f)
target_url = github_event['target_url']
print(f'target_url: {target_url}')
# target_url is of the form:
# https://circleci.com/gh/demisto/content-docs/142?utm_campaign=vcs-integration-link&utm_medium=referral&utm_source=github-build-li
target_url = target_url.split('?')[0]
build_num = target_url.split('/')[-1]
print(f'circleci build: {build_num}')
circle_url = f'https://circleci.com/api/v1.1/project/github/demisto/content-docs/{build_num}'
print(f'Checking circleci url: {circle_url}')
res = requests.get(circle_url, verify=VERIFY_SSL)
res.raise_for_status()
build_json = res.json()
# check that this is a pull request
if not build_json.get('pull_requests') or not build_json.get('pull_requests')[0].get('url'):
print('Not a pull request. Skipping')
return 0
branch = build_json.get('branch')
if not branch or not branch.startswith('pull/'):
print(f'Skipping branch as it is not an external pull: {branch}')
return 0
pr_num = branch.split('/')[1]
# get artifacts
res = requests.get(f'{circle_url}/artifacts', verify=VERIFY_SSL)
res.raise_for_status()
artifacts = res.json()
download_url = None
for art in artifacts:
if 'build-site.tar.gz' in art.get('path'):
download_url = art.get('url')
break
if not download_url:
raise ValueError(f"download url missing for artifacts: {artifacts}")
print(f'Downloading build artifact from: {download_url} (pr num: {pr_num}) to: {download_path} ...')
download_file(download_url, download_path)
return int(pr_num)
| 13,412
|
def createVskDataDict(labels,data):
"""Creates a dictionary of vsk file values from labels and data.
Parameters
----------
labels : array
List of label names for vsk file values.
data : array
List of subject measurement values corresponding to the label
names in `labels`.
Returns
-------
vsk : dict
Dictionary of vsk file values. Dictionary keys correspond to
names in `labels` and dictionary values correspond to values in
`data`.
Examples
--------
This example tests for dictionary equality through python instead of
doctest since python does not guarantee the order in which dictionary
elements are printed.
>>> labels = ['MeanLegLength', 'LeftKneeWidth', 'RightAnkleWidth']
>>> data = [940.0, 105.0, 70.0]
>>> res = createVskDataDict(labels, data)
>>> res == {'MeanLegLength':940.0, 'LeftKneeWidth':105.0, 'RightAnkleWidth':70.0}
True
"""
vsk={}
for key,data in zip(labels,data):
vsk[key]=data
return vsk
| 13,413
|
def remove_dates(scan_result):
"""
Remove date fields from scan.
"""
for scanned_file in scan_result['files']:
scanned_file.pop('date', None)
| 13,414
|
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, L, C)
window_size (tuple[int]): window size along the H, W and L axes
Returns:
windows: (num_windows*B, window_size[0], window_size[1], window_size[2], C)
"""
B, H, W, L, C = x.shape
#print(x.shape)
#print(window_size[0])
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], L // window_size[2], window_size[2], C)
windows = x.permute(0, 1, 3, 5, 2, 4, 6, 7).contiguous().view(-1, window_size[0], window_size[1], window_size[2], C)
return windows
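# Usage sketch (requires PyTorch; shapes are illustrative): partition a
# (B, H, W, L, C) volume into non-overlapping 2x2x2 windows.
import torch

x = torch.randn(1, 4, 4, 4, 8)        # B=1, H=W=L=4, C=8
windows = window_partition(x, (2, 2, 2))
print(windows.shape)                  # torch.Size([8, 2, 2, 2, 8])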
| 13,415
|
def _get_unique(node_list, key, mode=None):
"""
Returns number or names of unique nodes in a list.
:param node_list: List of dictionaries returned by Neo4j transactions.
:param key: Key accessing specific node in dictionary.
:param mode: If 'num', the number of unique nodes is returned.
:return: Set of unique node names, or the number of unique nodes if mode is 'num'
"""
unique_samples = list()
if node_list:
# it is possible that nodes do not yet exist
for item in node_list:
unique_samples.append(item[key].get('name'))
unique_samples = set(unique_samples)
if mode == 'num':
unique_samples = len(unique_samples)
return unique_samples
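# Minimal sketch with plain dicts standing in for Neo4j nodes (real input
# comes from Neo4j transactions; the key 'n' is illustrative):
records = [{'n': {'name': 'sample_a'}},
           {'n': {'name': 'sample_b'}},
           {'n': {'name': 'sample_a'}}]
print(_get_unique(records, key='n'))              # {'sample_a', 'sample_b'}
print(_get_unique(records, key='n', mode='num'))  # 2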
| 13,416
|
def calculate_cpufreq_weighted_time_in_state(
final_time_in_cpufreq_state_by_cpu, time_in_cpufreq_state_by_cpu):
"""Calculate the weighted average in each CPU frequency state.
Args:
final_time_in_cpufreq_state_by_cpu: Final time in each CPU frequency
state. See the return value of parse_cpufreq_stats_time_in_state() for
the format.
time_in_cpufreq_state_by_cpu: Initial time in each CPU frequency
state. See the return value of parse_cpufreq_stats_time_in_state() for
the format.
Returns:
(weighted_time_in_cpufreq_state, weighted_average_cpufreq) tuple where
weighted_time_in_cpufreq_state is a dictionary that contains the
fractional time (0..1) in each CPU frequency state keyed by CPU number and
weighted_average_cpufreq is a dictionary containing the overall weighted
average CPU frequency keyed by CPU number.
"""
weighted_average_cpufreq = dict([(c, 0.0)
for c in time_in_cpufreq_state_by_cpu])
weighted_time_in_cpufreq_state_by_cpu = {}
for cpu, time_in_cpufreq_state in time_in_cpufreq_state_by_cpu.iteritems():
final_time_in_cpufreq_state = final_time_in_cpufreq_state_by_cpu[cpu]
weighted_time_in_cpufreq_state = {}
delta_time_in_cpufreq_state = {}
total_time = 0.0
for freq in time_in_cpufreq_state:
delta_time_in_cpufreq_state[freq] = (
final_time_in_cpufreq_state.get(freq, 0) -
time_in_cpufreq_state.get(freq, 0))
total_time += delta_time_in_cpufreq_state[freq]
for freq, cpu_time in delta_time_in_cpufreq_state.iteritems():
weight = float(cpu_time) / total_time
weighted_time_in_cpufreq_state[freq] = weight
weighted_average_cpufreq[cpu] += freq * weight
weighted_time_in_cpufreq_state_by_cpu[cpu] = weighted_time_in_cpufreq_state
return (weighted_time_in_cpufreq_state_by_cpu, weighted_average_cpufreq)
| 13,417
|
def array2imgdata_pil(A, format='PNG'):
"""get png data from array via converting to PIL Image"""
from PIL import Image
if A.shape[2] == 3:
mode = 'RGB'
elif A.shape[2] == 4:
mode = 'RGBA'
else:
mode = 'L'
img = Image.frombytes(mode, A.shape[:2], A.tobytes())
return pil2imgdata(img, format)
| 13,418
|
def h_eval(data):
"""
Function takes dictionary
Evaluate values and convert string to correct type (boolean/int/float/long/string)
"""
if isinstance(data, dict):
for _k in list(data.keys()):
data[_k] = h_eval(data[_k])
if data[_k] is None or (isinstance(data[_k], dict) and not data[_k]):
data.pop(_k)
return data
if isinstance(data, list) or isinstance(data, tuple) or isinstance(data, set):
res = []
for _k in data:
res.append(h_eval(_k))
if isinstance(data, tuple):
return tuple(res)
if isinstance(data, set):
return set(res)
return res
try:
if isinstance(data, str):
if data.endswith("%"):
data = data[:-1]
if data.lower() == "false":
return False
if data.lower() == "true":
return True
if data.lower() == "n/e":
return None
try:
return int(data)
except Exception:
pass
try:
return float(data)
except Exception:
pass
return data
except Exception:
return data
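# Usage sketch: strings are coerced to bool/int/float, "n/e" values and empty
# dicts are dropped, and containers are converted recursively.
print(h_eval({'a': 'true', 'b': '42', 'c': '98.6%', 'd': 'n/e', 'e': 'text'}))
# -> {'a': True, 'b': 42, 'c': 98.6, 'e': 'text'}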
| 13,419
|
def _recursive_simplify(expr):
""" Simplify the expression as much as possible based on
domain knowledge. """
input_expr = expr
# Simplify even further, based on domain knowledge:
# windowses = ('WIN32', 'WINRT')
apples = ("MACOS", "UIKIT", "IOS", "TVOS", "WATCHOS")
bsds = ("FREEBSD", "OPENBSD", "NETBSD")
androids = ("ANDROID", "ANDROID_EMBEDDED")
unixes = (
"APPLE",
*apples,
"BSD",
*bsds,
"LINUX",
*androids,
"HAIKU",
"INTEGRITY",
"VXWORKS",
"QNX",
"WASM",
)
unix_expr = simplify_logic("UNIX")
win_expr = simplify_logic("WIN32")
false_expr = simplify_logic("false")
true_expr = simplify_logic("true")
expr = expr.subs(Not(unix_expr), win_expr) # NOT UNIX -> WIN32
expr = expr.subs(Not(win_expr), unix_expr) # NOT WIN32 -> UNIX
# UNIX [OR foo ]OR WIN32 -> ON [OR foo]
expr = _simplify_expressions(expr, Or, (unix_expr, win_expr), true_expr)
# UNIX [AND foo ]AND WIN32 -> OFF [AND foo]
expr = _simplify_expressions(expr, And, (unix_expr, win_expr), false_expr)
expr = _simplify_flavors_in_condition("WIN32", ("WINRT",), expr)
expr = _simplify_flavors_in_condition("APPLE", apples, expr)
expr = _simplify_flavors_in_condition("BSD", bsds, expr)
expr = _simplify_flavors_in_condition("UNIX", unixes, expr)
expr = _simplify_flavors_in_condition("ANDROID", ("ANDROID_EMBEDDED",), expr)
# Simplify families of OSes against other families:
expr = _simplify_os_families(expr, ("WIN32", "WINRT"), unixes)
expr = _simplify_os_families(expr, androids, unixes)
expr = _simplify_os_families(expr, ("BSD", *bsds), unixes)
for family in ("HAIKU", "QNX", "INTEGRITY", "LINUX", "VXWORKS"):
expr = _simplify_os_families(expr, (family,), unixes)
# Now simplify further:
expr = simplify_logic(expr)
while expr != input_expr:
input_expr = expr
expr = _recursive_simplify(expr)
return expr
| 13,420
|
def dropzone(url, **kwargs):
"""Dropzone component
A basic dropzone component that supports drag and drop uploading
of files which are posted to the URL provided.
>>> zoom.system.site = zoom.sites.Site()
>>> zoom.system.site.packages = {}
>>> zoom.system.request = zoom.utils.Bunch(app=zoom.utils.Bunch(name='hello', packages={}))
>>> c = dropzone('/app/files')
>>> isinstance(c, zoom.Component)
True
"""
zoom.requires('dropzone')
id = 'dropzone_' + uuid.uuid4().hex
js = """
var %(id)s = new Dropzone("#%(id)s", {url: "%(url)s"});
""" % dict(id=id, url=url)
html = div(classed='dropzone', id=id, **kwargs)
return zoom.Component(html)
| 13,421
|
def fmt_bytes(size_bytes):
"""Return a nice 'total_size' string with Gb, Mb, Kb, and Byte ranges"""
units = ["Bytes", "KB", "MB", "GB"]
if size_bytes == 0:
return f"{0} Bytes"
for unit in units:
digits = int(math.log10(size_bytes)) + 1
if digits < 4:
return f"{round(size_bytes, 1)} {unit}"
size_bytes /= 1024
return f"{size_bytes} TB"
| 13,422
|
def plot_points(x, point_size=0.005, c='g'):
"""
x: point_nr,3
"""
if c == 'b':
k = 245
elif c == 'g':
k = 25811000
elif c == 'r':
k = 11801000
elif c == 'black':
k = 2580
else:
k = 2580
colors = np.ones(x.shape[0]) * k
plot = k3d.plot(name='points')
print(colors.shape)
plt_points = k3d.points(x, colors.astype(np.uint32), point_size=point_size)
plot += plt_points
plt_points.shader = '3d'
plot.display()
| 13,423
|
def julianDays(year, month, day, hour, minute):
"""
Calculate the julian day (day of year) based on the known date/time
information.
:param year: :class:`numpy.ndarray` of the year of all observations.
:param month: As for year, but for the month of observation.
:param day: As for year, but for the day of observation.
:param hour: As for year, but for the hour of the observation.
:param minute: As for year, but for the minute of the observation.
:returns: :class:`numpy.ndarray` of julian day values for each
observation.
"""
LOG.debug("Calculating julian day (day of year) values")
if np.any(year < 0):
raise ValueError("Error in input year information - check input file")
if np.any(month >= 13):
raise ValueError("Error in input month information - check input file")
if np.any(day > 31):
raise ValueError("Error in input day information - check input file")
if np.any(hour > 24):
raise ValueError("Error in input hour information - check input file")
if np.any(minute > 60):
raise ValueError(
"Error in input minute information - check input file")
# set all years prior to 1900 to 1904 - strftime() requires year >=1900;
# and in the Gregorian calendar, 1900 is not a leap year (and there are
# many years prior to 1900 that are!).
second = np.zeros((hour.size), 'i')
jyear = np.copy(year)
jyear[np.where(jyear < 1900)] = 1904
day = [datetime(jyear[i], month[i], day[i], hour[i], minute[i],
second[i]) for i in xrange(year.size)]
jdays = np.array([int(day[i].strftime("%j")) for
i in xrange(year.size)])
return jdays
| 13,424
|
def get_valid_split(records: dict, train_val_test: Union[list, np.ndarray]) -> dict:
""" Gets a train, val, test split with at least one instance of every class
Keep doing train_test_split until each split of the data has at least one single example of every behavior
in the dataset. it would be bad if your train data had class counts: [1000, 0, 0, 10] and your test data had
class counts: [500, 100, 300, 0]
Parameters
----------
records: dict of dicts
See train_val_test_split
train_val_test: list, np.ndarray
See train_val_test_split
Returns
-------
split_dict: dict
See train_val_test_split
"""
is_wrong = True
split_dict = None
while is_wrong:
split_dict = train_val_test_split(records, train_val_test)
should_continue = do_all_classes_have_labels(records, split_dict)
if not should_continue:
warnings.warn('Not all classes in the dataset have *any* labels!')
return split_dict
is_wrong = False
for split in ['train', 'val', 'test']:
labelfiles = [records[i]['label'] for i in split_dict[split]]
if len(labelfiles) > 0:
_, class_counts, _, _, _ = read_all_labels(labelfiles)
if not np.all(class_counts > 0):
is_wrong = True
return split_dict
| 13,425
|
def _random_op(sites, ldim, hermitian=False, normalized=False, randstate=None,
dtype=np.complex_):
"""Returns a random operator of shape (ldim,ldim) * sites with local
dimension `ldim` living on `sites` sites in global form.
:param sites: Number of local sites
:param ldim: Local ldimension
:param hermitian: Return only the hermitian part (default False)
:param normalized: Normalize to Frobenius norm=1 (default False)
:param randstate: numpy.random.RandomState instance or None
:returns: numpy.ndarray of shape (ldim,ldim) * sites
>>> A = _random_op(3, 2); A.shape
(2, 2, 2, 2, 2, 2)
"""
op = _randfuncs[dtype]((ldim**sites,) * 2, randstate=randstate)
if hermitian:
op += np.transpose(op).conj()
if normalized:
op /= np.linalg.norm(op)
return op.reshape((ldim,) * 2 * sites)
| 13,426
|
def test_history_beats_optimizer():
"""Test overwriting from history vs whatever the optimizer reports."""
problem = CRProblem(
x_guesses=np.array([0.25, 0.25]).reshape(1, -1)
).get_problem()
max_fval = 10
scipy_options = {"maxfun": max_fval}
result_hist = optimize.minimize(
problem=problem,
optimizer=optimize.ScipyOptimizer(method="TNC", options=scipy_options),
n_starts=1,
options=optimize.OptimizeOptions(history_beats_optimizer=True),
filename=None,
progress_bar=False,
)
result_opt = optimize.minimize(
problem=problem,
optimizer=optimize.ScipyOptimizer(method="TNC", options=scipy_options),
n_starts=1,
options=optimize.OptimizeOptions(history_beats_optimizer=False),
filename=None,
progress_bar=False,
)
for result in (result_hist, result_opt):
# number of function evaluations
assert result.optimize_result.list[0]['n_fval'] <= max_fval + 1
# optimal value in bounds
assert np.all(problem.lb <= result.optimize_result.list[0]['x'])
assert np.all(problem.ub >= result.optimize_result.list[0]['x'])
# entries filled
for key in ('fval', 'x', 'grad'):
val = result.optimize_result.list[0][key]
assert val is not None and np.all(np.isfinite(val))
# TNC funnily reports the last value if not converged
# (this may break if their implementation is changed at some point ...)
assert (
result_hist.optimize_result.list[0]['fval']
< result_opt.optimize_result.list[0]['fval']
)
| 13,427
|
def enron_dataset_part012() -> Path:
"""
Returns:
A directory with two PST files:
chris_dorland_000_1_1_1.pst
chris_dorland_001_1_1_1.pst
"""
name = "chris_dorland"
files = ["chris_dorland_000_1_1_1.pst", "chris_dorland_001_1_1_1.pst"]
url = f"{ENRON_DATASET_URL}/chris_dorland.zip"
yield fetch_enron_dataset(name, files, url)
| 13,428
|
def set_compute_type(type):
""" Sets the compute type of the convolution operation, and other operations """
global COMPUTE_TYPE
COMPUTE_TYPE = type
| 13,429
|
def minimize(system, positions, platform=None, tolerance=1.0*unit.kilocalories_per_mole/unit.angstroms, maxIterations=50):
"""Minimize the energy of the given system.
Parameters
----------
platform : simtk.openmm.Platform or None, optional
If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.
tolerance : simtk.unit.Quantity with units compatible with energy/distance, optional, default = 1*kilocalories_per_mole/angstroms
Minimization tolerance
maxIterations : int, optional, default=50
Maximum number of iterations for minimization
Returns
-------
minimized_positions : simtk.openmm.Quantity with shape [nparticle,3] with units compatible with distance
The energy-minimized positions.
"""
timestep = 1.0 * unit.femtoseconds
integrator = openmm.VerletIntegrator(timestep)
context = create_context(system, integrator, platform)
context.setPositions(positions)
openmm.LocalEnergyMinimizer.minimize(context, tolerance, maxIterations)
minimized_positions = context.getState(getPositions=True).getPositions(asNumpy=True)
del context, integrator
return minimized_positions
| 13,430
|
async def fetch(url="", headers=DEFAULT_HEADERS, params={}, payload={}, method="GET", loop=None):
"""fetch content from the url"""
if not url:
return
async with aiohttp.ClientSession(loop=loop, headers=headers) as session:
_method = getattr(session, method.lower())
async with _method(url, params=params, data=payload) as resp:
return await resp.json()
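# Hypothetical usage sketch (needs aiohttp, network access and the module's
# DEFAULT_HEADERS; the URL is illustrative):
import asyncio

async def _demo():
    data = await fetch("https://httpbin.org/get", params={"q": "demo"})
    print(data)

asyncio.run(_demo())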
| 13,431
|
def retrieve_prefix_fixture():
"""Load test fixture data."""
with open("./tests/fixtures/s3_prefix_list.json") as f:
    return json.load(f)
| 13,432
|
def test_multiline_attribute():
""" Test parsing multiline attributes in LDIF. """
text = "dn: cn=unimaginably+sn=very,ou=very,dc=very,dc=long,\n dc=line\ncn: unimaginably\nsn: very\nsn: long\n"
with StringIO(text) as test:
reader = LDIFReader(test)
ent = next(reader)
assert ent.dn == "cn=unimaginably+sn=very,ou=very,dc=very,dc=long,dc=line"
assert ent["cn"][0] == "unimaginably"
assert ent["sn"][0] == "very"
assert ent["sn"][1] == "long"
| 13,433
|
def parse_args(version: str) -> Namespace:
"""
Parse arguments passed to the application.
A custom argument parser handles multiple commands and options to launch
the desired function.
Parameters
----------
version : string
A ``string`` of the Bobber version.
Returns
-------
Namespace
Returns a ``Namespace`` of all of the arguments that were parsed from
the application during runtime.
"""
parser = ArgumentParser(f'Bobber Version: {version}')
parser.add_argument('--version', action='version', version=__version__)
# Required positional command subparser which should be specified first
commands = parser.add_subparsers(dest='command', metavar='command')
commands_parent = ArgumentParser(add_help=False)
# More general options which apply to a majority of the running commands
# Note that all arguments prepended with '--' are optional
commands_parent.add_argument('log_path', metavar='log-path', help='Path '
'used to store log files on the head node')
commands_parent.add_argument('hosts', help='Comma-separated list of '
'hostnames or IP addresses',
type=unique_hosts)
commands_parent.add_argument('--config-path', help='Read a JSON config '
'file with expected parameters and use those '
'values for testing. Ignores all other '
'optional flags')
commands_parent.add_argument('--gpus', help='Number of GPUs contained '
'within a system or systems under test '
'(heterogeneous counts not supported)',
type=int)
commands_parent.add_argument('--compute-gid', help='The compute gid. '
'defaults to 0 - check with "show_gids" '
'command. A non-default gid is needed for '
'Ethernet (frequently gid 3)', type=int,
default=0)
commands_parent.add_argument('--nccl-tc', help='NCCL setting required to '
'use prio3 traffic for Ethernet. Set to 106 '
'for Ethernet, and do not set for IB.',
type=int)
commands_parent.add_argument('--batch-size-sm', help='Batch size to use '
'with DALI data ingest tests for small '
'images', type=int)
commands_parent.add_argument('--batch-size-lg', help='Batch size to use '
'with DALI data ingest tests for large '
'images', type=int)
commands_parent.add_argument('--nccl-max', help='Specify the maximum data '
'size to test with NCCL, in Gigabytes '
'(default is 1 GB)', type=int)
commands_parent.add_argument('--nccl-ib-hcas', help='Specify the list of '
'interfaces to use for NCCL test multinode '
'communication', default='')
commands_parent.add_argument('--ssh-iface', help='Specify ssh interface '
'for the system(s) under test ', default='')
commands_parent.add_argument('--no-direct', help='Disable running with '
'direct IO for applications that support it',
action='store_true')
commands_parent.add_argument('--io-depth', help='Customize the IO depth '
'for direct IO testing', type=int, default=16)
commands_parent.add_argument('--bw-threads', help='Maximum number of '
'threads to use for bandwidth tests',
type=int)
commands_parent.add_argument('--125k-threads', dest='stg_125k_threads',
help='Maximum number of threads to use for '
'125K IO size tests', type=int)
commands_parent.add_argument('--iops-threads', help='Maximum number of '
'threads to use for iops tests', type=int)
commands_parent.add_argument('--read-pattern', help='Specify IO pattern '
'for fio read tests. Supported values: '
'read, randread. Defaults to read.',
default='read',
choices=READ_PATTERNS)
commands_parent.add_argument('--write-pattern', help='Specify IO pattern '
'for fio write tests. Supported values: '
'write, randwrite. Defaults to write.',
default='write',
choices=WRITE_PATTERNS)
commands_parent.add_argument('--iterations', help='Number of iterations to'
' execute per test - a separate log file will'
' be generated for each iteration', type=int,
default=10)
commands_parent.add_argument('--sweep', help='If present, will run all '
'tests for all specified iterations from a '
'single system to the number of systems '
'specified in the --hosts flag, with a step '
'of a single system (so, 3 systems specified '
'would result in tests for 1, 2, and 3 '
'systems)', action='store_true')
commands_parent.add_argument('--system', help='If system is specified, '
'iops-threads, 125k-threads, bw-threads, '
'gpus, batch size, and network interface '
'names are given default values - override '
'by specifying the flags you\'d prefer to '
'override, ignore the flags you are ok with '
'using defaults for '
'supported systems: dgx-a100-single, '
'dgx-a100-dual, and dgx-2 for now. -single '
'is used for a system with a single storage '
'NIC, and -dual is used for a system with two'
' storage NICs', choices=SYSTEMS.keys())
commands_parent.add_argument('--stg-extra-flags', help='Experimental - '
'add extra flags to stg tests (currently '
'supported - stg-bw and stg-iops). If '
'providing more than one flag, wrap entire '
'set in quotes')
commands_parent.add_argument('--pause', help='Pause between tests for N '
'seconds to ensure any activity is finished '
'before the next test begins. Defaults to 0 '
'(no pause).', type=int, default=0)
# Create the test initiation commands with the general options above
commands.add_parser(RUN_ALL, help='Run all tests',
parents=[commands_parent])
commands.add_parser(RUN_DALI, help='Run DALI tests only',
parents=[commands_parent])
commands.add_parser(RUN_NCCL, help='Run NCCL tests only',
parents=[commands_parent])
commands.add_parser(RUN_STG_BW, help='Run storage bandwidth test only',
parents=[commands_parent])
commands.add_parser(RUN_STG_125K, help='Run storage 125K IO size test only',
parents=[commands_parent])
commands.add_parser(RUN_STG_IOPS, help='Run storage IOPS test only',
parents=[commands_parent])
commands.add_parser(RUN_STG_META, help='Run storage metadata test only',
parents=[commands_parent])
# Options specific to exporting the containers
export = commands.add_parser(EXPORT, help='Export the container for '
'multisystem tests')
# Options specific to parsing the results
parse = commands.add_parser(PARSE_RESULTS, help='Parse and display results '
'from the log files')
parse.add_argument('log_path', metavar='log-path', help='Path to saved '
'logfile location')
parse.add_argument('--json-filename', help='Specify the filename to use '
'for saving the JSON data. If not specified, the JSON '
'data will not be saved.', default=None, type=str)
parse.add_argument('--override-version-check', help='Optionally skip the '
'version check to ensure the same version of Bobber '
'was used for all tests.', action='store_true')
parse.add_argument('--compare-baseline', help='Compare the values produced'
' by a test run against a pre-defined baseline to '
'verify performance meets an acceptable threshold. '
'This command is ignored if the --custom-baseline flag '
'is used.',
choices=BASELINES)
parse.add_argument('--custom-baseline', help='Compare against a custom '
'baseline to verify performance meets an acceptable '
'threshold. This command overrides the '
'--compare-baseline flag.', type=str)
parse.add_argument('--baseline-tolerance', help='The percentage of '
'tolerance to include while comparing results against '
'a baseline. For example, if the desire is to allow '
'results to be within 5%% of the baseline and still '
'pass, enter "5" for the tolerance. This will only '
'measure tolerance below the result and will not punish'
' if numbers are higher than the baseline above the '
'tolerance level. This value is ignored if not running '
'the baseline comparison. Defaults to 0 tolerance.',
type=int, default=0)
parse.add_argument('--verbose', help='Display text-based information for '
'each system count in addition to the table.',
action='store_true')
# Options specific to building the containers
build = commands.add_parser(BUILD, help='Build the container')
# Options specific to casting the containers
cast = commands.add_parser(CAST, help='Start the container')
cast.add_argument('storage_path', metavar='storage-path', help='Path at '
'which the filesystem under test is mounted')
cast.add_argument('--ignore-gpu', help='Start the Bobber container '
'without GPUs', action='store_true')
# Options specific to loading a Docker image from a local binary
load = commands.add_parser(LOAD, help='Load a container from a local '
'binary')
load.add_argument('filename', help='Filename of local *.tar file of '
'the image to load')
return parser.parse_args()
| 13,434
|
def test_train_small_ensemblemodel_benchmark(small_moddata, tf_session):
"""Tests the `matbench_benchmark()` method for ensemble models."""
from modnet.matbench.benchmark import matbench_benchmark
from modnet.models import EnsembleMODNetModel
data = small_moddata
# set 'optimal' features manually
data.optimal_features = [
col for col in data.df_featurized.columns if col.startswith("ElementProperty")
]
results = matbench_benchmark(
data,
[[["eform"]]],
{"eform": 1},
model_type=EnsembleMODNetModel,
n_models=2,
inner_feat_selection=False,
fast=True,
nested=2,
n_jobs=1,
)
expected_keys = (
"nested_losses",
"nested_learning_curves",
"best_learning_curves",
"predictions",
"stds",
"targets",
"errors",
"scores",
"best_presets",
"model",
)
for key in expected_keys:
assert key in results
assert all(len(results[key]) == 5 for key in expected_keys)
| 13,435
|
def organizations_virtual_dns(self):
""" API core commands for Cloudflare API"""
self.add('AUTH', "organizations", "virtual_dns")
self.add('VOID', "organizations", "virtual_dns", "dns_analytics")
self.add('AUTH', "organizations", "virtual_dns", "dns_analytics/report")
self.add('AUTH', "organizations", "virtual_dns", "dns_analytics/report/bytime")
return
| 13,436
|
def grid_reference_to_northing_easting(grid_reference):
"""
Convert an Ordnance Survey (GB) grid reference string into numeric coordinates.
:param grid_reference: grid reference string, e.g. 'TQ3000080000'
:return: (easting, northing) tuple in metres, or (None, None) if the input is invalid
"""
grid_reference = grid_reference.strip().replace(' ', '')
if len(grid_reference) == 0 or len(grid_reference) % 2 == 1 or len(grid_reference) > 12:
return None, None
grid_reference = grid_reference.upper()
if grid_reference[0] not in 'STNOH' or grid_reference[1] == 'I':
return None, None
e = n = 0
c = grid_reference[0]
if c == 'T':
e = 500000
elif c == 'N':
n = 500000
elif c == 'O':
e = 500000
n = 500000
elif c == 'H':
n = 1000000
c = ord(grid_reference[1]) - 66
if c < 8: # J
c += 1
e += (c % 5) * 100000
n += (4 - c // 5) * 100000
c = grid_reference[2:]
try:
s = c[:int(len(c)/2)]
while len(s) < 5:
s += '0'
e += int(s)
s = c[int(-len(c)/2):]
while len(s) < 5:
s += '0'
n += int(s)
except Exception as error:
print("Caught exception during conversion. Issue: {}".format(error))
return None, None
# Data is converted into integers
return int(e), int(n)
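# Usage sketch: 'TQ' is the 100 km square covering central London, so the
# offsets are easting 500000 / northing 100000 plus the numeric part. Note
# that, despite the function name, the tuple comes back as (easting, northing).
print(grid_reference_to_northing_easting('TQ3000080000'))  # (530000, 180000)
print(grid_reference_to_northing_easting('bad ref'))       # (None, None)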
| 13,437
|
def tflite_copts_warnings():
"""Defines common warning flags used primarily by internal TFLite libraries."""
# TODO(b/155906820): Include with `tflite_copts()` after validating clients.
return select({
clean_dep("//tensorflow:windows"): [
# We run into trouble on Windows toolchains with warning flags,
# as mentioned in the comments below on each flag.
# We could be more aggressive in enabling supported warnings on each
# Windows toolchain, but we compromise with keeping BUILD files simple
# by limiting the number of config_setting's.
],
"//conditions:default": [
"-Wall",
],
})
| 13,438
|
async def send_events(count, sleep, channel, server):
"""
Allow to send fake event to the server
"""
for i in range(count):
await asyncio.sleep(sleep)
server.push_result(channel, {"foo": i})
| 13,439
|
def deleteMatches():
"""Remove all the match records from the database."""
db = connect()
db_cursor = db.cursor()
query = "DELETE FROM matches"
db_cursor.execute(query)
db.commit()
db.close()
| 13,440
|
def write_seqs_to_tfrecords(record_name, name_to_seqs, label,
frame_labels_string):
"""Write frames to a TFRecord file."""
writer = tf.io.TFRecordWriter(record_name)
for name in name_to_seqs:
if isinstance(label,int):
lb=label
else:
lb=label[name]
ex = get_example(name, name_to_seqs[name],
seq_label=lb,
frame_labels_string=frame_labels_string)
writer.write(ex.SerializeToString())
writer.close()
| 13,441
|
def sn_random_numbers(shape, antithetic=True, moment_matching=True,
fixed_seed=False):
"""Returns an ndarray object of shape with (pseudo)random numbers
that are standard normally distributed.
Parameters
----------
shape : tuple (o, n, m)
Generation of array with shape (o, n, m).
antithetic : bool, default=True
Generation of antithetic variates.
moment_matching : bool, default=True
Matching of first and second moments.
fixed_seed : bool, default=False
Flag to fix the seed.
Returns
-------
ran: numpy.ndarray
(o, n, m) array of (pseudo)random numbers.
"""
if fixed_seed:
np.random.seed(1000)
if antithetic:
ran = np.random.standard_normal(
(shape[0], shape[1], shape[2] // 2))
ran = np.concatenate((ran, -ran), axis=2)
else:
ran = np.random.standard_normal(shape)
if moment_matching:
ran = ran - np.mean(ran)
ran = ran / np.std(ran)
if shape[0] == 1:
return ran[0]
else:
return ran
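# Usage sketch: moment matching forces (near-)zero mean and unit standard
# deviation; antithetic variates require an even third dimension.
ran = sn_random_numbers((2, 3, 4), fixed_seed=True)
print(ran.shape)              # (2, 3, 4)
print(ran.mean(), ran.std())  # approximately 0.0 and 1.0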
| 13,442
|
def blue(N: int) -> np.ndarray:
"""
Blue noise.
* N: Amount of samples.
Power increases with 6 dB per octave.
Power density increases with 3 dB per octave.
https://github.com/python-acoustics
"""
x = white(N)
X = rfft(x) / N
S = np.sqrt(np.arange(X.size)) # Filter
y = irfft(X*S).real[:N]
return normalise(y)
| 13,443
|
def _remove_attribute(note_dict: Dict, attribute: str) -> Dict:
""" Create a copy of the note where a single attribute is removed """
d = dict(note_dict)
d[attribute] = None
return d
| 13,444
|
def get_config(path: str) -> config_schema:
"""Load the config from the path, validate and return the dcitionary
Args:
path (str): Path the config.yaml
Returns:
config_schema: The configuration dictionary
"""
config_path = Path(path)
config = yaml.full_load(open(config_path))
return Schema(config_schema).validate(config)
| 13,445
|
def compare_chars(first, second):
"""
Returns the greater of the two characters
:param first:
:param second:
:return: char
"""
return chr(max(ord(first), ord(second)))
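# Usage sketch: comparison is by Unicode code point.
print(compare_chars('a', 'b'))  # b
print(compare_chars('Z', 'a'))  # a  (ord('a') == 97 > ord('Z') == 90)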
| 13,446
|
def get_file_path(filename):
"""Find filename in the relative directory `../data/` .
Args:
filename (str): file we're looking for in the ./data/ directory.
Returns:
str: absolute path to file "filename" in ./data/ dir.
"""
root_dir = Path(__file__).parent.parent
file_dir = os.path.join(str(root_dir), "data", filename)
return file_dir
| 13,447
|
def parse_arguments() -> argparse.Namespace:
"""Parses command line parameters."""
argument_parser = argparse.ArgumentParser(
usage=f"Database transfer tool for Cloud Composer v.{SCRIPT_VERSION}.\n\n"
+ USAGE
+ "\n"
)
argument_parser.add_argument("operation", type=str, choices=["import", "export"])
argument_parser.add_argument("--project", type=str, required=True)
argument_parser.add_argument("--environment", type=str, required=True)
argument_parser.add_argument("--location", type=str, required=True)
argument_parser.add_argument("--fernet-key-file", type=str, required=True)
return argument_parser.parse_args()
| 13,448
|
def find_author():
"""This returns 'The NeuroKit's development team'"""
result = re.search(
r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__author__"),
open("../neurokit2/__init__.py").read(),
)
return str(result.group(1))
| 13,449
|
def tate_pairing(E, P, Q, m, k=2):
"""
Calculate Tate Pairing
Args:
E: The Elliptic Curve
P: A point over E which has order m
Q: A point over E which has order m
m: The order of P, Q on E
k: [Optional] The Embedding Degree of m on E
"""
from ecpy.utils.util import is_enable_native, _native
if is_enable_native:
P = _native.EC_elem(E.ec, tuple(P.x), tuple(P.y), tuple(P.z))
Q = _native.EC_elem(E.ec, tuple(Q.x), tuple(Q.y), tuple(Q.z))
if E.ec.type == 1:
t = _native.FF_elem(0)
elif E.ec.type == 2:
t = _native.EF_elem(0, 0)
_native.tate_pairing(t, E.ec, P, Q, m, k)
if E.ec.type == 1:
from ecpy.fields.Zmod import ZmodElement
return ZmodElement(E.field, t.to_python())
elif E.ec.type == 2:
from ecpy.fields.ExtendedFiniteField import ExtendedFiniteFieldElement
t = t.to_python()
return ExtendedFiniteFieldElement(E.field, t[0], t[1])
else:
f = miller(E, P, Q, m)
return f ** (((E.field.p ** k) - 1) // m)
| 13,450
|
def kl_loss(img,decoded_img,encoder_log_var,encoder_mu):
"""
KL loss for VAEs
"""
kl_loss = -0.5 * tf.reduce_sum( (1+encoder_log_var-tf.exp(encoder_log_var)-encoder_mu**2), axis=[1,2,3],name='klloss' )
return tf.reduce_mean(kl_loss,axis=0)
| 13,451
|
def steady(L, maxiter=10, tol=1e-6, itertol=1e-5, method='solve',
use_umfpack=True, use_precond=False):
"""
Deprecated. See steadystate instead.
"""
message = "steady has been deprecated, use steadystate instead"
warnings.warn(message, DeprecationWarning)
return steadystate(L, [], maxiter=maxiter, tol=tol,
use_umfpack=use_umfpack, use_precond=use_precond)
| 13,452
|
def and_intersection(map_list):
"""
Bitwise and a list of HealSparseMaps as an intersection. Only pixels that
are valid in all the input maps will have valid values in the output.
Only works on integer maps.
Parameters
----------
map_list : `list` of `HealSparseMap`
Input list of maps to bitwise and
Returns
-------
result : `HealSparseMap`
Bitwise and of maps
"""
filler = map_list[0]._sparse_map.dtype.type(-1)
return _apply_operation(map_list, np.bitwise_and, filler, union=False, int_only=True)
| 13,453
|
def parse(url):
"""Parses a cache URL."""
config = {}
url = urlparse.urlparse(url)
# Handle python 2.6 broken url parsing
path, query = url.path, url.query
if '?' in path and query == '':
path, query = path.split('?', 1)
cache_args = dict([(key.upper(), ';'.join(val)) for key, val in
urlparse.parse_qs(query).items()])
# Update with environment configuration.
backend = BACKENDS.get(url.scheme)
if not backend:
raise Exception('Unknown backend: "{0}"'.format(url.scheme))
config['BACKEND'] = BACKENDS[url.scheme]
redis_options = {}
if url.scheme == 'hiredis':
redis_options['PARSER_CLASS'] = 'redis.connection.HiredisParser'
# File based
if not url.netloc:
if url.scheme in ('memcached', 'pymemcached', 'djangopylibmc'):
config['LOCATION'] = 'unix:' + path
elif url.scheme in ('redis', 'hiredis'):
match = re.match(r'.+?(?P<db>\d+)', path)
if match:
db = match.group('db')
path = path[:path.rfind('/')]
else:
db = '0'
config['LOCATION'] = 'unix:%s?db=%s' % (path, db)
else:
config['LOCATION'] = path
# URL based
else:
# Handle multiple hosts
config['LOCATION'] = ';'.join(url.netloc.split(','))
if url.scheme in ('redis', 'hiredis'):
if url.password:
redis_options['PASSWORD'] = url.password
# Specifying the database is optional, use db 0 if not specified.
db = path[1:] or '0'
port = url.port if url.port else 6379
config['LOCATION'] = "redis://%s:%s/%s" % (url.hostname, port, db)
if redis_options:
config.setdefault('OPTIONS', {}).update(redis_options)
if url.scheme == 'uwsgicache':
config['LOCATION'] = config.get('LOCATION', 'default') or 'default'
# Pop special options from cache_args
# https://docs.djangoproject.com/en/1.10/topics/cache/#cache-arguments
options = {}
for key in ['MAX_ENTRIES', 'CULL_FREQUENCY']:
val = cache_args.pop(key, None)
if val is not None:
options[key] = int(val)
if options:
config.setdefault('OPTIONS', {}).update(options)
config.update(cache_args)
return config
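# Hypothetical usage sketch (BACKENDS must map the scheme; values shown are
# what the code above would produce for this URL):
# parse('redis://:secret@localhost:6380/2')
# -> {'BACKEND': BACKENDS['redis'],
#     'LOCATION': 'redis://localhost:6380/2',
#     'OPTIONS': {'PASSWORD': 'secret'}}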
| 13,454
|
def states_state(id=""):
""" displays a HTML page with a list of cities by states """
states = storage.all(State).values()
states = sorted(states, key=lambda k: k.name)
found = 0
state = ""
cities = []
for i in states:
if id == i.id:
state = i
found = 1
break
if found:
states = sorted(state.cities, key=lambda k: k.name)
state = state.name
if id and not found:
found = 2
return render_template('9-states.html',
state=state,
array=states,
found=found)
| 13,455
|
def make_tree_item(parent, text, icon, first_col_text=None, second_col_text=None):
"""
Build a child item of the tree.
:param parent: parent node element under which the child item is created
:param text: display text of the child node
:param icon: icon object shown for this element
:param first_col_text: hidden text stored in the first extra column
:param second_col_text: hidden text stored in the second extra column
"""
item = MyTreeWidgetItem(parent)
item.setIcon(0, icon)
item.setText(0, text)
if first_col_text:
# Stored as a hidden attribute in the given column
item.setText(1, first_col_text)
if second_col_text:
item.setText(2, second_col_text)
return item
| 13,456
|
def deploy_results(run_once=False):
"""
Harvest data and deploy slides indefinitely
"""
while True:
start = time()
safe_execute('ap.update')
safe_execute('data.update')
safe_execute('deploy_bop')
safe_execute('deploy_results')
safe_execute('deploy_big_boards')
safe_execute('deploy_states')
duration = int(time() - start)
wait = app_config.RESULTS_DEPLOY_INTERVAL - duration
if wait < 0:
print 'WARN: Deploying slides took %ds longer than %ds' % (abs(wait), app_config.RESULTS_DEPLOY_INTERVAL)
wait = 0
else:
print 'Deploying slides ran in %ds' % duration
if run_once:
print 'Run once specified, exiting.'
sys.exit()
else:
print 'Waiting %ds...' % wait
sleep(wait)
| 13,457
|
def get_boundary_condition(name):
"""
Return a boundary condition by name
"""
try:
return _BOUNDARY_CONDITIONS[name]
except KeyError:
ocellaris_error(
'Boundary condition "%s" not found' % name,
'Available boundary conditions:\n'
+ '\n'.join(
' %-20s - %s' % (n, s.description)
for n, s in sorted(_BOUNDARY_CONDITIONS.items())
),
)
raise
| 13,458
|
def run(ts):
""" Actually do the hard work of getting the USDM in geojson """
pgconn = get_dbconn('postgis')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# Look for polygons into the future as well as we now have Flood products
# with a start time in the future
cursor.execute("""
SELECT ST_asGeoJson(geom) as geojson, dm, valid
from usdm WHERE valid = %s ORDER by dm ASC
""", (ts, ))
if cursor.rowcount == 0:
# go back one week
cursor.execute("""
SELECT ST_asGeoJson(geom) as geojson, dm, valid
from usdm WHERE valid = %s ORDER by dm ASC
""", (ts - datetime.timedelta(days=7), ))
utcnow = datetime.datetime.utcnow()
res = {'type': 'FeatureCollection',
'features': [],
'generation_time': utcnow.strftime("%Y-%m-%dT%H:%M:%SZ"),
'count': cursor.rowcount}
for row in cursor:
res['features'].append(dict(type="Feature",
id=row['dm'],
properties=dict(
date=row['valid'].strftime("%Y-%m-%d"),
dm=row['dm']),
geometry=json.loads(row['geojson'])
))
return json.dumps(res)
| 13,459
|
def mtxslv_user_ratings(user_id, dataset):
"""
Receives user_id and dataset. Looks for all
occurrences of user_id in the dataset and returns
that subset.
If no user_id is found, return an empty
numpy array.
"""
subset = [] # the same thing as I_i (set of item user_id has voted)
for it in range(0,np.shape(dataset)[0]):
if (dataset[it,user_column] == user_id):
subset.append(dataset[it,:].tolist())
return np.array(subset)
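# Minimal sketch (assumes the module-level `user_column` is 0, i.e. the first
# column of `dataset` holds user ids; numbers are illustrative):
# ratings = np.array([[1, 10, 4.0],
#                     [2, 10, 3.0],
#                     [1, 20, 5.0]])
# mtxslv_user_ratings(1, ratings)  # -> rows 0 and 2 as a (2, 3) array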
| 13,460
|
def random_rotation(min, max, prng=DEFAULT_PRNG):
""" Construct a random rotation between -max and max.
Args
min: a scalar for the minimum absolute angle in radians
max: a scalar for the maximum absolute angle in radians
prng: the pseudo-random number generator to use.
Returns
a homogeneous 3 by 3 rotation matrix
"""
return rotation(prng.uniform(min, max))
| 13,461
|
def test_save_pred_vs_label_7tuple_short_side_60_img():
"""
When the image is too low in resolution (e.g. short side 60),
we cannot use cv2's text rendering code. Instead, we will also save
the upsampled version.
"""
short_side = 60 # pixels
data_dir = f'{TEST_DATA_ROOT}/Camvid_test_data'
img_fpath = f'{data_dir}/images/0016E5_08159.png'
label_fpath = f'{data_dir}/preds/0016E5_08159.png'
img_rgb = imageio.imread(img_fpath)
label_img = imageio.imread(label_fpath)
img_rgb = resize_img_by_short_side(img_rgb, short_side, img_type='rgb')
label_img = resize_img_by_short_side(label_img, short_side, img_type='label')
img_h, img_w = label_img.shape
pred_img = np.random.randint(0,200, (img_h, img_w)).astype(np.uint16)
id_to_class_name_map = get_dataloader_id_to_classname_map('pascal-context-460')
save_fpath = f'{TEST_DATA_ROOT}/rand_549_temp_small.png'
save_pred_vs_label_7tuple(img_rgb, pred_img, label_img, id_to_class_name_map, save_fpath)
os.remove(f'{TEST_DATA_ROOT}/rand_549_temp_small_upsample_pred_labels_palette.png')
os.remove(f'{TEST_DATA_ROOT}/rand_549_temp_small_pred_labels_palette.png')
| 13,462
|
def test_save_and_load(tmp_path):
"""Verify that we can save the label image and load it correctly."""
array = np.zeros((2, 3, 4), dtype=np.int32)
pixel_coordinates = {
Axes.X: [2, 3, 4, 5],
Axes.ZPLANE: [0, 1],
}
physical_coordinates = {
Coordinates.X: [0, 0.5, 1.0, 1.5],
Coordinates.Y: [0, 0.2, 0.4],
Coordinates.Z: [0, 0.1],
}
log = Log()
# instantiate a filter (even though that makes no sense in this context)
filt = Filter.Reduce((Axes.ROUND,), func="max")
log.update_log(filt)
label_image = LabelImage.from_label_array_and_ticks(
array, pixel_coordinates, physical_coordinates, log)
label_image.to_netcdf(tmp_path / "label_image.netcdf")
loaded_label_image = LabelImage.open_netcdf(tmp_path / "label_image.netcdf")
assert label_image.xarray.equals(loaded_label_image.xarray)
assert label_image.xarray.attrs == loaded_label_image.xarray.attrs
| 13,463
|
def precheck(files, dirs):
"""Checks whether the files/dirs user specified exists or are valid
Arguments:
files {list} -- File list
dirs {list} -- Dir list
Raises:
AssertionError -- Raises if the given file doesn't exist or isn't a valid file
AssertionError -- Raises if the given dir doesn't exist or isn't a valid dir
"""
for f in files:
if not os.path.isfile(f):
raise AssertionError(
'Item declared in files: {0} is not a file'.format(f))
for d in dirs:
if not os.path.isdir(d):
raise AssertionError(
'Item declared in dirs {0} is not a directory'.format(d))
| 13,464
|
def continued_fraction_iterator(x):
"""
Return continued fraction expansion of x as iterator.
Examples
========
>>> from sympy.core import Rational, pi
>>> from sympy.ntheory.continued_fraction import continued_fraction_iterator
>>> list(continued_fraction_iterator(Rational(3, 8)))
[0, 2, 1, 2]
>>> for i, v in enumerate(continued_fraction_iterator(pi)):
... if i > 7:
... break
... print(v)
3
7
15
1
292
1
1
1
References
==========
.. [1] http://en.wikipedia.org/wiki/Continued_fraction
"""
while True:
i = Integer(x)
yield i
x -= i
if not x:
break
x = 1/x
| 13,465
|
def country_code_from_name(country_names,l3=False):
"""2 letter ['BE'] or 3 letter codes ['BEL'] from country names
Accepts string or list of strings e.g, 'Serbia' or ['Belgium','Slovakia']
Update 3/1/2022: also accepts non uppercase titles, e.g. ['united Kingdom', 'hungary']
Arguments:
*country_names* (string or list of strings) : country names
*l3* (Boolean) : return 3l-code; default = False -> returns 2l-code
Returns
*sel* (string or list of strings) : 2l or 3l codes
"""
if True:
data = config['paths']['data']
df = pd.read_csv((data / 'country_codes.csv'), delimiter=';')
df_3l = df['country']
if l3:
code_col = 'code3' #return 3l code
else:
code_col = 'code2' #return 2l code
unpack = False
if not isinstance(country_names, list):
country_names = [country_names]
unpack = True
capitalized_names = [name.title() for name in country_names]
sel = list(df.loc[df.country.isin(capitalized_names)][code_col])
if unpack: sel = sel[0]
return sel
| 13,466
|
def steem_amount(value):
"""Returns a decimal amount, asserting units are STEEM"""
return parse_amount(value, 'STEEM')
| 13,467
|
def get_edge_angle(fx,fy):
"""エッジ強度と勾配を計算する関数
"""
# np.power : 行列のn乗を計算
# np.sqrt : 各要素の平方根を計算
edge = np.sqrt(np.power(fx.astype(np.float32),2)+np.power(fy.astype(np.float32),2))
edge = np.clip(edge, 0, 255)
fx = np.maximum(fx, 1e-5)
angle = np.arctan(fy/fx)
return edge,angle
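# Usage sketch with toy Sobel-style gradients:
import numpy as np

fx = np.array([[3.0, 0.0]], dtype=np.float32)
fy = np.array([[4.0, 1.0]], dtype=np.float32)
edge, angle = get_edge_angle(fx, fy)
print(edge)   # [[5. 1.]]
print(angle)  # arctan(fy / max(fx, 1e-5)); second entry is close to pi/2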
| 13,468
|
def write_site_pair_score_data_to_file(sorted_data_list, output_file_path, algorithm_used, max_iterations=None, num_threads=None):
"""Since site indices are starting from zero within python we add one to
each of them when they are being written to output file.
"""
formater = '#' + '='*100
formater += '\n'
with open(output_file_path, 'w') as fh:
fh.write(formater)
fh.write('# This result is computed using {}\n'.format(algorithm_used))
if max_iterations is not None:
fh.write('# maximum number of gradient descent iterations: {}\n'.format(max_iterations))
if num_threads is not None:
fh.write('# Number of threads used: {}\n'.format(num_threads))
fh.write('# The first and second columns are site pairs. The third column represents interaction score\n')
fh.write(formater)
for site_pair, score in sorted_data_list:
i, j = site_pair[0] + 1, site_pair[1] + 1
fh.write('{}\t{}\t{}\n'.format(i, j, score))
return None
| 13,469
|
def cs_2tuple_list(value):
"""
Parses a comma separated 2-tuple strings into a python list of tuples
>>> cs_2tuple_list('')
[]
>>> cs_2tuple_list('(foobar, "test")')
[('foobar', 'test')]
>>> cs_2tuple_list('(foobar, "test"), ("barfoo", " lalala") ')
[('foobar', 'test'), ('barfoo', 'lalala')]
>>> cs_2tuple_list('(foobar, "test"), ("(barfoo", "lalala)")')
[('foobar', 'test'), ('(barfoo', 'lalala)')]
"""
res = ['']
in_tuple = False
quote_char = None
for char in value:
if in_tuple:
if not quote_char and char in ["'", '"']:
quote_char = char
elif char == quote_char:
quote_char = None
elif not quote_char and char == ")":
res[-1] = tuple(cs_string_list(res[-1]))
in_tuple = False
else:
res[-1] += char
elif char == " ":
continue
elif char == ",":
res.append('')
elif char == "(":
in_tuple = True
else:
raise ValueError("Unexpected character '{}' after '{}'"
.format(char, res))
if in_tuple or quote_char:
raise ValueError("Unterminated tuple {}".format(res[-1]))
# remove empty string stored as state
if not isinstance(res[-1], tuple):
del res[-1]
if any(not isinstance(e, tuple) or len(e) != 2 for e in res):
raise ValueError("Unexpected value in {}".format(res))
return res
| 13,470
|
def expectatedCapacityFactorFromDistribution( powerCurve, windspeedValues, windspeedCounts):
"""Computes the expected capacity factor of a wind turbine based on an explicitly-provided wind speed distribution
"""
windspeedValues = np.array(windspeedValues)
windspeedCounts = np.array(windspeedCounts)
if not len(windspeedValues.shape) == 1: raise ResError("windspeedValues must be 1-dimensional")
# Handle 2 dimensional counts with 1 dimensional wind speeds
if len(windspeedCounts.shape) > 1:
if not windspeedCounts.shape[0] == windspeedValues.shape[0]:
raise ResError("Dimensional incompatability")
windspeedValues = np.reshape(windspeedValues, (windspeedCounts.shape[0],1))
# Estimate generation distribution
gen = np.interp(windspeedValues, powerCurve.ws, powerCurve.cf, left=0, right=0) * windspeedCounts
meanGen = gen.sum(0)/windspeedCounts.sum(0)
# Done
return meanGen
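A small usage sketch, assuming NumPy is available; the power curve object only needs .ws and .cf attributes, so a namedtuple stands in for it here.

from collections import namedtuple
import numpy as np

PowerCurve = namedtuple('PowerCurve', ['ws', 'cf'])
pc = PowerCurve(ws=np.array([0.0, 5.0, 10.0]), cf=np.array([0.0, 0.5, 1.0]))
cf = expectatedCapacityFactorFromDistribution(pc, windspeedValues=[5.0, 10.0],
                                              windspeedCounts=[3, 1])
# (0.5*3 + 1.0*1) / (3 + 1) = 0.625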
| 13,471
|
def check_icon_arg(src, default):
"""
Checks if icon arguments are valid: either a URL or an absolute path.
:param src: Source of the icon
:param default: default value of the icon
:return: src (possibly pre-pended with "file://")
"""
if src != default:
# check if URl
if not src.startswith('https://') and not src.startswith('http://'):
# Either a file or incorrect input
if os.path.isabs(src):
src = "file://" + src
else:
raise IOError(
f"Please provide a valid URL or valid *absolute* path to icon: {src}"
)
return src
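A short usage sketch (assuming os is importable, as the function itself requires): URLs pass through unchanged and absolute paths gain a file:// prefix.

check_icon_arg('https://example.com/icon.png', default=None)   # returned as-is
check_icon_arg('/tmp/icon.png', default=None)                   # -> 'file:///tmp/icon.png'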
| 13,472
|
def setup_2d_em_pic():
"""
Returns a 2D electromagnetic PIC for testing
"""
params = {
"length": [2 * np.pi, 2 * np.pi],
"cells": [32, 32],
"dimensions": 2,
"nppc": 10,
"single_stream": { # defaults for single stream instability
"stream_v": 3,
"stream_frac": 0.8,
"stream_width": 1
},
"landau": { # defaults for Landau damping
"amplitude": 0.8,
"mode": 3
},
"two_stream": { # defaults for two stream instability
"vpos": 2,
"vneg": -2,
"stream_frac": 1,
"stream_width": 0.8
},
}
sim_params = plat.params.Parameters(2)
sim_params.set_from_dict(params)
pic = plat.pic_2d_em.PIC_2D_EM(sim_params)
return pic
| 13,473
|
def young_laplace(Bo, nPoints, L):
    """Numerically integrate the Young-Laplace ODE system over arc length.
    Bo = float - Bond number
    nPoints = int - number of integration points desired
    L = float - final arc length for range of integration
    Returns the arrays r, z, fi evaluated at nPoints arc-length values from 0 to L.
    """
#integration range and number of integration points
s1=L
N=nPoints
#set initial values
s0 = 0
y0 = [0.00001,0.00001,0.00001]
sVec = np.linspace(s0,s1,N)
bond=Bo
sol = odeint(ode_system,y0,sVec,args=(bond,))
r = sol[:,1]
z = sol[:,2]
fi = sol[:,0]
return r,z,fi
| 13,474
|
def get_uncertain_point_coords_on_grid(uncertainty_map, num_points):
"""
Find `num_points` most uncertain points from `uncertainty_map` grid.
Args:
uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty
values for a set of points on a regular H x W grid.
num_points (int): The number of points P to select.
Returns:
point_indices (Tensor): A tensor of shape (N, P) that contains indices from
[0, H x W) of the most uncertain points.
point_coords (Tensor): A tensor of shape (N, P, 2) that contains [0, 1] x [0, 1] normalized
coordinates of the most uncertain points from the H x W grid.
"""
R, _, H, W = uncertainty_map.shape
h_step = 1.0 / float(H)
w_step = 1.0 / float(W)
num_points = min(H * W, num_points)
point_indices = torch.topk(uncertainty_map.view(R, H * W), k=num_points, dim=1)[1]
point_coords = torch.zeros(R, num_points, 2, dtype=torch.float, device=uncertainty_map.device)
point_coords[:, :, 0] = w_step / 2.0 + (point_indices % W).to(torch.float) * w_step
point_coords[:, :, 1] = h_step / 2.0 + (point_indices // W).to(torch.float) * h_step
return point_indices, point_coords
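A usage sketch, assuming PyTorch is installed (the function already requires torch): pick the three most uncertain points on a 4x4 grid for a batch of one map.

import torch

uncertainty = torch.rand(1, 1, 4, 4)
idx, coords = get_uncertain_point_coords_on_grid(uncertainty, num_points=3)
# idx.shape == (1, 3): indices into the flattened 4*4 grid
# coords.shape == (1, 3, 2): (x, y) locations normalised to [0, 1]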
| 13,475
|
def amex_credit_card(input_filename, month):
"""Format is just contents.
date, description, amount"""
test = _make_month_test(0, month)
def transform(xs):
return [xs[0], xs[1],
'-' + xs[2] if xs[2][0] != '-' else xs[2][1:]]
return _csv_transform(input_filename, test, transform,
None)
| 13,476
|
def loadf(file_like, *args, attributes=None, **kwargs):
"""Read a data file and load it -- scaled -- in memory.
This function differs from `read` in several ways:
* The output data type should be a floating point type.
* If an affine scaling (slope, intercept) is defined in the
file, it is applied to the data.
* the default output data type is `torch.get_default_dtype()`.
Parameters
----------
file_like : str or file object
Path to file or file object (with methods `seek`, `read`)
dtype : dtype_like, optional
Output data type. By default, use `torch.get_default_dtype()`.
Should be a floating point type.
device : torch.device, default='cpu'
Output device.
rand : bool, default=False
If the on-disk dtype is not floating point, sample noise
in the uncertainty interval.
cutoff : float or (float, float), default=(0, 1)
Percentile cutoff. If only one value is provided, it is
assumed to relate to the upper percentile.
dim : int or list[int], optional
Dimensions along which to compute percentiles.
By default, they are computed on the flattened array.
numpy : bool, default=False
Return a numpy array rather than a torch tensor.
attributes : list[str]
List of attributes to return as well.
See `MappedArray` for the possible attributes.
Returns
-------
dat : array or tensor
The array loaded in memory
attributes : dict, if attributes is not None
Dictionary of attributes loaded as well
"""
file = map(file_like, permission='r', keep_open=False)
dat = file.fdata(*args, **kwargs)
if attributes:
        attributes = {key: getattr(file, key) for key in attributes}
return dat, attributes
else:
return dat
| 13,477
|
def test_directive2_report_txt_md(checker):
"""The copy of .md file in fenced code block is the same as the file."""
check_first_block(
markdown_path="doc/directive2_report_txt.md",
contents_path="doc/directive2_report.txt",
checker_function=checker,
)
| 13,478
|
def download_if_not_exists(filename, url):
"""
Download a URL to a file if the file
does not exist already.
Returns
-------
True if the file was downloaded,
False if it already existed
"""
if not os.path.exists(filename):
down_load_file(filename, url)
return True
return False
| 13,479
|
def _backsubstitution(A: MatrixData, B: List[float]) -> List[float]:
""" Solve equation A . x = B for an upper triangular matrix A by backsubstitution.
Args:
A: row major matrix
B: vector of floats
"""
num = len(A)
x = [0.0] * num
for i in range(num - 1, -1, -1):
x[i] = B[i] / A[i][i]
for row in range(i - 1, -1, -1):
B[row] -= A[row][i] * x[i]
return x
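A worked example on a 2x2 upper-triangular system; B is passed as a copy because the routine overwrites it.

A = [[2.0, 1.0],
     [0.0, 3.0]]
B = [5.0, 6.0]
x = _backsubstitution(A, list(B))   # pass a copy, B is mutated internally
# x[1] = 6/3 = 2.0, then x[0] = (5 - 1*2)/2 = 1.5  ->  x == [1.5, 2.0]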
| 13,480
|
def deiterize(func):
    """The inverse of iterize.
    Takes an "iterized" (a.k.a. "vectorized") function (i.e. a function that
    works on iterables) and returns a function that works on a single item.
    That is, it turns a func(X, ...) function into a
    next(iter(func([X], ...))) function."""
    return Pipe(wrap_first_arg_in_list(func), iter, next)
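A self-contained concept sketch of the same idea without the Pipe helper; this is an illustration, not the library's implementation.

def _deiterize_sketch(func):
    # Wrap the single argument in a list, call the iterized function,
    # and return the first (and only) result.
    def single_item_version(x, *args, **kwargs):
        return next(iter(func([x], *args, **kwargs)))
    return single_item_version

double_all = lambda xs: [2 * x for x in xs]     # an "iterized" function
double_one = _deiterize_sketch(double_all)
assert double_one(21) == 42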
| 13,481
|
def ecg_data(rdb, day, patient, time):
    """ Returns DataFrame used to plot the ECG signal """
    sql = """SELECT * FROM ECG where "Day"='{0}' and "Patient"='{1}' and "Date"::time='{2}' """.format(day, patient, time)
    try:
        df = pd.read_sql(sql, rdb)
    except Exception:
        df = pd.DataFrame()
    return df
| 13,482
|
def evaluate(data_set_file_or_name, data_format=None, data_directory=None,
map_features=None, feature_selection=None, example_filter=None,
noisy_preprocessing_methods=None, preprocessing_methods=None,
split_data_set=None, splitting_method=None,
splitting_fraction=None,
model_type=None, latent_size=None, hidden_sizes=None,
number_of_importance_samples=None,
number_of_monte_carlo_samples=None,
inference_architecture=None, latent_distribution=None,
number_of_classes=None, parameterise_latent_posterior=False,
prior_probabilities_method=None,
generative_architecture=None, reconstruction_distribution=None,
number_of_reconstruction_classes=None, count_sum=None,
proportion_of_free_nats_for_y_kl_divergence=None,
minibatch_normalisation=None, batch_correction=None,
dropout_keep_probabilities=None,
number_of_warm_up_epochs=None, kl_weight=None,
minibatch_size=None, run_id=None, models_directory=None,
included_analyses=None, analysis_level=None,
decomposition_methods=None, highlight_feature_indices=None,
export_options=None, analyses_directory=None,
evaluation_set_kind=None, sample_size=None,
prediction_method=None, prediction_training_set_kind=None,
model_versions=None, **keyword_arguments):
"""Evaluate model on data set."""
if split_data_set is None:
split_data_set = defaults["data"]["split_data_set"]
if splitting_method is None:
splitting_method = defaults["data"]["splitting_method"]
if splitting_fraction is None:
splitting_fraction = defaults["data"]["splitting_fraction"]
if models_directory is None:
models_directory = defaults["models"]["directory"]
if evaluation_set_kind is None:
evaluation_set_kind = defaults["evaluation"]["data_set_name"]
if sample_size is None:
sample_size = defaults["models"]["sample_size"]
if prediction_method is None:
prediction_method = defaults["evaluation"]["prediction_method"]
if prediction_training_set_kind is None:
prediction_training_set_kind = defaults["evaluation"][
"prediction_training_set_kind"]
if model_versions is None:
model_versions = defaults["evaluation"]["model_versions"]
if analyses_directory is None:
analyses_directory = defaults["analyses"]["directory"]
evaluation_set_kind = normalise_string(evaluation_set_kind)
prediction_training_set_kind = normalise_string(
prediction_training_set_kind)
model_versions = parse_model_versions(model_versions)
print(title("Data"))
binarise_values = False
if reconstruction_distribution == "bernoulli":
if noisy_preprocessing_methods:
if noisy_preprocessing_methods[-1] != "binarise":
noisy_preprocessing_methods.append("binarise")
else:
binarise_values = True
data_set = DataSet(
data_set_file_or_name,
data_format=data_format,
directory=data_directory,
map_features=map_features,
feature_selection=feature_selection,
example_filter=example_filter,
preprocessing_methods=preprocessing_methods,
binarise_values=binarise_values,
noisy_preprocessing_methods=noisy_preprocessing_methods
)
if not split_data_set or evaluation_set_kind == "full":
data_set.load()
if split_data_set:
training_set, validation_set, test_set = data_set.split(
method=splitting_method, fraction=splitting_fraction)
data_subsets = [data_set, training_set, validation_set, test_set]
for data_subset in data_subsets:
clear_data_subset = True
if data_subset.kind == evaluation_set_kind:
evaluation_set = data_subset
clear_data_subset = False
if data_subset.kind == prediction_training_set_kind:
prediction_training_set = data_subset
clear_data_subset = False
if clear_data_subset:
data_subset.clear()
else:
splitting_method = None
splitting_fraction = None
evaluation_set = data_set
prediction_training_set = data_set
evaluation_subset_indices = indices_for_evaluation_subset(
evaluation_set)
models_directory = build_directory_path(
models_directory,
data_set=evaluation_set,
splitting_method=splitting_method,
splitting_fraction=splitting_fraction
)
analyses_directory = build_directory_path(
analyses_directory,
data_set=evaluation_set,
splitting_method=splitting_method,
splitting_fraction=splitting_fraction
)
print(title("Model"))
if number_of_classes is None:
if evaluation_set.has_labels:
number_of_classes = (
evaluation_set.number_of_classes
- evaluation_set.number_of_excluded_classes)
model = _setup_model(
data_set=evaluation_set,
model_type=model_type,
latent_size=latent_size,
hidden_sizes=hidden_sizes,
number_of_importance_samples=number_of_importance_samples,
number_of_monte_carlo_samples=number_of_monte_carlo_samples,
inference_architecture=inference_architecture,
latent_distribution=latent_distribution,
number_of_classes=number_of_classes,
parameterise_latent_posterior=parameterise_latent_posterior,
prior_probabilities_method=prior_probabilities_method,
generative_architecture=generative_architecture,
reconstruction_distribution=reconstruction_distribution,
number_of_reconstruction_classes=number_of_reconstruction_classes,
count_sum=count_sum,
proportion_of_free_nats_for_y_kl_divergence=(
proportion_of_free_nats_for_y_kl_divergence),
minibatch_normalisation=minibatch_normalisation,
batch_correction=batch_correction,
dropout_keep_probabilities=dropout_keep_probabilities,
number_of_warm_up_epochs=number_of_warm_up_epochs,
kl_weight=kl_weight,
models_directory=models_directory
)
if not model.has_been_trained(run_id=run_id):
raise Exception(
"Model not found. Either it has not been trained or "
"scVAE is looking in the wrong directory. "
"The model directory resulting from the model specification is: "
"\"{}\"".format(model.log_directory())
)
if ("best_model" in model_versions
and not better_model_exists(model, run_id=run_id)):
model_versions.remove("best_model")
if ("early_stopping" in model_versions
and not model_stopped_early(model, run_id=run_id)):
model_versions.remove("early_stopping")
print(subtitle("Analysis"))
analyses.analyse_model(
model=model,
run_id=run_id,
included_analyses=included_analyses,
analysis_level=analysis_level,
export_options=export_options,
analyses_directory=analyses_directory
)
print(title("Results"))
print("Evaluation set: {} set.".format(evaluation_set.kind))
print("Model version{}: {}.".format(
"" if len(model_versions) == 1 else "s",
enumerate_strings(
[v.replace("_", " ") for v in model_versions], conjunction="and")))
if prediction_method:
prediction_specifications = PredictionSpecifications(
method=prediction_method,
number_of_clusters=number_of_classes,
training_set_kind=prediction_training_set.kind
)
print("Prediction method: {}.".format(
prediction_specifications.method))
print("Number of clusters: {}.".format(
prediction_specifications.number_of_clusters))
print("Prediction training set: {} set.".format(
prediction_specifications.training_set_kind))
print()
for model_version in model_versions:
use_best_model = False
use_early_stopping_model = False
if model_version == "best_model":
use_best_model = True
elif model_version == "early_stopping":
use_early_stopping_model = True
print(subtitle(model_version.replace("_", " ").capitalize()))
print(heading("{} evaluation".format(
model_version.replace("_", "-").capitalize())))
(
transformed_evaluation_set,
reconstructed_evaluation_set,
latent_evaluation_sets
) = model.evaluate(
evaluation_set=evaluation_set,
evaluation_subset_indices=evaluation_subset_indices,
minibatch_size=minibatch_size,
run_id=run_id,
use_best_model=use_best_model,
use_early_stopping_model=use_early_stopping_model,
output_versions="all"
)
print()
if sample_size:
print(heading("{} sampling".format(
model_version.replace("_", "-").capitalize())))
sample_reconstruction_set, __ = model.sample(
sample_size=sample_size,
minibatch_size=minibatch_size,
run_id=run_id,
use_best_model=use_best_model,
use_early_stopping_model=use_early_stopping_model
)
print()
else:
sample_reconstruction_set = None
if prediction_method:
print(heading("{} prediction".format(
model_version.replace("_", "-").capitalize())))
latent_prediction_training_sets = model.evaluate(
evaluation_set=prediction_training_set,
minibatch_size=minibatch_size,
run_id=run_id,
use_best_model=use_best_model,
use_early_stopping_model=use_early_stopping_model,
output_versions="latent",
log_results=False
)
print()
cluster_ids, predicted_labels, predicted_superset_labels = (
predict_labels(
training_set=latent_prediction_training_sets["z"],
evaluation_set=latent_evaluation_sets["z"],
specifications=prediction_specifications
)
)
evaluation_set_versions = [
transformed_evaluation_set, reconstructed_evaluation_set
] + list(latent_evaluation_sets.values())
for evaluation_set_version in evaluation_set_versions:
evaluation_set_version.update_predictions(
prediction_specifications=prediction_specifications,
predicted_cluster_ids=cluster_ids,
predicted_labels=predicted_labels,
predicted_superset_labels=predicted_superset_labels
)
print()
print(heading("{} analysis".format(
model_version.replace("_", "-").capitalize())))
analyses.analyse_results(
evaluation_set=transformed_evaluation_set,
reconstructed_evaluation_set=reconstructed_evaluation_set,
latent_evaluation_sets=latent_evaluation_sets,
model=model,
run_id=run_id,
sample_reconstruction_set=sample_reconstruction_set,
decomposition_methods=decomposition_methods,
evaluation_subset_indices=evaluation_subset_indices,
highlight_feature_indices=highlight_feature_indices,
best_model=use_best_model,
early_stopping=use_early_stopping_model,
included_analyses=included_analyses,
analysis_level=analysis_level,
export_options=export_options,
analyses_directory=analyses_directory
)
return 0
| 13,483
|
def subscriptions(db_path, auth):
"""Download feedly goals for the authenticated user"""
db = sqlite_utils.Database(db_path)
try:
data = json.load(open(auth))
token = data["developer_token"]
except (KeyError, FileNotFoundError):
utils.error(
"Cannot find authentication data, please run `feedly_to_sqlite auth`!"
)
click.echo("Downloading subscriptions")
r = requests.get(
FEEDLY_API_URL + "/v3/collections",
headers={"Authorization": "Bearer {}".format(token)},
)
r.raise_for_status()
collections = r.json()
for coll in collections:
feeds = coll["feeds"]
coll_id = coll["id"]
coll_data = {k: coll.get(k) for k in COLLECTION_KEYS}
db["collections"].upsert(coll_data, pk="id")
for f in feeds:
feed_data = {k: f.get(k) for k in FEED_KEYS}
db["collections"].update(coll_id).m2m(db.table("feeds", pk="id"), feed_data)
| 13,484
|
def deploy_new_company(company_id):
"""
Deploy new company contract
:param company_id: Company off chain id for deploy
:return: True in case of successful, false otherwise
"""
try:
instance = Company.objects.get(pk=company_id)
except Company.DoesNotExist:
        logger.error('Company with id {} not found, contract will not be deployed.'.format(company_id))
return False
else:
oracle = OracleHandler()
w3 = utils.get_w3()
contract_file = 'dapp/contracts/Company.sol'
compile_sol = compile_files([contract_file, ],
output_values=("abi", "ast", "bin", "bin-runtime",))
create_abi(compile_sol[contract_file + ':Company']['abi'], 'Company')
obj = w3.eth.contract(
abi=compile_sol[contract_file + ':Company']['abi'],
bytecode=compile_sol[contract_file + ':Company']['bin'],
bytecode_runtime=compile_sol[contract_file + ':Company']['bin-runtime'],
)
args = [settings.VERA_COIN_CONTRACT_ADDRESS, settings.VERA_ORACLE_CONTRACT_ADDRESS, ]
logger.info('Try to unlock account: {}.'.format(oracle.unlockAccount()))
try:
txn_hash = obj.deploy(transaction={'from': oracle.account}, args=args)
except Exception as e:
logger.warning('Error while deploy new company contract. Company {}: {}'.format(company_id, e))
else:
logger.info('Lock account: {}'.format(oracle.lockAccount()))
save_txn.delay(txn_hash.hex(), 'NewCompany', instance.created_by.id, company_id)
save_txn_to_history.delay(instance.created_by.id, txn_hash.hex(),
'Creation of a new Company contract')
| 13,485
|
def MEMB(G,rb,cycle=0):
"""
It returns a dictionary with {box_id:subgraph_generated_by_the_nodes_in_this_box}
The box_id is the center of the box.
cycle: Ignore this parameter. Use the default cycle=0.
"""
adj = G.adj
number_of_nodes = G.number_of_nodes()
covered_nodes = set()
center_nodes = set()
    non_center_nodes = list(G.nodes())
center_node_found = 0
boxes={} #this will be "box_id:[nodes in box]"
central_distance_of_node = {} #"node:central_distance"
node_box_id = {} #"node:box_id"
nodes_sorted_by_central_distance={} #Dict with {central_distance:[nodes]}
excluded_mass_of_non_centers_rb = {} #This contains [(node:excluded_mass)] for rb
excluded_mass_of_non_centers_rb2 = {} #This contains [(node:excluded_mass)] for rb+1
rb2 = rb + 1
for node in non_center_nodes:
#if node in [5000,10000,20000,30000]: print "node", node
level=0 # the current level
nextlevel={node:1} # list of nodes to check at next level
paths_rb=None
paths_rb2={node:[node]} # paths dictionary (paths to key from source)
while nextlevel:
paths_rb = deepcopy(paths_rb2)
thislevel=nextlevel
nextlevel={}
for v in thislevel:
for w in G.neighbors(v):
                    if w not in paths_rb2:
paths_rb2[w]=paths_rb2[v]+[w]
nextlevel[w]=1
level=level+1
if (rb2 <= level): break
excluded_mass_of_node = len(paths_rb2)
try:
excluded_mass_of_non_centers_rb2[excluded_mass_of_node].append(node)
except KeyError:
excluded_mass_of_non_centers_rb2[excluded_mass_of_node] = [node]
excluded_mass_of_node = len(paths_rb)
try:
excluded_mass_of_non_centers_rb[excluded_mass_of_node].append(node)
except KeyError:
excluded_mass_of_non_centers_rb[excluded_mass_of_node] = [node]
maximum_excluded_mass = 0
nodes_with_maximum_excluded_mass=[]
new_covered_nodes = {}
center_node_and_mass = []
cycle_index = 0
while len(covered_nodes) < number_of_nodes:
#print len(covered_nodes),number_of_nodes
cycle_index += 1
if cycle_index == cycle:
rb2 = rb+1
cycle_index = 0
else:
rb2 = rb
while 1:
if rb2 == rb+1:
#t1=time.time()
while 1:
maximum_key = max(excluded_mass_of_non_centers_rb2.keys())
node = random.choice(excluded_mass_of_non_centers_rb2[maximum_key])
if node in center_nodes:
excluded_mass_of_non_centers_rb2[maximum_key].remove(node)
if not excluded_mass_of_non_centers_rb2[maximum_key]: del excluded_mass_of_non_centers_rb2[maximum_key]
else:
break
nodes_visited = {}
bfs = single_source_shortest_path(G,node,cutoff=rb2)
for i in bfs:
nodes_visited[i] = len(bfs[i])-1
excluded_mass_of_node = len(set(nodes_visited.keys()).difference(covered_nodes))
if excluded_mass_of_node == maximum_key:
center_node_and_mass = (node,maximum_key)
excluded_mass_of_non_centers_rb2[maximum_key].remove(node)
if not excluded_mass_of_non_centers_rb2[maximum_key]: del excluded_mass_of_non_centers_rb2[maximum_key]
new_covered_nodes = nodes_visited
break
else:
excluded_mass_of_non_centers_rb2[maximum_key].remove(node)
if not excluded_mass_of_non_centers_rb2[maximum_key]: del excluded_mass_of_non_centers_rb2[maximum_key]
try:
excluded_mass_of_non_centers_rb2[excluded_mass_of_node].append(node)
except KeyError:
excluded_mass_of_non_centers_rb2[excluded_mass_of_node] = [node]
#print "time", time.time()-t1
else:
#t1=time.time()
while 1:
maximum_key = max(excluded_mass_of_non_centers_rb.keys())
node = random.choice(excluded_mass_of_non_centers_rb[maximum_key])
if node in center_nodes:
excluded_mass_of_non_centers_rb[maximum_key].remove(node)
if not excluded_mass_of_non_centers_rb[maximum_key]: del excluded_mass_of_non_centers_rb[maximum_key]
else:
break
nodes_visited = {}
bfs = single_source_shortest_path(G,node,cutoff=rb)
for i in bfs:
nodes_visited[i] = len(bfs[i])-1
excluded_mass_of_node = len(set(nodes_visited.keys()).difference(covered_nodes))
if excluded_mass_of_node == maximum_key:
center_node_and_mass = (node,maximum_key)
excluded_mass_of_non_centers_rb[maximum_key].remove(node)
if not excluded_mass_of_non_centers_rb[maximum_key]: del excluded_mass_of_non_centers_rb[maximum_key]
new_covered_nodes = nodes_visited
break
else:
excluded_mass_of_non_centers_rb[maximum_key].remove(node)
if not excluded_mass_of_non_centers_rb[maximum_key]: del excluded_mass_of_non_centers_rb[maximum_key]
try:
excluded_mass_of_non_centers_rb[excluded_mass_of_node].append(node)
except KeyError:
excluded_mass_of_non_centers_rb[excluded_mass_of_node] = [node]
#print "time", time.time()-t1
center_node_found = center_node_and_mass[0]
boxes[center_node_found] = [center_node_found]
node_box_id[center_node_found] = center_node_found
non_center_nodes.remove(center_node_found)
center_nodes.add(center_node_found)
covered_nodes = covered_nodes.union(set(new_covered_nodes.keys()))
#print len(covered_nodes)
for i in new_covered_nodes:
try:
if central_distance_of_node[i] > new_covered_nodes[i]:
nodes_sorted_by_central_distance[central_distance_of_node[i]].remove(i)
if not nodes_sorted_by_central_distance[central_distance_of_node[i]]:
del nodes_sorted_by_central_distance[central_distance_of_node[i]]
try:
nodes_sorted_by_central_distance[new_covered_nodes[i]].append(i)
except KeyError:
nodes_sorted_by_central_distance[new_covered_nodes[i]] = [i]
central_distance_of_node[i] = new_covered_nodes[i]
except KeyError:
central_distance_of_node[i] = new_covered_nodes[i]
try:
nodes_sorted_by_central_distance[new_covered_nodes[i]].append(i)
except:
nodes_sorted_by_central_distance[new_covered_nodes[i]] = [i]
max_distance = max(nodes_sorted_by_central_distance.keys())
for i in range(1,max_distance+1):
for j in nodes_sorted_by_central_distance[i]:
            targets = list(set(adj[j].keys()).intersection(set(nodes_sorted_by_central_distance[i-1])))
node_box_id[j] = node_box_id[random.choice(targets)]
boxes[node_box_id[j]].append(j)
boxes_subgraphs={}
for i in boxes:
boxes_subgraphs[i] = subgraph(G,boxes[i])
return boxes_subgraphs
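A hedged usage sketch, assuming networkx is installed and the module-level helpers this snippet relies on (deepcopy, random, single_source_shortest_path, subgraph) have been imported.

import networkx as nx

G = nx.path_graph(8)
boxes = MEMB(G, rb=1)
# boxes maps each chosen box centre to the subgraph induced by the nodes
# assigned to that box; together the boxes cover every node of G.
print({centre: sorted(sub.nodes()) for centre, sub in boxes.items()})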
| 13,486
|
def test_stationconfiguration_not_equal_to_other_objects():
"""
Verify that StationConfiguration objects are not considered equal to objects
of other types.
"""
config = StationConfiguration(1)
assert config is not None
assert config != 1
assert config != object()
| 13,487
|
def set_app_path(required=False):
"""Find app directory and set value to environment variable."""
import os
from pathlib import Path
from getpass import getuser
matched_path = None
config_paths = (Path.home() / "hyperglass-agent", Path("/etc/hyperglass-agent/"))
for path in config_paths:
try:
if path.exists():
tmp = path / "test.tmp"
tmp.touch()
if tmp.exists():
matched_path = path
tmp.unlink()
break
except Exception:
matched_path = None
if required and matched_path is None:
# Only raise an error if required is True
raise RuntimeError(
"""
No configuration directories were determined to both exist and be readable
by hyperglass. hyperglass is running as user '{un}' (UID '{uid}'), and tried
to access the following directories:
{dir}""".format(
un=getuser(),
uid=os.getuid(),
dir="\n".join([" - " + str(p) for p in config_paths]),
)
)
if matched_path is not None:
os.environ["hyperglass_agent_directory"] = str(matched_path)
return True
| 13,488
|
def compfile(file_path, name="", list_files=None):
"""
    Compare files to ensure that the same file is not given multiple times or
    in different ways (e.g. a different name but the same content).
"""
__string(file_path, "%s path" % name, True)
if list_files is None:
list_files = []
elif not list_files:
__ex("File list is empty (no files to compare with).", True,
ValueError)
else:
for item in list_files:
if not isinstance(item, list):
__ex("Every list item must be a sub-list.", True,
ValueError)
if not len(item) == 2:
__ex("Every sub-list must contain two items.", True,
ValueError)
file_path = os.path.abspath(file_path)
for item in list_files:
path_compare = os.path.abspath(str(item[0]))
name_compare = str(item[1])
if file_path == path_compare:
__ex("The %s and the %s file path must not be identical." %
(name, name_compare), False, ValueError)
if os.path.exists(file_path) and os.path.exists(path_compare):
if filecmp.cmp(file_path, path_compare, 0):
__ex("The %s and %s file content must not be identical." %
(name, name_compare), False, ValueError)
| 13,489
|
def shownames(namespace, **args):
"""helper method to generate a template keyword for a namespace"""
ctx = args['ctx']
repo = ctx.repo()
ns = repo.names[namespace]
names = ns.names(repo, ctx.node())
return showlist(ns.templatename, names, plural=namespace, **args)
| 13,490
|
def only_half_radius(
subsampled_radius: float, full_diameter: float, radius_constraint: float
):
"""
Check if radius is smaller than fraction of full radius.
"""
assert 0.0 <= radius_constraint <= 1.0
return subsampled_radius <= ((full_diameter / 2) * radius_constraint)
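Two tiny worked checks:

assert only_half_radius(2.0, 10.0, 0.5)        # 2.0 <= (10.0 / 2) * 0.5 = 2.5
assert not only_half_radius(3.0, 10.0, 0.5)    # 3.0 >  2.5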
| 13,491
|
def test_brain_add_foci(renderer):
"""Test adding foci in _Brain instance."""
brain = _Brain(subject_id, hemi='lh', size=500,
surf=surf, subjects_dir=subjects_dir)
brain.add_foci([0], coords_as_verts=True,
hemi='lh', color='blue')
brain.close()
| 13,492
|
def complete_from_man(context: CommandContext):
"""
Completes an option name, based on the contents of the associated man
page.
"""
if context.arg_index == 0 or not context.prefix.startswith("-"):
return
cmd = context.args[0].value
def completions():
for desc, opts in _parse_man_page_options(cmd).items():
yield RichCompletion(
value=opts[-1], display=", ".join(opts), description=desc
)
return completions(), False
| 13,493
|
def sanitise_description(original: str) -> str:
"""
Remove newlines from ticket descriptions.
:param original: the string to sanitise
:return: the same string, with newlines as spaces
"""
return original.replace("\n", " ")
| 13,494
|
def dnsdomain_get_all(context):
"""Get a list of all dnsdomains in our database."""
return IMPL.dnsdomain_get_all(context)
| 13,495
|
def mockselect(r, w, x, timeout=0): # pylint: disable=W0613
"""Simple mock for select()
"""
readable = [s for s in r if s.ready_for_read]
return readable, w[:], []
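A quick usage sketch; any object exposing a ready_for_read attribute can stand in for a socket.

from collections import namedtuple

FakeSock = namedtuple('FakeSock', 'ready_for_read')
r = [FakeSock(True), FakeSock(False)]
w = [FakeSock(True)]
readable, writable, exceptional = mockselect(r, w, [])
# readable == [FakeSock(True)]; writable is a shallow copy of w; exceptional == []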
| 13,496
|
def load_imgs(path):
    """Given a path load image(s).
    The input path can either be (i) a directory in which case all the JPG-images will be loaded into a dictionary,
    or (ii) an image file.
    Returns:
        imgs: A dictionary of n-dim images. Keys are the original filenames
    """
    ## Get filenames
    filenames = []
    if os.path.isdir(path):
        print('Images in {} will be loaded'.format(path))
        for file in os.listdir(path):
            if file.endswith(".jpg"):
                filenames.append(os.path.basename(file))
        imagepath = path
    else:
        filenames.append(os.path.basename(path))
        imagepath = os.path.dirname(path)
    print('{} images found in {}'.format(len(filenames), path))
    ## Load images
    imgs = dict()
    for file in filenames:
        #print('\nImage: {}'.format(file))
        imgs[file] = imread(os.path.join(imagepath, file))
    return imgs
| 13,497
|
async def cryptoQuotesSSEAsync(symbols=None, token="", version=""):
"""This returns the quote for a specified cryptocurrency. Quotes are available via REST and SSE Streaming.
https://iexcloud.io/docs/api/#cryptocurrency-quote
Args:
symbols (str): Tickers to request
token (str): Access token
version (str): API version
"""
async for item in _runSSEAsync("cryptoQuotes", symbols, token, version):
yield item
| 13,498
|
def solve_5c2c9af4(x):
"""
    Required Transformation: The input contains 3 cells with non-zero values. The non-zero valued cells are diagonally
    positioned with some gap between them. The program should identify the colour and position of each such cell in the
    grid and form a square box around the central non-zero valued cell. Each square box should have the same width as
    the previous one.
    Implementation: The solution is to identify the coloured cells in the grid and form square boxes around the central
    (non-zero valued) cell. The width should be the same between consecutive square boxes, where width is measured as
    the difference in rows or columns between two consecutive non-zero valued cells.
    The non-zero valued cells can be arranged in 2 forms:
    1. Up Slope
    2. Down Slope
    In the case of an Up Slope, once the first non-zero valued cell is identified, the pattern to fill the cells is:
    RIGHT, DOWN, LEFT, UP
    Whereas in the case of a Down Slope, once the first non-zero valued cell is identified, the pattern to fill the
    cells is:
    DOWN, LEFT, UP, RIGHT
    After one full rotation, the row & column are recalculated based on the width. This process is repeated until the
    row & column go out of the grid.
    Training & Test Grid: The solution works on all Training & Test cases.
"""
non_zero_indexes = np.nonzero(x)
non_zero_row_array = non_zero_indexes[0]
non_zero_col_array = non_zero_indexes[1]
# Difference between the columns of first & second non-zero valued cell
width = non_zero_col_array[0] - non_zero_col_array[1]
row, col = non_zero_row_array[0], non_zero_col_array[0]
# Centered non-zero Valued cell. This cell will become the reference point for all the squared boxes in the grid
midpoint_loc = (non_zero_row_array[1], non_zero_col_array[1])
value = x[non_zero_row_array[1], non_zero_col_array[1]]
# Assign the initial width to Original Width because the width values increases as the size of the square increase.
original_width = width
while True:
if width > 0:
# Up Slope: down, left, up, right
row, col = travel_down(x, row, col, midpoint_loc[0], abs(width), value)
row, col = travel_left(x, row, col, midpoint_loc[1], abs(width), value)
row, col = travel_up(x, row, col, midpoint_loc[0], abs(width), value)
row, col = travel_right(x, row, col, midpoint_loc[1], abs(width), value)
# Recalculate the rows & column based on the original width. Because each square should have same width
row, col = row - abs(original_width), col + abs(original_width)
else:
# Down Slope: right, down, left, up
row, col = travel_right(x, row, col, midpoint_loc[1], abs(width), value)
row, col = travel_down(x, row, col, midpoint_loc[0], abs(width), value)
row, col = travel_left(x, row, col, midpoint_loc[1], abs(width), value)
row, col = travel_up(x, row, col, midpoint_loc[0], abs(width), value)
# Recalculate the rows & column based on the original width. Because each square should have same width
row, col = row - abs(original_width), col - abs(original_width)
width = width + original_width
# If the rows or columns exceed beyond the grid size terminate the loop.
if (row < -1 and col < -1) or (row < -1 and col > x[0].shape[0]):
break
return x
| 13,499
|