| content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
import os
def join_paths(path, *paths):
"""
"""
return os.path.join(path, *paths)
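# A minimal usage sketch: components are joined with the platform's path
# separator, e.g. 'data/raw/file.csv' on POSIX.
print(join_paths("data", "raw", "file.csv"))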
|
fdd069ba4414831a201192d096cdb7723037d3dc
| 3,644,100
|
def determine_configure_options(module):
"""
Determine configure arguments for this system.
    Automatically derive configure options from this system and the requested
    build options when explicit configure options are not specified.
"""
options = module.params['configure_options']
build_userspace = module.params['build_userspace']
build_module = module.params['build_module']
build_terminal_programs = module.params['build_terminal_programs']
build_bindings = module.params['build_bindings']
build_fuse_client = module.params['build_fuse_client']
with_transarc_paths = module.params['with_transarc_paths']
with_debug_symbols = module.params['with_debug_symbols']
with_rxgk = module.params['with_rxgk']
if options is None:
options = {'enable': [], 'disable': [], 'with': [], 'without': []}
    if not build_userspace and not build_module:
        module.fail_json(msg="build_userspace and build_module are false.")
if build_module:
options['enable'].append('kernel-module')
if is_linux():
options['with'].append('linux-kernel-packaging')
else:
options['disable'].append('kernel-module')
if not build_terminal_programs:
options['disable'].append('gtx')
if not build_bindings:
options['without'].append('swig')
if not build_fuse_client:
options['disable'].append('fuse-client')
if with_debug_symbols:
options['enable'].append('debug')
options['disable'].extend(['optimize', 'strip-binaries'])
if build_module:
options['enable'].append('debug-kernel')
options['disable'].append('optimize-kernel')
if with_transarc_paths:
options['enable'].append('transarc-paths')
if with_rxgk:
options['enable'].append('rxgk')
return options
|
02026dfa737edd8a3c7b60e5e34f48dc7a958858
| 3,644,101
|
def getElementTypeToolTip(t):
"""Wrapper to prevent loading qtgui when this module is imported"""
if t == PoolControllerView.ControllerModule:
return "Controller module"
elif t == PoolControllerView.ControllerClass:
return "Controller class"
|
6862b10bc940daec1c13ef97fafbf525c2683e9e
| 3,644,102
|
def parse_dates(array):
"""Parse the valid dates in an array of strings.
"""
parsed_dates = []
for elem in array:
elem = parse_date(elem)
if elem is not None:
parsed_dates.append(elem)
return parsed_dates
|
1ec89f084cdd68709a37ea05356ceeb1a21f98bd
| 3,644,103
|
def app_factory(global_config, **local_config):
"""
    Define an app factory method so that the concrete app is bound at runtime
    rather than in the configuration file.
:param global_config:
:param local_config:
:return:
"""
return MyApp()
|
c4c29963f88253c272319bc2369d4801df284fbf
| 3,644,104
|
import datetime as dt
import pytz
def str_to_datetime(dt_str):
""" Converts a string to a UTC datetime object.
@rtype: datetime
"""
try:
return dt.datetime.strptime(
dt_str, DATE_STR_FORMAT).replace(tzinfo=pytz.utc)
except ValueError: # If dt_str did not match our format
return None
|
a9ac073c11b13dca011cca46860080cdc638dcbe
| 3,644,105
|
def quantize(img):
"""Quantize the output of model.
:param img: the input image
:type img: ndarray
:return: the image after quantize
:rtype: ndarray
"""
pixel_range = 255
return img.mul(pixel_range).clamp(0, 255).round().div(pixel_range)
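# A minimal usage sketch (assumes PyTorch): snap values in [0, 1] to the
# nearest 1/255 step while keeping the [0, 1] value range.
import torch
out = torch.tensor([[0.1234, 0.9999], [0.5001, 0.0001]])
print(quantize(out))  # each entry becomes a multiple of 1/255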
|
49abd32d8b2cf54c955e16765602bbff77a2a1b9
| 3,644,106
|
def is_normalized(M, x, eps):
"""Return True if (a Fuchsian) matrix M is normalized, that
is all the eigenvalues of it's residues in x lie in [-1/2, 1/2)
range (in limit eps->0). Return False otherwise.
Examples:
>>> x, e = var("x epsilon")
>>> is_normalized(matrix([[(1+e)/3/x, 0], [0, e/x]]), x, e)
True
"""
points = singularities(M, x)
for x0, p in points.items():
M0 = matrix_residue(M, x, x0)
for ev in M0.eigenvalues():
ev = limit_fixed(ev, eps, 0)
if not (Rational((-1, 2)) <= ev and ev < Rational((1, 2))):
return False
return True
|
01715cd58cad25a805ffd260b78641701483ad86
| 3,644,107
|
def _get_dashboard_link(course_key):
""" Construct a URL to the external analytics dashboard """
analytics_dashboard_url = f'{settings.ANALYTICS_DASHBOARD_URL}/courses/{str(course_key)}'
link = HTML("<a href=\"{0}\" rel=\"noopener\" target=\"_blank\">{1}</a>").format(
analytics_dashboard_url, settings.ANALYTICS_DASHBOARD_NAME
)
return link
|
fa9fb656ff4e7cf70c3512755351a46302cec71b
| 3,644,108
|
def figure1_control(data1, cols):
""" Creates a data set to plot figure 1, Panel B, D, F.
Args:
- data1 (pd.DataFrame): the original data set
- cols (list): a list of column names ["agus", "bct", "bcg"]
Returns:
- df_fig1_contr (pd.DataFrame): a data set for plotting panels with controls
"""
data1["uazY"] = data1["uazY"].astype("category")
for column in cols:
data_df = data1.loc[(data1["dzagr01"] != 0) & (abs(data1["dzagr01"]) < 0.2), [column, "uazY"]].dropna()
data_df["constant"] = [1] * len(data_df.index)
y,X = patsy.dmatrices("{}~constant".format(column), data = data_df, return_type='dataframe')
ybar = y.mean()
y = y - y.groupby(data_df["uazY"]).transform('mean') + ybar
Xbar = X.mean()
X = X - X.groupby(data_df["uazY"]).transform('mean') + Xbar
reg = smp.OLS(y,X).fit()
y_hat = reg.predict()
y_hat.shape = (len(y_hat), 1)
residual = y - y_hat
data1["{}_res".format(column)] = residual
    df_fig1_contr = data1.groupby("dzagr01")[["{}_res".format(cols[0]),
                                              "{}_res".format(cols[1]),
                                              "{}_res".format(cols[2])]].mean()
df_fig1_contr.reset_index(level = 0, inplace = True)
for column in cols:
fig1_B1 = sm.ols(formula = "{}_res ~ dzagr01".format(column),
data = df_fig1_contr[(df_fig1_contr["dzagr01"] < 0) & (abs(df_fig1_contr["dzagr01"]) < 0.2)]).fit()
fig1_B2 = sm.ols(formula = "{}_res ~ dzagr01".format(column),
data = df_fig1_contr[(df_fig1_contr["dzagr01"] > 0) & (abs(df_fig1_contr["dzagr01"]) < 0.2)]).fit()
pred_B1 = fig1_B1.predict()
pred_B2 = fig1_B2.predict()
df_fig1_contr.loc[(df_fig1_contr["dzagr01"] < 0) & (abs(df_fig1_contr["dzagr01"]) < 0.2),
"pred_{}1".format(column)] = pred_B1
df_fig1_contr.loc[(df_fig1_contr["dzagr01"] > 0) & (abs(df_fig1_contr["dzagr01"]) < 0.2),
"pred_{}2".format(column)] = pred_B2
return df_fig1_contr
|
5eef05c567159a623fdaaafa5a5707c48c7fe7fa
| 3,644,109
|
import ctypes
def GetEffectiveRightsFromAclW(acl, sid):
"""
Takes a SID instead of a trustee!
"""
_GetEffectiveRightsFromAclW = windll.advapi32.GetEffectiveRightsFromAclW
_GetEffectiveRightsFromAclW.argtypes = [PVOID, PTRUSTEE_W, PDWORD] #[HANDLE, SE_OBJECT_TYPE, DWORD, PSID, PSID, PACL, PACL, PSECURITY_DESCRIPTOR]
_GetEffectiveRightsFromAclW.restype = RaiseIfNotErrorSuccess
sid_data = sid.to_bytes()
psid = ctypes.create_string_buffer(sid_data, len(sid_data))
trustee = TRUSTEE_W()
trustee.pMultipleTrustee = 0
trustee.MultipleTrusteeOperation = 0
trustee.TrusteeForm = 0
trustee.TrusteeType = 0
trustee.ptstrName = ctypes.c_void_p(ctypes.addressof(psid))
    effective_rights_mask = DWORD(0)
    acl_data = acl.to_bytes()
    pacl = ctypes.create_string_buffer(acl_data, len(acl_data))
    _GetEffectiveRightsFromAclW(pacl, trustee, byref(effective_rights_mask))
    return effective_rights_mask.value
|
3edb0080a98a7d9d0d040914435c76cd20f30e0a
| 3,644,110
|
def store(mnemonic, opcode):
""" Create a store instruction """
ra = Operand("ra", Or1kRegister, read=True)
rb = Operand("rb", Or1kRegister, read=True)
imm = Operand("imm", int)
syntax = Syntax(["l", ".", mnemonic, " ", imm, "(", ra, ")", ",", " ", rb])
patterns = {"opcode": opcode, "ra": ra, "rb": rb, "imm": imm}
members = {
"ra": ra,
"rb": rb,
"imm": imm,
"syntax": syntax,
"patterns": patterns,
"tokens": [Orbis32StoreToken],
}
class_name = mnemonic.title()
return type(class_name, (Orbis32Instruction,), members)
|
c9d1d7376b5c73eed87b5c3a7438cc54ecab9ad2
| 3,644,111
|
import ctypes
def hlmlDeviceGetPowerUsage(device: hlml_t.HLML_DEVICE.TYPE) -> int:
""" Retrieves power usage for the device in mW
Parameters:
device (HLML_DEVICE.TYPE) - The handle for a habana device.
Returns:
power (int) - The given device's power usage in mW.
"""
global _hlmlOBJ
power = ctypes.c_uint()
fn = _hlmlOBJ.get_func_ptr("hlml_device_get_power_usage")
ret = fn(device, ctypes.byref(power))
check_return(ret)
return power.value
|
ed2d64be06a8e319221b2c3e2017f07a6c16a028
| 3,644,112
|
import pandas as pd
def usgs_coef_parse(**kwargs):
"""
Combine, parse, and format the provided dataframes
:param kwargs: potential arguments include:
dataframe_list: list of dataframes to concat and format
args: dictionary, used to run flowbyactivity.py ('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity specifications
"""
# load arguments necessary for function
args = kwargs['args']
# Read directly into a pandas df
df_raw = pd.read_csv(externaldatapath + "USGS_WU_Coef_Raw.csv")
# rename columns to match flowbyactivity format
df = df_raw.copy()
df = df.rename(columns={"Animal Type": "ActivityConsumedBy",
"WUC_Median": "FlowAmount",
"WUC_Minimum": "Min",
"WUC_Maximum": "Max"
})
# drop columns
df = df.drop(columns=["WUC_25th_Percentile", "WUC_75th_Percentile"])
# hardcode data
df["Class"] = "Water"
df["SourceName"] = "USGS_WU_Coef"
df["Location"] = US_FIPS
df['Year'] = args['year']
df = assign_fips_location_system(df, '2005')
df["Unit"] = "gallons/animal/day"
df['DataReliability'] = 5 # tmp
df['DataCollection'] = 5 # tmp
return df
|
9cfa29cc5390717fd4a36360dcdb373614ae7345
| 3,644,113
|
import numpy as np
def success_poly_overlap(gt_poly, res_poly, n_frame):
    """
    :param gt_poly: ground-truth polygons [Nx8]
    :param res_poly: result polygons [Nx8]
    :param n_frame: number of frames
    :return: success rate at each overlap threshold in [0, 1]
    """
thresholds_overlap = np.arange(0, 1.05, 0.05)
success = np.zeros(len(thresholds_overlap))
iou_list = []
for i in range(gt_poly.shape[0]):
iou = poly_overlap_ratio(gt_poly[i], res_poly[i])
iou_list.append(iou)
iou_np = np.array(iou_list)
for i in range(len(thresholds_overlap)):
success[i] = np.sum(iou_np > thresholds_overlap[i]) / float(n_frame)
return success
|
3de9e308fd8a29fb7e7ed4a7132ce5157b5794eb
| 3,644,114
|
import io
from PIL import Image
def my_get_size_png(gg, height, width, dpi, limitsize):
"""
Get actual size of ggplot image saved (with bbox_inches="tight")
"""
buf = io.BytesIO()
gg.save(buf, format= "png", height = height, width = width,
dpi=dpi, units = "in", limitsize = limitsize,verbose=False,
bbox_inches="tight")
buf.seek(0)
img = Image.open(buf)
width, height = img.size
return width / dpi, height / dpi
|
fe6417f35480048b70f25bfab97978515fd7d7d1
| 3,644,115
|
def getRnnGenerator(vocab_size,hidden_dim,input_dim=512):
"""
"Apply" the RNN to the input x
For initializing the network, the vocab size needs to be known
Default of the hidden layer is set tot 512 like Karpathy
"""
generator = SequenceGenerator(
Readout(readout_dim = vocab_size,
source_names = ["states"], # transition.apply.states ???
emitter = SoftmaxEmitter(name="emitter"),
feedback_brick = LookupFeedback(
vocab_size,
input_dim,
name = 'feedback'
),
name = "readout"
),
MySimpleRecurrent(
name = "transition",
activation = Tanh(),
dim = hidden_dim
),
weights_init = IsotropicGaussian(0.01),
biases_init = Constant(0),
name = "generator"
)
generator.push_initialization_config()
generator.transition.weights_init = IsotropicGaussian(0.01)
generator.initialize()
return generator
|
b1c033da42a0079e8c539fd908b715b8e6cb076f
| 3,644,116
|
def first_true(iterable, default=False, pred=None):
"""Returns the first true value in the iterable.
If no true value is found, returns *default*
If *pred* is not None, returns the first item
for which pred(item) is true.
"""
# first_true([a,b,c], x) --> a or b or c or x
# first_true([a,b], x, f) --> a if f(a) else b if f(b) else x
return next(filter(pred, iterable), default)
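# Usage examples: without a predicate the first truthy element is returned;
# with a predicate, the first element satisfying it; otherwise the default.
assert first_true([0, '', 'x', 'y']) == 'x'
assert first_true([1, 2, 4, 6], pred=lambda n: n % 4 == 0) == 4
assert first_true([], default='none') == 'none'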
|
66c6b3e282cfdf60819d5df2d48cdea31484a4f1
| 3,644,117
|
import subprocess
def get_length(filename):
"""
    Get the length of a specific file with ffprobe from the ffmpeg library
:param filename: this param is used for the file
:type filename: str
:return: length of the given video file
:rtype: float
"""
    # use ffprobe because it is faster than alternatives (for example moviepy)
result = subprocess.run([
"ffprobe", "-v", "error", "-show_entries", "format=duration", "-of",
"default=noprint_wrappers=1:nokey=1", filename
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
return float(result.stdout)
|
335e220c14612ea5a5d0a330043b75e4d4d1a050
| 3,644,118
|
import os
def is_File(path):
"""Takes the path of the folder as argument
Returns is the path is a of a Folder or not in bool"""
if os.path.isfile(path):
return True
else:
return False
|
63ec104ab50c8644856d980bedf04b101f2730e1
| 3,644,119
|
def get_device_serial_no(instanceId, gwMgmtIp, fwApiKey):
"""
Retrieve the serial number from the FW.
    @param instanceId: The instance ID of the FW
    @type: ```str```
    @param gwMgmtIp: The IP address of the FW
    @type: ```str```
    @param fwApiKey: API key of the FW
    @type: ```str```
    @return: The serial number of the FW
    @rtype: ```str```
"""
serial_no = None
if gwMgmtIp is None:
logger.error('Firewall IP could not be found. Can not interact with the device')
return False
logger.info('Retrieve the serial number from FW {} with IP: {}'.format(instanceId, gwMgmtIp))
cmd_show_system_info = "/api/?type=op&key={}&cmd=<show><system><info/></system></show>".format(fwApiKey)
response = execute_api_request(gwMgmtIp, 443, cmd_show_system_info)
if response['result'] == False:
logger.error('PAN Firewall: Fail to execute the show system info command for device: {} with IP: {}'.format(instanceId, gwMgmtIp))
result = response['data'].findall(".//line")
for msg in result:
error_msg = msg.text
logger.error('Reason for failure: {}'.format(error_msg))
return False
serial_info = response['data'].findall(".//serial")
for info in serial_info:
serial_no = info.text
if not serial_no:
logger.error("Unable to retrieve the serial number from device: {} with IP: {}".format(instanceId, gwMgmtIp))
return serial_no
|
e13d90da032f4084b2c1cafcf4d3a77b189a5d58
| 3,644,120
|
from typing import Optional
import torch
from torch import Tensor
from torch.nn import functional as F
def multilabel_cross_entropy(
x: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
ignore_index: int = -100,
reduction: str = 'mean'
) -> Tensor:
"""Implements the cross entropy loss for multi-label targets
Args:
x (torch.Tensor[N, K, ...]): input tensor
target (torch.Tensor[N, K, ...]): target tensor
weight (torch.Tensor[K], optional): manual rescaling of each class
        ignore_index (int, optional): specifies a target value that is ignored and does not contribute to the gradient
reduction (str, optional): reduction method
Returns:
torch.Tensor: loss reduced with `reduction` method
"""
# log(P[class]) = log_softmax(score)[class]
logpt = F.log_softmax(x, dim=1)
# Ignore index (set loss contribution to 0)
valid_idxs = torch.ones(logpt.shape[1], dtype=torch.bool, device=x.device)
if ignore_index >= 0 and ignore_index < x.shape[1]:
valid_idxs[ignore_index] = False
# Weight
if weight is not None:
# Tensor type
if weight.type() != x.data.type():
weight = weight.type_as(x.data)
logpt = logpt * weight.view(1, -1, *([1] * (x.ndim - 2))) # type: ignore[attr-defined]
# CE Loss
loss = - target * logpt
# Loss reduction
if reduction == 'sum':
loss = loss[:, valid_idxs].sum()
else:
loss = loss[:, valid_idxs].sum(dim=1)
if reduction == 'mean':
loss = loss.mean()
return loss
|
12f1bdb41955fc6ba05b125956cdef40e42ca94c
| 3,644,121
|
def dataset_string(dataset):
"""Generate string from dataset"""
data = dataset_data(dataset)
try:
# single value
return fn.VALUE_FORMAT % data
except TypeError:
# array
if dataset.size > 1:
return fn.data_string(data)
# probably a string
return fn.shortstr('%s' % data)
|
25d82bc87ae83599857a6b8d83b671d25339df9f
| 3,644,122
|
from typing import Type
from typing import Callable
def create_constant_value_validator(
constant_cls: Type, is_required: bool
) -> Callable[[str], bool]:
"""
Create a validator func that validates a value is one of the valid values.
Parameters
----------
constant_cls: Type
The constant class that contains the valid values.
is_required: bool
Whether the value is required.
Returns
-------
validator_func: Callable[[str], bool]
The validator func.
"""
def is_valid(value: str) -> bool:
"""
Validate that value is valid.
Parameters
----------
value: str
The value to validate.
Returns
-------
status: bool
The validation status.
"""
if value is None:
return not is_required
return value in get_all_class_attr_values(constant_cls)
return is_valid
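# A minimal usage sketch, assuming ``get_all_class_attr_values`` is the module
# helper that returns the values of the constant class attributes.
class Color:
    RED = "red"
    BLUE = "blue"

is_valid_color = create_constant_value_validator(Color, is_required=True)
print(is_valid_color("red"))    # True
print(is_valid_color("green"))  # False
print(is_valid_color(None))     # False, because a value is required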
|
d225c4a225a4e24c809ef8cc6d557cf989375542
| 3,644,123
|
import pprint
def process_arguments(arguments):
"""
Process command line arguments to execute VM actions.
Called from cm4.command.command
:param arguments:
"""
result = None
if arguments.get("--debug"):
pp = pprint.PrettyPrinter(indent=4)
print("vm processing arguments")
pp.pprint(arguments)
default_cloud = Config().data["cloudmesh"]["default"]["cloud"]
vm = Vm(default_cloud)
if arguments.get("list"):
result = vm.list()
elif arguments.get("create"):
# TODO: Reconcile `create` behavior here and in docopts where
# create is called with a `VMCOUNT`.
vm_name = arguments.get("VMNAME")
if vm_name is None:
vm_name = vm.new_name()
vm.create(vm_name)
result = f"Created {vm_name}"
elif arguments.get("start"):
result = vm.start(arguments.get("--vms"))
elif arguments.get("stop"):
result = vm.stop(arguments.get("--vms"))
elif arguments.get("destroy"):
result = vm.destroy(arguments.get("--vms"))
elif arguments.get("status"):
result = vm.status(arguments.get("--vms"))
elif arguments.get("publicip"):
result = vm.get_public_ips(arguments.get('--vms'))
elif arguments.get("ssh"):
# TODO
pass
elif arguments.get("run"):
# TODO
pass
elif arguments.get("script"):
# TODO
pass
return result
|
cacb2f4696b19a92fcbad3c98017a81a8fdf0567
| 3,644,124
|
import json
def deliver_hybrid():
"""
Endpoint for submissions intended for dap and legacy systems. POST request requires the submission JSON to be
uploaded as "submission", the zipped transformed artifact as "transformed", and the filename passed in the
query parameters.
"""
logger.info('Processing Hybrid submission')
filename = request.args.get("filename")
meta = MetaWrapper(filename)
files = request.files
submission_bytes = files[SUBMISSION_FILE].read()
survey_dict = json.loads(submission_bytes.decode())
data_bytes = files[TRANSFORMED_FILE].read()
meta.set_legacy(survey_dict, data_bytes)
return process(meta, data_bytes)
|
87bb05f376c1791668bd5e160cc5940377363f64
| 3,644,125
|
from pathlib import Path
import os
def change_path(path, dir="", file="", pre="", post="", ext=""):
"""
    Change the path components with the provided directory, filename,
    prefix, postfix, and extension
    :param path: the original path
    :param dir: new directory
    :param file: filename to replace the existing filename
    :param pre: prefix to be prepended to the filename
    :param post: postfix to be appended to the filename
    :param ext: new extension for the filename
:return:
"""
target = ""
path_obj = Path(path)
old_filename = path_obj.name.replace(path_obj.suffix, "") \
if len(path_obj.suffix) > 0 else path_obj.name
if os.name == "nt":
if len(dir) > 0:
directory = dir
elif path.endswith("\\"):
directory = path[:-1]
old_filename = ""
else:
directory = str(path_obj.parent)
old_extension = path_obj.suffix
new_filename = file if len(file) > 0 else old_filename
new_filename = pre + new_filename if len(pre) > 0 else new_filename
new_filename = new_filename + post if len(post) > 0 else new_filename
new_extension = "." + ext if len(ext) > 0 else old_extension
target = directory + "\\" + new_filename + new_extension
else:
if len(dir) > 0:
directory = dir
elif path.endswith("/"):
directory = path[:-1]
old_filename = ""
else:
directory = str(path_obj.parent)
old_extension = path_obj.suffix
new_filename = file if len(file) > 0 else old_filename
new_filename = pre + new_filename if len(pre) > 0 else new_filename
new_filename = new_filename + post if len(post) > 0 else new_filename
new_extension = "." + ext if len(ext) > 0 else old_extension
target = directory + "/" + new_filename + new_extension
return target
|
b629da207f96f4476d6eda3a1c88b1c63f701742
| 3,644,126
|
def midi_to_chroma(pitch):
"""Given a midi pitch (e.g. 60 == C), returns its corresponding
chroma class value. A == 0, A# == 1, ..., G# == 11 """
return ((pitch % 12) + 3) % 12
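# Quick sanity checks of the mapping: A (MIDI 69) -> 0, A# (70) -> 1, C (60) -> 3.
assert midi_to_chroma(69) == 0
assert midi_to_chroma(70) == 1
assert midi_to_chroma(60) == 3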
|
25ef72f78269c3f494ca7431f1291891ddea594a
| 3,644,127
|
import re
def _snippet_items(snippet):
"""Return all markdown items in the snippet text.
    For this we expect the snippet to contain *nothing* but a markdown list.
We do not support "indented" list style, only one item per linebreak.
Raises SyntaxError if snippet not in proper format (e.g. contains
anything other than a markdown list).
"""
unformatted = snippet.text and snippet.text.strip()
# treat null text value as empty list
if not unformatted:
return []
# parse out all markdown list items
items = re.findall(r'^[-*+] +(.*)$', unformatted, re.MULTILINE)
# if there were any lines that didn't yield an item, assume there was
# something we didn't parse. since we never want to lose existing data
# for a user, this is an error condition.
if len(items) < len(unformatted.splitlines()):
raise SyntaxError('unparsed lines in user snippet: %s' % unformatted)
return items
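# A minimal usage sketch: the snippet object only needs a ``text`` attribute,
# so a SimpleNamespace serves as a hypothetical stand-in here.
from types import SimpleNamespace
demo = SimpleNamespace(text="- fixed the build\n* reviewed a PR\n+ wrote docs")
print(_snippet_items(demo))  # ['fixed the build', 'reviewed a PR', 'wrote docs']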
|
bdeb5b5c5e97ef3a8082b7131d46990de02a59af
| 3,644,128
|
def get_collection(*args, **kwargs):
""" Returns event collection schema
:param event_collection: string, the event collection from which schema is to be returned,
if left blank will return schema for all collections
"""
_initialize_client_from_environment()
return _client.get_collection(*args, **kwargs)
|
95698a5c750b2d40caad0f0ddfe9e17a8354be03
| 3,644,129
|
def get_tf_generator(data_source: extr.PymiaDatasource):
"""Returns a generator that wraps :class:`.PymiaDatasource` for the tensorflow data handling.
The returned generator can be used with `tf.data.Dataset.from_generator
    <https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_generator>`_ in order to build a tensorflow dataset.
Args:
data_source (.PymiaDatasource): the datasource to be wrapped.
Returns:
generator: Function that loops over the entire datasource and yields all entries.
"""
def generator():
for i in range(len(data_source)):
yield data_source[i]
return generator
|
2b786b111c2e2b17c3ee2887f93aff02de63f369
| 3,644,130
|
def is_mechanical_ventilation_heat_recovery_active(bpr, tsd, t):
"""
Control of activity of heat exchanger of mechanical ventilation system
Author: Gabriel Happle
Date: APR 2017
:param bpr: Building Properties
:type bpr: BuildingPropertiesRow
:param tsd: Time series data of building
:type tsd: dict
:param t: time step / hour of the year
:type t: int
:return: Heat exchanger ON/OFF status
:rtype: bool
"""
if is_mechanical_ventilation_active(bpr, tsd, t)\
and has_mechanical_ventilation_heat_recovery(bpr)\
and control_heating_cooling_systems.is_heating_season(t, bpr):
        # heat recovery is always active if mechanical ventilation is active (no intelligent bypass)
# this is the usual system configuration according to Clayton Miller
return True
elif is_mechanical_ventilation_active(bpr, tsd, t)\
and has_mechanical_ventilation_heat_recovery(bpr)\
and control_heating_cooling_systems.is_cooling_season(t, bpr)\
and tsd['T_int'][t-1] < tsd['T_ext'][t]:
return True
elif is_mechanical_ventilation_active(bpr, tsd, t) \
and control_heating_cooling_systems.is_cooling_season(t, bpr) \
and tsd['T_int'][t-1] >= tsd['T_ext'][t]:
# heat recovery is deactivated in the cooling case,
# if outdoor air conditions are colder than indoor (free cooling)
return False
else:
return False
|
626e24da9f0676be27e15a4422676034a94e1702
| 3,644,131
|
import aiohttp
async def fetch_user(user_id):
"""
Asynchronous function which performs an API call to retrieve a user from their ID
"""
    async with aiohttp.ClientSession() as session:
        res = await session.get(url=str(f'{MAIN_URL}/api/user/{user_id}'),
                                headers=headers)
        # Reminder : 2XX is a success
        # If unsuccessful we return the error message (body must be read before the session closes)
        if res.status != 200:
            return await res.text()
        # However, if successful return the json data that was returned and transform it into its python equivalent
        return await res.json()
|
725c4f7f89efc242948799c48541a25a2bd17d8c
| 3,644,132
|
from typing import List
import requests
from bs4 import BeautifulSoup
def category(category: str) -> List[str]:
"""Get list of emojis in the given category"""
emoji_url = f"https://emojipedia.org/{category}"
page = requests.get(emoji_url)
soup = BeautifulSoup(page.content, 'lxml')
symbols: List[str]
try:
ul = soup.find('ul', class_="emoji-list")
spans = ul.find_all('span', class_='emoji')
symbols = [span.get_text() for span in spans]
except:
symbols = list()
return symbols
|
61eaff867e9d9c75582f31435a6c22f3b92fd85a
| 3,644,133
|
from typing import Optional
import numpy as np
import pandas as pd
def calc_cumulative_bin_metrics(
labels: np.ndarray,
probability_predictions: np.ndarray,
number_bins: int = 10,
decimal_points: Optional[int] = 4) -> pd.DataFrame:
"""Calculates performance metrics for cumulative bins of the predictions.
Args:
labels: An array of true binary labels represented by 1.0 and 0.0.
probability_predictions: An array of predicted probabilities between 0.0 and
1.0.
number_bins: Number of cumulative bins that we want to divide the ranked
predictions into. Default is 10 bins such that the 1st bin contains the
highest 10% of the predictions, 2nd bin contains the highest 20% of the
predictions and so on.
decimal_points: Number of decimal points to use when outputting the
calculated performance metrics.
Returns:
bin_metrics: Following metrics calculated for each cumulative bin.
cumulative_bin_number: Bin number starting from 1.
bin_size: Total numbers of instances in the bin,
bin_size_proportion: Proportion of instances in the bin out of all the
instances in the labels.
positive_instances: Numbers of positive instances in the bin,
precision: Proportion of positive instances out of all the instances
in the bin,
coverage (recall): Proportion of positives instances in the bin out of
all the positive instances in the labels,
prop_label_positives: Proportion of positive instances in the labels,
precision_uplift: Uplift of precision of the bin compared to the
precision of the random prediction (prop_label_positives).
"""
utils.assert_label_values_are_valid(labels)
utils.assert_prediction_values_are_valid(probability_predictions)
utils.assert_label_and_prediction_length_match(labels,
probability_predictions)
# Separate the probability_predictions into bins.
label_predictions = pd.DataFrame(
list(zip(labels, probability_predictions)),
columns=['label', 'prediction'])
label_predictions = label_predictions.sort_values(
by='prediction', ascending=False)
number_total_instances = label_predictions.shape[0]
equal_bin_size = number_total_instances / number_bins
number_total_positive_instances = label_predictions[
label_predictions['label'] > 0].shape[0]
prop_label_positives = round(
number_total_positive_instances / number_total_instances, decimal_points)
cumulative_bin_metrics_list = list()
for i in range(1, (number_bins + 1)):
current_bin_size = round(equal_bin_size * i)
bin_size_proportion = round(current_bin_size / number_total_instances,
decimal_points)
bin_instances = label_predictions.head(current_bin_size)
number_bin_positive_instances = bin_instances[
bin_instances['label'] > 0].shape[0]
bin_precision = round(number_bin_positive_instances / current_bin_size,
decimal_points)
bin_recall = round(
number_bin_positive_instances / number_total_positive_instances,
decimal_points)
bin_precision_uplift = round(bin_precision / prop_label_positives,
decimal_points)
cumulative_bin_metrics_list.append(
(i, current_bin_size, bin_size_proportion,
number_bin_positive_instances, bin_precision, bin_recall,
prop_label_positives, bin_precision_uplift))
return pd.DataFrame(
cumulative_bin_metrics_list,
columns=[
'cumulative_bin_number', 'bin_size', 'bin_size_proportion',
'positive_instances', 'precision', 'coverage (recall)',
'prop_label_positives', 'precision_uplift'
])
|
c3574c8e74d5c6fd649ea4258b9a8518811210f6
| 3,644,134
|
def rootbeta_cdf(x, alpha, beta_, a, b, bounds=(), root=2.):
"""
    Calculates the cumulative density function of the root-beta distribution, i.e.::
        F(z; alpha, beta) = I_z(alpha, beta)
    where ``z=(x^(1/root)-a^(1/root))/(b^(1/root)-a^(1/root))`` and ``I_z(alpha, beta)`` is the regularized incomplete beta function.
Parameters
----------
x : float or array_like, shape (n,)
Realization.
alpha : float
Shape parameter 1.
beta_ : float
Shape parameter 2.
a : float
Minimum.
b : float
Maximum.
bounds : tuple
Tuple of minimum and maximum attainable realizations
root : float
Root.
Returns
-------
p : float or array_like, shape (n,)
Probability.
"""
_chk_root_mmm_inp(a, b)
if not bounds:
bounds = (a, b)
_chk_beta_inp(alpha, beta_)
_chk_dist_inp(x, bounds)
return beta_cdf(sqrt(x, root), alpha, beta_, sqrt(a, root), sqrt(b, root))
|
e0b951c177f288bc89536494485904e1839af7de
| 3,644,135
|
def get_scores(treatment, outcome, prediction, p, scoring_range=(0,1), plot_type='all'):
"""Calculate AUC scoring metrics.
Parameters
----------
treatment : array-like
outcome : array-like
prediction : array-like
p : array-like
Treatment policy (probability of treatment for each row).
    scoring_range : 2-tuple
        Fractional range over which the frost score is calculated. The first
        element must be less than the second, and both must lie in [0, 1].
    plot_type : str
        Which curve(s) to score: 'qini', 'cgains', 'aqini', or 'all'.
Returns
-------
scores : dict
A dictionary containing the following values. Each is also appended
with `_cgains` and `_aqini` for the corresponding values for the
cumulative gains curve and adjusted qini curve, respectively.
q1: Traditional Q score normalized by the theoretical
maximal qini. Note the theoretical max here goes up with a slope of 2.
q2: Traditional Q score normalized by the practical maximal qini. This
curve increases with a slope of 1.
Q: Area between qini curve and random selection line. This is named
after the notation in Radcliffe & Surry 2011, but note that they
normalize their curves differently.
Q_max: Maximal possible qini score, which is used for normalization
of qini to get frost score. Only obtainable by overfitting.
Q_practical_max: Practical maximal qini score, if you are not
overfitting. This assumes that all (outcome, treatment) = (1,1) were
persuadables, but that there are also an equal number of persuadables
in the control group. This is the best possible scenario, but likely
assumes too few "sure things".
overall_lift: The lift expected from random application of treatment.
"""
treatment = _ensure_array(treatment)
outcome = _ensure_array(outcome)
prediction = _ensure_array(prediction)
p = _ensure_array(p)
Nt1o1, Nt0o1, Nt1o0, Nt0o0 = _get_counts(treatment, outcome, p)
Nt1, Nt0, N = _get_tc_counts(Nt1o1, Nt0o1, Nt1o0, Nt0o0)
def riemann(x, y):
avgy = [(a+b)/2 for (a,b) in zip(y[:-1], y[1:])]
dx = [b-a for (a,b) in zip(x[:-1], x[1:])]
return sum([a*b for (a,b) in zip(dx, avgy)])
qini_riemann = riemann(*_maximal_qini_curve(_get_overfit_counts, Nt1o1, Nt0o1, Nt1o0, Nt0o0))
practical_qini_riemann = riemann(*_maximal_qini_curve(_get_no_sure_thing_counts, Nt1o1, Nt0o1, Nt1o0, Nt0o0))
overall_lift = (Nt1o1/Nt1-Nt0o1/Nt0)
qini_max = qini_riemann - 0.5*overall_lift
practical_qini_max = practical_qini_riemann - 0.5*overall_lift
# The predicted Qini curve.
# First we need to reorder the y values and y_pred based on this reordering
# We calculate TOT roughly here so we have a way of distinguishing those that (ordered, treated) and those that (ordered, untreated).
y = (2*treatment - 1)*outcome
def sortbyprediction(vec):
list2 = list(zip(prediction,vec))
# Sort by prediction.
list2.sort(key=lambda tup: tup[0], reverse=True) # included the tup[0] because otherwise we run into problems when there are only a few predicted values -- it orders by index i instead -- not what we want!
# Extract `y`, sorted by prediction.
_, vec_ordered = zip(*list2)
return vec_ordered
y_ordered = sortbyprediction(y)
tr_ordered = sortbyprediction(treatment)
p_ordered = sortbyprediction(p)
def auc(method='qini'):
# Calculate the area.
uplift_last = 0
nt1o1 = 0
nt0o1 = 0
nt1 = EPS
nt0 = EPS
pred_riemann = 0
uplifts = []
for i in range(round(scoring_range[0]*len(treatment)), round(scoring_range[1]*len(treatment))):
if y_ordered[i] > 0:
nt1o1 += 0.5*(1/p_ordered[i])
elif y_ordered[i] < 0:
nt0o1 += 0.5*(1/(1-p_ordered[i]))
if tr_ordered[i] == 1:
nt1 += 0.5*(1/p_ordered[i])
else:
nt0 += 0.5*(1/(1-p_ordered[i]))
if method=='qini':
uplift_next = nt1o1/Nt1-nt0o1/Nt0
elif method=='cgains':
uplift_next = (nt1o1/nt1-nt0o1/nt0)*(nt1+nt0)/N
elif method=='aqini':
uplift_next = nt1o1/Nt1-nt0o1*nt1/(nt0*Nt1 + EPS)
uplifts.append(uplift_next)
# each point corresponds to an x delta of 1/N
pred_riemann += 1/2*(uplift_next+uplift_last)/N
uplift_last = uplift_next
AUC = pred_riemann - 0.5*overall_lift*(scoring_range[1]**2 - scoring_range[0]**2)
maxgain = np.amax(uplifts)
return AUC, maxgain
# Dictionary to store all scores.
scores = {}
# Raw max scores.
scores['Q_max'] = qini_max
scores['overall_lift'] = overall_lift
scores['Q_practical_max'] = practical_qini_max
if (plot_type=='qini') or (plot_type=='all'):
# Qini curve scores.
scores['Q_qini'], scores['max_qini'] = auc(method='qini')
scores['q1_qini'] = scores['Q_qini']/scores['Q_max']
scores['q2_qini'] = scores['Q_qini']/scores['Q_practical_max']
if (plot_type=='cgains') or (plot_type=='all'):
# Scores for cumulative gains curve.
scores['Q_cgains'], scores['max_cgains'] = auc(method='cgains')
scores['q1_cgains'] = scores['Q_cgains']/scores['Q_max']
scores['q2_cgains'] = scores['Q_cgains']/scores['Q_practical_max']
if (plot_type=='aqini') or (plot_type=='all'):
# Scores for adjusted qini curve.
scores['Q_aqini'], scores['max_aqini'] = auc(method='aqini')
scores['q1_aqini'] = scores['Q_aqini']/scores['Q_max']
scores['q2_aqini'] = scores['Q_aqini']/scores['Q_practical_max']
return scores
|
c59cc98e08cfff6b01eff5c3ff4f74973ababf34
| 3,644,136
|
def get_arima_nemo_pipeline():
""" Function return complex pipeline with the following structure
arima \
linear
nemo |
"""
node_arima = PrimaryNode('arima')
node_nemo = PrimaryNode('exog_ts')
node_final = SecondaryNode('linear', nodes_from=[node_arima, node_nemo])
pipeline = Pipeline(node_final)
return pipeline
|
1ae171d29624ecc615f213f343c4a88c733d3554
| 3,644,137
|
from collections import Counter
import math
def conditional_entropy(x,
y,
nan_strategy=REPLACE,
nan_replace_value=DEFAULT_REPLACE_VALUE):
"""
Calculates the conditional entropy of x given y: S(x|y)
Wikipedia: https://en.wikipedia.org/wiki/Conditional_entropy
**Returns:** float
Parameters
----------
x : list / NumPy ndarray / Pandas Series
A sequence of measurements
y : list / NumPy ndarray / Pandas Series
A sequence of measurements
nan_strategy : string, default = 'replace'
How to handle missing values: can be either 'drop' to remove samples
with missing values, or 'replace' to replace all missing values with
the nan_replace_value. Missing values are None and np.nan.
nan_replace_value : any, default = 0.0
The value used to replace missing values with. Only applicable when
nan_strategy is set to 'replace'.
"""
if nan_strategy == REPLACE:
x, y = replace_nan_with_value(x, y, nan_replace_value)
elif nan_strategy == DROP:
x, y = remove_incomplete_samples(x, y)
y_counter = Counter(y)
xy_counter = Counter(list(zip(x, y)))
total_occurrences = sum(y_counter.values())
entropy = 0.0
for xy in xy_counter.keys():
p_xy = xy_counter[xy] / total_occurrences
p_y = y_counter[xy[1]] / total_occurrences
entropy += p_xy * math.log(p_y / p_xy)
return entropy
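# A small worked check of S(x|y) = sum_xy p(x,y) * log(p(y) / p(x,y)) with the
# natural log, computed directly for x = ['a','a','b','b'], y = [0,0,0,1]:
# p(a,0)=0.5, p(b,0)=0.25, p(b,1)=0.25 and p(y=0)=0.75, p(y=1)=0.25, so
# S(x|y) = 0.5*ln(1.5) + 0.25*ln(3) + 0.25*ln(1) ~= 0.477 nats, which is what
# conditional_entropy(['a', 'a', 'b', 'b'], [0, 0, 0, 1]) returns when the
# module's REPLACE constant and nan helpers are available.
expected = 0.5 * math.log(1.5) + 0.25 * math.log(3.0)
print(round(expected, 3))  # 0.477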
|
c0a9c943efdd4da1ad2f248ef7eaa2e4b1b7be06
| 3,644,138
|
from numpy import diff, where, zeros
def peaks_in_time(dat, troughs=False):
"""Find indices of peaks or troughs in data.
Parameters
----------
dat : ndarray (dtype='float')
vector with the data
troughs : bool
if True, will return indices of troughs instead of peaks
Returns
-------
    ndarray of int
indices of peaks (or troughs) in dat
Note
----
This function does not deal well with flat signal; when the signal is not
    increasing, it is assumed to be decreasing. As a result, this function
finds troughs where the signal begins to increase after either decreasing
or remaining constant
"""
diff_dat = diff(dat)
increasing = zeros(len(diff_dat))
increasing[diff_dat > 0] = 1 # mask for all points where dat is increasing
flipping = diff(increasing) # peaks are -1, troughs are 1, the rest is zero
target = -1 if not troughs else 1
return where(flipping == target)[0] + 1
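# A minimal numeric check: for [0, 1, 2, 1, 0, 1, 3, 1] the peaks sit at
# indices 2 and 6, and the single trough at index 4.
print(peaks_in_time([0, 1, 2, 1, 0, 1, 3, 1]))                # [2 6]
print(peaks_in_time([0, 1, 2, 1, 0, 1, 3, 1], troughs=True))  # [4]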
|
acafee26ac6bc236aa68f48fbea5953020faa471
| 3,644,139
|
def read_submod_def(line):
"""Attempt to read SUBMODULE definition line"""
submod_match = SUBMOD_REGEX.match(line)
if submod_match is None:
return None
else:
parent_name = None
name = None
trailing_line = line[submod_match.end(0):].split('!')[0]
trailing_line = trailing_line.strip()
parent_match = WORD_REGEX.match(trailing_line)
if parent_match is not None:
parent_name = parent_match.group(0).lower()
if len(trailing_line) > parent_match.end(0)+1:
trailing_line = trailing_line[parent_match.end(0)+1:].strip()
else:
trailing_line = ''
#
name_match = WORD_REGEX.match(trailing_line)
if name_match is not None:
name = name_match.group(0).lower()
return 'smod', SMOD_info(name, parent_name)
|
27ed8d88fdb8fd112b072f50dba00bad783eb9f3
| 3,644,140
|
def predict(model, images, labels=None):
"""Predict.
Parameters
----------
model : tf.keras.Model
Model used to predict labels.
images : List(np.ndarray)
Images to classify.
labels : List(str)
Labels to return.
"""
if type(images) == list:
images = tf.stack(images)
predictions = model(images)
predictions = tf.math.argmax(predictions, axis=1)
if labels is not None:
predictions = [labels[pred] for pred in predictions]
return predictions
|
a6c2261e7fea262fb1372f870ba3096a9faf2a68
| 3,644,141
|
import codecs
import re
def process_span_file(doc, filename):
"""Reads event annotation from filename, and add to doc
:type filename: str
:type doc: nlplingo.text.text_theory.Document
<Event type="CloseAccount">
CloseAccount 0 230
anchor 181 187
CloseAccount/Source 165 170
CloseAccount/Source 171 175
CloseAccount/Source 176 180
CloseAccount/Target 191 198
CloseAccount/Target 207 214
CloseAccount/Target 215 229
</Event>
"""
lines = []
""":type: list[str]"""
with codecs.open(filename, 'r', encoding='utf-8') as f:
for line in f:
lines.append(line.strip())
i = 0
while i < len(lines):
line = lines[i]
if line.startswith('<Event type='):
event_type = re.search(r' type="(.*?)"', line).group(1)
event_id = '{}.e-{}'.format(doc.docid, len(doc.events))
event = Event(event_id, event_type)
i += 1
line = lines[i]
while not line.startswith('</Event>'):
tokens = line.split()
info = tokens[0]
offset = IntPair(int(tokens[1]), int(tokens[2]))
if info == event_type or info == 'anchor' or '/' in info:
text = doc.get_text(offset.first, offset.second)
if text is None or text == '':
logger.warning('skipping annotation span {} {}-{} (doc length: {}, #sentences:{})'.format(doc.docid, offset.first, offset.second, doc.text_length(), len(doc.sentences)))
else:
# sometimes, the UI captures an extra trailing space. Check for that and adjust ending offset
if text[-1] == ' ':
text = text[0:-1]
offset.second = offset.second - 1
if info == event_type: # this is an event span
id = '{}.s-{}'.format(event_id, len(event.event_spans))
event.add_event_span(EventSpan(id, offset, text, event_type))
elif info == 'anchor': # anchor span
id = '{}.t-{}'.format(event_id, len(event.anchors))
#print('Spannotator, adding ANCHOR with text "{}"'.format(text))
newtext, newoffset = remove_trailing_periods(text, offset)
if text != newtext:
print('- revising anchor, text=[%s] offset=(%d,%d) newtext=[%s] newoffset=(%d,%d)' % (text, offset.first, offset.second, newtext, newoffset.first, newoffset.second))
event.add_anchor(Anchor(id, newoffset, newtext, event_type))
elif '/' in info: # argument span
em_id = 'm-{}-{}'.format(offset.first, offset.second)
newtext, newoffset = remove_trailing_periods(text, offset)
if text != newtext:
print('- revising argument, text=[%s] offset=(%d,%d) newtext=[%s] newoffset=(%d,%d)' % (text, offset.first, offset.second, newtext, newoffset.first, newoffset.second))
em = EntityMention(em_id, newoffset, newtext, 'dummy')
# we just use a dummy em first, for creating the EventArgument (notice that this em is not added to the doc)
# later, when we annotate sentence, we will find an actual EntityMention that is backed by tokens
# and use that to back the EventArgument
# Ref: text_theory.annotate_sentence_with_events()
arg_role = info[info.index('/') + 1:]
arg_id = '{}.t-{}'.format(event_id, len(event.arguments))
event.add_argument(EventArgument(arg_id, em, arg_role))
i += 1
line = lines[i]
doc.add_event(event)
i += 1
return doc
|
e2ae8f32947a6c99dfba69b0da06adcfffa3fc3c
| 3,644,142
|
from typing import Tuple
import numpy as np
def mask_frame_around_position(
frame: np.ndarray,
position: Tuple[float, float],
radius: float = 5,
) -> np.ndarray:
"""
Create a circular mask with the given ``radius`` at the given
position and set the frame outside this mask to zero. This is
sometimes required for the ``Gaussian2D``-based photometry methods
to prevent the Gaussian to try and fit some part of the data that
is far from the target ``position``.
Args:
frame: A 2D numpy array of shape `(x_size, y_size)` containing
the data on which to run the aperture photometry.
position: A tuple `(x, y)` specifying the position at which to
estimate the flux. The position should be in astropy /
photutils coordinates.
radius: The radius of the mask; this should approximately match
the size of a planet signal.
Returns:
A masked version of the given ``frame`` on which we can perform
photometry based on fitting a 2D Gaussian to the data.
"""
# Define shortcuts
frame_size = (frame.shape[0], frame.shape[1])
masked_frame = np.array(np.copy(frame))
# Get circle mask; flip the position because numpy convention
circle_mask = get_circle_mask(
mask_size=frame_size, radius=radius, center=position[::-1]
)
# Apply the mask
masked_frame[~circle_mask] = 0
return masked_frame
|
cf616a0193cf9150821ed00c8e20c61a88b64d9e
| 3,644,143
|
import numpy as np
def apogeeid_digit(arr):
"""
NAME:
apogeeid_digit
PURPOSE:
Extract digits from apogeeid because its too painful to deal with APOGEE ID in h5py
INPUT:
arr (ndarray): apogee_id
OUTPUT:
apogee_id with digits only (ndarray)
HISTORY:
2017-Oct-26 - Written - Henry Leung (University of Toronto)
"""
if isinstance(arr, np.ndarray) or isinstance(arr, list):
arr_copy = np.array(arr) # make a copy
for i in range(arr_copy.shape[0]):
arr_copy[i] = str(''.join(filter(str.isdigit, arr_copy[i])))
return arr_copy
else:
return str(''.join(filter(str.isdigit, arr)))
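# Usage sketch with a made-up, hypothetical APOGEE-style identifier:
print(apogeeid_digit("2M12345678+0123456"))               # '2123456780123456'
print(apogeeid_digit(["2M12345678+0123456", "VESTA01"]))  # array of digit-only strings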
|
48e21ab69c9f733dbf7b612994bfed35b8980424
| 3,644,144
|
def transform_user_weekly_artist_chart(chart):
"""Converts lastfm api weekly artist chart data into neo4j friendly
weekly artist chart data
Args:
chart (dict): lastfm api weekly artist chart
Returns:
list - neo4j friendly artist data
"""
chart = chart['weeklyartistchart']
artists = []
for artist in chart['artist']:
artists.append(transform_artist(artist))
return artists
|
1034211f6c21774044d767aeb7861b6aa80b4023
| 3,644,145
|
def plotter(fdict):
""" Go """
pgconn = get_dbconn('isuag')
ctx = get_autoplot_context(fdict, get_description())
threshold = 50
threshold_c = temperature(threshold, 'F').value('C')
hours1 = ctx['hours1']
hours2 = ctx['hours2']
station = ctx['station']
oldstation = XREF[station]
df = read_sql("""
with obs as (
select valid, c300, lag(c300) OVER (ORDER by valid ASC) from hourly
where station = %s),
agg1 as (
select valid,
case when c300 > %s and lag < %s then 1
when c300 < %s and lag > %s then -1
else 0 end as t from obs),
agg2 as (
SELECT valid, t from agg1 where t != 0),
agg3 as (
select valid, lead(valid) OVER (ORDER by valid ASC),
t from agg2),
agg4 as (
select extract(year from valid) as yr, valid, lead,
rank() OVER (PARTITION by extract(year from valid) ORDER by valid ASC)
from agg3 where t = 1
and (lead - valid) >= '%s hours'::interval),
agg5 as (
select extract(year from valid) as yr, valid, lead
from agg3 where t = -1)
select f.yr, f.valid as fup, f.lead as flead, d.valid as dup,
d.lead as dlead from agg4 f JOIN agg5 d ON (f.yr = d.yr)
where f.rank = 1 and d.valid > f.valid
ORDER by fup ASC
""", pgconn, params=(oldstation,
threshold, threshold, threshold, threshold, hours1),
index_col=None)
if df.empty:
raise NoDataFound("No Data Found")
df2 = read_sql("""
with obs as (
select valid, tsoil_c_avg,
lag(tsoil_c_avg) OVER (ORDER by valid ASC) from sm_hourly
where station = %s),
agg1 as (
select valid,
case when tsoil_c_avg > %s and lag < %s then 1
when tsoil_c_avg < %s and lag > %s then -1
else 0 end as t from obs),
agg2 as (
SELECT valid, t from agg1 where t != 0),
agg3 as (
select valid, lead(valid) OVER (ORDER by valid ASC),
t from agg2),
agg4 as (
select extract(year from valid) as yr, valid, lead,
rank() OVER (PARTITION by extract(year from valid) ORDER by valid ASC)
from agg3 where t = 1
and (lead - valid) >= '%s hours'::interval),
agg5 as (
select extract(year from valid) as yr, valid, lead
from agg3 where t = -1)
select f.yr, f.valid as fup, f.lead as flead, d.valid as dup,
d.lead as dlead from agg4 f JOIN agg5 d ON (f.yr = d.yr)
where f.rank = 1 and d.valid > f.valid
ORDER by fup ASC
""", pgconn, params=(station,
threshold_c, threshold_c, threshold_c, threshold_c,
hours1),
index_col=None)
if df2.empty:
raise NoDataFound("No Data Found")
(fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
d2000 = utc(2000, 1, 1, 6)
for d in [df, df2]:
for _, row in d.iterrows():
if row['dlead'] is None:
continue
f0 = (row['fup'].replace(year=2000) - d2000).total_seconds()
f1 = (row['flead'].replace(year=2000) - d2000).total_seconds()
d0 = (row['dup'].replace(year=2000) - d2000).total_seconds()
d1 = (row['dlead'].replace(year=2000) - d2000).total_seconds()
if d1 < d0:
continue
ax.barh(row['fup'].year, (f1-f0), left=f0, facecolor='r',
align='center', edgecolor='r')
color = 'lightblue' if (d1 - d0) < (hours2 * 3600) else 'b'
ax.barh(row['fup'].year, (d1-d0), left=d0, facecolor=color,
align='center', edgecolor=color)
xticks = []
xticklabels = []
for i in range(1, 13):
d2 = d2000.replace(month=i)
xticks.append((d2 - d2000).total_seconds())
xticklabels.append(d2.strftime("%-d %b"))
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
ax.set_xlim(xticks[2], xticks[6])
ax.grid(True)
nt = NetworkTable("ISUSM")
nt2 = NetworkTable("ISUAG", only_online=False)
ab = nt.sts[station]['archive_begin']
if ab is None:
raise NoDataFound("Unknown station metadata.")
ax.set_title(("[%s] %s 4 Inch Soil Temps\n[%s] %s used for pre-%s dates"
) % (station, nt.sts[station]['name'], oldstation,
nt2.sts[oldstation]['name'],
ab.year))
ax.set_ylim(df['yr'].min() - 1, df2['yr'].max() + 1)
p0 = plt.Rectangle((0, 0), 1, 1, fc="r")
p1 = plt.Rectangle((0, 0), 1, 1, fc="lightblue")
p2 = plt.Rectangle((0, 0), 1, 1, fc="b")
ax.legend((p0, p1, p2), (
'First Period Above %s for %s+ Hours' % (threshold, hours1),
'Below %s for 1+ Hours' % (threshold, ),
'Below %s for %s+ Hours' % (threshold, hours2)),
ncol=2, fontsize=11, loc=(0., -0.2))
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width,
box.height * 0.9])
return fig, df
|
f8a412065700ab111f5bf846938721aa397803b3
| 3,644,146
|
def config_namespace(config_file=None, auto_find=False,
verify=True, **cfg_options):
"""
Return configuration options as a Namespace.
.. code:: python
reusables.config_namespace(os.path.join("test", "data",
"test_config.ini"))
# <Namespace: {'General': {'example': 'A regul...>
:param config_file: path or paths to the files location
:param auto_find: look for a config type file at this location or below
:param verify: make sure the file exists before trying to read
:param cfg_options: options to pass to the parser
:return: Namespace of the config files
"""
return ConfigNamespace(**config_dict(config_file, auto_find,
verify, **cfg_options))
|
c3293fa36e32d2ebea610a88a6e29ba47906ab7b
| 3,644,147
|
import pandas
import numpy
import pyBigWig
import pyfaidx
import torch
from tqdm import tqdm
def extract_peaks(peaks, sequences, signals, controls=None, chroms=None,
in_window=2114, out_window=1000, max_jitter=128, min_counts=None,
max_counts=None, verbose=False):
"""Extract sequences and signals at coordinates from a peak file.
This function will take in genome-wide sequences, signals, and optionally
controls, and extract the values of each at the coordinates specified in
the peak file and return them as tensors.
    Signals and controls are both lists, with the length of the list, n_s
    and n_c respectively, being the middle dimension of the returned
    tensors. Specifically, the returned tensors are of size
    (len(peaks), n_s/n_c, (out_window/in_window)+max_jitter*2).
The values for sequences, signals, and controls, can either be filepaths
or dictionaries of numpy arrays or a mix of the two. When a filepath is
passed in it is loaded using pyfaidx or pyBigWig respectively.
Parameters
----------
peaks: str or pandas.DataFrame
Either the path to a bed file or a pandas DataFrame object containing
three columns: the chromosome, the start, and the end, of each peak.
sequences: str or dictionary
Either the path to a fasta file to read from or a dictionary where the
keys are the unique set of chromosoms and the values are one-hot
encoded sequences as numpy arrays or memory maps.
signals: list of strs or list of dictionaries
A list of filepaths to bigwig files, where each filepath will be read
using pyBigWig, or a list of dictionaries where the keys are the same
set of unique chromosomes and the values are numpy arrays or memory
maps.
controls: list of strs or list of dictionaries or None, optional
A list of filepaths to bigwig files, where each filepath will be read
using pyBigWig, or a list of dictionaries where the keys are the same
set of unique chromosomes and the values are numpy arrays or memory
maps. If None, no control tensor is returned. Default is None.
chroms: list or None, optional
A set of chromosomes to extact peaks from. Peaks in other chromosomes
in the peak file are ignored. If None, all peaks are used. Default is
None.
in_window: int, optional
The input window size. Default is 2114.
out_window: int, optional
The output window size. Default is 1000.
max_jitter: int, optional
The maximum amount of jitter to add, in either direction, to the
midpoints that are passed in. Default is 128.
min_counts: float or None, optional
The minimum number of counts, summed across the length of each example
and across all tasks, needed to be kept. If None, no minimum. Default
is None.
max_counts: float or None, optional
The maximum number of counts, summed across the length of each example
and across all tasks, needed to be kept. If None, no maximum. Default
is None.
verbose: bool, optional
Whether to display a progress bar while loading. Default is False.
Returns
-------
seqs: torch.tensor, shape=(n, 4, in_window+2*max_jitter)
The extracted sequences in the same order as the peaks in the peak
file after optional filtering by chromosome.
signals: torch.tensor, shape=(n, len(signals), out_window+2*max_jitter)
The extracted signals where the first dimension is in the same order
as peaks in the peak file after optional filtering by chromosome and
the second dimension is in the same order as the list of signal files.
controls: torch.tensor, shape=(n, len(controls), out_window+2*max_jitter)
The extracted controls where the first dimension is in the same order
as peaks in the peak file after optional filtering by chromosome and
the second dimension is in the same order as the list of control files.
If no control files are given, this is not returned.
"""
seqs, signals_, controls_ = [], [], []
in_width, out_width = in_window // 2, out_window // 2
# Load the sequences
if isinstance(sequences, str):
sequences = pyfaidx.Fasta(sequences)
# Load the peaks or rename the columns to be consistent
names = ['chrom', 'start', 'end']
if isinstance(peaks, str):
peaks = pandas.read_csv(peaks, sep="\t", usecols=(0, 1, 2),
header=None, index_col=False, names=names)
else:
peaks = peaks.copy()
peaks.columns = names
if chroms is not None:
peaks = peaks[numpy.isin(peaks['chrom'], chroms)]
# Load the signal and optional control tracks if filenames are given
for i, signal in enumerate(signals):
if isinstance(signal, str):
signals[i] = pyBigWig.open(signal, "r")
if controls is not None:
for i, control in enumerate(controls):
if isinstance(control, str):
controls[i] = pyBigWig.open(control, "r")
desc = "Loading Peaks"
d = not verbose
for chrom, start, end in tqdm(peaks.values, disable=d, desc=desc):
mid = start + (end - start) // 2
start = mid - out_width - max_jitter
end = mid + out_width + max_jitter
# Extract the signal from each of the signal files
signals_.append([])
for signal in signals:
if isinstance(signal, dict):
signal_ = signal[chrom][start:end]
else:
signal_ = signal.values(chrom, start, end, numpy=True)
signal_ = numpy.nan_to_num(signal_)
signals_[-1].append(signal_)
# For the sequences and controls extract a window the size of the input
start = mid - in_width - max_jitter
end = mid + in_width + max_jitter
# Extract the controls from each of the control files
if controls is not None:
controls_.append([])
for control in controls:
if isinstance(control, dict):
control_ = control[chrom][start:end]
else:
control_ = control.values(chrom, start, end, numpy=True)
control_ = numpy.nan_to_num(control_)
controls_[-1].append(control_)
# Extract the sequence
if isinstance(sequences, dict):
seq = sequences[chrom][start:end].T
else:
seq = one_hot_encode(sequences[chrom][start:end].seq.upper(),
alphabet=['A', 'C', 'G', 'T', 'N']).T
seqs.append(seq)
seqs = torch.tensor(numpy.array(seqs), dtype=torch.float32)
signals_ = torch.tensor(numpy.array(signals_), dtype=torch.float32)
idxs = torch.ones(signals_.shape[0], dtype=torch.bool)
if max_counts is not None:
idxs = (idxs) & (signals_.sum(dim=(1, 2)) < max_counts)
if min_counts is not None:
idxs = (idxs) & (signals_.sum(dim=(1, 2)) > min_counts)
if controls is not None:
controls_ = torch.tensor(numpy.array(controls_), dtype=torch.float32)
return seqs[idxs], signals_[idxs], controls_[idxs]
return seqs[idxs], signals_[idxs]
|
f3a3696f2e31b7b91384df50dd0374c2e4e46443
| 3,644,148
|
import re
import fnmatch
import os
def findfiles(which, where='.'):
"""Returns list of filenames from `where` path matched by 'which'
shell pattern. Matching is case-insensitive.
# findfiles('*.ogg')
"""
# TODO: recursive param with walk() filtering
rule = re.compile(fnmatch.translate(which), re.IGNORECASE)
fn_names = [name for name in os.listdir(where) if rule.match(name)]
return [os.path.join(where, f) for f in fn_names]
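# Usage sketch: case-insensitive shell-pattern matching in a directory.
print(findfiles('*.py'))           # Python files in the current directory
print(findfiles('*.OGG', '/tmp'))  # also matches '.ogg', since matching ignores case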
|
a19a9684b44c1ec1f668071edb74dd5f1f411e65
| 3,644,149
|
import numpy as np
def map_feature(value, f_type):
""" Builds the Tensorflow feature for the given feature information """
if f_type == np.dtype('object'):
return bytes_feature(value)
elif f_type == np.dtype('int'):
return int64_feature(value)
elif f_type == np.dtype('float'):
return float64_feature(value)
elif f_type == np.dtype('bool'):
return int64_feature(value.astype('int'))
else:
raise ValueError('Do not know how to store value {} with type {}'
.format(value, f_type))
|
26416b27737542c8ac6100168775f47b271206a3
| 3,644,150
|
def is_text_area(input):
"""
    Template tag to check if an input field uses a Textarea widget
    :param input: Input field
    :return: True if the widget is a Textarea, False if not
"""
return input.field.widget.__class__.__name__ == "Textarea"
|
4657a93809e123aaa27ee0a202b33e0383ac23cc
| 3,644,151
|
def print_album_list(album_list):
"""Print album list and return the album name choice.
If return is all then all photos on page will be download."""
for i in range(len(album_list)):
print("{}. {} ({} photo(s))".format(
i + 1, album_list[i]['name'], album_list[i]['count']))
choice = raw_input("Please enter your choice (0 for all): ")
return int(choice) - 1
|
2a3c4fde9fc56da179ea43c88f966735fc5c7beb
| 3,644,152
|
import os
import sys
def dprepb_imaging(vis_input):
"""The DPrepB/C imaging pipeline for visibility data.
Args:
vis_input (array): array of ARL visibility data and parameters.
Returns:
restored: clean image.
"""
# Load the Input Data
# ------------------------------------------------------
vis1 = vis_input[0]
vis2 = vis_input[1]
channel = vis_input[2]
stations = vis_input[3]
lofar_stat_pos = vis_input[4]
APPLY_IONO = vis_input[5]
APPLY_BEAM = vis_input[6]
MAKE_PLOTS = vis_input[7]
UV_CUTOFF = vis_input[8]
PIXELS_PER_BEAM = vis_input[9]
POLDEF = vis_input[10]
RESULTS_DIR = vis_input[11]
FORCE_RESOLUTION = vis_input[12]
ionRM1 = vis_input[13]
times1 = vis_input[14]
time_indices1 = vis_input[15]
ionRM2 = vis_input[16]
times2 = vis_input[17]
time_indices2 = vis_input[18]
twod_imaging = vis_input[19]
npixel_advice = vis_input[20]
cell_advice = vis_input[21]
# Make a results directory on the worker:
os.makedirs(RESULTS_DIR, exist_ok=True)
# Redirect stdout, as Dask cannot print on workers
# ------------------------------------------------------
sys.stdout = open('%s/dask-log.txt' % (RESULTS_DIR), 'w')
# Prepare Measurement Set
# ------------------------------------------------------
# Combine MSSS snapshots:
vis = append_visibility(vis1, vis2)
# Apply a uv-distance cut to the data:
vis = uv_cut(vis, UV_CUTOFF)
# Make some basic plots:
if MAKE_PLOTS:
uv_cov(vis)
uv_dist(vis)
# Imaging and Deconvolution
# ------------------------------------------------------
# Convert from XX/XY/YX/YY to I/Q/U/V:
vis = convert_to_stokes(vis, POLDEF)
# Image I, Q, U, V, per channel:
if twod_imaging:
dirty, psf = image_2d(vis, npixel_advice, cell_advice, channel, RESULTS_DIR)
else:
dirty, psf = wstack(vis, npixel_advice, cell_advice, channel, RESULTS_DIR)
# Deconvolve (using complex Hogbom clean):
comp, residual = deconvolve_cube_complex(dirty, psf, niter=100, threshold=0.001, \
fracthresh=0.001, window_shape='', gain=0.1, \
algorithm='hogbom-complex')
# Convert resolution (FWHM in arcmin) to a psfwidth (standard deviation in pixels):
clean_res = (((FORCE_RESOLUTION/2.35482004503)/60.0)*np.pi/180.0)/cell_advice
# Create the restored image:
restored = restore_cube(comp, psf, residual, psfwidth=clean_res)
# Save to disk:
export_image_to_fits(restored, '%s/imaging_clean_WStack-%s.fits'
% (RESULTS_DIR, channel))
return restored
|
1452d6c1a5ddd5391c68b3ced15f93b77a204489
| 3,644,153
|
import os
import sys
import numpy as np
def procure_data(args):
"""Load branches from specified file as needed to calculate
all fit and cut expressions. Then apply cuts and binning, and
return only the processed fit data."""
# look up list of all branches in the specified root file
# determine four-digit number of DRS board used
# apply shorthand (a1 -> area_xxxx_1, etc.)
root_file = args.run if os.sep in args.run else ROOT_FILE.format(args.run)
branches_all = fileio.get_keys(root_file)
# find all channels present by matching noise_*
channels = {_.rpartition("_")[2] for _ in branches_all if _.startswith("noise_")}
# any matching channels: fill replacements using templates in SHORTHAND
if channels:
test_branch = next(_ for _ in branches_all if _.startswith("noise_"))
board = test_branch.partition('_')[2].partition('_')[0]
replacements = {}
for pre,post in SHORTHAND.items():
if "{ch}" in pre:
replacements.update(
{pre.format(ch=_):post.format(board=board,ch=_) for _ in channels}
)
else:
replacements.update(
{pre:post.format(board=board,ch=next(_ for _ in channels))}
)
# no matching branches found: don't apply shorthand
else:
replacements = {}
# set of branches needed to evaluate all fits, cuts, defs, and xfs
branches_needed = set()
# compile expressions and update branches_needed
fn_fits = []
for fit in args.fits:
# skip fits where the first entry is None. This happens when
# the positional argument is not specified, so handling this
# case lets us supply all fits via --fit if desired.
if fit[0] is None:
continue
fn = expr.check_and_compile(replace_names(fit[0], replacements))
fn_fits.append(fn)
branches_needed |= fn.kwargnames
# copy at this point to capture branches needed for fit expressions
branches_needed_fit = branches_needed.copy()
fn_cuts = []
for cut in args.cuts:
fn = expr.check_and_compile(replace_names(cut[0], replacements))
fn_cuts.append(fn)
branches_needed |= fn.kwargnames
# copy branches_needed at this point to capture which are needed
# explicitly for fits and cuts
branches_fit_and_cut = branches_needed.copy()
fn_defs = []
for def_ in args.defs:
fn = expr.check_and_compile(replace_names(def_[1], replacements))
fn_defs.append(fn)
branches_needed |= fn.kwargnames
fn_xfs = []
for xf in args.xfs:
raise Exception("xfs not implemented yet")
fn = expr.check_and_compile(replace_names(xf[1], replacements))
fn_xfs.append(fn)
branches_needed |= fn.kwargnames
# load branches from specified root file, allowing for missing
# branches. missing branches must be generated by one of the
# defs or xfs included.
branches = fileio.load_branches(root_file, branches_needed - set(BRANCHES_CONSTRUCT))
# initialize the branch manager instance with the resulting branches
bm = data.BranchManager(branches, export_copies=False, import_copies=False)
# construct branches if needed
if "entry" in branches_needed:
bm.bud(data.bud_entry)
# apply scaler rectification
if args.rectify_scalers:
if any(_.startswith("scaler_") for _ in bm.keys):
bm.bud(data.rectify_scaler(), overwrite=True)
# apply timestamp fix and localization
if any(_.startswith("timestamp_") for _ in bm.keys):
bm.bud(data.fix_monotonic_timestamp(), overwrite=True)
if args.localize_timestamps:
bm.bud(data.localize_timestamp(), overwrite=True)
# process defs and xfs to create new branches
# todo: current implementation is slightly inefficient. defs and xfs
# are evaluated before applying any cuts, resulting in excess
# computation in the case where cuts do not depend on defs or xfs.
# an implementation which applies each cut as soon as it is able to,
# and prioritizes defs and xfs which enable cuts, would be faster.
fn_defs_remain = [True for _ in fn_defs]
fn_xfs_remain = [True for _ in fn_xfs]
n_remaining = len(fn_defs) + len(fn_xfs)
while n_remaining:
for i,remain in enumerate(fn_defs_remain):
if remain and fn_defs[i].kwargnames.issubset(bm.keys):
this_name = args.defs[i][0]
this_fn = fn_defs[i]
bm.bud(
lambda man:{this_name:this_fn(**{_:man[_] for _ in this_fn.kwargnames})}
)
fn_defs_remain[i] = False
# # xfs not implemented yet
# for i,remain in enumerate(fn_xfs_remain):
# if remain and fn_xfs[i].kwargnames.issubset(bm.keys):
# bm.bud()
# fn_xfs_remain[i] = False
# if we have all branches needed for fits and cuts, there's
# no need to keep evaluating defs and xfs
if branches_fit_and_cut.issubset(bm.keys):
break
# check to see if progress has been made
# if not, then it never will, and we have to exit
n_remaining_now = sum(fn_defs_remain) + sum(fn_xfs_remain)
if n_remaining_now == n_remaining:
print("could not evaluate all definititions and transformations")
print("missing one or more variables for completion")
sys.exit(1)
n_remaining = n_remaining_now
# wrapper functions to capture loop variable values
# if we don't use these, the overwritten value of fn and other
# variables used in the loop will change, and the change will affect
# the function calls to calculate masks
def mask_bool(fn):
mask = lambda man:fn(**{_:man[_] for _ in fn.kwargnames})
return mask
def mask_range(fn,lo,hi):
mask = lambda man:data.inrange(fn(**{_:man[_] for _ in fn.kwargnames}),lo,hi)
return mask
# process cuts
masks = []
for icut,fn in enumerate(fn_cuts):
this_cut = args.cuts[icut]
# no bounds specified: boolean expression
if (this_cut[1] is None) and (this_cut[2] is None):
masks.append(mask_bool(fn))
# at least one bound specified: lo<expression<hi
else:
masks.append(mask_range(fn,this_cut[1],this_cut[2]))
# apply cuts
if masks:
data_fit_raw = bm.mask(
data.mask_all(*masks),
branches_needed_fit,
apply_mask = False,
)
else:
data_fit_raw = {_:bm[_] for _ in branches_needed_fit}
# data_fit_raw are all the branches that show up in the expression
# for at least one fit. to get the fit data, we have still have to
# evaluate the expressions.
fit_data = []
for fn in fn_fits:
fit_data.append(fn(**{_:data_fit_raw[_] for _ in fn.kwargnames}))
# get counts and edges by binning data_fit_raw
fit_counts = []
fit_edges = []
for i,fit in enumerate(args.fits):
this_data = fit_data[i]
# determine bin edges
lo = this_data.min() if fit[1] in [None,-np.inf] else fit[1]
hi = this_data.max() if fit[2] in [None, np.inf] else fit[2]
if fit[3]:
nbins = fit[3]
else:
this_ndata = data.inrange(this_data,lo,hi,True,True).sum()
nbins = data.bin_count_from_ndata(this_ndata)
if fit[4].startswith("li"):
this_edges = data.edges_lin(lo,hi,nbins)
elif fit[4].startswith("lo"):
if lo<=0:
lo = this_data[this_data>0].min()
this_edges = data.edges_log(lo,hi,nbins)
elif fit[4].startswith("s"):
this_edges = data.edges_symlog(lo,hi,nbins)
# calculate histogram counts and append
this_counts, _ = np.histogram(this_data, this_edges)
fit_counts.append(this_counts)
fit_edges.append(this_edges)
return fit_counts, fit_edges
|
da7a05f5a7af8fa6de112a040f880494e3605484
| 3,644,154
|
import os
def after_file_name(file_to_open):
"""
Given a file name return as:
[file_to_open root]_prep.[file-to_open_ending]
Parameters
----------
file_to_open : string
Name of the input file.
Returns
--------
after_file : string
Full path to the (new) file.
Examples
---------
>>> from preparenovonix.novonix_io import after_file_name
>>> after_file_name('example_data/example_data.csv')
"""
# Extract the path and file name
dirname, fname = os.path.split(os.path.abspath(file_to_open))
root = fname.split(".")[0]
ending = fname.split(".")[1]
fname = root + "_prep." + ending
after_file = os.path.join(dirname, fname)
return after_file
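# Hedged usage sketch: the helper keeps the directory and inserts "_prep"
# before the extension, e.g. example_data.csv -> example_data_prep.csv.
prepared_path = after_file_name("example_data/example_data.csv")
print(prepared_path)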
|
8b06b3cabbe8dd388cafc2d9d48b30feb2f6c254
| 3,644,155
|
import struct
def read_bool(data):
"""
Read 1 byte of data as `bool` type.
Parameters
----------
data : io.BufferedReader
File open to read in binary mode
Returns
-------
bool
True or False
"""
s_type = "=%s" % get_type("bool")
return struct.unpack(s_type, data.read(1))[0]
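# Hedged usage sketch: round-trip a packed boolean through an in-memory buffer,
# assuming get_type("bool") resolves to struct's "?" format code.
import io
example_buffer = io.BytesIO(struct.pack("=?", True))
assert read_bool(example_buffer) is True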
|
9302a3f4831143c44b0a67cfe0f146463e8ba27e
| 3,644,156
|
def sectorize(position):
""" Returns a tuple representing the sector for the given `position`.
Parameters
----------
position : tuple of len 3
Returns
-------
sector : tuple of len 3
"""
x, y, z = normalize(position)
x, y, z = x // GameSettings.SECTOR_SIZE, y // GameSettings.SECTOR_SIZE, z // GameSettings.SECTOR_SIZE
return (x, 0, z)
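# Hedged usage sketch: with GameSettings.SECTOR_SIZE == 16 and normalize()
# rounding to integer block coordinates, a position maps to its sector.
print(sectorize((17.3, 40.0, -3.8)))  # -> (1, 0, -1) under those assumptions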
|
689fc3ee350e5493d037df290c5df05d50621b7e
| 3,644,157
|
import random
import numpy as np
def add_random_phase_shift(hkl, phases, fshifts=None):
"""
Introduce a random phase shift, at most one unit cell length along each axis.
Parameters
----------
hkl : numpy.ndarray, shape (n_refls, 3)
Miller indices
phases : numpy.ndarray, shape (n_refls,)
phase values in degrees, ordered as hkl
fshifts : numpy.ndarray, shape (3,), optional
fractional shifts along (a,b,c) to apply; if None, apply random shifts
Returns
-------
shifted_phases : numpy.ndarray, shape (n_refls,)
phase values in degrees, ordered as hkl
fshifts : numpy.ndarray, shape (3,)
fractional shifts applied along (a,b,c)
"""
if fshifts is None:
fshifts = np.array([random.random() for i in range(3)])
shifted_phases = wrap_phases(phases - 360 * np.dot(hkl, fshifts).ravel())
return shifted_phases, fshifts
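# Hedged usage sketch: shifting and then applying the negated fractional shifts
# recovers the original phases (wrap_phases is assumed to wrap into (-180, 180]).
example_hkl = np.array([[1, 0, 0], [0, 2, 1]])
example_phases = np.array([30.0, -100.0])
shifted, applied = add_random_phase_shift(example_hkl, example_phases)
recovered, _ = add_random_phase_shift(example_hkl, shifted, fshifts=-applied)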
|
7739d99b58bec80283a5e49fc2e6eaa6161286ae
| 3,644,158
|
import os
from torch.utils.data import ConcatDataset
def hierarchical_dataset(root, opt, select_data="/", data_type="label", mode="train"):
"""select_data='/' contains all sub-directory of root directory"""
dataset_list = []
dataset_log = f"dataset_root: {root}\t dataset: {select_data[0]}"
print(dataset_log)
dataset_log += "\n"
for dirpath, dirnames, filenames in os.walk(root + "/"):
if not dirnames:
select_flag = False
for selected_d in select_data:
if selected_d in dirpath:
select_flag = True
break
if select_flag:
if data_type == "label":
dataset = LmdbDataset(dirpath, opt, mode=mode)
else:
dataset = LmdbDataset_unlabel(dirpath, opt)
sub_dataset_log = f"sub-directory:\t/{os.path.relpath(dirpath, root)}\t num samples: {len(dataset)}"
print(sub_dataset_log)
dataset_log += f"{sub_dataset_log}\n"
dataset_list.append(dataset)
concatenated_dataset = ConcatDataset(dataset_list)
return concatenated_dataset, dataset_log
|
100b2d5b8f8829df4f3545ec2f37c05df4961897
| 3,644,159
|
from typing import get_args
import os
import sys
def main() -> None:
"""Run main entrypoint."""
# Parse command line arguments
get_args()
# Ensure environment tokens are present
try:
SLACK_TOKEN = os.environ["PAGEY_SLACK_TOKEN"]
except KeyError:
print("Error, env variable 'PAGEY_SLACK_TOKEN' not set", file=sys.stderr)
sys.exit(1)
try:
PD_TOKEN = os.environ["PAGEY_PD_TOKEN"]
except KeyError:
print("Error, env variable 'PAGEY_PD_TOKEN' not set", file=sys.stderr)
sys.exit(1)
# Initialize Pagerduty module
pagerduty = PageyPD(PD_TOKEN)
def commandCallback(command: str) -> str:
"""This is a callback function for Slack to evaluate response based on given command.
Args:
command (str): the command/message after the bot mention (e.g.: @pagey <command>).
Returns:
str: The reply to be sent to Slack.
"""
# [Command: oncall] Get Pagerduty schedules
if command.startswith("oncall"):
schedules = pagerduty.get_schedules()
response = ""
for team, users in schedules.items():
response += f"*{team}*\n"
# Sort by escalation level
users.sort(key=lambda s: s["level"])
for user in users:
if int(user["level"]) == 1:
response += (
f"* [lvl: *{user['level']}* -> {user['until']}] *{user['name']}*\n"
)
else:
response += (
f"* [lvl: *{user['level']}* -> {user['until']}] {user['name']}\n"
)
response += "\n"
return response
# [Command: info] Report some info
if command.startswith("info"):
return f"{DEF_NAME} ({DEF_VERSION}) - {DEF_DESC}\nFind me here: {DEF_GITHUB}\n"
return "Available commands: " + ", ".join(COMMANDS)
# Connect to Slack (RTM mode)
slack = PageySlack(SLACK_TOKEN, commandCallback)
if not slack.connect():
print("Connection to Slack failed. Exception traceback printed above.", file=sys.stderr)
sys.exit(1)
print("Pagey connected to Slack and running!")
slack.run()
|
e55c513d1123abd236487466d1878bfb7a58e37b
| 3,644,160
|
import os
import shutil
import tempfile
import zipfile
from vistrails.core.packagemanager import get_package_manager
def save_vistrail_bundle_to_zip_xml(save_bundle, filename, vt_save_dir=None, version=None):
"""save_vistrail_bundle_to_zip_xml(save_bundle: SaveBundle, filename: str,
vt_save_dir: str, version: str)
-> (save_bundle: SaveBundle, vt_save_dir: str)
save_bundle: a SaveBundle object containing vistrail data to save
filename: filename to save to
vt_save_dir: directory storing any previous files
Generates a zip compressed version of vistrail.
It raises an Exception if there was an error.
"""
if save_bundle.vistrail is None:
raise VistrailsDBException('save_vistrail_bundle_to_zip_xml failed, '
'bundle does not contain a vistrail')
if not vt_save_dir:
vt_save_dir = tempfile.mkdtemp(prefix='vt_save')
# abstractions are saved in the root of the zip file
# abstraction_dir = os.path.join(vt_save_dir, 'abstractions')
#thumbnails and mashups have their own folder
thumbnail_dir = os.path.join(vt_save_dir, 'thumbs')
mashup_dir = os.path.join(vt_save_dir, 'mashups')
# Save Vistrail
xml_fname = os.path.join(vt_save_dir, 'vistrail')
save_vistrail_to_xml(save_bundle.vistrail, xml_fname, version)
# Save Log
if save_bundle.vistrail.db_log_filename is not None:
xml_fname = os.path.join(vt_save_dir, 'log')
if save_bundle.vistrail.db_log_filename != xml_fname:
shutil.copyfile(save_bundle.vistrail.db_log_filename, xml_fname)
save_bundle.vistrail.db_log_filename = xml_fname
if save_bundle.log is not None:
xml_fname = os.path.join(vt_save_dir, 'log')
save_log_to_xml(save_bundle.log, xml_fname, version, True)
save_bundle.vistrail.db_log_filename = xml_fname
# Save Abstractions
saved_abstractions = []
for obj in save_bundle.abstractions:
if isinstance(obj, basestring):
# FIXME we should have an abstraction directory here instead
# of the abstraction_ prefix...
if not os.path.basename(obj).startswith('abstraction_'):
obj_fname = 'abstraction_' + os.path.basename(obj)
else:
obj_fname = os.path.basename(obj)
# xml_fname = os.path.join(abstraction_dir, obj_fname)
xml_fname = os.path.join(vt_save_dir, obj_fname)
saved_abstractions.append(xml_fname)
# if not os.path.exists(abstraction_dir):
# os.mkdir(abstraction_dir)
# print "obj:", obj
# print "xml_fname:", xml_fname
if obj != xml_fname:
# print 'copying %s -> %s' % (obj, xml_fname)
try:
shutil.copyfile(obj, xml_fname)
except Exception, e:
saved_abstractions.pop()
debug.critical('copying %s -> %s failed: %s' % \
(obj, xml_fname, str(e)))
else:
raise VistrailsDBException('save_vistrail_bundle_to_zip_xml failed, '
'abstraction list entry must be a filename')
# Save Thumbnails
saved_thumbnails = []
for obj in save_bundle.thumbnails:
if isinstance(obj, basestring):
obj_fname = os.path.basename(obj)
png_fname = os.path.join(thumbnail_dir, obj_fname)
saved_thumbnails.append(png_fname)
if not os.path.exists(thumbnail_dir):
os.mkdir(thumbnail_dir)
try:
shutil.copyfile(obj, png_fname)
except shutil.Error, e:
#files are the same no need to show warning
saved_thumbnails.pop()
except IOError, e2:
saved_thumbnails.pop()
debug.warning('copying thumbnail %s -> %s failed: %s' % \
(obj, png_fname, str(e2)))
else:
raise VistrailsDBException('save_vistrail_bundle_to_zip_xml failed, '
'thumbnail list entry must be a filename')
# Save Mashups
saved_mashups = []
#print " mashups:"
if len(save_bundle.mashups) > 0 and not os.path.exists(mashup_dir):
os.mkdir(mashup_dir)
for obj in save_bundle.mashups:
#print " ", obj
try:
xml_fname = os.path.join(mashup_dir, str(obj.id))
save_mashuptrail_to_xml(obj, xml_fname)
saved_mashups.append(obj)
except Exception, e:
raise VistrailsDBException('save_vistrail_bundle_to_zip_xml failed, '
'when saving mashup: %s'%str(e))
# call package hooks
# it will fail if package manager has not been constructed yet
try:
pm = get_package_manager()
for package in pm.enabled_package_list():
package.saveVistrailFileHook(save_bundle.vistrail, vt_save_dir)
except Exception, e:
debug.warning("Could not call package hooks", str(e))
tmp_zip_dir = tempfile.mkdtemp(prefix='vt_zip')
tmp_zip_file = os.path.join(tmp_zip_dir, "vt.zip")
z = zipfile.ZipFile(tmp_zip_file, 'w')
try:
with Chdir(vt_save_dir):
# zip current directory
for root, dirs, files in os.walk('.'):
for f in files:
z.write(os.path.join(root, f))
z.close()
shutil.copyfile(tmp_zip_file, filename)
finally:
os.unlink(tmp_zip_file)
os.rmdir(tmp_zip_dir)
save_bundle = SaveBundle(save_bundle.bundle_type, save_bundle.vistrail,
save_bundle.log, thumbnails=saved_thumbnails,
abstractions=saved_abstractions,
mashups=saved_mashups)
return (save_bundle, vt_save_dir)
|
8027074d485607a789dd5aa1d01be84910199d69
| 3,644,161
|
import itertools
import re
def parse_cluster_file(filename):
"""
Parse the output of the CD-HIT clustering and return a dictionnary of clusters.
In order to parse the list of cluster and sequences, we have to parse the CD-HIT
output file. Following solution is adapted from a small wrapper script
([source code on Github](https://github.com/Y-Lammers/CD-HIT-Filter/blob/master/CD-HIT-Filter.py),
author: Youri Lammers).
"""
# parse through the .clstr file and create a dictionary
# with the sequences per cluster
# open the cluster file and set the output dictionary
cluster_file, cluster_dic = open(filename), {}
# parse through the cluster file and store the cluster name + sequences in the dictionary
# This is a generator comprehension which groups lines together based of wether the
# line starts with a ">".
cluster_groups = (x[1] for x in itertools.groupby(cluster_file, key=lambda line: line[0] == '>'))
# Now we get alternate groups of cluster name and sequence list.
for cluster in cluster_groups:
# Note: next(cluster) retrieves the first line of the cluster i (>cluster name)
name = next(cluster).strip()
name = re.sub(' ', '_', name[1:])
# Note: next(cluster_groups) retrieves the next cluster i+1 containing the sequences
# the cluster is itself an iterator (every line)
seqs = [seq.split('>')[1].split('...') for seq in next(cluster_groups)]
# Write a boolean value True if sequence is the reference sequence from the cluster
seqs = [[seq[0], (True if seq[1] == ' *\n' else False)] for seq in seqs]
cluster_dic[name] = seqs
# return the cluster dictionary
return cluster_dic
|
d50eaeb926be3a7b8d1139c82142e4a1b595c1a0
| 3,644,162
|
def app(par=None):
"""
Return the Miniweb object instance.
:param par: Dictionary with configuration parameters. (optional parameter)
:return: Miniweb object instance.
"""
return Miniweb.get_instance(par)
|
3d2b0d1a9fd87e9e5c26ea9a141e40fbe342b764
| 3,644,163
|
def openTopics():
"""Opens topics file
:return: list of topics
"""
topicsFile = 'topics'
with open(topicsFile) as f:
topics = f.read().split()
return topics
|
e6d43ff6717122532a71355b71134d6f78f9db85
| 3,644,164
|
from django.forms.boundfield import BoundField
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.inspect import func_supports_parameter, func_accepts_kwargs
def fix_behaviour_widget_render_forced_renderer(utils):
"""
Restore the behaviour where the "renderer" parameter of Widget.render() may not be supported by subclasses.
"""
original_as_widget = BoundField.as_widget
def as_widget(self, widget=None, attrs=None, only_initial=False):
widget = widget or self.field.widget
if not (
func_supports_parameter(widget.render, "renderer")
or func_accepts_kwargs(widget.render)
):
original_widget_render = widget.render
utils.emit_warning(
"Add the `renderer` argument to the render() method of %s. "
"It will be mandatory in Django 2.1." % widget.__class__,
RemovedInDjango21Warning,
stacklevel=2,
)
def instance_render(name, value, attrs=None, renderer=None):
del renderer # restore non-mandatory support for this parameter
return original_widget_render(name=name, value=value, attrs=attrs)
utils.inject_callable(
widget, "render", instance_render
) # beware, function stored in INSTANCE
return original_as_widget(
self, widget=widget, attrs=attrs, only_initial=only_initial
)
utils.inject_callable(BoundField, "as_widget", as_widget)
|
7d55ecc18fae91af221b806448fa30203fdd9cd4
| 3,644,165
|
from math import gcd
from typing import List
import numpy as np
def split_blocks(blocks:List[Block], ncells_per_block:int,direction:Direction=None):
"""Split blocks is used to divide an array of blocks based on number of cells per block. This code maintains the greatest common denominator of the parent block. Number of cells per block is simply an estimate of how many you want. The actual number will change to meet the greatest common denominator (GCD). GCD of 4 means multigrid of 3 e.g. grid/4 (coarse), 2 (fine), and 1 (finest). If a direction is not specified then for each block the longest index either i,j, or k is used.
Wisdom from Dave Rigby:
For example, for radial equilibrium we must integrate across the span. Some codes (GlennHT used to) would want a single block across the entire span. In that case you would want some additional control.
Another example might be if you would like a block to include the entire boundary layer. In that case you might introduce an aspect ratio control.
Args:
blocks (List[Block]): List of blocks
ncells_per_block (int): number of cells desired per block
direction (Direction): direction to split the blocks in. Direction.(i,j,k). Defaults to None. None means it will pick the direction for you based on which is greater IMAX, JMAX, or KMAX
Returns:
Blocks (List[Block]): list of blocks split in the specified direction
"""
direction_to_use = direction # store the user input variable
new_blocks = list()
for block_indx in range(len(blocks)):
block = blocks[block_indx]
total_cells = block.IMAX*block.JMAX*block.KMAX
if direction==None:
            # pick the direction with the largest extent, as described in the docstring
            indx = np.argmax(np.array([block.IMAX,block.JMAX,block.KMAX]))
if indx == 0:
direction_to_use=Direction.i
elif indx == 1:
direction_to_use=Direction.j
elif indx == 2:
direction_to_use=Direction.k
if total_cells>ncells_per_block:
# Use greatest common divsor to maintain multi-grid so say the entire block is divisible by 4 then we want to maintain than for all the splits!
greatest_common_divisor =gcd(block.IMAX-1, gcd(block.JMAX-1, block.KMAX-1)) # Gets the maximum number of partitions that we can make for this given block
if direction_to_use == Direction.i:
# In order to get close to the number of cells per block, we need to control how many steps of the greatest_common_divisor to advance so for example if you have a multigrid mesh that has gcd of 16 (fine) => 8 (coarse) => 4 (coarser) => 2 (coarsest) and you want 400K cells per block then JMAX*KMAX*gcd*some_factor has to be close to 400K cells
denominator = block.JMAX*block.KMAX
step_size = __step_search(total_cells,greatest_common_divisor,ncells_per_block,denominator,direction='backward')
if step_size==-1:
step_size = __step_search(total_cells,greatest_common_divisor,ncells_per_block,denominator,direction='forward')
if step_size==-1:
                        raise ValueError('no valid step size found, do you have multi-block? gcd > 1')
# step_size-1 is the IMAX of the sub_blocks e.g. 0 to 92 this shows IMAX=93, (93-1) % 4 = 0 (good)
iprev = 0
for i in range(step_size,block.IMAX,step_size):
if (i+1) > block.IMAX:
break
X = block.X[iprev:i+1,:,:] # New X, Y, Z splits
Y = block.Y[iprev:i+1,:,:] # This indexes to iprev:i so if iprev=2 and i = 10 it will go from 2 to 9
Z = block.Z[iprev:i+1,:,:]
iprev=i # Blocks have to share the same face, Pick the previous face
new_blocks.append(Block(X,Y,Z))
# Check for remainder
if i+1 < block.IMAX:
# Add remainder to last block
X = block.X[i:,:,:] # New X, Y, Z splits
Y = block.Y[i:,:,:]
Z = block.Z[i:,:,:]
new_blocks.append(Block(X,Y,Z))
elif direction_to_use == Direction.j:
denominator = block.IMAX*block.KMAX
step_size = __step_search(total_cells,greatest_common_divisor,ncells_per_block,denominator,direction='backward')
if step_size==-1:
step_size = __step_search(total_cells,greatest_common_divisor,ncells_per_block,denominator,direction='forward')
if step_size==-1:
                        raise ValueError('no valid step size found, do you have multi-block? gcd > 1')
jprev = 0
for j in range(step_size,block.JMAX,step_size):
                    if (j+1) > block.JMAX:
break
                    X = block.X[:,jprev:j+1,:] # New X, Y, Z splits
                    Y = block.Y[:,jprev:j+1,:]
                    Z = block.Z[:,jprev:j+1,:]
jprev=j
new_blocks.append(Block(X,Y,Z))
# Check for remainder
if j+1 < block.JMAX:
# Add remainder to last block
X = block.X[:,j:,:] # New X, Y, Z splits
Y = block.Y[:,j:,:]
Z = block.Z[:,j:,:]
new_blocks.append(Block(X,Y,Z))
else:
denominator = block.IMAX*block.JMAX
step_size = __step_search(total_cells,greatest_common_divisor,ncells_per_block,denominator,direction='backward')
if step_size==-1:
step_size = __step_search(total_cells,greatest_common_divisor,ncells_per_block,denominator,direction='forward')
if step_size==-1:
                        raise ValueError('no valid step size found, do you have multi-block? gcd > 1')
kprev = 0
for k in range(step_size,block.KMAX,step_size):
if (k+1) > block.KMAX:
break
X = block.X[:,:,kprev:k+1] # New X, Y, Z splits
Y = block.Y[:,:,kprev:k+1]
Z = block.Z[:,:,kprev:k+1]
kprev=k
new_blocks.append(Block(X,Y,Z))
# Check for remainder
if k+1 < block.KMAX:
# Add remainder to last block
X = block.X[:,:,k:] # New X, Y, Z splits
Y = block.Y[:,:,k:]
Z = block.Z[:,:,k:]
new_blocks.append(Block(X,Y,Z)) # replace it
return new_blocks
|
e7ebf6189b3f140b006d74846c4979058023784a
| 3,644,166
|
def get_entry_details(db_path, entry_id):
"""Get all information about an entry in database.
Args:
db_path: path to database file
entry_id: string
Return:
out: dictionary
"""
s = connect_database(db_path)
# find entry
try:
sim = s.query(Main).filter(Main.entry_id == entry_id).one()
except NoResultFound:
print("No entry found with entry_id {} in {}.".format(entry_id, db_path))
return {}
# details from main table
out = sim.__dict__
# groups
out["groups"] = [g.name for g in sim.groups]
# tags
out["tags"] = [t.name for t in sim.keywords if t.value == None]
# keywords
out["keywords"] = {k.name: k.value for k in sim.keywords if k.value != None}
# meta data
meta = {}
for meta_group in sim.meta.all():
meta[meta_group.name] = {m.name: m.value for m in meta_group.entries}
out["meta"] = meta
s.close()
# clean up output
try:
del out["_sa_instance_state"]
    except KeyError:
pass
return out
|
7a4023fa32a0e41cf3440bcd8fd2140ce88b8c33
| 3,644,167
|
import bisect
import numpy as np
import quaternion  # the numpy-quaternion package
def pose_interp(poses, timestamps_in, timestamps_out, r_interp='slerp'):
"""
:param poses: N x 7, (t,q)
:param timestamps: (N,)
:param t: (K,)
:return: (K,)
"""
# assert t_interp in ['linear', 'spline']
assert r_interp in ['slerp', 'squad']
assert len(poses)>1
assert len(poses) == len(timestamps_in)
    timestamps_in = np.asarray(timestamps_in)
    # sort the inputs by time before slicing out translations and rotations
    inds = np.argsort(timestamps_in)
    poses = poses[inds]
    timestamps_in = timestamps_in[inds]
    input_ts = poses[:, :3]
    input_rs = poses[:, 3:]  # quaternions
    input_rs_ = quaternion.from_float_array(input_rs)
    if r_interp == 'squad':
        output_rs = quaternion.squad(input_rs_, timestamps_in, np.asarray(timestamps_out))
        output_rs = quaternion.as_float_array(output_rs)  # (K, 4)
    elif r_interp == 'slerp':
        output_rs = []
        for t in timestamps_out:
            idx = bisect.bisect_left(timestamps_in, t)
            if idx >= len(timestamps_in) - 1:
                idx = len(timestamps_in) - 2
            output_r = quaternion.slerp(input_rs_[idx], input_rs_[idx + 1],
                                        timestamps_in[idx], timestamps_in[idx + 1], t)
            output_rs.append(quaternion.as_float_array(output_r))
        output_rs = np.stack(output_rs, axis=0)  # (K, 4)
    output_ts = []
    for t in timestamps_out:
        idx = bisect.bisect_left(timestamps_in, t)
        if idx >= len(timestamps_in) - 1:
            idx = len(timestamps_in) - 2
        t1 = timestamps_in[idx]
        t2 = timestamps_in[idx + 1]
        output_t = ((t - t1) * input_ts[idx + 1] + (t2 - t) * input_ts[idx]) / (t2 - t1)
        output_ts.append(output_t)
    output_ts = np.stack(output_ts, axis=0)  # (K, 3)
    new_pose = np.concatenate([output_ts, output_rs], axis=1)
    return new_pose
|
cc8e49b6bab918c6887e37973d09469fcddc298d
| 3,644,168
|
from datetime import datetime
def checklist_saved_action(report_id):
"""
View saved report
"""
report = Report.query.filter_by(id=report_id).first()
return render_template(
'checklist_saved.html',
uid=str(report.id),
save_date=datetime.now(),
report=report,
title='Отчет | %s' % TITLE
)
|
302bc174ffe0ed7d3180b2a59c5212b3a38e7eaf
| 3,644,169
|
import tensorflow as tf  # TF1-style API (tf.get_variable)
def trilinear_memory_efficient(a, b, d, use_activation=False):
"""W1a + W2b + aW3b."""
n = tf.shape(a)[0]
len_a = tf.shape(a)[1]
len_b = tf.shape(b)[1]
w1 = tf.get_variable('w1', shape=[d, 1], dtype=tf.float32)
w2 = tf.get_variable('w2', shape=[d, 1], dtype=tf.float32)
w3 = tf.get_variable('w3', shape=[1, 1, d], dtype=tf.float32)
a_reshape = tf.reshape(a, [-1, d]) # [bs*len_a, d]
b_reshape = tf.reshape(b, [-1, d]) # [bs*len_b, d]
part_1 = tf.reshape(tf.matmul(a_reshape, w1), [n, len_a]) # [bs, len_a]
part_1 = tf.tile(tf.expand_dims(part_1, 2),
[1, 1, len_b]) # [bs, len_a, len_b]
part_2 = tf.reshape(tf.matmul(b_reshape, w2), [n, len_b]) # [bs, len_b]
part_2 = tf.tile(tf.expand_dims(part_2, 1),
[1, len_a, 1]) # [bs, len_a, len_b]
a_w3 = a * w3 # [bs, len_a, d]
part_3 = tf.matmul(a_w3, tf.transpose(b, perm=[0, 2, 1])) # [bs,len_a,len_b]
## return the unnormalized logits matrix : [bs,len_a,len_b]
if use_activation:
return tf.nn.relu(part_1 + part_2 + part_3)
return part_1 + part_2 + part_3
|
d6ed8cc216019987674b86ef36377a6af45a6702
| 3,644,170
|
def private_questions_get_unique_code(assignment_id: str):
"""
Get all questions for the given assignment.
:param assignment_id:
:return:
"""
# Try to find assignment
assignment: Assignment = Assignment.query.filter(
Assignment.id == assignment_id
).first()
# Verify that the assignment exists
req_assert(assignment is not None, message='assignment does not exist')
# Assert that the assignment is within the course context
assert_course_context(assignment)
assigned_question_count = AssignedStudentQuestion.query.filter(
AssignedStudentQuestion.assignment_id == assignment.id
).count()
return success_response({
'assignment_name': assignment.name,
'questions': get_all_questions(assignment),
'questions_assigned': assigned_question_count > 0,
'assigned_question_count': assigned_question_count,
})
|
1c94404168ac659e9ee3c45b3ecf7c2c398d1cca
| 3,644,171
|
from gensim import models
def make_ngram(tokenised_corpus, n_gram=2, threshold=10):
    """Extract n-grams (up to length n_gram) from a tokenised corpus
Args:
tokenised_corpus (list): List of tokenised corpus
n_gram (int): maximum length of n-grams. Defaults to 2 (bigrams)
threshold (int): min number of n-gram occurrences before inclusion
Returns:
ngrammed_corpus (list)
"""
tokenised = tokenised_corpus.copy()
t = 1
# Loops while the ngram length less / equal than our target
while t < n_gram:
phrases = models.Phrases(tokenised, threshold=threshold)
bigram = models.phrases.Phraser(phrases)
tokenised = bigram[tokenised]
t += 1
return list(tokenised)
|
8897456e9da4cd3c0f1c3f055b43e7d27c7261d8
| 3,644,172
|
import numpy as np
def bw_estimate(samples):
"""Computes Abraham's bandwidth heuristic."""
sigma = np.std(samples)
cand = ((4 * sigma**5.0) / (3.0 * len(samples)))**(1.0 / 5.0)
if cand < 1e-7:
return 1.0
return cand
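# Quick numeric check: for ~500 standard-normal samples the
# (4*sigma^5 / 3n)^(1/5) rule of thumb gives roughly 0.3.
example_samples = np.random.default_rng(0).normal(size=500)
print(bw_estimate(example_samples))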
|
44629f9e774d07f7c55a5a77dcb7b06ae38a964b
| 3,644,173
|
def process_coins():
"""calculate the amount of money paid based on the coins entered"""
number_of_quarters = int(input("How many quarters? "))
number_of_dimes = int(input("How many dimes? "))
number_of_nickels = int(input("How many nickels? "))
number_of_pennies = int(input("How many pennies? "))
quarters = number_of_quarters * 0.25
dimes = number_of_dimes * 0.10
nickels = number_of_nickels * 0.05
pennies = number_of_pennies * 0.01
total_inserted = quarters + dimes + nickels + pennies
return total_inserted
|
6a26ad161720554079a76f6bdadbbf9555d6b82d
| 3,644,174
|
def getLastSegyTraceHeader(SH,THN='cdp',data='none', bheadSize = 3600, endian='>'): # added by A Squelch
"""
getLastSegyTraceHeader(SH,TraceHeaderName)
"""
bps=getBytePerSample(SH)
if (data=='none'):
data = open(SH["filename"]).read()
# SET PARAMETERS THAT DEFINE THE LOCATION OF THE LAST HEADER
# AND THE TRACE NUMBER KEY FIELD
THpos=STH_def[THN]["pos"]
THformat=STH_def[THN]["type"]
ntraces=SH["ntraces"]
pos=THpos+bheadSize+(SH["ns"]*bps+240)*(ntraces-1);
txt="getLastSegyTraceHeader : Reading last trace header " + THN + " " + str(pos)
printverbose(txt,20);
thv,index = getValue(data,pos,THformat,endian,1)
txt="getLastSegyTraceHeader : " + THN + "=" + str(thv)
printverbose(txt,30);
return thv
|
19de6339bcc3ec63b0e33007f51fa50ddb619449
| 3,644,175
|
def get_data_url(data_type):
"""Gets the latest url from the kff's github data repo for the given data type
data_type: string value representing which url to get from the github api; must be either 'pct_total' or 'pct_share'
"""
data_types_to_strings = {
'pct_total': 'Percent of Total Population that has Received a COVID-19 Vaccine by RaceEthnicity',
'pct_share': 'COVID19 Vaccinations by RE',
'pct_population': 'Distribution of Vaccinations, Cases, Deaths',
}
df = gcs_to_bq_util.load_json_as_df_from_web_based_on_key(BASE_GITHUB_API_URL, "tree")
df = df.loc[df['path'].str.contains(data_types_to_strings[data_type])]
urls = df.loc[df['path'] == df['path'].max()].url
if len(urls) != 1:
raise ValueError("Found %d urls, should have only found 1" % len(urls))
return urls.values[0]
|
f92520243ee7f952ff69c7c62c315225982a24fe
| 3,644,176
|
import numpy as np
def kl(p, q):
"""Kullback-Leibler divergence D(P || Q) for discrete distributions
Parameters
----------
p, q : array-like, dtype=float, shape=n
Discrete probability distributions.
"""
    p = np.asarray(p, dtype=float)
    q = np.asarray(q, dtype=float)
return np.sum(np.where(p != 0, p * np.log(p / q), 0))
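# Quick numeric check: D(P || P) is 0, and diverging distributions give a
# positive value (about 0.51 nats for the pair below).
print(kl([0.5, 0.5], [0.5, 0.5]))  # 0.0
print(kl([0.5, 0.5], [0.9, 0.1]))  # ~0.51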
|
06b6283ea83a729f9c374dabbe1c1a94a8ed8480
| 3,644,177
|
import torch
from torchvision import transforms
def get_loaders(opt):
""" Make dataloaders for train and validation sets
"""
# train loader
opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
# opt.std = get_std()
if opt.no_mean_norm and not opt.std_norm:
norm_method = transforms.Normalize([0, 0, 0], [1, 1, 1])
elif not opt.std_norm:
norm_method = transforms.Normalize(opt.mean, [1, 1, 1])
else:
norm_method = transforms.Normalize(opt.mean, opt.std)
spatial_transform = transforms.Compose([
# crop_method,
transforms.Scale((opt.sample_size, opt.sample_size)),
#grayscale
# transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
norm_method
])
temporal_transform = None #TemporalRandomCrop(16)
target_transform = ClassLabel()
training_data = get_training_set(opt, spatial_transform,
temporal_transform, target_transform)
train_loader = torch.utils.data.DataLoader(
training_data,
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.num_workers,
pin_memory=True)
# validation loader
validation_data = get_validation_set(opt, spatial_transform,
temporal_transform, target_transform)
val_loader = torch.utils.data.DataLoader(
validation_data,
batch_size=opt.batch_size,
shuffle=False,
num_workers=opt.num_workers,
pin_memory=True)
return train_loader, val_loader
|
d7a166a477c535a60846e05598dd19bbe84062be
| 3,644,178
|
def trapezoidal(f, a, b, n):
"""Trapezoidal integration via iteration."""
h = (b-a)/float(n)
I = f(a) + f(b)
    for k in range(1, n, 1):
x = a + k*h
I += 2*f(x)
I *= h/2
return I
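# Quick numeric check against a known integral: the integral of x^2 over [0, 1]
# is 1/3, and 1000 panels get very close to it.
print(trapezoidal(lambda x: x * x, 0.0, 1.0, 1000))  # ~0.33333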
|
f2887a3b0d1732f322dca52d0d869c1063e08c22
| 3,644,179
|
def writetree(tree, sent, key, fmt, comment=None, morphology=None,
sentid=False):
"""Convert a tree to a string representation in the given treebank format.
:param tree: should have indices as terminals
:param sent: contains the words corresponding to the indices in ``tree``
:param key: an identifier for this tree; part of the output with some
formats or when ``sentid`` is True.
:param fmt: Formats are ``bracket``, ``discbracket``, Negra's ``export``
format, and ``alpino`` XML format, as well unlabeled dependency
conversion into ``mst`` or ``conll`` format (requires head rules).
The formats ``tokens`` and ``wordpos`` are to strip away tree structure
and leave only lines with space-separated tokens or ``token/POS``.
When using ``bracket``, make sure tree is canonicalized.
:param comment: optionally, a string that will go in the format's comment
field (supported by ``export`` and ``alpino``), or at the end of the
line preceded by a tab (``discbracket``); ignored by other formats.
Should be a single line.
:param sentid: for line-based formats, prefix output by ``key|``.
Lemmas, functions, and morphology information will be empty unless nodes
contain a 'source' attribute with such information."""
if fmt == 'bracket':
result = writebrackettree(tree, sent)
# if comment:
# result = '# %s\n%s\n' % (comment, result.rstrip('\n'))
elif fmt == 'discbracket':
result = writediscbrackettree(tree, sent)
if comment:
result = '%s\t%s\n' % (result.rstrip('\n'), comment)
elif fmt == 'tokens':
result = '%s\n' % ' '.join(sent)
elif fmt == 'wordpos':
result = '%s\n' % ' '.join('%s/%s' % (word, pos) for word, (_, pos)
in zip(sent, sorted(tree.pos())))
elif fmt == 'export':
result = writeexporttree(tree, sent, key, comment, morphology)
elif fmt == 'alpino':
result = writealpinotree(tree, sent, key, comment)
elif fmt in ('conll', 'mst'):
result = writedependencies(tree, sent, fmt)
else:
raise ValueError('unrecognized format: %r' % fmt)
if sentid and fmt in ('tokens', 'wordpos', 'bracket', 'discbracket'):
return '%s|%s' % (key, result)
return result
|
cf8181596a4882ae18a8adcd0411e1c4e2ee8a33
| 3,644,180
|
import struct
def xor_string(hash1, hash2, hash_size):
"""Encrypt/Decrypt function used for password encryption in
authentication, using a simple XOR.
Args:
        hash1 (bytes): The first hash.
        hash2 (bytes): The second hash.
        hash_size (int): Length of the hashes in bytes.
    Returns:
        bytes: The packed result of XOR-ing the two hashes byte by byte.
"""
xored = [h1 ^ h2 for (h1, h2) in zip(hash1, hash2)]
return struct.pack("{0}B".format(hash_size), *xored)
|
4efc263a0ff9fb05b0ee7cb7b7b3fdd4c8c0c2ec
| 3,644,181
|
from hashlib import md5
def create_secret_key(string):
"""
:param string: A string that will be returned as a md5 hash/hexdigest.
:return: the hexdigest (hash) of the string.
"""
h = md5()
h.update(string.encode('utf-8'))
return h.hexdigest()
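# Minimal usage sketch: the same input always yields the same 32-character
# hex digest, so it can seed a reproducible secret.
print(create_secret_key("my-app-seed"))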
|
eb31e149684074b18fdbc1989ecfc14f21756dea
| 3,644,182
|
import base64
def decode_password(base64_string: str) -> str:
"""
Decode a base64 encoded string.
Args:
base64_string: str
The base64 encoded string.
Returns:
str
The decoded string.
"""
base64_bytes = base64_string.encode("ascii")
sample_string_bytes = base64.b64decode(base64_bytes)
return sample_string_bytes.decode("ascii")
|
0f04617c239fbc740a9b4c9c2d1ae867a52e0c74
| 3,644,183
|
import overpass
def _generate_overpass_api(endpoint=None):
""" Create and initialise the Overpass API object.
Passing the endpoint argument will override the default
endpoint URL.
"""
# Create API object with default settings
api = overpass.API()
# Change endpoint if desired
if endpoint is not None:
api.endpoint = endpoint
return api
|
9b8016035e87428286f68622e9a6129bcf818c4a
| 3,644,184
|
def to_pascal_case(value):
"""
Converts the value string to PascalCase.
:param value: The value that needs to be converted.
:type value: str
:return: The value in PascalCase.
:rtype: str
"""
return "".join(character for character in value.title() if not character.isspace())
|
138ab9ddf7ca814b50bf8ff0618de03b236535c7
| 3,644,185
|
from typing import Iterable
from typing import Any
from typing import List
def drop(n: int, it: Iterable[Any]) -> List[Any]:
"""
Return a list of N elements drop from the iterable object
Args:
n: Number to drop from the top
it: Iterable object
Examples:
>>> fpsm.drop(3, [1, 2, 3, 4, 5])
[4, 5]
"""
return list(it)[n:]
|
0732bd560f0da0a43f65ee3b5ed46fd3a05e26f5
| 3,644,186
|
import numpy as np
def generate_classification_style_dataset(classification='multiclass'):
"""
Dummy data to test models
"""
x_data = np.array([
[1,1,1,0,0,0],
[1,0,1,0,0,0],
[1,1,1,0,0,0],
[0,0,1,1,1,0],
[0,0,1,1,0,0],
[0,0,1,1,1,0]])
if classification=='multiclass':
y_data = np.array([
[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0]])
elif classification=='binary':
y_data = np.array([
[1],
[1],
[1],
[-1],
[-1],
[-1]])
else:
raise Exception("Only binary or multiclass classification supported")
print("Returning classification style dataset")
return x_data, y_data
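# Minimal usage sketch: the binary variant returns +/-1 labels of shape (6, 1),
# the multiclass variant one-hot labels of shape (6, 3).
X_bin, y_bin = generate_classification_style_dataset('binary')
print(X_bin.shape, y_bin.shape)  # (6, 6) (6, 1)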
|
77a65bb3445216a9a21aa30a7c7201983328efce
| 3,644,187
|
import datetime as dt
from flask import jsonify
def c2_get_platform_current_status_display(reference_designator):
"""
Get C2 platform Current Status tab contents, return current_status_display.
Was: #status = _c2_get_instrument_driver_status(instrument['reference_designator'])
"""
start = dt.datetime.now()
timing = False
contents = []
platform_info = {}
platform_deployment = _get_platform(reference_designator)
if platform_deployment:
platform_code = "-".join([platform_deployment['mooring_code'], platform_deployment['platform_code'] ])
# Get instruments for this platform
instruments, oinstruments = _get_instruments(platform_code)
for instrument in instruments:
istart = dt.datetime.now()
row = {}
if not instrument['display_name']:
row['display_name'] = instrument['reference_designator']
else:
row['display_name'] = instrument['display_name']
row['reference_designator'] = instrument['reference_designator']
# Get instrument operational status based on instrument driver and agent status
status = _get_instrument_operational_status(instrument['reference_designator'])
row['operational_status'] = status
platform_info[instrument['reference_designator']] = row
if timing:
iend = dt.datetime.now()
iexecution_time = str(iend-istart)
message = '\t debug --- Execution time: %s ' % iexecution_time
print '\n', message
# Create list of dictionaries representing row(s) for 'data' (ordered by reference_designator)
# 'data' == rows for initial grid ('Current Status')
for instrument_reference_designator in oinstruments:
if instrument_reference_designator in platform_info:
contents.append(platform_info[instrument_reference_designator])
if timing:
end = dt.datetime.now()
execution_time = str(end-start)
message = '\t debug --- Total Execution time: %s ' % execution_time
print '\n', message
return jsonify(current_status_display=contents)
|
f10f5d242a5fd9b4d8aea33166025a73c21486c6
| 3,644,188
|
def getSupportedDatatypes():
"""
Gets the datatypes that are supported by the framework
Returns:
a list of strings of supported datatypes
"""
return router.getSupportedDatatypes()
|
635612975c271bdbe22b622787a2d7f823277baa
| 3,644,189
|
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor, StackingRegressor
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import (check_cv, cross_val_predict, cross_val_score,
                                     learning_curve)
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
def run_stacking(named_data, subjects_data, cv=10, alphas=None,
train_sizes=None, n_jobs=None):
"""Run stacking.
Parameters
----------
named_data : list(tuple(str, pandas.DataFrame))
List of tuples (name, data) with name and corresponding features
to be used for predictions by linear models.
subjects_data : pandas.DataFrame
Information about subjects from CamCAN dataset.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
alphas : numpy.ndarray
        Values for parameter alpha to be tested. Default is
        np.logspace(-3, 5, 100).
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
Note that for classification the number of samples usually have to
be big enough to contain at least one sample from each class.
(default: np.linspace(0.1, 1.0, 5))
n_jobs : int or None, optional (default=None)
The number of CPUs to use to do the computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
"""
if alphas is None:
alphas = np.logspace(-3, 5, 100)
if train_sizes is None:
train_sizes = np.linspace(.1, 1.0, 5)
rnd_state = 42
names = []
combined_data = []
# extract data and estimator names
for name, data in named_data:
names.append(name)
combined_data.append(data)
data = pd.concat(combined_data, axis=1, join='inner')
feature_col_lens = tuple(d.shape[1] for d in combined_data)
estimators = []
subjects = data.index.values
# prepare first-level estimators for stacking
for i_data, _ in enumerate(named_data):
feature_transformers = []
ft_begin = 0
ft_end = 0
# prepare input information for ColumnTransformer
for i_ct, (name, col_len) in enumerate(zip(names, feature_col_lens)):
trans_name = ('pass_' if i_data == i_ct else 'drop_') + name
transformer = 'passthrough' if i_data == i_ct else 'drop'
ft_end = ft_end + col_len
trans_slice = slice(ft_begin, ft_end)
ft_begin = ft_begin + col_len
feature_transformers.append((trans_name, transformer, trans_slice))
est_name = 'reg_' + named_data[i_data][0]
est_pipeline = make_pipeline(
ColumnTransformer(feature_transformers),
StandardScaler(), RidgeCV(alphas))
estimators.append((est_name, est_pipeline))
final_estimator = RandomForestRegressor(n_estimators=100,
random_state=rnd_state,
oob_score=True)
reg = StackingRegressor(estimators=estimators,
final_estimator=final_estimator, cv=cv,
random_state=rnd_state, n_jobs=n_jobs)
y = subjects_data.loc[subjects].age.values
X = data.values
cv = check_cv(cv)
mae = cross_val_score(reg, X, y, scoring='neg_mean_absolute_error',
cv=cv, n_jobs=n_jobs)
r2 = cross_val_score(reg, X, y, scoring='r2', cv=cv, n_jobs=n_jobs)
y_pred = cross_val_predict(reg, X, y, cv=cv, n_jobs=n_jobs)
train_sizes, train_scores, test_scores = \
learning_curve(reg, X, y, cv=cv, train_sizes=train_sizes,
scoring='neg_mean_absolute_error', n_jobs=n_jobs)
fold = _get_fold_indices(cv, X, y)
df_pred = pd.DataFrame(dict(y=y_pred, fold=fold), index=subjects,
dtype=float)
return df_pred, mae, r2, train_sizes, train_scores, test_scores
|
75b97509097652fdccc444cfd3731ce68b49e992
| 3,644,190
|
import cv2
import numpy as np
def add_random_shadow(img, w_low=0.6, w_high=0.85):
"""
Overlays supplied image with a random shadow poligon
The weight range (i.e. darkness) of the shadow can be configured via the interval [w_low, w_high)
"""
cols, rows = (img.shape[0], img.shape[1])
top_y = np.random.random_sample() * rows
bottom_y = np.random.random_sample() * rows
bottom_y_right = bottom_y + np.random.random_sample() * (rows - bottom_y)
top_y_right = top_y + np.random.random_sample() * (rows - top_y)
if np.random.random_sample() <= 0.5:
bottom_y_right = bottom_y - np.random.random_sample() * (bottom_y)
top_y_right = top_y - np.random.random_sample() * (top_y)
poly = np.asarray([[[top_y, 0], [bottom_y, cols], [bottom_y_right, cols], [top_y_right, 0]]], dtype=np.int32)
mask_weight = np.random.uniform(w_low, w_high)
origin_weight = 1 - mask_weight
mask = np.copy(img).astype(np.int32)
cv2.fillPoly(mask, poly, (0, 0, 0))
# masked_image = cv2.bitwise_and(img, mask)
return cv2.addWeighted(img.astype(np.int32), origin_weight, mask, mask_weight, 0).astype(np.uint8)
|
3b520312941ffc4b125ce0a777aeb76fecd6b263
| 3,644,191
|
def csv_args(value):
"""Parse a CSV string into a Python list of strings.
Used in command line parsing."""
return map(str, value.split(","))
|
b2596180054f835bfe70e3f900caa5b56a7856a6
| 3,644,192
|
def get_tokens():
"""
Returns a tuple of tokens in the format {{site/property}} that will be used to build the dictionary passed into execute
"""
return (HAWQMASTER_PORT, HAWQSTANDBY_ADDRESS)
|
4664feb568a3a5599b9da64594d09a034e9aaebb
| 3,644,193
|
import numpy as np
def projl1_epigraph(center):
    r"""
Project center=proxq.true_center onto the l1 epigraph. The bound term is
center[0], the coef term is center[1:]
The l1 epigraph is the collection of points $(u,v): \|v\|_1 \leq u$
np.fabs(coef).sum() <= bound.
"""
norm = center[0]
coef = center[1:]
sorted_coefs = np.sort(np.fabs(coef))
n = sorted_coefs.shape[0]
csum = sorted_coefs.sum()
for i, c in enumerate(sorted_coefs):
csum -= c
if csum - (n - i - 1) * c <= norm + c:
# this will terminate as long as norm >= 0
# when it terminates, we know that the solution is between
# sorted_coefs[i-1] and sorted_coefs[i]
# we set the cumulative sum back to the value at i-1
csum += c
idx = i-1
break
if i == n-1: # if it hasn't terminated early, then even soft-thresholding at the largest value was insufficent, answer is 0
return np.zeros_like(center)
# the solution is such that csum - (n-idx-1)*x = norm+x
thold = (csum - norm) / (n-idx)
result = np.zeros_like(center)
result[0] = norm + thold
result[1:] = st(coef, thold)
return result
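# Quick sanity sketch (st is assumed to be a soft-thresholding helper): the
# projected point satisfies the epigraph constraint ||coef||_1 <= bound.
example_center = np.array([0.5, 1.0, -2.0, 0.3])
example_proj = projl1_epigraph(example_center)
print(np.fabs(example_proj[1:]).sum() <= example_proj[0] + 1e-12)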
|
d7b8c70f45853eef61322fdb9583c8279780982f
| 3,644,194
|
import requests
from datetime import datetime
from urllib.parse import quote_plus
def crypto_command(text):
""" <ticker> -- Returns current value of a cryptocurrency """
try:
encoded = quote_plus(text)
request = requests.get(API_URL.format(encoded))
request.raise_for_status()
except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
return "Could not get value: {}".format(e)
data = request.json()
if "error" in data:
return "{}.".format(data['error'])
updated_time = datetime.fromtimestamp(data['timestamp'])
if (datetime.today() - updated_time).days > 2:
# the API retains data for old ticker names that are no longer updated
# in these cases we just return a "not found" message
return "Currency not found."
change = float(data['change'])
if change > 0:
change_str = "\x033{}%\x0f".format(change)
elif change < 0:
change_str = "\x035{}%\x0f".format(change)
else:
change_str = "{}%".format(change)
return "{} // \x0307${:,.2f}\x0f USD - {:,.7f} BTC // {} change".format(data['symbol'].upper(),
float(data['price']['usd']),
float(data['price']['btc']),
change_str)
|
0b0757a8b657791204d74b8536be3b6cb5af2ff5
| 3,644,195
|
import torch
import torch.nn.functional as F
def byol_loss_multi_views_func(p: torch.Tensor, z: torch.Tensor, p1: torch.Tensor, z1: torch.Tensor, simplified: bool = True) -> torch.Tensor:
"""Computes BYOL's loss given batch of predicted features p and projected momentum features z.
Args:
p, p1 (torch.Tensor): NxD Tensor containing predicted features from view 1
z, z1 (torch.Tensor): NxD Tensor containing projected momentum features from view 2
simplified (bool): faster computation, but with same result. Defaults to True.
Returns:
torch.Tensor: BYOL's loss.
"""
if simplified:
loss = F.cosine_similarity(p, z.detach(), dim=-1).mean() + F.cosine_similarity(p1, z1.detach(), dim=-1).mean()
return 2 - 2 * loss
p = F.normalize(p, dim=-1)
z = F.normalize(z, dim=-1)
p1 = F.normalize(p1, dim=-1)
z1 = F.normalize(z1, dim=-1)
return 2 - 2 * ((p * z.detach()).sum(dim=1).mean() +(p1 * z1.detach()).sum(dim=1).mean())
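# Hedged usage sketch with random features: both code paths give the same
# value up to floating-point precision.
example_p, example_z = torch.randn(8, 128), torch.randn(8, 128)
example_p1, example_z1 = torch.randn(8, 128), torch.randn(8, 128)
loss_fast = byol_loss_multi_views_func(example_p, example_z, example_p1, example_z1)
loss_full = byol_loss_multi_views_func(example_p, example_z, example_p1, example_z1, simplified=False)
print(loss_fast.item(), loss_full.item())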
|
705cbe9e62fa1e58da0a1f4087e6090d7b8002b8
| 3,644,196
|
import keras
import numpy as np
import pandas as pd
from keras.models import load_model
from sklearn.metrics import auc, roc_curve
def a_test_model(n_classes=2):
"""
recover model and test data from disk, and test the model
"""
images_test, labels_test, data_num_test = load_test_data_full()
model = load_model(BASE_PATH + 'models/Inception_hemorrhage_model.hdf5')
adam_optimizer = keras.optimizers.Adam(
lr=0.0001,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.0,
amsgrad=False)
model.compile(optimizer=adam_optimizer, loss='binary_crossentropy', metrics=['accuracy'])
# score the test data
test_data_generator = generate_testing_from_hdf5(TEST_INDICES, batch_size=BATCH_SIZE)
scores = model.evaluate_generator(test_data_generator, steps=N_STEPS_PER_EPOCH_TEST)
# refresh the data generator and generate predictions
    test_data_generator = generate_testing_from_hdf5(TEST_INDICES, batch_size=BATCH_SIZE)
predictions = model.predict_generator(test_data_generator, steps=N_STEPS_PER_EPOCH_TEST)
classes = np.argmax(predictions, axis=1)
pred_ground_truth = np.column_stack((predictions, classes, labels_test))
pred_ground_truth = pd.DataFrame(
pred_ground_truth,
columns=[
'Proba Neg',
'Proba Pos',
'Class Proba',
'Neg Label',
'Pos Label'])
# Compute ROC curve and ROC area for each class
fpr, tpr, thresholds = roc_curve(
pred_ground_truth['Class Proba'],
pred_ground_truth['Pos Label'])
roc_auc = auc(fpr, tpr)
accuracy, precision, recall, f1_score, cm = vol_inception_utils.calc_metrics(
pred_ground_truth['Pos Label'],
pred_ground_truth['Class Proba'])
np.savetxt(BASE_PATH + 'results/confusion_matrix.csv', (cm), delimiter=',')
return pred_ground_truth, accuracy, precision, recall, f1_score, cm, fpr, tpr, thresholds, roc_auc
|
d060f79a149d7659d74ffac316f71d7ef7b63368
| 3,644,197
|
import numpy as np
def generate_synchronous_trajectory(initial_state):
"""
Simulate the network starting from a given initial state in the synchronous strategy
:param initial_state: initial state of the network
:return: a trajectory in matrix from, where each row denotes a state
"""
trajectory = [initial_state]
state_index_set = {state_to_index(initial_state)} # if a state reoccurs, an attractor or fixed point is
# reached, stop.
s = initial_state
while True:
new_s = update(s) # synchronous
new_s_index = state_to_index(new_s)
if new_s_index in state_index_set:
break
trajectory.append(new_s)
state_index_set.add(new_s_index)
s = new_s
return np.array(trajectory)
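# Hedged usage sketch: update() and state_to_index() are assumed to be defined
# elsewhere in this module for a 3-node Boolean network.
example_traj = generate_synchronous_trajectory(np.array([1, 0, 1]))
print(example_traj.shape)  # (number_of_steps, 3)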
|
85f452f7665028e29085296820f67cf2e5cdb8bf
| 3,644,198
|
import inspect
from textwrap import dedent
import ast
import astunparse
def arg_names(level=2):
"""Try to determine names of the variables given as arguments to the caller
of the caller. This works only for trivial function invocations. Otherwise
either results may be corrupted or exception will be raised.
level: 0 is current frame, 1 is the caller, 2 is caller of the caller
"""
try:
caller_frame_info = inspect.stack()[level]
caller_context = caller_frame_info.code_context
code = dedent(''.join(caller_context))
tree = ast.parse(code, '', 'eval')
always_assert(isinstance(tree.body, ast.Call))
args = tree.body.args
names = [astunparse.unparse(arg).strip() for arg in args]
return names
except Exception as ex:
raise Exception('Cannot determine arg names') from None
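# Hedged usage sketch: when run as a script, a trivial single-line call site
# lets the caller's variable names be recovered (always_assert is assumed to
# behave like a plain assert).
def _show(*values):
    print(arg_names())  # -> ['alpha', 'beta']
alpha, beta = 1, 2
_show(alpha, beta)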
|
ce5b26747404442bfd017827435e9515c60aace0
| 3,644,199
|