content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def extract_y(x, coefficients, degree):
    """
    Evaluate the polynomial response for each sample in ``x``.

    :param x: matrix containing in each row the first 'degree' powers of a
        random number in the interval [-3, 2]
    :param coefficients: vector of coefficients w_star' in ascending order
        (from x**0 up to x**n)
    :param degree: polynomial degree, forwarded to ``extract_feature_map``
    :return: values of y that satisfy the polynomial given 'x' and
        'coefficients'
    """
    features = extract_feature_map(x, degree)
    return np.matmul(features, coefficients)
def model_scattered_light(data, errs, mask,
                          verbose=True,
                          deg=(5, 5), sigma=3.0, maxiter=10):
    """
    Fit a 2D Legendre polynomial scattered-light model to ``data``.

    Only finite pixels where ``mask`` is True are used. Outlier pixels are
    iteratively sigma-clipped and the polynomial refit.

    Parameters
    ----------
    data, errs : 2D arrays
        Pixel values and their uncertainties (same shape).
    mask : 2D bool array
        True marks pixels eligible for the fit.
    verbose : bool
        Print the number of clipped pixels each iteration.
    deg : sequence of two ints
        (x-degree, y-degree) of the 2D Legendre polynomial.
    sigma : float
        Clipping threshold in units of the error-normalized residual.
    maxiter : int
        Maximum number of clip/refit iterations.

    Returns
    -------
    scatlightfit : 2D array
        The evaluated model, same shape as ``data``.
    info : tuple
        (median residual, biweight scale of residuals, total clipped
        pixels, last iteration index, copy of the input data).
    """
    scatlight = data.copy()
    scatlighterr = errs.copy()
    shape = data.shape

    ## Fit scattered light with iterative rejection
    def normalize(x):
        """ Linearly scale from -1 to 1 """
        x = np.array(x)
        xmin, xmax = x.min(), x.max()
        xhalf = (xmax - xmin) / 2.
        # Center on the midpoint of the range, then scale by the half-range.
        # (The original divided (x - xhalf) / xhalf, which only maps onto
        # [-1, 1] when xmin == 0; this form is correct for any range and
        # identical for np.arange inputs.)
        return (x - (xmin + xmax) / 2.) / xhalf

    XN, YN = np.meshgrid(normalize(np.arange(shape[0])), normalize(np.arange(shape[1])), indexing="ij")
    finite = np.isfinite(scatlight) & mask
    _XN = XN[finite].ravel()
    _YN = YN[finite].ravel()
    _scatlight = scatlight[finite].ravel()
    _scatlighterr = scatlighterr[finite].ravel()
    # Initialize the fit to a constant (the median level).
    _scatlightfit = np.full_like(_scatlight, np.nanmedian(_scatlight))
    Noutliertotal = 0
    for iteration in range(maxiter):
        # Clip pixels whose |residual/error| exceeds the sigma threshold.
        normresid = (_scatlight - _scatlightfit) / _scatlighterr
        # BUGFIX: was `np.abs(normresid < sigma)`, i.e. abs() of a boolean
        # array — that never clipped anything on the negative side.
        iinotoutlier = np.abs(normresid) < sigma
        Noutlier = np.sum(~iinotoutlier)
        if verbose: print(" m2fs_subtract_scattered_light: Iter {} removed {} pixels".format(iteration, Noutlier))
        if Noutlier == 0 and iteration > 0: break
        Noutliertotal += Noutlier
        _XN = _XN[iinotoutlier]
        _YN = _YN[iinotoutlier]
        _scatlight = _scatlight[iinotoutlier]
        _scatlighterr = _scatlighterr[iinotoutlier]
        # Fit scattered light model (linear least squares on the 2D
        # Legendre Vandermonde matrix).
        xypoly = np.polynomial.legendre.legvander2d(_XN, _YN, deg)
        coeff = np.linalg.lstsq(xypoly, _scatlight, rcond=-1)[0]
        # Evaluate the scattered light model at the surviving points.
        _scatlightfit = xypoly.dot(coeff)
    # Evaluate the final model over the whole frame.
    scatlightpoly = np.polynomial.legendre.legvander2d(XN.ravel(), YN.ravel(), deg)
    scatlightfit = (scatlightpoly.dot(coeff)).reshape(shape)
    resid = (scatlight - scatlightfit)[finite].ravel()
    scatlightmed = np.median(resid)
    scatlighterr = biweight_scale(resid)
    print("scatlightmed", scatlightmed)
    print("scatlighterr", scatlighterr)
    return scatlightfit, (scatlightmed, scatlighterr, Noutliertotal, iteration, scatlight)
def Convert_Data_To_GrayScale(data):
    """
    Convert an image data set to grayscale by averaging the three colour
    channels along axis 3, keeping that axis as a singleton dimension.

    input:
        data: input data set of shape (..., H, W, 3)
    return: a numpy array of grayscale images of shape (..., H, W, 1)
    """
    scaled = data / 3
    return scaled.sum(axis=3, keepdims=True)
def jittered_center_crop(frames,
                         box_extract,
                         box_gt,
                         search_area_factor,
                         output_sz,
                         scale_type='original',
                         border_type='replicate'):
    """ Extract, for every frame, a square crop centered at box_extract whose
    area is search_area_factor^2 times the box_extract area, resized to
    output_sz. The box_gt co-ordinates are then mapped from image space into
    the crop's co-ordinate frame.

    args:
        frames - list of frames
        box_extract - list of boxes (one per frame) used as crop anchors
        box_gt - list of boxes (one per frame) to re-express in crop co-ordinates
        search_area_factor - crop area is search_area_factor^2 times box_extract area
        output_sz - side length the extracted crops are resized to
        scale_type, border_type - forwarded to sample_target

    returns:
        list - image crops
        list - box_gt locations in crop co-ordinates
    """
    crops_and_factors = [
        sample_target(frame,
                      anchor,
                      search_area_factor,
                      output_sz,
                      scale_type=scale_type,
                      border_type=border_type)
        for frame, anchor in zip(frames, box_extract)
    ]
    frames_crop, resize_factors = zip(*crops_and_factors)
    crop_sz = np.array([output_sz, output_sz], 'int')
    # Map each ground-truth box into its crop's co-ordinate frame.
    box_crop = [
        transform_image_to_crop(gt_box, anchor_box, factor, crop_sz)
        for gt_box, anchor_box, factor in zip(box_gt, box_extract, resize_factors)
    ]
    return frames_crop, box_crop
def np_sample_kumaraswamy(a, b, size):
    """
    Draw samples k ~ Kumaraswamy(a, b) by inverse-transform sampling of
    numpy's global uniform generator.

    Args:
        a: shape parameter 1 (must be > 0)
        b: shape parameter 2 (must be > 0)
        size: shape of the returned np array
    """
    assert a > 0 and b > 0, "Parameters can not be zero"
    u = np.random.uniform(size=size)
    # Inverse CDF of the Kumaraswamy distribution.
    return np.power(1.0 - np.power(1.0 - u, 1.0 / b), 1.0 / a)
import re
def find_meta(meta, file, error=True):
    """
    Extract the __meta__ value from METAFILE.

    Scans ``file`` for an assignment ``__<meta>__ = <string>`` where the
    string may be single- or triple-quoted, with either quote character:

        __meta__ = 'value'
        __meta__ = '''value
        lines '''

    :param meta: name between the double underscores (e.g. "version").
    :param file: path of the file to scan (read via the module-level ``read``).
    :param error: if True, raise RuntimeError when no match is found;
        otherwise fall through and return None implicitly.
    :return: the matched string value (without quotes), or None.
    """
    try:
        text = read(file)
    except Exception as err:
        raise RuntimeError("Failed to read file") from err
    # Template for triple-quoted values: the non-greedy (.*\n*)*? lets the
    # value span multiple lines.
    tbase = (r"^__{meta}__[ ]*=[ ]*(({sq}(?P<text1>(.*\n*)*?){sq})|"
             "({dq}(?P<text2>(.*\n*)*?){dq}))")
    # Template for single-quoted values: [^\n] restricts them to one line.
    sbase = (r"^__{meta}__[ ]*=[ ]*(({sq}(?P<text1>([^\n])*?){sq})|"
             "({dq}(?P<text2>([^\n])*?){dq}))")
    triple = tbase.format(meta=meta, sq="'''", dq='"""')
    re_meta_tripple = re.compile(triple, re.M)
    single = sbase.format(meta=meta, sq="'", dq='"')
    re_meta_single = re.compile(single, re.M)
    # Prefer a triple-quoted match; fall back to the single-quoted pattern.
    try:
        meta_match = re_meta_tripple.search(text)
    except Exception:
        meta_match = None
    # This is separated from exception since search may
    # result with None.
    if meta_match is None:
        try:
            meta_match = re_meta_single.search(text)
        except Exception:
            meta_match = None
    if meta_match is not None:
        # text1 captured a single-quote form, text2 a double-quote form;
        # exactly one of them is non-None.
        match1 = meta_match.group('text1')
        match2 = meta_match.group('text2')
        return match1 if match1 is not None else match2
    if error:
        raise RuntimeError("Unable to find __{meta}__ string in {file}."
                           .format(meta=meta, file=file))
def get_training_input(filenames, params):
    """ Get input for training stage

    :param filenames: A list contains [source_filename, target_filename]
    :param params: Hyper-parameters
    :returns: A dictionary of pair <Key, Tensor> with keys "source",
        "target", "source_length" and "target_length"
    """
    # Build the whole input pipeline on the CPU so the accelerator is left
    # free for the model itself.
    with tf.device("/cpu:0"):
        src_dataset = tf.data.TextLineDataset(filenames[0])
        tgt_dataset = tf.data.TextLineDataset(filenames[1])
        dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))
        dataset = dataset.shuffle(params.buffer_size)
        dataset = dataset.repeat()
        # Split string
        dataset = dataset.map(
            lambda src, tgt: (
                tf.string_split([src]).values,
                tf.string_split([tgt]).values
            ),
            num_parallel_calls=params.num_threads
        )
        # Append <eos> symbol
        dataset = dataset.map(
            lambda src, tgt: (
                tf.concat([src, [tf.constant(params.eos)]], axis=0),
                tf.concat([tgt, [tf.constant(params.eos)]], axis=0)
            ),
            num_parallel_calls=params.num_threads
        )
        # Convert to dictionary
        dataset = dataset.map(
            lambda src, tgt: {
                "source": src,
                "target": tgt,
                "source_length": tf.shape(src),
                "target_length": tf.shape(tgt)
            },
            num_parallel_calls=params.num_threads
        )
        # Create iterator
        iterator = dataset.make_one_shot_iterator()
        features = iterator.get_next()
        # Create lookup tables mapping token strings to vocabulary indices;
        # out-of-vocabulary tokens fall back to the <unk> id.
        src_table = tf.contrib.lookup.index_table_from_tensor(
            tf.constant(params.vocabulary["source"]),
            default_value=params.mapping["source"][params.unk]
        )
        tgt_table = tf.contrib.lookup.index_table_from_tensor(
            tf.constant(params.vocabulary["target"]),
            default_value=params.mapping["target"][params.unk]
        )
        # String to index lookup
        features["source"] = src_table.lookup(features["source"])
        features["target"] = tgt_table.lookup(features["target"])
        # Batching (bucketed by length; see batch_examples elsewhere in
        # this module for the exact scheme).
        features = batch_examples(features, params.batch_size,
                                  params.max_length, params.mantissa_bits,
                                  shard_multiplier=len(params.device_list),
                                  length_multiplier=params.length_multiplier,
                                  constant=params.constant_batch_size,
                                  num_threads=params.num_threads)
        # Convert to int32
        features["source"] = tf.to_int32(features["source"])
        features["target"] = tf.to_int32(features["target"])
        features["source_length"] = tf.to_int32(features["source_length"])
        features["target_length"] = tf.to_int32(features["target_length"])
        # tf.shape produced rank-1 length tensors; squeeze the extra axis so
        # lengths are scalars per example.
        features["source_length"] = tf.squeeze(features["source_length"], 1)
        features["target_length"] = tf.squeeze(features["target_length"], 1)
        return features
def cbar(ni, nj, resources, commcost):
    """ Average communication cost between tasks ni and nj, taken over all
    ordered pairs of distinct resources. Returns 0 when only one resource
    exists (no communication possible).
    """
    n_resources = len(resources)
    if n_resources == 1:
        return 0
    agents = list(resources.values())
    total = 0.
    for src in agents:
        for dst in agents:
            if src != dst:
                total += commcost(ni, nj, src, dst)
    return total / (n_resources * (n_resources - 1))
import re
def get_electrostatic_potentials(outcar, atoms):
    """ Retrieve the electrostatic averaged potentials from the OUTCAR file

    :param outcar: content of the OUTCAR file (list of strings)
    :param atoms: number of atoms of each atomic species (list of integers)
    :return: dictionary with the electrostatic potential for each atom """
    # Locate the potentials block; the values start 3 lines after the header.
    index_beg = bf.grep(outcar, 'average (electrostatic) potential at core', nb_found=1)[0][1] + 3
    try:
        index_end = outcar[index_beg:].index(' ')
    except ValueError as e:
        # Some OUTCAR variants terminate the block with an empty line
        # instead of a single-space line.
        index_end = outcar[index_beg:].index('')
    potentials_str = outcar[index_beg: index_beg + index_end]
    # Each line holds several "index -potential" pairs; split on the 5-space
    # column separator or the minus sign and drop the leading empty token.
    # NOTE(review): splitting on '-' assumes every potential is negative —
    # confirm this holds for all OUTCAR files of interest.
    potentials_raw = np.concatenate([[float(f) for f in re.split(' {5}|-', q)[1:]] for q in potentials_str])
    # Take the second value of each (index, potential) pair and restore the
    # minus sign stripped by the split above.
    potentials = np.array([-f[1] for f in np.split(potentials_raw, len(atoms))])
    if len(potentials) != len(atoms):
        raise bf.PyDEFImportError('Number of electrostatic potentials retrieved and number are not consistent')
    return dict(list(zip(list(atoms), potentials)))
import logging
def get_metrics_delta(metric_name, label_suffix, labels, before_metrics, after_metrics):
    """Calculate the difference between 2 samples.

    Returns after - before for the matching metric/label combination, or
    None (with an error logged) when the sample is missing from either set.
    """
    before = find_sample_by_labels(metric_name, label_suffix, labels, before_metrics)
    after = find_sample_by_labels(metric_name, label_suffix, labels, after_metrics)
    if before and after:
        return after.value - before.value
    logging.error(f"Required metric/label combination not found: {metric_name}/{labels}")
    return None
def convert_region_type(region_type):
    """
    Convert the integer region_type to the corresponding RegionType enum object.

    :param region_type: integer key into the module-level
        ``int_to_region_type`` mapping.
    :return: the mapped RegionType value; raises KeyError for unknown ints.
    """
    return int_to_region_type[region_type]
import os
import mimetypes
import base64
def CreateMessageWithAttachment(sender, to, subject, message_text, file_dir,
                                filename):
    """Create a message for an email.

    Args:
      sender: Email address of the sender.
      to: Email address of the receiver.
      subject: The subject of the email message.
      message_text: The text of the email message.
      file_dir: The directory containing the file to be attached.
      filename: The name of the file to be attached.

    Returns:
      An object containing a base64url encoded email object (dict with a
      'raw' key holding a str, as expected by the Gmail API).
    """
    message = MIMEMultipart()
    message['to'] = to
    message['from'] = sender
    message['subject'] = subject
    message.attach(MIMEText(message_text))

    path = os.path.join(file_dir, filename)
    content_type, encoding = mimetypes.guess_type(path)
    # Fall back to generic binary when the type is unknown or the file is
    # compressed/encoded (guess_type reports e.g. 'gzip' via `encoding`).
    if content_type is None or encoding is not None:
        content_type = 'application/octet-stream'
    main_type, sub_type = content_type.split('/', 1)
    # Use context managers so file handles are closed even on error.
    if main_type == 'text':
        # BUGFIX: MIMEText requires str in Python 3; the original opened the
        # file in binary mode and passed bytes, which raises at runtime.
        with open(path) as fp:
            msg = MIMEText(fp.read(), _subtype=sub_type)
    elif main_type == 'image':
        with open(path, 'rb') as fp:
            msg = MIMEImage(fp.read(), _subtype=sub_type)
    elif main_type == 'audio':
        with open(path, 'rb') as fp:
            msg = MIMEAudio(fp.read(), _subtype=sub_type)
    else:
        with open(path, 'rb') as fp:
            msg = MIMEBase(main_type, sub_type)
            msg.set_payload(fp.read())
    msg.add_header('Content-Disposition', 'attachment', filename=filename)
    message.attach(msg)
    # BUGFIX: b64encode needs bytes in Python 3, so encode the message with
    # as_bytes(); decode the result back to str so the dict is
    # JSON-serializable for the API call.
    return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}
import scipy
def fit_to_data(x: np.ndarray, y: np.ndarray) -> np.ndarray:
    """
    Fit the module-level ``weight`` function to the data in ``x`` and ``y``.

    A hand-crafted initial estimate is supplied because the timestamps in
    ``x`` are very large, which would otherwise destabilise the fit.
    """
    initial_guess = np.array([1.0, x[0] - 100, 0.0])
    params, _covariance = scipy.optimize.curve_fit(
        f=weight, xdata=x, ydata=y, p0=initial_guess)
    return params
def new_url(fiscal_year, dept_str=DEPARTMENTS_DICT['1700']):
    """
    Build the FPDS archive URL for the given 2-digit fiscal year, e.g.

    https://www.fpds.gov/ddps/FY07-V1.4/1700-DEPARTMENTOFTHENAVY/1700-DEPARTMENTOFTHENAVY-DEPTOctober2006-Archive.zip

    :param fiscal_year: 2-digit fiscal year as a string, e.g. "07".
    :param dept_str: department path segment (defaults to the Navy entry).
    :return: the archive URL for that department and fiscal year.
    """
    # Explicit validation instead of asserts (asserts vanish under -O).
    if not isinstance(fiscal_year, str):
        raise TypeError("fiscal year must be string")
    if not 0 < int(fiscal_year) < 100:
        raise ValueError("fiscal_year must be a 2-digit year string")
    # The archive covers October of the *previous* calendar year.
    prev_year = str(int("20" + fiscal_year) - 1)
    # BUGFIX: removed `dept_str = DEPARTMENTS_DICT[k]` — `k` was undefined
    # (always raised NameError) and clobbered the dept_str parameter.
    return "https://www.fpds.gov/ddps/FY" + fiscal_year + \
           "-V1.4/" + dept_str + \
           "/" + dept_str + "-DEPTOctober" \
           + prev_year + "-Archive.zip"
def normalize_units(data):
    """Normalize the 'unit' field of every dataset in ``data`` in place.

    Missing units are treated as the empty string. Returns ``data`` for
    convenient chaining.
    """
    for dataset in data:
        raw_unit = dataset.get('unit', '')
        dataset['unit'] = normalize_units_function(raw_unit)
    return data
def xvalBooklets(dfResp, dfObsResp, configObsList, configRespList):
    """
    Cross-validates records for a booklet using data from a ready-made data frames. Returns a data frame containing
    extracted responses from the response data table and the reconstructed responses from the observable
    data, for selected item types that the x-val algorithm currently handles.

    :param dfResp: a data frame of response data, from which we extract the responses for each item
    :param dfObsResp: a data frame of observable data, from which we reconstruct responses for each item
    :param configObsList: list containing configurations for processing observables
    :param configRespList: list containing configurations for processing responses
    :return: a data frame that matches the extracted and reconstructed responses,
        carrying a "matched" column (True/False/None); None on processing errors
    """
    # NOTE(review): `&` binds tighter than `>`, so this evaluates as
    # len(configObsList) > (0 & (...)), i.e. only the length is actually
    # checked; `and` was probably intended — confirm before changing.
    assert (len(configObsList)>0 & ("itemtypeColumn" in configObsList))
    assert (isinstance(dfResp, pd.DataFrame))
    assert (isinstance(dfObsResp, pd.DataFrame))
    # make sure there are overlapping subjects
    subjlist = list(set(dfResp.BookletNumber.unique()).intersection(set(dfObsResp.BookletNumber.unique())))
    assert(len(subjlist)> 0)
    ##################
    # recon answers using the configObsList
    # Join the observable data back again
    try:
        dfObs = pd.concat([reconByConfig(dfObsResp, config=c) for c in configObsList])
        if dfObs.shape[0]>0:
            dfObs = dfObs.loc[:, ['BlockCode', 'BookletNumber', "AccessionNumber", 'ResponseComponentId',
                                  'ReconstructedAnswer', 'ResponseHistory']]
    except Exception as e:
        logger.error("xvalBooklets: Error reconstructing responses")
        logger.exception(e)
        return None
    ##################
    # Merge recorded and reconstructed responses
    try:
        dfCompare = pd.merge(dfResp, dfObs, how="outer", on=["BookletNumber", "BlockCode", "ResponseComponentId"])
    except Exception as e:
        logger.error("xvalBooklets: Error merging response and observable data")
        logger.exception(e)
        return None
    # Need to transform the extracted responses by the `childItemType`, because `ItemTypeCode` is too gross.
    dfCompare.loc[dfCompare.ItemTypeCode.isin(["MCSS", "BQMCSS"]), "ChildItemType"] = "MCSS"
    dfCompare.loc[dfCompare.ItemTypeCode.isin(["MCMS", "BQMCMS"]), "ChildItemType"] = "MCMS"
    # ## Extract and transform responses to prepare for comparisons
    try:
        dfCompare = pd.concat([parseItemResponses(dfCompare, config=c) for c in configRespList])
    except Exception as e:
        logger.error("xvalBooklets: Error extracting responses")
        logger.exception(e)
        return None
    # ## Comparison and discrepancies
    # first, take care of a special case in BQMCMS and BQChoices, where one can add free text as "response"
    # NOTE(review): the filter list contains 'response' twice — one entry
    # may have been meant to be a different token; verify against the data.
    idx = dfCompare.ItemTypeCode.isin(["BQMCSS", "BQMCMS", "BQChoices"]) & dfCompare["ExtractedAnswer"].notnull()
    dfCompare.loc[idx, "ExtractedAnswer"] = dfCompare.loc[idx, "ExtractedAnswer"] \
        .apply(lambda l: [i for i in l if i not in ['response', 'response']])
    # discrepancies
    try:
        # we take a shortcut here, converting responses to a set of string-values
        # if the response is None, then the result is not a set, but a None
        setReconAnswer = dfCompare.loc[:, "ReconstructedAnswer"]\
            .apply(lambda respList: set([str(i) for i in respList]) if isinstance(respList,list) else None)
        setExtraAnswer = dfCompare.loc[:, "ExtractedAnswer"]\
            .apply(lambda respList: set([str(i) for i in respList]) if isinstance(respList,list) else None)
        dfCompare.loc[:, "matched"] = None
        # matched==True iff neither is None and the sets (of strings) are equal (recall None!=None)
        idx = setReconAnswer == setExtraAnswer
        dfCompare.loc[idx, "matched"] = True
        # matched==False iff the 2 sets were not equal, or one of them is None, but if both are None, we ignore
        idx = (setReconAnswer != setExtraAnswer)
        dfCompare.loc[idx, "matched"] = False
        dfCompare.loc[setReconAnswer.isnull() & setExtraAnswer.isnull(), "matched"] = None
        # if the response is empty, it is treated as missing; comparison is True
        idx = dfCompare["ReconstructedAnswer"].isnull() & (dfCompare["ExtractedAnswer"].apply(lambda l: l==[]))
        # dfCompare.loc[idx, "matched"] = None
        dfCompare.loc[idx, "matched"] = True
    except Exception as e:
        logger.error("xvalBooklets: Error comparing extracted and reconstructed responses")
        logger.exception(e)
        return None
    return dfCompare
def imap_any(conditions):
    """
    Generate an IMAP query expression that will match any of the expressions
    in `conditions`.

    IMAP's OR is a prefix operator taking exactly two operands, so chaining
    many ORs produces very verbose, hard to parse queries. By De Morgan's
    law (a || b || c...) == !(!a && !b && !c...), and IMAP expresses AND by
    simple juxtaposition, so the negated form is far more compact.

    Arguments:
      conditions: List of IMAP expressions.

    Returns:
      An IMAP expression that evaluates to "true" if any of the conditions
      are true; the empty string for an empty list.
    """
    if not conditions:
        return ""
    negated = " ".join("NOT %s" % condition for condition in conditions)
    return "(NOT (%s))" % negated
def format_sources(sources):
    """
    Make a comma separated string of news source labels.

    Each source's 'value' is followed by a comma, so any non-empty input
    yields a trailing comma (e.g. "bbc,cnn,"), matching the original
    behaviour downstream consumers may rely on.

    :param sources: iterable of dicts, each carrying a 'value' key.
    :return: concatenated labels, each terminated by a comma.
    """
    # str.join builds the result in one pass instead of quadratic +=.
    return "".join(source["value"] + "," for source in sources)
def update_dict_to_latex(update_dict, order):
    """Returns update dictionary and order as latex string.

    :param update_dict: maps each variable to either a single expression or
        a set of alternative expressions.
    :param order: iterable of variables; they are rendered in reversed order.
    :return: a LaTeX ``eqnarray*`` environment as a string.
    """
    ret_val = "\\begin{eqnarray*}\n"
    # Render one right-hand side, wrapped to a printable line length.
    get_line = lambda obj: wrap_long_latex_line(latex_print(obj) + "\\\\\n")
    for v in reversed(order):
        ret_val += latex_print(v) + " &=& "
        if isinstance(update_dict[v], set):
            if len(update_dict[v]) == 1:
                ret_val += get_line(list(update_dict[v])[0])
            else:
                # Multiple alternative equations are separated by "||".
                for n, eq in enumerate(update_dict[v]):
                    ret_val += get_line(eq)
                    if n != len(update_dict[v]) - 1:
                        ret_val += " &||& "
        else:
            ret_val += get_line(update_dict[v])
    ret_val += "\\end{eqnarray*}\n"
    # ret_val = ret_val.replace('(', '\\left(')
    # ret_val = ret_val.replace(')', '\\right)')
    # ret_val = ret_val.replace('--', '')
    return ret_val
def setup_base_empty_grade_helper(user: User, unit: models.Unit) -> models.Grade:
    """
    Helper method to setup an empty grade before sending a request to the grading
    view.

    The created grade is in the "sent" state with no score, notebook or
    message — i.e. the state right after submission but before grading —
    and is persisted before being returned.
    """
    grade = models.Grade(user=user, unit=unit)
    grade.status = "sent"
    grade.score = None
    grade.notebook = None
    grade.message = ""
    grade.save()
    return grade
def register_project(fn: tp.Callable = None):
    """Register new project.

    Parameters
    ----------
    fn
        Invoked with the project path once it has been located; the
        function's name is the key used to search in $PROJECT_PATHS and the
        key under which the wrapper is registered in ``ENVS``.
    """
    def _wrapper():
        # Resolve the project path first, then hand it to the wrapped fn.
        project_path = _start_proj_shell(fn.__name__)
        return fn(project_path)
    ENVS[fn.__name__] = _wrapper
    return _wrapper
import asyncio
import sys
async def uart_terminal():
    """This is a simple "terminal" program that uses the Nordic Semiconductor
    (nRF) UART service. It reads from stdin and sends each line of data to the
    remote device. Any data received from the device is printed to stdout.
    """
    def match_nus_uuid(device: BLEDevice, adv: AdvertisementData):
        # This assumes that the device includes the UART service UUID in the
        # advertising data. This test may need to be adjusted depending on the
        # actual advertising data supplied by the device.
        if UART_SERVICE_UUID.lower() in adv.service_uuids:
            return True
        return False

    # NOTE(review): find_device_by_filter can return None when nothing
    # matches; BleakClient(None) would then fail — confirm this is the
    # intended failure mode.
    device = await BleakScanner.find_device_by_filter(match_nus_uuid)

    def handle_disconnect(_: BleakClient):
        print("Device was disconnected, goodbye.")
        # cancelling all tasks effectively ends the program
        for task in asyncio.all_tasks():
            task.cancel()

    def handle_rx(_: int, data: bytearray):
        # Notification callback: echo whatever the device sends.
        print("received:", data)

    async with BleakClient(device, disconnected_callback=handle_disconnect) as client:
        # Subscribe to the device's TX characteristic so incoming data
        # triggers handle_rx.
        await client.start_notify(UART_TX_CHAR_UUID, handle_rx)
        print("Connected, start typing and press ENTER...")
        loop = asyncio.get_running_loop()
        while True:
            # This waits until you type a line and press ENTER.
            # A real terminal program might put stdin in raw mode so that things
            # like CTRL+C get passed to the remote device.
            data = await loop.run_in_executor(None, sys.stdin.buffer.readline)
            # data will be empty on EOF (e.g. CTRL+D on *nix)
            if not data:
                break
            # some devices, like devices running MicroPython, expect Windows
            # line endings (uncomment line below if needed)
            # data = data.replace(b"\n", b"\r\n")
            await client.write_gatt_char(UART_RX_CHAR_UUID, data)
            print("sent:", data)
import re
def tag_word_in_sentence(sentence, tag_word):
    """
    Use regex to wrap every derived form of a given ``tag_word`` in ``sentence`` in an html-tag.

    Args:
        sentence: String containing of multiple words.
        tag_word: Word that should be wrapped.

    Returns:
        : Sentence with replacements.
    """
    words = sentence.split()
    # clean_up presumably normalises tokens (case/punctuation) and
    # optionally lemmatises them — TODO confirm against its definition.
    words = clean_up(words, lemmatize=False)
    # get unique, non-empty strings:
    words = [word for word in set(words) if word]
    lemmas = clean_up(words, lemmatize=True)
    tag_lemma = clean_up([tag_word])[0]
    # Keep the surface forms whose lemma (or exact form) matches the target.
    words_found = [
        word
        for word, lemma in zip(words, lemmas)
        if lemma == tag_lemma or word == tag_word
    ]
    for word in words_found:
        # NOTE(review): the ([^>])...([^<]) guards skip already-wrapped
        # occurrences, but they also fail to match a word at the very start
        # or end of the sentence — confirm whether that is intended.
        sentence = re.sub(
            f"([^>])({word})([^<])",
            r'\1<span class="word">\2</span>\3',
            sentence,
            flags=re.IGNORECASE,
        )
    return sentence
def get_content_type(response: 'Response') -> str:
    """Get content type from ``response``.

    Args:
        response (:class:`requests.Response`): Response object.

    Returns:
        The content type from ``response``, lower-cased with any parameters
        (e.g. ``; charset=...``) stripped.

    Note:
        If the ``Content-Type`` header is not defined in ``response``,
        the function will utilise |magic|_ to detect its content type;
        should that fail for any reason, ``'(null)'`` is returned.

        .. |magic| replace:: ``magic``
        .. _magic: https://pypi.org/project/python-magic/
    """
    declared = response.headers.get('Content-Type')
    if declared is None:
        try:
            declared = magic.detect_from_content(response.content).mime_type
        except Exception:
            declared = '(null)'
    stripped = declared.casefold().split(';', maxsplit=1)[0]
    return stripped.strip()
def _get_file_url_from_dropbox(dropbox_url, filename):
"""Dropbox now supports modifying the shareable url with a simple
param that will allow the tool to start downloading immediately.
"""
return dropbox_url + '?dl=1' | fe0256ae747826dbbe5ac3c3a4afa42e0584699a | 3,632,624 |
def launch_coef_scores(args):
    """
    Wrapper (e.g. for a multiprocessing pool) that computes the standardized
    scores of the regression coefficients, used when computing the number of
    features in the reduced parameter set.

    @param args: Tuple (SupervisedPCABase instance, feature matrix, response array).
    @return: The standardized scores of the coefficients.
    """
    model, features, response = args
    return model._compute_stnd_coefs(features, response)
from typing import Any
def update(
    configuration: dict, client: Any, issue: Any, issue_fields: dict, transition: str = None
) -> dict:
    """Updates a Jira issue, optionally applying fields and a workflow transition."""
    data = {"resource_id": issue.key, "link": f"{configuration.browser_url}/browse/{issue.key}"}
    if issue_fields:
        issue.update(fields=issue_fields)
    if transition:
        # Find the first available transition whose name matches
        # case-insensitively, if any.
        wanted = transition.lower()
        match = next(
            (t for t in client.transitions(issue) if t["name"].lower() == wanted),
            None,
        )
        if match is not None:
            client.transition_issue(issue, match["id"])
    return data
def resolve_wishlist_from_user(user: "User") -> Wishlist:
    """Return wishlist of the logged in user, creating one if none exists yet."""
    wishlist, _ = Wishlist.objects.get_or_create(user=user)
    return wishlist
def register_dat_matrix(file_path):
    """
    Parse the 4x4 affine registration matrix from a register.dat file.

    See https://surfer.nmr.mgh.harvard.edu/fswiki/RegisterDat for the file
    format. The matrix encodes an affine transformation that can be applied
    to a coordinate vector.

    Parameters
    ----------
    file_path: str
        Path to the file in register.dat format.

    Returns
    -------
    2D numpy array of floats
        The parsed matrix, with dimension (4, 4).

    Examples
    --------
    Parse a register.dat file that comes with FreeSurfer v6.0:

    >>> reg_data_file = os.path.join(my_freesurfer_dir, 'subjects', 'cvs_avg35_inMNI152', 'mri.2mm', 'register.dat')
    >>> matrix = st.register_dat_matrix(reg_data_file)
    """
    with open(file_path) as fh:
        stripped_lines = [raw_line.rstrip('\n') for raw_line in fh]
    return _parse_register_dat_lines(stripped_lines)
import warnings
import math
def lnprob(theta, phi_total_data, f_blue_data, err, corr_mat_inv):
    """
    Calculates log probability for emcee

    Parameters
    ----------
    theta: array
        Array of parameter values (5 SMHM parameters followed by 4
        quenching parameters)
    phi_total_data: array
        Array of y-axis values of the total stellar mass function
    f_blue_data: array
        Array of observed blue-fraction values
    err: numpy.array
        Array of error values of red and blue mass function
    corr_mat_inv: array
        Array of inverse of correlation matrix

    Returns
    ---------
    lnp: float
        Log probability given a model
    blob: list
        [chi2, randint_logmstar] — chi-squared for the model and the
        (currently unused) random mock index
    """
    # Moved to outside the try clause for cases where parameter values are
    # outside the prior (specific one was when theta[1] was > 14)
    # randint_logmstar = random.randint(1,101)
    randint_logmstar = None
    # Flat priors: reject any parameter below its lower bound. theta[4]
    # has a floor of 0.1; all others must be non-negative.
    # CONSISTENCY FIX: the first five checks previously returned a bare
    # chi2 while the remaining ones returned [chi2, randint_logmstar];
    # emcee blobs must have a consistent shape, so every rejection now
    # returns the list form.
    lower_bounds = (0, 0, 0, 0, 0.1, 0, 0, 0, 0)
    for value, bound in zip(theta, lower_bounds):
        if value < bound:
            chi2 = -np.inf
            return -np.inf, [chi2, randint_logmstar]
    # Promote warnings to errors so bad models are caught by the except
    # below and assigned -inf probability.
    warnings.simplefilter("error", (UserWarning, RuntimeWarning))
    try:
        gals_df = populate_mock(theta[:5], model_init)
        # Keep only galaxies above the mass completeness limit.
        gals_df = gals_df.loc[gals_df['stellar_mass'] >= 10**8.6].reset_index(drop=True)
        # Central/satellite flag: centrals are their own host halo.
        gals_df['cs_flag'] = np.where(gals_df['halo_hostid'] == \
            gals_df['halo_id'], 1, 0)
        cols_to_use = ['halo_mvir', 'halo_mvir_host_halo', 'cs_flag',
                       'stellar_mass']
        gals_df = gals_df[cols_to_use]
        gals_df.stellar_mass = np.log10(gals_df.stellar_mass)

        # Assign red/blue colours with the chosen quenching prescription.
        if quenching == 'hybrid':
            f_red_cen, f_red_sat = hybrid_quenching_model(theta[5:], gals_df, \
                'vishnu')
        elif quenching == 'halo':
            f_red_cen, f_red_sat = halo_quenching_model(theta[5:], gals_df, \
                'vishnu')
        gals_df = assign_colour_label_mock(f_red_cen, f_red_sat, \
            gals_df)

        v_sim = 130**3
        # v_sim = 890641.5172927063 #survey volume used in group_finder.py

        ## Observable #1 - Total SMF
        total_model = measure_all_smf(gals_df, v_sim, False)
        ## Observable #2 - Blue fraction
        f_blue = blue_frac(gals_df, True, False)

        data_arr = []
        data_arr.append(phi_total_data)
        data_arr.append(f_blue_data)
        model_arr = []
        model_arr.append(total_model[1])
        model_arr.append(f_blue[1])
        err_arr = err
        data_arr, model_arr = np.array(data_arr), np.array(model_arr)
        chi2 = chi_squared(data_arr, model_arr, err_arr, corr_mat_inv)
        lnp = -chi2 / 2
        if math.isnan(lnp):
            raise ValueError
    except (ValueError, RuntimeWarning, UserWarning):
        lnp = -np.inf
        chi2 = np.inf
    return lnp, [chi2, randint_logmstar]
def get_car_coordinates(list_points, x_points_traj, y_points_traj):
    """
    Compute the corner coordinates of the car rectangle.

    input:
        list_points - car config as a space-separated string
                      "phi length width l_base" (phi is the heading at
                      the last point)
        x_points_traj, y_points_traj - current shifted position
                      (center of the back axle)
    return:
        car_coordinates - list of 5 points: the 4 corners in clockwise
        order with the first corner repeated at the end for plotting
    """
    phi, length, width, l_base = (float(v) for v in list_points.split(" "))
    # Move from the back-axle reference point to the geometric car center.
    half_base = l_base / 2
    center_x = x_points_traj + half_base * np.cos(phi)
    center_y = y_points_traj + half_base * np.sin(phi)
    # Midpoints of the front and rear edges.
    front_mid = point_shift(center_x, center_y, phi, length / 2)
    rear_mid = point_shift(center_x, center_y, phi + np.pi, length / 2)
    # Corners clockwise; the first spec is duplicated so a plotted polygon
    # closes on itself.
    corner_specs = [
        (front_mid, phi + np.pi / 2),
        (front_mid, phi - np.pi / 2),
        (rear_mid, phi - np.pi / 2),
        (rear_mid, phi + np.pi / 2),
        (front_mid, phi + np.pi / 2),
    ]
    return [point_shift(edge_mid[0], edge_mid[1], angle, width / 2)
            for edge_mid, angle in corner_specs]
def create_module(module_name):
    """Create a new empty virtual module and register it.

    :param module_name: fully qualified name for the virtual module.
    :return: the newly created module object (also stored in ``registry``).
    """
    module = module_cls(module_name)
    # Attach a __spec__ so the import machinery treats this as a real module.
    setattr(module, '__spec__', spec_cls(name=module_name, loader=VirtualModuleLoader))
    registry[module_name] = module
    return module
def _days_in_month(month_0: int, year: int) -> int:
    """ Return the number of days in a month (0-indexed month). """
    base_days = DAYS_IN_MONTH[month_0]
    if month_0 == 1:
        # Gregorian leap-year rule: divisible by 4, except century years
        # that are not divisible by 400.
        is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
        if is_leap:
            return base_days + 1
    return base_days
def create_otfeature( featureName = "calt",
                      featureCode = "# empty feature code",
                      targetFont = None,
                      codeSig = "DEFAULT-CODE-SIGNATURE" ):
    """
    Creates or updates an OpenType feature in the font.
    Returns a status message in form of a string.

    :param featureName: OT feature tag to create or update (e.g. "calt").
    :param featureCode: feature code to place between the signature markers.
    :param targetFont: Glyphs font object (GSFont) to modify.
    :param codeSig: signature string delimiting the managed code block.
    """
    if targetFont:
        # The managed block is delimited by BEGIN/END marker comments so it
        # can be replaced later without touching hand-written feature code.
        beginSig = "# BEGIN " + codeSig + "\n"
        endSig = "# END " + codeSig + "\n"
        if featureName in [ f.name for f in targetFont.features ]:
            # feature already exists:
            targetFeature = targetFont.features[ featureName ]
            if beginSig in targetFeature.code:
                # replace old code with new code:
                targetFeature.code = updated_code( targetFeature.code, beginSig, endSig, featureCode )
            else:
                # append new code:
                targetFeature.code += "\n" + beginSig + featureCode + "\n" + endSig
            return "Updated existing OT feature '%s'." % featureName
        else:
            # create feature with new code:
            newFeature = GSFeature()
            newFeature.name = featureName
            newFeature.code = beginSig + featureCode + "\n" + endSig
            targetFont.features.append( newFeature )
            return "Created new OT feature '%s'" % featureName
    else:
        return "ERROR: Could not create OT feature %s. No font detected." % ( featureName )
def wilson_ci(num_hits, num_total, confidence=0.95):
    """Wilson confidence interval for a binomial proportion.

    Convenience wrapper: converts the confidence level into a z-score and
    the hit count into a proportion, then delegates to general_wilson.
    """
    z_score = st.norm.ppf((1 + confidence) / 2)
    proportion = num_hits / num_total
    return general_wilson(proportion, num_total, z=z_score)
def convert_to_noun(word, from_pos):
    """Convert *word* (with WordNet POS tag *from_pos*) into candidate nouns.

    Tries a chain of WordNet-based fallbacks, each only attempted while the
    result list is still empty: derivationally related forms, attribute
    links, verb-derived forms for '-ed' participles, similar-to links, a
    pertainym route for adverbs, and finally a noun-alike conversion.
    Returns a (possibly empty) list of candidates.
    """
    # Special-case comparative/superlative quantifiers before the lookup.
    if word.lower() in ['most', 'more'] and from_pos == 'a':
        word = 'many'
    synsets = wn.synsets(word, pos=from_pos)
    # Word not found in WordNet at all: nothing to convert.
    if not synsets:
        return []
    result = derivational_conversion(word, from_pos, 'n')
    if len(result) == 0:
        result = attribute_conversion(word, from_pos)
    # '-ed' words are often participles: retry treating the word as a verb.
    if len(result) == 0 and word[-2:].lower() == 'ed' and from_pos != 'v':
        result = derivational_conversion(word, 'v', 'n')
    if len(result) == 0:
        result = convert_similartos(word, from_pos)
    if len(result) == 0 and from_pos == 'r':  # working with pertainyms
        # Adverb route: adverb -> adjective (pertainym) -> noun, stopping at
        # the first adjective that yields any candidates.
        adj_words = convert_pertainym(word)
        for adj in adj_words:
            word_a = adj[0]
            result = derivational_conversion(word_a, 'a', 'n')
            if len(result) == 0:
                result = attribute_conversion(word_a, 'a')
            else: break
            if len(result) == 0 and word_a[-2:].lower() == 'ed' and from_pos != 'v':
                result = derivational_conversion(word_a, 'v', 'n')
            else: break
            if len(result) == 0:
                result = convert_similartos(word_a, 'a')
            else: break
    if len(result) == 0:
        result = nounalike_conversion(word, from_pos)
    # return all the possibilities sorted by probability
    return result
import itertools
def get_slug(obj, title, group):
    """
    Attach a unique slug derived from *title* to a new model object.

    Only acts when the object has no primary key yet (i.e. is unsaved);
    if the plain slug collides with an existing row of *group*, numeric
    suffixes -1, -2, ... are tried until a free slug is found.

    :param obj: Model object
    :param title: Title to create the slug from
    :param group: Model class used for uniqueness lookups
    :return: Model object carrying a unique slug
    """
    if obj.pk is None:
        obj.slug = base_slug = slugify(title)
        for suffix in itertools.count(1):
            taken = group.objects.filter(slug=obj.slug).exists()
            if not taken and obj.slug is not None:
                break
            obj.slug = '%s-%d' % (base_slug, suffix)
    return obj
def create_link(url):
    """Return an HTML anchor tag linking to *url* (opens in a new tab).

    The URL is HTML-escaped before interpolation so characters such as
    '&', '<' and '"' cannot break out of the attribute or inject markup
    (basic XSS hardening); '&' in query strings is also correct HTML
    only when escaped.
    """
    from html import escape  # local import keeps the module surface unchanged
    safe_url = escape(url, quote=True)
    return (f'<a href = "{safe_url}" target="_blank">{safe_url}</a>')
def cummean(x):
    """Return the running (cumulative) mean of *x* as a same-length series."""
    expanding_window = x.expanding()
    return expanding_window.mean()
def validity_range_contains_range(
    overall_range: DateRange,
    contained_range: DateRange,
) -> bool:
    """
    Check whether contained_range lies entirely within overall_range.

    Bounded ends of contained_range must fall inside overall_range; an
    unbounded end of contained_range is only acceptable when the matching
    end of overall_range is unbounded too.
    """
    # NOTE: treats both ranges as "[]" (inclusive-lower, inclusive-upper).
    if overall_range.lower_inf and overall_range.upper_inf:
        return True
    lower_escapes = contained_range.lower_inf and not overall_range.lower_inf
    upper_escapes = contained_range.upper_inf and not overall_range.upper_inf
    if lower_escapes or upper_escapes:
        return False
    if not overall_range.lower_inf:
        # Both a bounded upper end below the overall lower bound and a
        # contained lower bound below it disqualify the range.
        if not contained_range.upper_inf and contained_range.upper < overall_range.lower:
            return False
        if contained_range.lower < overall_range.lower:
            return False
    if not overall_range.upper_inf:
        if not contained_range.lower_inf and contained_range.lower > overall_range.upper:
            return False
        if contained_range.upper > overall_range.upper:
            return False
    return True
def find_reference_section_no_title_via_dots(docbody):
    """Locate the reference section when its title could not be found, by
    searching for reference lines carrying numeric markers "1.", "2.", etc.

    @param docbody: (list) of strings - each string is a line in the document.
    @return: (dictionary) with keys 'start_line' (index of the first
        reference line), 'title_string' (None, since there is no title),
        'marker', 'marker_pattern' and 'title_marker_same_line' (0), as
        produced by find_reference_section_no_title_generic;
        or (None) when the reference section could not be found.
    """
    return find_reference_section_no_title_generic(
        docbody, [re_reference_line_dot_markers])
def get_syntax_errors(graph):
    """List the syntax errors encountered during compilation of a BEL script.

    Uses SyntaxError as a stand-in for :exc:`pybel.parser.exc.BelSyntaxError`.

    :param pybel.BELGraph graph: A BEL graph
    :return: 4-tuples of line number, line text, exception, and annotations
        present in the parser, for every warning that is a SyntaxError
    :rtype: list[tuple]
    """
    syntax_warnings = []
    for line_number, line_text, exception, annotations in graph.warnings:
        if isinstance(exception, SyntaxError):
            syntax_warnings.append((line_number, line_text, exception, annotations))
    return syntax_warnings
def generate_S_tau(t):
    """
    Build the S_tau matrix for a template.

    Bins the template values (unique_binning) and derives the matrix from
    that binning.

    Args:
        t (np.array): the template vector
    Returns:
        np.array: the S_tau matrix
    """
    return generate_S_from_binning(unique_binning(t))
def create_neural_network(input_, output_, reservoir_, spectral_, sparsity_, noise_, input_scale, random_, silent_):
    """Construct a pyESN Echo State Network.

    :rtype: pyESN.ESN
    :param input_: number of input units
    :param output_: number of output units
    :param reservoir_: number of hidden (reservoir) units
    :param spectral_: spectral radius - hidden state scaled so the largest eigenvalue equals this
    :param sparsity_: proportion of reservoir weights fixed at 0
    :param noise_: per-neuron noise, helps reduce generalization error
    :param input_scale: input scaling - smaller preserves more trace of earlier inputs
    :param random_: random_state used to initialize the network
    :param silent_: if False, fitting progress is printed
    :return: pyESN Echo State Network model
    """
    esn_kwargs = {
        'n_inputs': input_,
        'n_outputs': output_,
        'n_reservoir': reservoir_,
        'spectral_radius': spectral_,
        'sparsity': sparsity_,
        'noise': noise_,
        'input_scaling': [input_scale],
        'random_state': random_,
        'silent': silent_,
    }
    return ESN(**esn_kwargs)
def validate_measure_for_asset_changes(asset_type: str, measure: str) -> str:
    """
    Validate the measure argument of the asset-changes command.

    :param asset_type: asset type argument passed by the user
    :param measure: measure argument passed by the user
    :return: the measure when valid for the given asset type; an empty
        string when asset_type matches neither known group
    :raises ValueError: when the measure is invalid for the asset type
    """
    if asset_type in VALID_ASSET_TYPES:
        if measure not in VALID_ASSET_TYPE_MEASURES:
            raise ValueError(MESSAGES['INVALID_ASSET_TYPE_MEASURE'])
        return measure
    if asset_type in VALID_ASSET_DETAIL_TYPES:
        if measure not in VALID_ASSET_DETAIL_TYPE_MEASURES:
            raise ValueError(MESSAGES['INVALID_ASSET_DETAIL_TYPE_MEASURE'])
        return measure
    return ''
def create_incident(**kwargs):
    """
    Create an incident via the Cachet API.

    Always posts name/message/status; component_id and component_status are
    forwarded only when a component_id is supplied.
    """
    incidents = cachet.Incidents(endpoint=ENDPOINT, api_token=API_TOKEN)
    payload = {
        'name': kwargs['name'],
        'message': kwargs['message'],
        'status': kwargs['status'],
    }
    if 'component_id' in kwargs:
        payload['component_id'] = kwargs['component_id']
        payload['component_status'] = kwargs['component_status']
    return incidents.post(**payload)
def harvey_two(frequency, tau_1, sigma_1, tau_2, sigma_2, white_noise, ab=False):
    """
    Evaluate a two-component Harvey background model plus white noise.

    Parameters
    ----------
    frequency : numpy.ndarray
        frequency array on which to evaluate the model
    tau_1, sigma_1 : float
        timescale and amplitude of the first Harvey component
    tau_2, sigma_2 : float
        timescale and amplitude of the second Harvey component
    white_noise : float
        flat white-noise level added on top
    ab : bool, optional
        if True, treat (tau, sigma) as the alternative (a, b)
        parametrisation of each component

    Returns
    -------
    numpy.ndarray
        the two-Harvey model evaluated at each frequency
    """
    model = np.zeros_like(frequency)
    # Both components share the same functional form; accumulate in order.
    for tau, sigma in ((tau_1, sigma_1), (tau_2, sigma_2)):
        if not ab:
            model += (4.*(sigma**2.)*tau)/(1.0+(2.*np.pi*tau*frequency)**2.0+(2.*np.pi*tau*frequency)**4.0)
        else:
            model += tau/(1.0+(sigma*frequency)**2.0+(sigma*frequency)**4.0)
    model += white_noise
    return model
def is_valid_combination( row ):
    """
    Return True when the (possibly partial) pairwise-test row is valid.

    The row may be incomplete: a constraint is only applied once the row
    is long enough to contain the fields it inspects, so filtering can run
    on subsets of the data during the search.
    """
    n = len(row)
    if n > 1:
        brand, os_name = row[0], row[1]
        # Brand Y does not support Windows 98
        if brand == "Brand Y" and os_name == "98":
            return False
        # Brand X does not work with XP
        if brand == "Brand X" and os_name == "XP":
            return False
    if n > 4:
        # Contractors are billed in 30 min increments
        if row[3] == "Contr." and row[4] < 30:
            return False
    return True
import os
def get_out_name(subdataset_name_tuple, out_ext=""):
    """
    Build an output file name for a sub-dataset.

    Takes a tuple of (subdataset name, description): the basename is
    cleaned (".xml" removed, ":" replaced), its last two underscore
    fields are dropped, and the UTM designation parsed from the last
    comma-separated field of the description is appended instead.
    """
    subdataset_name = subdataset_name_tuple[0]
    base = os.path.split(subdataset_name)[-1]
    base = base.replace(".xml", "").replace(":", "_")
    # UTM string comes from the description, with spaces squeezed out.
    utm_str = subdataset_name_tuple[1].split(",")[-1].replace(" ", "")
    stem = "_".join(base.split("_")[:-2])
    return "{}_{}".format(stem, utm_str) + out_ext
def PinkFilter(c):
    """Return True if color *c* can be classified as a shade of pink.

    *c* is an indexable RGB triple; "pink" here means the red and blue
    channels are equal and both exceed the green channel. Fix: replaced
    the non-idiomatic ``if ...: return True / else: return False`` with a
    direct boolean return of the same condition.
    """
    return (c[0] > c[1]) and (c[2] > c[1]) and (c[2] == c[0])
def greedy_search(problem, h=None):
    """Greedy best-first search: nodes are expanded by f(n) = h(n) alone.

    Uses the supplied heuristic, or the problem's own when none is given.
    """
    heuristic = memoize(h or problem.h, 'h')
    return best_first_graph_search(problem, heuristic)
def get_callback(request, spider):
    """Return the callable that handles *request*'s response.

    Falls back to the spider's default ``parse`` method when the request
    carries no explicit callback. Fix: replaced ``getattr(spider, 'parse')``
    with plain attribute access - the name is a constant, so getattr only
    obscured it.
    """
    if request.callback is None:
        return spider.parse
    return request.callback
import requests
def veryrandom(msg, min=1, max=6, base=10, num=1):
    """Fetch `num` random integers between `min` and `max` (both inclusive,
    in the given `base`) from random.org, as an extra guarantee of true
    randomness, returning them as a single space-joined string.

    NOTE(review): `min`/`max` shadow the builtins of the same name, but
    renaming them would break keyword callers, so they are left as-is.
    On any request failure the exception text is returned instead of
    raising - presumably callers display the string either way; confirm
    before changing.
    """
    url = 'http://www.random.org/integers/'
    try:
        data = requests.get(url, params={
            'min': min, 'max': max, 'base': base, 'num': num,
            'col': 1, 'format': 'plain', 'rdn': 'new',
        }).text
    except Exception as e:
        data = str(e)
    return data.replace('\n', ' ')
import os
import shlex
def read_configuration(start_path, configuration_filename):
    """Return compiler options parsed from the nearest configuration file.

    Walks up from *start_path* (via find_configuration) to locate the file;
    returns None when no configuration (or no readable lines) is found.
    Lines starting with the include option become '-isystem <path>' pairs
    resolved relative to the configuration file; other lines are split
    like shell arguments.
    """
    configuration_path = find_configuration(
        os.path.abspath(start_path),
        configuration_filename=configuration_filename)
    lines = read_lines(configuration_path) if configuration_path else None
    if lines is None:
        return None
    options = []
    config_dir = os.path.dirname(configuration_path)
    for line in lines:
        if line.startswith(INCLUDE_OPTION):
            relative_path = line[len(INCLUDE_OPTION):].lstrip()
            options.extend(['-isystem', os.path.join(config_dir, relative_path)])
        else:
            options.extend(shlex.split(line))
    return options
from typing import Union
from typing import List
from typing import Dict
from typing import Sequence
def clean_documents(documents: Union[List[str], Dict[str, str]], **kwargs) -> Union[Sequence[str], Dict[str, str]]:
    """Redact `Filth` found in a batch of documents, replacing it with placeholders.

    *documents* may be a dict in the form ``{'document_name': 'document'}``
    or a list of strings (each a separate document); the return value
    mirrors the input format. Useful when processing many documents.

    .. code:: pycon

        >>> import scrubadub
        >>> scrubadub.clean_documents({'contact.txt': "contact Joe Duffy at joe@example.com",
        ...                            'hello.txt': 'hello world!'})
        {'contact.txt': 'contact {{NAME}} {{NAME}} at {{EMAIL}}', 'hello.txt': 'hello world!'}
        >>> scrubadub.clean_documents(["contact Joe Duffy at joe@example.com", 'hello world!'])
        ['contact {{NAME}} {{NAME}} at {{EMAIL}}', 'hello world!']

    :param documents: Documents containing possible PII to redact
    :type documents: `list` of `str` objects, `dict` of `str` objects
    :return: Documents in the same format as the input, with `Filth` redacted
    :rtype: `list` of `str` objects, `dict` of `str` objects; same as input
    """
    return Scrubber().clean_documents(documents, **kwargs)
from math import log
def calcShannonEnt(dataSet):
    """Compute the Shannon entropy of the class labels in *dataSet*.

    The last element of each row (via ``dataSet.values``) is taken as the
    class label; the entropy -sum(p_i * log2(p_i)) over label frequencies
    is used to judge candidate decision-tree splits.

    Parameters
    ----------
    dataSet : DataFrame-like exposing a ``.values`` row iterator
        The original data set or a partition of it.

    Returns
    -------
    float
        Information entropy of the label column.
    """
    total_rows = len(dataSet)
    label_counts = {}
    for row in dataSet.values:
        label = row[-1]  # by convention the label sits in the last column
        label_counts[label] = label_counts.get(label, 0.0) + 1
    entropy = 0.0
    for count in label_counts.values():
        proportion = float(count) / total_rows  # share of class i
        entropy -= proportion * log(proportion, 2)
    return entropy
def Disc(
    pos=(0, 0, 0),
    r1=0.5,
    r2=1,
    c="coral",
    alpha=1,
    res=12,
    resphi=None,
):
    """
    Build a 2D annular disc with inner radius `r1` and outer radius `r2`.

    The circumferential resolution defaults to six times the radial one.

    |Disk|
    """
    disk_source = vtk.vtkDiskSource()
    disk_source.SetInnerRadius(r1)
    disk_source.SetOuterRadius(r2)
    disk_source.SetRadialResolution(res)
    disk_source.SetCircumferentialResolution(resphi if resphi else 6 * res)
    disk_source.Update()
    disc_actor = Actor(disk_source.GetOutput(), c, alpha).flat()
    disc_actor.SetPosition(pos)
    settings.collectable_actors.append(disc_actor)
    return disc_actor
def identity_block(x, n_filters):
    """Bottleneck residual block with an identity shortcut (fig 3c, 1x1-3x3-1x1).

    x : input tensor feeding the block
    n_filters : filter count of the two reduced convolutions; the final 1x1
        convolution restores 4 * n_filters channels so the identity add works
    """
    def conv_bn(tensor, filters, kernel, padding="valid", activate=True):
        """One Conv2D -> BatchNorm (-> ReLU) stage."""
        tensor = Conv2D(filters, kernel, strides=(1, 1), padding=padding,
                        use_bias=False, kernel_initializer='he_normal')(tensor)
        tensor = BatchNormalization()(tensor)
        return ReLU()(tensor) if activate else tensor

    # Keep the incoming feature maps for the identity link.
    shortcut = x
    x = conv_bn(x, n_filters, (1, 1))                      # dimensionality reduction
    x = conv_bn(x, n_filters, (3, 3), padding="same")      # bottleneck layer
    x = conv_bn(x, n_filters * 4, (1, 1), activate=False)  # restore, 4x output filters
    # Add the identity link (input) to the residual branch output.
    x = Add()([shortcut, x])
    return ReLU()(x)
def makeDarker(color: colors.Color) -> colors.Color:
    """
    Return a slightly darker version of the given color.

    :param Color color: the color you want to darken
    :return: the new, darker color
    :rtype: Color
    :raises TypeError: if ``color`` is not a :py:class:`~.colors.Color`
    """
    if isinstance(color, colors.Color):
        return color.darker()
    raise TypeError(color_type_error("makeDarker", "color",
                                     "Color", color))
import itertools
def multi_indices(n):
    """Return every multi-index within the specified bounds.

    Produces all arrays ``[b[0], ..., b[dim - 1]]`` with
    ``0 <= b[i] < n[i]`` for each i, as numpy intc arrays in
    lexicographic (row-major) order.
    """
    axes = [range(bound) for bound in n]
    return [np.asarray(index, dtype=np.intc)
            for index in itertools.product(*axes)]
def removeProject(info, project):
    """
    Build the docker command that removes *project* (printed for tracing).

    When checkIfComposerExistsBool(project) is true a ``docker stack rm``
    command is returned, otherwise a plain forced ``docker rm``.

    :param info: unused here; kept for interface compatibility
    :param project: project name (lower-cased into the command)
    :return: the docker command string
    """
    print(project)
    prefix = "docker stack rm " if checkIfComposerExistsBool(project) else "docker rm -f "
    return prefix + project.lower()
def columnize_as_rows(lis, columns, horizontal=False, fill=None):
    """Arrange *lis* into *columns* columns and return the rows.

    Like 'zip' but short columns are padded with *fill* instead of
    dropping elements. Fixes: removed the unused locals ``rowcount``
    and the unused enumerate index.

    :param lis: items to distribute
    :param columns: number of columns
    :param horizontal: passed through to distribute() to pick fill order
    :param fill: padding value for missing cells
    :return: list of row tuples
    """
    data = distribute(lis, columns, horizontal, fill)
    length = max(len(col) for col in data)
    for col in data:
        missing = length - len(col)
        if missing > 0:
            col.extend([fill] * missing)
    return list(zip(*data))
from pathlib import Path
def single_extra_atom_line_v3000_sdf(tmp_path: Path) -> Path:
    """Write a single molecule to a v3000 sdf with an extra atom line.

    The embedded CTAB declares COUNTS 8 atoms but lists nine atom lines;
    that surplus line is the malformed input this fixture provides.

    Args:
        tmp_path: pytest fixture for writing files to a temp directory
    Returns:
        Path to the sdf
    """
    sdf_text = """
 0 0 0 0 0 999 V3000
M V30 BEGIN CTAB
M V30 COUNTS 8 9 0 0 0
M V30 BEGIN ATOM
M V30 1 C 87.71 -95.64 0 0
M V30 2 C 87.71 -81.29 0 0
M V30 3 C 100.18 -74.09 0 0
M V30 4 C 100.18 -59.69 0 0
M V30 5 C 87.71 -52.49 0 0
M V30 6 C 75.24 -59.69 0 0
M V30 7 C 75.24 -74.09 0 0
M V30 8 C 87.71 -38.09 0 0
M V30 9 O 100.18 -30.89 0 0
M V30 END ATOM
M V30 BEGIN BOND
M V30 1 1 1 2
M V30 2 1 2 3
M V30 3 2 3 4
M V30 4 1 4 5
M V30 5 2 5 6
M V30 6 1 6 7
M V30 7 2 7 2
M V30 8 1 5 8
M V30 9 2 8 9
M V30 END BOND
M V30 END CTAB
M END
$$$$
"""
    outpath = tmp_path / "input.sdf"
    with open(outpath, "w") as outh:
        outh.write(sdf_text)
    return outpath
def add_metaclass(metaclass):  # pragma: no cover
    """Class decorator that rebuilds the decorated class under *metaclass*.

    Copied from six. The class body is re-created with the ``__dict__`` /
    ``__weakref__`` descriptors and any ``__slots__`` entries stripped so
    the metaclass can regenerate them.
    """
    def wrapper(cls):
        body = dict(cls.__dict__)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        for slot_name in body.get('__slots__', ()):
            body.pop(slot_name)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def vzerog(v):
    """vzerog(ConstSpiceDouble * v) -> SpiceBoolean

    SWIG-generated binding into the compiled _cspyce0 extension.
    Presumably wraps the CSPICE vzerog_c routine (test whether a
    general-dimension vector is the zero vector) - confirm against the
    CSPICE documentation before relying on this summary.
    """
    return _cspyce0.vzerog(v)
from pathlib import Path
import csv
def read_barcodes(barcodes_file: Path) -> dict:
    """
    Read barcodes and their gene names from a CSV file.

    The header line is skipped and lines whose first field starts with '#'
    are ignored; barcodes are upper-cased. A catch-all '_other' entry is
    appended for reads matching no barcode.

    :param barcodes_file: path to csv file with barcode and gene columns
    :return: mapping of barcode -> {"gene": <name>, "count": 0}
    :raises IOError: when a barcode appears twice in the file
    """
    barcode_dict = dict()
    with open(barcodes_file, "r") as csv_barcode:
        next(csv_barcode)  # drop the header row
        for record in csv.reader(csv_barcode):
            if record[0].startswith("#"):  # comment line
                continue
            barcode = record[0].upper()
            if barcode in barcode_dict:
                logger.error(f"Barcode {barcode} already in dictionary.")
                raise IOError(f"Duplicate error: {barcode} already in dictionary")
            barcode_dict[barcode] = {"gene": record[1], "count": 0}
    # Bucket for reads that match none of the known barcodes.
    barcode_dict["_other"] = {"gene": "_other", "count": 0}
    return barcode_dict
def is_localized(node):
    """Return True when *node* is the argument of a call to the ``_()``
    gettext wrapper (i.e. the message is marked for localization)."""
    parent = node.parent
    return (isinstance(parent, compiler.ast.CallFunc)
            and isinstance(parent.node, compiler.ast.Name)
            and parent.node.name == '_')
from typing import Callable
from typing import Iterable
def skip(count: int) -> Callable[[Iterable[_TSource]], Iterable[_TSource]]:
    """Skip the first elements of a sequence.

    Returns a function that maps a source sequence to a sequence yielding
    only the elements after the first *count* of them.

    Args:
        count: The number of items to skip.
    """
    def _skip(source: Iterable[_TSource]) -> Iterable[_TSource]:
        def generate():
            for position, element in enumerate(source):
                if position >= count:
                    yield element
        return SeqGen(generate)
    return _skip
from threading import Thread
from pathlib import Path
import click
import json
from typing import List
def upload_to_nomad(nomad_configfile, num, mongo_configfile):
    """
    Upload up to *num* launchers to NOMAD:
    1. query the gdrive store for not-yet-uploaded task-id batches
    2. upload each batch on its own thread (one thread per batch)
    :param nomad_configfile: nomad user name and password json file path.
    :param num: maximum number of materials to upload
    :param mongo_configfile: mongo db connections
    :return:
    Success code
    """
    configfile: Path = Path(mongo_configfile)
    full_nomad_config_path: Path = Path(nomad_configfile).expanduser()
    num: int = num
    # Pull the --run flag and working directory from the enclosing click context.
    ctx = click.get_current_context()
    run = ctx.parent.parent.params["run"]
    directory = ctx.parent.params["directory"]
    full_root_dir: Path = Path(directory)
    configfile: Path = Path(configfile)
    if configfile.exists() is False:
        raise FileNotFoundError(f"Config file [{configfile}] is not found")
    # connect to mongo necessary mongo stores
    gdrive_mongo_store = MongograntStore(mongogrant_spec="rw:knowhere.lbl.gov/mp_core_mwu",
                                         collection_name="gdrive",
                                         mgclient_config_path=configfile.as_posix())
    if run:
        gdrive_mongo_store.connect()
        if not full_nomad_config_path.exists():
            raise FileNotFoundError(f"Nomad Config file not found in {full_nomad_config_path}")
        cred: dict = json.load(full_nomad_config_path.open('r'))
        username: str = cred["username"]
        password: str = cred["password"]
        # find the earliest n tasks that has not been uploaded
        task_ids_not_uploaded: List[List[str]] = nomad_find_not_uploaded(num=num, gdrive_mongo_store=gdrive_mongo_store)
        # One worker thread per batch; each uploads its own task-id list.
        threads = []
        for i in range(len(task_ids_not_uploaded)):
            name = f"thread_{i}"
            thread = Thread(target=nomad_upload_data, args=(task_ids_not_uploaded[i],
                                                            username, password,
                                                            gdrive_mongo_store,
                                                            full_root_dir / "tmp_storage", name,))
            threads.append(thread)
            thread.start()
        # Wait for every upload batch before closing the shared store.
        for thread in threads:
            thread.join()
        gdrive_mongo_store.close()
    else:
        logger.info("Not running. Please supply the run flag. ")
    return ReturnCodes.SUCCESS
import os
import logging
def model_fn(model_dir):
    """
    Load the gluon model. Called once when the hosting service starts.

    Locates the first ``*json`` file (symbol) and ``*params`` file
    (weights) in the working directory and imports them as a SymbolBlock.

    :param: model_dir The directory where model files are stored.
    :return: a model (in this case a Gluon network)
    """
    os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    ctx = get_ctx()
    logging.info("Using ctx {}".format(ctx))
    logging.info("Dir content {}".format(os.listdir()))
    symbol_file = [f for f in os.listdir() if f.endswith("json")][0]
    param_file = [f for f in os.listdir() if f.endswith("params")][0]
    return gluon.nn.SymbolBlock.imports(
        symbol_file=symbol_file,
        input_names=["data"],
        param_file=param_file,
        ctx=ctx,
    )
def init_db(uri, echo=True):
    """Create an engine for *uri*, bind metadata and sessions to it, and
    reflect the existing tables. Returns the engine."""
    global meta
    uri = make_url(uri)
    uri.query.setdefault("charset", "utf8")  # default charset unless given
    db_engine = create_engine(uri, echo=echo)
    meta.bind = db_engine
    Session.configure(bind=db_engine)
    reflect_tables()
    return db_engine
def get_weighted_average(We, x, w):
    """
    Compute weighted-average embedding vectors per sentence.

    :param We: We[i,:] is the vector for word i
    :param x: x[i,:] are the indices of the words in sentence i
    :param w: w[i,:] are the weights for the words in sentence i
    :return: emb, where emb[i,:] is the weighted sum of sentence i's word
        vectors divided by the number of non-zero weights
    """
    n_samples = x.shape[0]
    emb = np.zeros((n_samples, We.shape[1]))
    for row in range(n_samples):
        weights = w[row, :]
        word_vectors = We[x[row, :], :]
        emb[row, :] = weights.dot(word_vectors) / np.count_nonzero(weights)
    return emb
def logout_user_cleanup():
    """Log the current user out, clear the session, flash a message, and
    redirect back to the referring page.

    NOTE(review): the print is debug output - consider switching to
    logging. request.referrer can be None when the client sends no
    Referer header; verify redirect(None) is handled upstream.
    """
    print("\n\nGot to logout from: {}".format(request.referrer))
    logout_user()
    session.clear()
    flash("You were logged out!")
    return redirect(request.referrer)
def read_vtk_sowfa(filename):
    """
    Read a SOWFA results .vtk file.

    Returns (x, u): cell-centre coordinates and the velocity field, both as
    numpy arrays with one row per cell.

    :param filename: path to the .vtk file
    :return: tuple (x, u)
    """
    reader = vtk.vtkPolyDataReader()
    reader.SetFileName(filename)
    reader.Update()
    poly_data = reader.GetOutput()
    u = vtk_to_numpy(poly_data.GetCellData().GetArray(0))
    x = np.zeros_like(u)
    for cell_idx in range(0, poly_data.GetNumberOfCells()):
        cell_points = vtk_to_numpy(poly_data.GetCell(cell_idx).GetPoints().GetData())
        # Cell centre = midpoint of the cell's axis-aligned bounding box.
        x[cell_idx, :] = (np.max(cell_points, 0) + np.min(cell_points, 0)) / 2
    return x, u
def compute_primary_orientations(primary_segments, angle_epsilon=0.1):
    """
    Derive the primary orientations from the given primary segments.

    Orientations closer together than angle_epsilon are merged into one
    group; within a group the orientation of the segment with the most
    points wins. The result is ordered by sort_orientations.

    Parameters
    ----------
    primary_segments : list of BoundarySegment
        The primary segments.
    angle_epsilon : float, optional
        Angles within this difference (in radians) count as equal.

    Returns
    -------
    list of float
        The computed primary orientations in radians, sorted by the length
        of the segments carrying them.
    """
    merged = []
    for segment in primary_segments:
        angle = segment.orientation
        segment_size = len(segment.points)
        for entry in merged:
            diff = utils.angle.min_angle_difference(angle, entry['orientation'])
            if diff < angle_epsilon:
                # Same orientation group: the bigger segment's angle wins.
                if segment_size > entry['size']:
                    entry['size'] = segment_size
                    entry['orientation'] = angle
                break
        else:
            merged.append({'orientation': angle, 'size': segment_size})
    return sort_orientations(merged)
def main(global_config, **settings):
    """Build and return the Pyramid WSGI application."""
    config = Configurator(settings=settings)
    for plugin in ('pyramid_chameleon', 'pyramid_debugtoolbar'):
        config.include(plugin)
    # (route name, URL pattern) registrations, in declaration order.
    route_table = (
        ('index', '/'),                                                                # home
        ('collection_documents', '/api/v1/collection/{collection_acronym}/document/'),  # collection documents
        ('journal_documents', '/api/v1/journal/{journal_issn}/document/'),              # journal documents
        ('collection_citations', '/api/v1/collection/{collection_acronym}/citation/'),  # collection citations
        ('journal_citations', '/api/v1/journal/{journal_issn}/citation/'),              # journal citations
    )
    for route_name, pattern in route_table:
        config.add_route(route_name, pattern)
    config.scan()
    return config.make_wsgi_app()
def plot_contribution_map(contribution_map, ax=None, vrange=None, vmin=None, vmax=None, hide_ticks=True, cmap="bwr",
                          percentile=100):
    """
    Visualise a 2D contribution (attribution) map.

    By default uses the "bwr" colormap with a range symmetric around zero,
    (-max(abs(map)), +max(abs(map))).

    Args:
        contribution_map: (H, W) matrix of contributions.
        ax: axis to plot on; a new figure is created when None.
        vrange: symmetric colormap half-range; computed from the data when
            None (or "auto"). vmin/vmax override the individual ends.
        vmin: explicit lower end of the colormap range.
        vmax: explicit upper end of the colormap range.
        hide_ticks: remove axis ticks when True.
        cmap: colormap name.
        percentile: absolute-value percentile used as a clipping cut-off.
    Returns: (axis, image) of the rendered map.
    """
    assert len(contribution_map.shape) == 2, "Contribution map is supposed to only have spatial dimensions.."
    heat = to_numpy(contribution_map)
    # Clip extreme attributions at the requested percentile of |values|.
    cutoff = np.percentile(np.abs(heat), percentile)
    heat = np.clip(heat, -cutoff, cutoff)
    if ax is None:
        _, ax = plt.subplots(1)
    if vrange is None or vrange == "auto":
        vrange = np.max(np.abs(heat.flatten()))
    im = ax.imshow(heat, cmap=cmap,
                   vmin=-vrange if vmin is None else vmin,
                   vmax=vrange if vmax is None else vmax)
    if hide_ticks:
        ax.set_xticks([])
        ax.set_yticks([])
    return ax, im
def pac_mvl(z):
    """Phase-amplitude coupling strength via the mean vector length (MVL).

    Fixes: the previous docstring documented nonexistent ``ang``/``amp``
    parameters and claimed 1-D input only, although ``axis=-1`` already
    supports batched arrays; dead commented-out normalisation code removed.

    Parameters
    ----------
    z : array_like (complex)
        Composite signal amp * exp(1j * phase); the low-frequency phase is
        the angle and the high-frequency amplitude envelope the magnitude.

    Returns
    -------
    float or numpy.ndarray
        |mean(z)| along the last (samples) axis.
    """
    return np.abs(np.mean(z, axis=-1))
def merge_values(list1, list2):
    """Merge two selection value lists and dedup.

    All selection values should be simple (hashable) value types; result
    order is unspecified (set-based dedup), as before.

    Fix: previously an empty ``list1`` caused ``list2`` to be returned
    as-is - neither deduplicated nor copied, so callers could mutate the
    shared list. Both inputs are now always deduplicated and a fresh list
    is returned.
    """
    return list(set(list1) | set(list2))
def load_data(database_filepath):
    """
    Load the DisasterPipeline table from a sqlite database.

    Arguments:
        database_filepath: path to the sqlite database file
    Returns:
        (messages, labels, label_names): the message column, the category
        columns (from column index 4 on) and the category column names.
    """
    engine = create_engine(f'sqlite:///{database_filepath}')
    df = pd.read_sql('SELECT * FROM DisasterPipeline', engine)
    messages = df.message
    labels = df.iloc[:, 4:]
    return messages, labels, list(labels)
import numpy as np
from .._tier0 import empty_image_like
from .._tier0 import execute
from .._tier1 import copy
from .._tier0 import create
from .._tier1 import copy_slice
from .._tier0 import _warn_of_interpolation_not_available
from typing import Union
def affine_transform(source : Image, destination : Image = None, transform : Union[np.ndarray, AffineTransform3D, AffineTransform] = None, linear_interpolation : bool = False, auto_size:bool = False) -> Image:
    """
    Applies an affine transform to an image.
    Parameters
    ----------
    source : Image
        image to be transformed
    destination : Image, optional
        image where the transformed image should be written to
    transform : 4x4 numpy array or AffineTransform3D object or skimage.transform.AffineTransform object or str, optional
        transform matrix or object or string describing the transformation
    linear_interpolation: bool, optional
        If true, bi-/tri-linear interpolation will be applied; if hardware supports it.
        If false, nearest-neighbor interpolation will be applied.
    auto_size:bool, optional
        If true, the destination image size will be determined automatically, depending on the provided transform.
        the transform might be modified so that all voxels of the result image have positions x>=0, y>=0, z>=0 and sit
        tight to the coordinate origin. No voxels will be cropped, the result image will fit in the returned destination.
        Hence, the applied transform may have an additional translation vector that was not explicitly provided. This
        also means that any given translation vector will be neglected.
        If false, the destination image will have the same size as the input image.
        Note: The value of auto-size is ignored if: destination is not None or transform is not an instance of
        AffineTransform3D.
    Returns
    -------
    destination
    """
    # handle output creation
    if destination is None:
        if auto_size and isinstance(transform, AffineTransform3D):
            # This modifies the given transform
            new_size, transform, _ = _determine_translation_and_bounding_box(source, transform)
            destination = create(new_size)
        else:
            # NOTE(review): create_like (and push, used below) are not among
            # the imports visible at the top of this chunk; presumably they
            # are brought in elsewhere in the module -- confirm.
            destination = create_like(source)
    # deal with 2D input images: the OpenCL kernels operate on 3D stacks,
    # so promote a 2D image to a single-slice 3D image.
    if len(source.shape) == 2:
        source_3d = create([1, source.shape[0], source.shape[1]])
        copy_slice(source, source_3d, 0)
        source = source_3d
    # deal with 2D output images (same promotion; remember to copy back)
    original_destination = destination
    copy_back_after_transforming = False
    if len(destination.shape) == 2:
        destination = create([1, destination.shape[0], destination.shape[1]])
        copy_slice(original_destination, destination, 0)
        copy_back_after_transforming = True
    if isinstance(transform, str):
        transform = AffineTransform3D(transform, source)
    # we invert the transform because we go from the target image to the source image to read pixels
    if isinstance(transform, AffineTransform3D):
        transform_matrix = np.asarray(transform.copy().inverse())
    elif isinstance(transform, AffineTransform):
        # Question: Don't we have to invert this one as well? haesleinhuepf
        matrix = np.asarray(transform.params)
        # Promote the 3x3 2D skimage matrix to a 4x4 matrix acting on
        # (x, y, z, 1), with z passed through unchanged.
        matrix = np.asarray([
            [matrix[0,0], matrix[0,1], 0, matrix[0,2]],
            [matrix[1,0], matrix[1,1], 0, matrix[1,2]],
            [0, 0, 1, 0],
            [matrix[2,0], matrix[2,1], 0, matrix[2,2]]
        ])
        transform_matrix = np.linalg.inv(matrix)
    else:
        transform_matrix = np.linalg.inv(transform)
    gpu_transform_matrix = push(transform_matrix)
    kernel_suffix = ''
    if linear_interpolation:
        image = empty_image_like(source)
        copy(source, image)
        # Only switch to the interpolating kernel when the copied image type
        # differs (i.e. the device supports the interpolation-capable type).
        if type(source) != type(image):
            kernel_suffix = '_interpolate'
        else:
            _warn_of_interpolation_not_available()
        source = image
    parameters = {
        "input": source,
        "output": destination,
        "mat": gpu_transform_matrix
    }
    execute(__file__, '../clij-opencl-kernels/kernels/affine_transform_' + str(len(destination.shape)) + 'd' + kernel_suffix + '_x.cl',
            'affine_transform_' + str(len(destination.shape)) + 'd' + kernel_suffix, destination.shape, parameters)
    # deal with 2D output images
    if copy_back_after_transforming:
        copy_slice(destination, original_destination, 0)
    return original_destination
def create_host(values):
    """Create a host from the values.

    Thin pass-through to the configured backend implementation (IMPL).

    :param values: mapping of host attributes used to create the record
    :return: whatever IMPL.create_host returns -- presumably the created
        host record; confirm against the backend implementation
    """
    return IMPL.create_host(values)
import argparse
def parse_arguments():
    """Parse the command line arguments.

    Returns:
        Parsed arguments (argparse.Namespace with ``filenames`` and
        ``version`` attributes).
    """
    parser = argparse.ArgumentParser(description='tail for BAMs')
    # Zero or more BAM files to operate on.
    parser.add_argument('filenames', nargs='*', metavar='FILE',
                        help='BAMs on which to perform the tail operation')
    # Version flag, usable as --version or -v.
    parser.add_argument('--version', '-v', action='store_true',
                        help='print the version')
    return parser.parse_args()
import math
def get_distance(lat_a, long_a, lat_b, long_b):
    """
    Returns the great-circle distance between two points, in kilometres.

    Uses the haversine formula, i.e.:
        a = sin²(Δφ/2) + cos φ1 ⋅ cos φ2 ⋅ sin²(Δλ/2)
        c = 2 ⋅ atan2( √a, √(1−a) )
        d = R ⋅ c
    Keep in mind this is an "as the crow flies" type of estimation.

    Parameters:
    - lat_a: latitude of first point
    - long_a: longitude of first point
    - lat_b: latitude of second point
    - long_b: longitude of second point

    Returns
    A number representing the distance in kilometres
    """
    # Mean Earth radius in kilometres. The original referenced an undefined
    # name `radius`, which raised NameError at runtime.
    EARTH_RADIUS_KM = 6371.0
    phi1 = math.radians(lat_a)
    phi2 = math.radians(lat_b)
    delta_phi = phi2 - phi1
    delta_lambda = math.radians(long_b - long_a)
    # sin²(Δφ/2)
    phi_distance = math.sin(delta_phi / 2) ** 2
    # sin²(Δλ/2)
    delta_distance = math.sin(delta_lambda / 2) ** 2
    # cos φ1 ⋅ cos φ2
    point_cos = math.cos(phi1) * math.cos(phi2)
    # a = sin²(Δφ/2) + cos φ1 ⋅ cos φ2 ⋅ sin²(Δλ/2)
    a = phi_distance + point_cos * delta_distance
    # c = 2 ⋅ atan2( √a, √(1−a) )
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return EARTH_RADIUS_KM * c
def format_heading(level, text):
    """Create a heading of <level> [1, 2 or 3 supported]."""
    # reST-style underline characters, indexed by heading level (1-based).
    underline_chars = ('=', '-', '~')
    underlining = underline_chars[level - 1] * len(text)
    return '%s\n%s\n\n' % (text, underlining)
def get_list_of_temps(temp_string):
    """
    Parse a comma-separated string of temperatures into a numpy array.

    :param temp_string: comma-separated temperatures, e.g. "1.5,2,300"
    :return: tuple (success, error, temps) where success is a bool, error
        is an error message ("" on success) and temps is a numpy array of
        floats (left partially filled with zeros if parsing failed).
    """
    success = True
    error = ""
    tokens = temp_string.split(",")
    temps = np.zeros(len(tokens))
    for i, token in enumerate(tokens):
        try:
            temps[i] = float(token)
        except ValueError:
            # Narrowed from a bare except: only a malformed number should
            # flip the result to failure.
            success = False
            error = "Incorrect temperatures: %s" % (temp_string)
            break
    return success, error, temps
def test():
    """Run a quick smoke test of the Vigenere encryption.

    :return: the ciphertext of "hello" encrypted under key "lemon"
    """
    print('!! Begin Test!..')
    cipher_text = encrypt("vigenere", "hello", "lemon")
    return cipher_text
def get_new_size_zoom(current_size, target_size):
    """
    Return a (width, height) to which an image should be scaled so that
    its smallest dimension covers the target size (zoom-to-fill).
    """
    width, height = current_size
    # Per-axis scale factors; taking the larger one guarantees both axes
    # reach at least the target size.
    factor = max(target_size[0] / width, target_size[1] / height)
    return (int(width * factor), int(height * factor))
def get_name_component(x509_name, component):
    """Gets single name component from X509 name.

    Scans all components; when several share the requested key, the value
    of the last match wins. Returns "" when the key is absent.
    """
    result = ""
    for key, value in x509_name.get_components():
        if key == component:
            result = value
    return result
def hurst(ts):
    """Estimate the Hurst exponent of the time series vector ts.

    Implementation follows the description at
    http://www.quantstart.com/articles/Basics-of-Statistical-Mean-Reversion-Testing
    """
    # For each lag, take sqrt of the std-dev of the lagged differences.
    tau = []
    for lag in range(2, 100):
        lagged_diff = subtract(ts[lag:], ts[:-lag])
        tau.append(sqrt(std(lagged_diff)))
    # The slope of log(tau) versus log(lag) is half the Hurst exponent.
    coeffs = polyfit(log(range(2, 100)), log(tau), 1)
    return coeffs[0] * 2.0
def _bunchify(b):
    """Return a Bunch copy of dict *b* whose direct dict values are also
    converted to Bunch (one level deep)."""
    assert isinstance(b, dict)
    result = Bunch(b)
    # Only values are rebound here, so iterating while assigning is safe.
    for key in result:
        value = result[key]
        if isinstance(value, dict):
            result[key] = Bunch(value)
    return result
import random
def generate_random_username():
    """Generate a random username for anonymous users.

    :returns: string of the form 'anon-<N>' with N drawn uniformly from
        [0, MAX_INT_ANONYMOUS]
    """
    # Re-seed from system entropy/time so repeated calls diverge.
    random.seed()
    suffix = random.randint(0, MAX_INT_ANONYMOUS)
    return 'anon-' + str(suffix)
import os
def subvolume_snapshot(source, dest=None, name=None, read_only=False):
    """
    Create a snapshot of a source subvolume
    source
        Source subvolume from where to create the snapshot
    dest
        If only dest is given, the subvolume will be named as the
        basename of the source
    name
        Name of the snapshot
    read_only
        Create a read only snapshot
    CLI Example:
    .. code-block:: bash
        salt '*' btrfs.subvolume_snapshot /var/volumes/tmp dest=/.snapshots
        salt '*' btrfs.subvolume_snapshot /var/volumes/tmp name=backup
    """
    if not dest and not name:
        raise CommandExecutionError("Provide parameter dest, name, or both")
    cmd = ["btrfs", "subvolume", "snapshot"]
    if read_only:
        cmd.append("-r")
    # BUG FIX: the source subvolume must precede the snapshot target in the
    # btrfs CLI (`btrfs subvolume snapshot [-r] <source> <dest>`); it was
    # previously never appended, producing an invalid command line.
    cmd.append(source)
    if dest and not name:
        cmd.append(dest)
    if dest and name:
        name = os.path.join(dest, name)
    if name:
        cmd.append(name)
    res = __salt__["cmd.run_all"](cmd)
    salt.utils.fsutils._verify_run(res)
    return True
from typing import Union
import pathlib
import io
from typing import Optional
from typing import Dict
from typing import Any
import torch
from typing import Tuple
import zipfile
import warnings
import os
def load_from_zip_file(
    load_path: Union[str, pathlib.Path, io.BufferedIOBase],
    load_data: bool = True,
    custom_objects: Optional[Dict[str, Any]] = None,
    device: Union[torch.device, str] = "auto",
    verbose: int = 0,
    print_system_info: bool = False,
) -> (Tuple[Optional[Dict[str, Any]], Optional[TensorDict], Optional[TensorDict]]):
    """
    Load model data from a .zip archive
    :param load_path: Where to load the model from
    :param load_data: Whether we should load and return data
        (class parameters). Mainly used by 'load_parameters' to only load model parameters (weights)
    :param custom_objects: Dictionary of objects to replace
        upon loading. If a variable is present in this dictionary as a
        key, it will not be deserialized and the corresponding item
        will be used instead. Similar to custom_objects in
        ``keras.models.load_model``. Useful when you have an object in
        file that can not be deserialized.
    :param device: Device on which the code should run.
    :param verbose: Verbosity level, 0 means only warnings, 2 means debug information.
    :param print_system_info: Whether to print or not the system info
        about the saved model.
    :return: Class parameters, model state_dicts (aka "params", dict of state_dict)
        and dict of pytorch variables
    """
    # NOTE(review): open_path presumably normalizes str/Path/handle inputs
    # and appends the ".zip" suffix when missing -- confirm in save_util.
    load_path = open_path(load_path, "r", verbose=verbose, suffix="zip")
    # set device to cpu if cuda is not available
    device = get_device(device=device)
    # Open the zip archive and load data
    try:
        with zipfile.ZipFile(load_path) as archive:
            namelist = archive.namelist()
            # If data or parameters is not in the
            # zip archive, assume they were stored
            # as None (_save_to_file_zip allows this).
            data = None
            pytorch_variables = None
            params = {}
            # Debug system info first
            if print_system_info:
                if "system_info.txt" in namelist:
                    print("== SAVED MODEL SYSTEM INFO ==")
                    print(archive.read("system_info.txt").decode())
                else:
                    warnings.warn(
                        "The model was saved with SB3 <= 1.2.0 and thus cannot print system information.",
                        UserWarning,
                    )
            if "data" in namelist and load_data:
                # Load class parameters that are stored
                # with either JSON or pickle (not PyTorch variables).
                json_data = archive.read("data").decode()
                data = json_to_data(json_data, custom_objects=custom_objects)
            # Check for all .pth files and load them using th.load.
            # "pytorch_variables.pth" stores PyTorch variables, and any other .pth
            # files store state_dicts of variables with custom names (e.g. policy, policy.optimizer)
            pth_files = [file_name for file_name in namelist if os.path.splitext(file_name)[1] == ".pth"]
            for file_path in pth_files:
                with archive.open(file_path, mode="r") as param_file:
                    # File has to be seekable, but param_file is not, so load in BytesIO first
                    # fixed in python >= 3.7
                    file_content = io.BytesIO()
                    file_content.write(param_file.read())
                    # go to start of file
                    file_content.seek(0)
                    # Load the parameters with the right ``map_location``.
                    # Remove ".pth" ending with splitext
                    th_object = torch.load(file_content, map_location=device)
                    # "tensors.pth" was renamed "pytorch_variables.pth" in v0.9.0, see PR #138
                    if file_path == "pytorch_variables.pth" or file_path == "tensors.pth":
                        # PyTorch variables (not state_dicts)
                        pytorch_variables = th_object
                    else:
                        # State dicts. Store into params dictionary
                        # with same name as in .zip file (without .pth)
                        params[os.path.splitext(file_path)[0]] = th_object
    except zipfile.BadZipFile:
        # load_path wasn't a zip file
        raise ValueError(f"Error: the file {load_path} wasn't a zip-file")
    return data, params, pytorch_variables
import math
def GetRadar(dt):
    """Simulate a radar slant-range measurement.

    The target flies at roughly 1 km altitude and 100 m/s speed; about 5%
    measurement noise is added. The target position persists between calls
    via a function attribute, so call once per measurement with ``dt``
    seconds elapsed since the previous call.

    :param dt: time since the last call
    :return: noisy slant range to the target
    """
    # Persistent position state, initialised on the first call.
    if not hasattr (GetRadar, "posp"):
        GetRadar.posp = 0
    velocity = 100 + .5 * randn()
    altitude = 1000 + 10 * randn()
    position = GetRadar.posp + velocity * dt
    noise = 0 + position * 0.05 * randn()
    slant_range = math.sqrt(position ** 2 + altitude ** 2) + noise
    GetRadar.posp = position
    return slant_range
def get_gin_feature(inputs, neigh_idx, k):
    """
    Aggregate neighbor features for each point with GIN
    GIN conv layer:
        Xu, Keyulu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka.
        "How Powerful are Graph Neural Networks?."
        arXiv:1810.00826 (2018).
    Args:
        inputs: (batch_size, num_vertices, 1, num_dims)
        neigh_idx: (batch_size, num_vertices, k)
        k: int -- NOTE(review): unused in the body; presumably kept for
            interface parity with sibling aggregation functions -- confirm.
    Returns:
        aggregated features: (batch_size, num_vertices, 1, num_dims)
    """
    batch_size = inputs.get_shape().as_list()[0]
    # NOTE(review): in_copy is never used after this assignment.
    in_copy = inputs
    # Drop the singleton axis: (B, V, 1, D) -> (B, V, D).
    inputs = tf.squeeze(inputs)
    if batch_size == 1:
        # squeeze also removed the batch axis when B == 1; restore it.
        inputs = tf.expand_dims(inputs, 0)
    inputs_central = inputs
    inputs_shape = inputs.get_shape()
    batch_size = inputs_shape[0].value
    num_vertices = inputs_shape[1].value
    num_dims = inputs_shape[2].value
    # Per-batch offset so neighbor indices address the flattened (B*V, D)
    # feature tensor.
    idx = tf.range(batch_size) * num_vertices
    idx = tf.reshape(idx, [batch_size, 1, 1])
    inputs_flat = tf.reshape(inputs, [-1, num_dims])
    inputs_neighbors = tf.gather(inputs_flat, neigh_idx+idx)
    neigh_features = inputs_neighbors
    # GIN aggregation: sum over the neighbor axis.
    # NOTE(review): keep_dims is the TF1 spelling; TF2 renamed it keepdims.
    aggr_neigh_features = tf.reduce_sum(neigh_features, axis=-2, keep_dims=True)
    aggr_features = aggr_neigh_features
    return aggr_features
import os
import sys
def gen_training_matrix(directory_path, output_file, cols_to_ignore):
    """
    Reads the csv files in directory_path and assembles the training matrix with
    the features extracted using the functions from EEG_feature_extraction.

    Parameters:
        directory_path (str): directory containing the CSV files to process.
        output_file (str): filename for the output file.
        cols_to_ignore (list): list of columns to ignore from the CSV

    Returns:
        None. The assembled, row-shuffled matrix is written to output_file.

    Author:
        Original: [lmanso]
        Updates and documentation: [fcampelo]
        Updates: [vdevane]
    """
    # Initialise return matrix
    FINAL_MATRIX = None
    for x in os.listdir(directory_path):
        # Ignore non-CSV files
        if not x.lower().endswith('.csv'):
            continue
        # For safety we'll ignore files containing the substring "test".
        # [Test files should not be in the dataset directory in the first place]
        if 'test' in x.lower():
            continue
        try:
            name, state, _ = x[:-4].split('-')
        except ValueError:
            # File name does not follow the <name>-<state>-<...> pattern.
            print ('Wrong file name', x)
            sys.exit(-1)
        # BUG FIX: the original compared state.lower() against capitalised
        # literals ('Negative', 'Neutral', 'Positive'), which can never
        # match, so every file was rejected. Compare lowercase-to-lowercase.
        state_key = state.lower()
        if state_key == 'negative':
            state = 2.
        elif state_key == 'neutral':
            state = 1.
        elif state_key == 'positive':
            state = 0.
        else:
            print ('Wrong file name', x)
            sys.exit(-1)
        print ('Using file', x)
        full_file_path = directory_path + '/' + x
        vectors, header = generate_feature_vectors_from_samples(file_path = full_file_path,
                                                                nsamples = 150,
                                                                period = 1.,
                                                                state = state,
                                                                remove_redundant = True,
                                                                cols_to_ignore = cols_to_ignore)
        print ('resulting vector shape for the file', vectors.shape)
        if FINAL_MATRIX is None:
            FINAL_MATRIX = vectors
        else:
            FINAL_MATRIX = np.vstack( [ FINAL_MATRIX, vectors ] )
    print ('FINAL_MATRIX', FINAL_MATRIX.shape)
    # Shuffle rows so downstream splits are not ordered by source file.
    np.random.shuffle(FINAL_MATRIX)
    # Save to file
    np.savetxt(output_file, FINAL_MATRIX, delimiter = ',',
               header = ','.join(header),
               comments = '')
    return None
def five_crops(image, crop_size):
    """ Returns the central and four corner crops of `crop_size` from `image`.

    Args (assumed -- confirm against callers):
        image: HxW(xC) image tensor.
        crop_size: length-2 (height, width), each <= the image dimension.

    Returns:
        Tuple (center, top_left, top_right, bottom_left, bottom_right).
    """
    image_size = tf.shape(image)[:2]
    crop_margin = tf.subtract(image_size, crop_size)
    assert_size = tf.assert_non_negative(
        crop_margin, message='Crop size must be smaller or equal to the image size.')
    with tf.control_dependencies([assert_size]):
        # Center crop: offset by half the margin on each axis.
        top_left = tf.floor_div(crop_margin, 2)
        bottom_right = tf.add(top_left, crop_size)
        center = image[top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]]
        # NOTE(review): when crop_margin is 0 along an axis, the negative
        # slice bound `:-0` selects an empty range, so the corner crops
        # would be empty when crop_size equals the image size -- confirm
        # callers never pass equal sizes.
        top_left = image[:-crop_margin[0], :-crop_margin[1]]
        top_right = image[:-crop_margin[0], crop_margin[1]:]
        bottom_left = image[crop_margin[0]:, :-crop_margin[1]]
        bottom_right = image[crop_margin[0]:, crop_margin[1]:]
    return center, top_left, top_right, bottom_left, bottom_right
from typing import Dict
from re import A
def get_tfms(conf: DictConfig) -> Dict[str, A.Compose]:
    """
    Build the albumentations pipelines for the train, valid and test splits
    from the given config, returned as a dictionary keyed by split name.
    """
    split_configs = {
        "train": conf.augmentation.train,
        "valid": conf.augmentation.valid,
        "test": conf.augmentation.test,
    }
    transforms = {}
    for split, entries in split_configs.items():
        # Instantiate each configured augmentation class with its params.
        tfms = [load_obj(e["class_name"])(**e["params"]) for e in entries]
        # Fresh BboxParams per split, mirroring the original construction.
        transforms[split] = A.Compose(
            tfms,
            bbox_params=A.BboxParams(
                format="pascal_voc", label_fields=["class_labels"]
            ),
        )
    return transforms
def song_line(line):
    """Parse one line of the musixmatch dataset.

    Parameters
    ----------
    line: str
        One comma-separated line: MSD track id, musixmatch track id, then
        zero or more "index:count" word entries.

    Returns
    -------
    dict
        track_id: Million song dataset track id, track_id_musixmatch:
        Musixmatch track id and bag_of_words: Bag of words dict in
        {word: count} format

    Notes
    -----
    Musixmatch starts words at index 1; indices are shifted to start at 0.
    """
    track_id, track_id_musixmatch, *word_entries = line.split(',')
    bag_of_words_dict = {}
    for entry in word_entries:
        idx, count = entry.split(':')
        # shift index so it starts at zero
        bag_of_words_dict[int(idx) - 1] = int(count)
    return dict(track_id=track_id, track_id_musixmatch=track_id_musixmatch,
                bag_of_words=bag_of_words_dict)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.