| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
|
import random
def ports_info(ptfadapter, duthost, setup, tx_dut_ports):
"""
Return:
    dut_iface - DUT interface name expected to receive packets from PTF
ptf_tx_port_id - Port ID used by PTF for sending packets from expected PTF interface
dst_mac - DUT interface destination MAC address
src_mac - PTF interface source MAC address
"""
data = {}
data["dut_iface"] = random.choice(tx_dut_ports.keys())
data["ptf_tx_port_id"] = setup["dut_to_ptf_port_map"][data["dut_iface"]]
data["dst_mac"] = get_dut_iface_mac(duthost, data["dut_iface"])
data["src_mac"] = ptfadapter.dataplane.ports[(0, data["ptf_tx_port_id"])].mac()
return data
|
14aef7e68386872a1d960329f2f8bee452aa9e29
| 3,647,800
|
def test_text_single_line_of_text(region, projection):
"""
    Place a single line of text at some x, y location.
"""
fig = Figure()
fig.text(
region=region,
projection=projection,
x=1.2,
y=2.4,
text="This is a line of text",
)
return fig
|
0e82165a2717fe9279015d3823b717a870b94e05
| 3,647,801
|
def safely_get_form(request, domain, instance_id):
"""Fetches a form and verifies that the user can access it."""
form = get_form_or_404(domain, instance_id)
if not can_edit_form_location(domain, request.couch_user, form):
raise location_restricted_exception(request)
return form
|
b3ba8da253a6455f5aeb65f828f8c28c826ac2d2
| 3,647,802
|
def generate_hazard_rates(n, d, timelines, constant=False, independent=0, n_binary=0, model="aalen"):
"""
n: the number of instances
d: the number of covariates
    timelines: the observational times
    constant: make the coefficients constant (not time dependent)
    n_binary: the number of binary covariates
    model: from ["aalen", "cox"]
    Returns:
        hazard rates: (t,n) dataframe,
        coefficients: (t,d+1) dataframe of coefficients,
        covariates: (n,d) dataframe
"""
covariates = generate_covariates(n, d, n_binary=n_binary)
if model == "aalen":
coefficients = time_varying_coefficients(d + 1, timelines, independent=independent, constant=constant)
hazard_rates = np.dot(covariates, coefficients.T)
return pd.DataFrame(hazard_rates.T, index=timelines), coefficients, pd.DataFrame(covariates)
elif model == "cox":
covariates = covariates[:, :-1]
coefficients = constant_coefficients(d, timelines, independent)
baseline = time_varying_coefficients(1, timelines)
hazard_rates = np.exp(np.dot(covariates, coefficients.T)) * baseline[baseline.columns[0]].values
coefficients["baseline: " + baseline.columns[0]] = baseline.values
return pd.DataFrame(hazard_rates.T, index=timelines), coefficients, pd.DataFrame(covariates)
else:
        raise ValueError("model must be one of 'aalen' or 'cox'")
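# For reference, a hedged reading of the two branches above (inferred from the
# code itself, not from external documentation):
#   Aalen (additive):       h_i(t) = [x_i, 1] . beta(t)          (time-varying coefficients)
#   Cox (multiplicative):   h_i(t) = exp(x_i . beta) * b0(t)     (constant coefficients, time-varying baseline)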
|
9c0da64f5796f57d474822121e1af5ca8ebb25e2
| 3,647,803
|
import urllib2
def load_graph(graph_url):
"""
Function that loads a graph given the URL
for a text representation of the graph
Returns a dictionary that models a graph
"""
graph_file = urllib2.urlopen(graph_url)
graph_text = graph_file.read()
graph_lines = graph_text.split('\n')
graph_lines = graph_lines[ : -1]
print "Loaded graph with", len(graph_lines), "nodes"
answer_graph = {}
for line in graph_lines:
neighbors = line.split(' ')
node = int(neighbors[0])
answer_graph[node] = set([])
for neighbor in neighbors[1 : -1]:
answer_graph[node].add(int(neighbor))
return answer_graph
|
d346fb75f5ff872147a166948af65bb52bab739c
| 3,647,804
|
import torch
def calculate_regularization_term(means, n_objects, norm):
"""means: bs, n_instances, n_filters"""
bs, n_instances, n_filters = means.size()
reg_term = 0.0
for i in range(bs):
if n_objects[i]:
_mean_sample = means[i, : n_objects[i], :] # n_objects, n_filters
_norm = torch.norm(_mean_sample, norm, 1)
reg_term += torch.mean(_norm)
reg_term = reg_term / bs
return reg_term
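# Hypothetical usage sketch (illustrative values, not from the original source):
# a toy batch with bs=2 samples, up to 4 instances each, 8-dim embeddings.
means = torch.randn(2, 4, 8)
n_objects = [3, 2]                 # number of valid instances per sample
reg = calculate_regularization_term(means, n_objects, norm=2)
print(float(reg))                  # mean L2 norm of the valid cluster means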
|
b6eb43a8915449c7e86d01a08b3ea2e77ae51064
| 3,647,805
|
from typing import Sequence
from typing import Union

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.axes import Axes
from matplotlib.colors import Colormap
def plot_timeseries_histograms(
axes: Axes,
data: pd.DataFrame,
bins: Union[str, int, np.ndarray, Sequence[Union[int, float]]] = "auto",
colormap: Colormap = cm.Blues,
**plot_kwargs,
) -> Axes: # pragma: no cover
"""Generate a heat-map-like plot for time-series sample data.
The kind of input this function expects can be obtained from an
XArray object as follows:
.. code:
data = az_post_trace.posterior_predictive.Y_t[chain_idx].loc[
{"dt": slice(t1, t2)}
]
data = data.to_dataframe().Y_t.unstack(level=0)
Parameters
==========
axes
The Matplotlib axes to use for plotting.
data
The sample data to be plotted. This should be in "wide" format: i.e.
the index should be "time" and the columns should correspond to each
sample.
bins
The `bins` parameter passed to ``np.histogram``.
colormap
        The Matplotlib colormap used to show relative frequencies within bins.
plot_kwargs
Keywords passed to ``fill_between``.
"""
index = data.index
y_samples = data.values
n_t = len(index)
# generate histograms and bins
list_of_hist, list_of_bins = [], []
for t in range(n_t):
# TODO: determine proper range=(np.min(Y_t), np.max(Y_t))
hist, bins_ = np.histogram(y_samples[t], bins=bins, density=True)
if np.sum(hist > 0) == 1:
hist, bins_ = np.array([1.0]), np.array([bins_[0], bins_[-1]])
list_of_hist.append(hist)
list_of_bins.append(bins_)
if axes is None:
_, (axes) = plt.subplots(nrows=1, ncols=1, sharex=True, figsize=(12, 4))
axes.plot(index, np.mean(y_samples, axis=1), alpha=0.0, drawstyle="steps")
for t in range(n_t):
mask = index == index[t]
hist, bins_ = list_of_hist[t], list_of_bins[t]
# normalize bin weights for plotting
hist = hist / np.max(hist) * 0.85 if len(hist) > 1 else hist
n = len(hist)
# construct predictive arrays to plot
y_t_ = np.tile(bins_, (n_t, 1))
# include consecutive time points to create grid-ish steps
if t > 0:
mask = np.logical_or(mask, index == index[t - 1])
for i in range(n):
color_val = hist[i]
color = colormap(color_val) if color_val else (1, 1, 1, 1)
plot_kwargs.setdefault("step", "pre")
axes.fill_between(
index,
y_t_[:, i],
y_t_[:, i + 1],
where=mask,
color=color,
**plot_kwargs,
)
return axes
|
5f207097478f73d969e1f85f0aa2bbe5f894f038
| 3,647,806
|
def mode(x):
""" Find most frequent element in array.
Args:
x (List or Array)
Returns:
Input array element type: Most frequent element
"""
vals, counts = np.unique(x, return_counts=True)
return vals[np.argmax(counts)]
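# Minimal usage sketch (assumes numpy is imported as np, as the body already does):
assert mode(np.array([1, 2, 2, 3, 2])) == 2
assert mode(np.array(["a", "b", "b"])) == "b"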
|
b73bf301ca9ebf45f3a6698f8b6d45a5640cb301
| 3,647,807
|
def has_path(matrix, path: str) -> bool:
"""
    Given a matrix, determine whether there is a path for a given string.
Parameters
----------
path: str
A given path, like "abcd"
Returns
-------
out: bool
Whether the given path can be found in the matrix
"""
if not path:
return True
if not matrix[0]:
return False
rows, cols = len(matrix), len(matrix[0])
    visited = [[False] * cols for _ in range(rows)]
plen = 0
for row in range(rows):
for col in range(cols):
hasp = has_path_core(matrix, row, col, rows, cols,
path, plen, visited)
if hasp:
return True
return False
|
bbde72992b762dd73c44c60da675da829255000d
| 3,647,808
|
def gensim_processing(data):
"""
    Here we use gensim to define bi-grams and tri-grams which enable us to create a dictionary and corpus
We then process the data by calling the process_words function from our utils folder
"""
#build the models first
bigram = gensim.models.Phrases(data, min_count=3, threshold=15) #We're lowering the threshold as we've not a lot of data
trigram = gensim.models.Phrases(bigram[data], threshold=15)
#Then fit them to the data
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
#We further process the data using spacy and allow Nouns and Adjectives to pass (not verbs or adverbs!)
data_processed = lda_utils.process_words(data,nlp, bigram_mod, trigram_mod, allowed_postags=["NOUN","ADJ"])
#We now have a list of words which can be used to train the LDA model
return data_processed
|
67a4d9a90c8ea9809980d9871b769288915fe3cc
| 3,647,809
|
import os
import re
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
|
9d89dacffe7ce865f2cb0702ccc37a57365af594
| 3,647,810
|
import os
def image_upload_to(instance, filename):
"""Create the path where to store the files.
If the file instance is a Sponsor, the file has to be the logo so it will be uploaded to
MEDIA_ROOT/sponsors/<sponsor_name>/logo<ext>.
"""
logger.debug("Hello!")
path = None
basename, ext = os.path.splitext(filename)
if isinstance(instance, Category):
path = os.path.join('categories', instance.slug, 'img{}'.format(ext))
logger.info("Image {filename} saved in {path}".format(path=path, filename=filename))
return path
|
a82dc1bbdcfdb071ce0c578930e00ada64206673
| 3,647,811
|
def _distances(value_domain, distance_metric, n_v):
"""Distances of the different possible values.
Parameters
----------
value_domain : array_like, with shape (V,)
Possible values V the units can take.
If the level of measurement is not nominal, it must be ordered.
distance_metric : callable
Callable that return the distance of two given values.
n_v : ndarray, with shape (V,)
Number of pairable elements for each value.
Returns
-------
d : ndarray, with shape (V, V)
Distance matrix for each value pair.
"""
return np.array([[distance_metric(v1, v2, i1=i1, i2=i2, n_v=n_v)
for i2, v2 in enumerate(value_domain)]
for i1, v1 in enumerate(value_domain)])
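# Hypothetical usage sketch (assumes numpy is imported as np): a nominal-level
# metric (0 if equal, 1 otherwise); the i1/i2/n_v keywords are accepted but unused here.
def nominal_metric(v1, v2, i1=None, i2=None, n_v=None):
    return float(v1 != v2)

d = _distances(value_domain=[0, 1, 2], distance_metric=nominal_metric, n_v=np.array([3, 2, 1]))
# d is a 3x3 array with zeros on the diagonal and ones elsewhere.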
|
90c362db28497569a50475d7f6040755b1cfffea
| 3,647,812
|
import torch
import math
def log_mvn_likelihood(mean: torch.FloatTensor, covariance: torch.FloatTensor, observation: torch.FloatTensor) -> torch.FloatTensor:
"""
all torch primitives
all non-diagonal elements of covariance matrix are assumed to be zero
"""
k = mean.shape[0]
variances = covariance.diag()
log_likelihood = 0
for i in range(k):
        # per-dimension contribution of an independent normal; the 2*pi constant
        # enters once per dimension, not k times
        log_likelihood += - 0.5 * torch.log(variances[i]) \
                          - 0.5 * math.log(2 * math.pi) \
                          - 0.5 * ((observation[i] - mean[i])**2 / variances[i])
return log_likelihood
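# Hypothetical sanity check (not from the source): with a diagonal covariance the
# sum of univariate normal log-densities should match torch.distributions.
mean = torch.tensor([0.0, 1.0])
covariance = torch.diag(torch.tensor([1.0, 4.0]))
observation = torch.tensor([0.5, -0.5])
reference = torch.distributions.MultivariateNormal(mean, covariance).log_prob(observation)
assert torch.isclose(log_mvn_likelihood(mean, covariance, observation), reference)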
|
6333ea91ddff9ac685f18954c5b7344846810ec3
| 3,647,813
|
def M_Mobs(H0, M_obs):
"""
    Given an absolute magnitude quoted for H0 = 100, return it rescaled to the given Hubble constant H0
"""
return M_obs + 5.*np.log10(H0/100.)
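# Worked example (illustrative): for H0 = 70 and M_obs = -19.3,
#   M = -19.3 + 5*log10(70/100) ≈ -19.3 - 0.77 ≈ -20.07
# i.e. the H0 = 100 value is shifted by 5*log10(H0/100).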
|
e7f817eaf281f2dd64f33ea4af44cd1cf9da31fa
| 3,647,814
|
def generate_proctoring_requirements_email_context(user, course_id):
"""
Constructs a dictionary for use in proctoring requirements email context
Arguments:
user: Currently logged-in user
course_id: ID of the proctoring-enabled course the user is enrolled in
"""
course_module = modulestore().get_course(course_id)
return {
'user': user,
'course_name': course_module.display_name,
'proctoring_provider': capwords(course_module.proctoring_provider.replace('_', ' ')),
'proctoring_requirements_url': settings.PROCTORING_SETTINGS.get('LINK_URLS', {}).get('faq', ''),
'id_verification_url': IDVerificationService.get_verify_location(),
}
|
fc594882b68b7f1f554fa1681943d49b722ae229
| 3,647,815
|
import random
def mutate_strings(s):
"""Return s with a random mutation applied"""
mutators = [
delete_random_character,
insert_random_character,
flip_random_character
]
mutator = random.choice(mutators)
# print(mutator)
return mutator(s)
|
0ba9dd533da44bc2051a7076b775177f29f4aaa6
| 3,647,816
|
def get_one_hot(inputs, num_classes):
"""Get one hot tensor.
Parameters
----------
inputs: 3d numpy array (a x b x 1)
Input array.
num_classes: integer
Number of classes.
Returns
-------
One hot tensor.
3d numpy array (a x b x n).
"""
onehots = np.zeros(shape=tuple(list(inputs.shape[:-1]) + [num_classes]))
for i in range(inputs.shape[0]):
for j in range(inputs.shape[1]):
try:
onehots[i, j, inputs[i, j, 0]] = 1.0
except IndexError:
onehots[i, j, 0] = 1.0
return onehots
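# Hypothetical usage sketch (assumes numpy is imported as np):
labels = np.array([[[0], [2]], [[1], [5]]])       # shape (2, 2, 1)
onehots = get_one_hot(labels, num_classes=3)      # shape (2, 2, 3)
# The out-of-range label 5 falls back to class 0 via the IndexError branch.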
|
2f4a8b3a60a90a8f81579dd5938a1bab91cb5537
| 3,647,817
|
def one_hot_encoder(batch_inds, num_categories):
"""Applies one-hot encoding from jax.nn."""
one_hots = jax.nn.one_hot(batch_inds, num_classes=num_categories)
return one_hots
|
85c15859555ee1bdec64adc627f34cc161c7e66c
| 3,647,818
|
def part1(entries: defaultdict) -> int:
"""part1 solver take the entries and return the part1 solution"""
return calculate(entries, 80)
|
a35a559395f0c53eeac4600aaa28bc04d3e1766f
| 3,647,819
|
def ceki_filter(data, bound):
""" Check if convergence checks ceki are within bounds"""
ceki = data["ceki"].abs() < bound
return ceki
|
09cd53f44241b13cf77eb2299c802ed238580259
| 3,647,820
|
def get_middleware(folder, request_name, middlewares=None):
""" Gets the middleware for the given folder + request """
middlewares = middlewares or MW
if folder:
middleware = middlewares[folder.META.folder_name + "_" + request_name]
else:
middleware = middlewares[request_name]
if middleware is None:
def default_middleware(run, kwargs, env):
return run(kwargs)
middleware = default_middleware
return middleware
|
720aafa5a3d0ef265eeaa8fe40a68c7024b0adc3
| 3,647,821
|
from typing import Dict
import types
from typing import List
def convert_dm_compatible_observations(
observes: Dict,
dones: Dict[str, bool],
observation_spec: Dict[str, types.OLT],
env_done: bool,
possible_agents: List,
) -> Dict[str, types.OLT]:
"""Convert Parallel observation so it's dm_env compatible.
Args:
observes : observations per agent.
dones : dones per agent.
observation_spec : env observation spec.
env_done : is env done.
possible_agents : possible agents in env.
Returns:
a dm compatible observation.
"""
observations: Dict[str, types.OLT] = {}
for agent in possible_agents:
# If we have a valid observation for this agent.
if agent in observes:
observation = observes[agent]
if isinstance(observation, dict) and "action_mask" in observation:
legals = observation["action_mask"].astype(
observation_spec[agent].legal_actions.dtype
)
# Environments like flatland can return tuples for observations
if isinstance(observation_spec[agent].observation, tuple):
# Assuming tuples all have same type.
observation_dtype = observation_spec[agent].observation[0].dtype
else:
observation_dtype = observation_spec[agent].observation.dtype
observation = observation["observation"].astype(observation_dtype)
else:
                # TODO Handle legal actions better for continuous envs,
# maybe have min and max for each action and clip the
# agents actions accordingly
legals = np.ones(
observation_spec[agent].legal_actions.shape,
dtype=observation_spec[agent].legal_actions.dtype,
)
# If we have no observation, we need to use the default.
else:
# Handle tuple observations
if isinstance(observation_spec[agent].observation, tuple):
observation_spec_list = []
for obs_spec in observation_spec[agent].observation:
observation_spec_list.append(
np.zeros(
obs_spec.shape,
dtype=obs_spec.dtype,
)
)
observation = tuple(observation_spec_list) # type: ignore
else:
observation = np.zeros(
observation_spec[agent].observation.shape,
dtype=observation_spec[agent].observation.dtype,
)
legals = np.ones(
observation_spec[agent].legal_actions.shape,
dtype=observation_spec[agent].legal_actions.dtype,
)
if agent in dones:
terminal = dones[agent]
else:
terminal = env_done
observations[agent] = types.OLT(
observation=observation,
legal_actions=legals,
terminal=np.asarray([terminal], dtype=np.float32),
)
return observations
|
8dfe814037144e2da74375b0767f5dcde95ae44f
| 3,647,822
|
def tf_repeat_2d(a, repeats):
"""Tensorflow version of np.repeat for 2D"""
assert len(a.get_shape()) == 2
a = tf.expand_dims(a, 0)
a = tf.tile(a, [repeats, 1, 1])
return a
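# Hypothetical usage sketch (assumes TensorFlow is imported as tf):
a = tf.constant([[1, 2], [3, 4]])
b = tf_repeat_2d(a, repeats=3)   # shape (3, 2, 2): the 2-D tensor stacked 3 times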
|
8337cbef8459a1403fc6a681f89c14d6ae3a00a5
| 3,647,823
|
import torch
def accuracy(output, target, topk=(1,), output_has_class_ids=False):
"""Computes the accuracy over the k top predictions for the specified values of k"""
if not output_has_class_ids:
output = torch.Tensor(output)
else:
output = torch.LongTensor(output)
target = torch.LongTensor(target)
with torch.no_grad():
maxk = max(topk)
batch_size = output.shape[0]
if not output_has_class_ids:
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
else:
pred = output[:, :maxk].t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
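# Hypothetical usage sketch: three samples, four classes, top-1 and top-2 accuracy.
logits = [[0.1, 0.8, 0.1, 0.0],   # target 1 -> correct at rank 1
          [0.5, 0.3, 0.1, 0.1],   # target 1 -> correct only at rank 2
          [0.1, 0.1, 0.1, 0.7]]   # target 2 -> wrong in both
targets = [1, 1, 2]
top1, top2 = accuracy(logits, targets, topk=(1, 2))
# top1 ≈ tensor([33.33]), top2 ≈ tensor([66.67]) for this toy batch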
|
f702000a64db1bb6f53b7686f1143656f9864e8d
| 3,647,824
|
def masked_residual_block(c, k, nonlinearity, init, scope):
"""
Residual Block for PixelCNN. See https://arxiv.org/abs/1601.06759
"""
with tf.variable_scope(scope):
n_ch = c.get_shape()[3].value
half_ch = n_ch // 2
c1 = nonlinearity(c)
c1 = conv(c1, k=1, out_ch=half_ch, stride=False, mask_type='B', init=init, scope='1x1_a')
c1 = nonlinearity(c1)
c1 = conv(c1, k=k, out_ch=half_ch, stride=False, mask_type='B', init=init, scope='conv')
c1 = nonlinearity(c1)
c1 = conv(c1, k=1, out_ch=n_ch, stride=False, mask_type='B', init=init, scope='1x1_b')
c = c1 + c
return c
|
ffd4bb042affc0250472d50b6b824be66f808878
| 3,647,825
|
def calculate_lookup(src_cdf: np.ndarray, ref_cdf: np.ndarray) -> np.ndarray:
"""
This method creates the lookup table
:param array src_cdf: The cdf for the source image
:param array ref_cdf: The cdf for the reference image
:return: lookup_table: The lookup table
:rtype: array
"""
lookup_table = np.zeros(256)
lookup_val = 0
for src_pixel_val in range(len(src_cdf)):
for ref_pixel_val in range(len(ref_cdf)):
if ref_cdf[ref_pixel_val] >= src_cdf[src_pixel_val]:
lookup_val = ref_pixel_val
break
lookup_table[src_pixel_val] = lookup_val
return lookup_table
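# Hypothetical usage sketch (assumes numpy is imported as np): two toy 256-level CDFs.
src_cdf = np.linspace(0.0, 1.0, 256)
ref_cdf = np.linspace(0.0, 1.0, 256) ** 2     # a differently shaped (still monotone) reference CDF
lut = calculate_lookup(src_cdf, ref_cdf)
# lut[v] is the first reference level whose CDF reaches the source CDF at level v.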
|
f1433e6af001ddcda44c740dabfb1ee643cd2260
| 3,647,826
|
def measureInTransitAndDiffCentroidForOneImg(prfObj, ccdMod, ccdOut, cube, rin, bbox, rollPhase, flags, hdr=None, plot=False):
"""Measure image centroid of in-transit and difference images
Inputs:
-----------
prfObj
An object of the class prf.KeplerPrf()
ccdMod, ccdOut
(int) CCD module and output of image. Needed to
create the correct PRF model
cube
(3d np array) A TPF data cube as returned by
dave.fileio.getTargetPixelArrayFromFits()
rin
(int) Which image to process. rin should be in the range 0..len(cube)
bbox
[c1, c2, r1, r2]. Define the range of columns (c1..c2)
and rows (r1..r2) defined by the image.
        An exception is raised if the following equality is not true
img.shape = (c2-c1), (r2-r1)
rollPhase
(1d np array) An array of roll phases for each row
of cube. len(rollPhase) == len(cube). Units of this
array don't matter, so long as cadences with similar
roll angles have similar values of rollPhase
flags
(1d array) flag values indicating bad cadences.
Currently a non-zero value of flags indicates a bad
cadence.
Optional Inputs:
---------------
hdr
Fits header object for TPF file. Useful if you want to plot
plot
(bool) Request plots.
Returns:
-------------
A two element tuple
A 4 element numpy array
ic In transit centroid column
ir In transit centroid row
dc Difference image centroid column
dr Difference image centroid row
A dictionary containing some diagnostics describing the cadences used
        when creating the difference image.
"""
diff, oot, diagnostics = diffimg.constructK2DifferenceImage(cube, rin, \
rollPhase, flags)
if np.max(np.fabs(oot)) == 0:
return np.array([-1,-1,-1,-1]), diagnostics
ootRes = fitPrfCentroidForImage(oot, ccdMod, ccdOut, bbox, prfObj)
diffRes = fitPrfCentroidForImage(diff, ccdMod, ccdOut, bbox, prfObj)
#Fit the difference image. I don't think this is the right thing to do
# snr = diff / np.sqrt(cube[rin])
# snr[ np.isnan(snr) ] = 0
# diffRes = fitPrfCentroidForImage(snr, ccdMod, ccdOut, bbox, prfObj)
# print rin, diffRes.x
return np.array([ootRes.x[0], ootRes.x[1], diffRes.x[0], diffRes.x[1]]), diagnostics
|
655477460e5841736f07106d5e6afd666d95f450
| 3,647,827
|
def readGlobalFileWithoutCache(fileStore, jobStoreID):
"""Reads a jobStoreID into a file and returns it, without touching
the cache.
Works around toil issue #1532.
"""
f = fileStore.getLocalTempFile()
fileStore.jobStore.readFile(jobStoreID, f)
return f
|
8c784e809acdc1a7fb3d8c108f85ce61bd1ad11c
| 3,647,828
|
def get_user_granted_assets_direct(user):
"""Return assets granted of the user directly
:param user: Instance of :class: ``User``
:return: {asset1: {system_user1, system_user2}, asset2: {...}}
"""
assets = {}
asset_permissions_direct = user.asset_permissions.all()
for asset_permission in asset_permissions_direct:
if not asset_permission.is_valid:
continue
for asset in asset_permission.get_granted_assets():
if not asset.is_active:
continue
if asset in assets:
assets[asset] |= set(asset_permission.system_users.all())
else:
setattr(asset, 'inherited', False)
assets[asset] = set(asset_permission.system_users.all())
return assets
|
602bd104835cc85dcf59339c8b4b2e2e2b5f747b
| 3,647,829
|
def nullColumns(fileHeaders, allKeys):
"""
Return a set of column names that don't exist in the file.
"""
s1 = set(fileHeaders)
s2 = set(allKeys)
return s2.difference(s1)
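# Minimal usage sketch:
assert nullColumns(["id", "name"], ["id", "name", "email"]) == {"email"}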
|
17a0bb80414fe88f213399958b217ccf6fb5d1e9
| 3,647,830
|
def listable_attachment_tags(obj, joiner=" "):
"""
Return an html string containing links for each of the attachments for
input object. Images will be shown as hover images and other attachments will be
shown as paperclip icons.
"""
items = []
attachments = obj.attachment_set.all()
label = mark_safe('<i class="fa fa-paperclip fa-fw" aria-hidden="true"></i>')
img_label = mark_safe('<i class="fa fa-photo fa-fw" aria-hidden="true"></i>')
for a in attachments:
if a.is_image:
img = attachment_img(a, klass="listable-image")
items.append(
'<div class="hover-img"><a href="%s" target="_blank">%s<span>%s</span></a></div>' %
(a.attachment.url, img_label, img)
)
else:
items.append(attachment_link(a, label=label))
return joiner.join(items)
|
b2fa3fd249469334e42616f0e4392ce16d4076d1
| 3,647,831
|
import math
def distance_km(lat1, lon1, lat2, lon2):
""" return distance between two points in km using haversine
http://en.wikipedia.org/wiki/Haversine_formula
http://www.platoscave.net/blog/2009/oct/5/calculate-distance-latitude-longitude-python/
Author: Wayne Dyck
"""
ret_val = 0
radius = 6371 # km
lat1 = float(lat1)
lon1 = float(lon1)
lat2 = float(lat2)
lon2 = float(lon2)
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
ret_val = radius * c
return ret_val
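# Hypothetical usage sketch: Paris -> Berlin, roughly 880 km under the
# spherical-Earth (haversine) approximation used above.
print(round(distance_km(48.8566, 2.3522, 52.5200, 13.4050)))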
|
f50d444b5769b1d00045429e3d577ec22f922774
| 3,647,832
|
def _flip(r, u):
"""Negate `r` if `u` is negated, else identity."""
return ~ r if u.negated else r
|
18ddcf5132867f5646c729bdadcb2c5077df8c03
| 3,647,833
|
def get_arguments():
"""Defines command-line arguments, and parses them."""
parser = ArgumentParser()
# Execution mode
parser.add_argument(
"--mode",
"-m",
choices=['train', 'test', 'full'],
default='train',
help=(
"train: performs training and validation; test: tests the model "
"found in \"--checkpoint-dir\" with name "
"\"--name\" on \"--dataset\"; "
"full: combines train and test modes. Default: train"
)
)
parser.add_argument(
"--resume",
action='store_true',
help=(
"The model found in \"--checkpoint-dir/--name/\" and filename "
"\"--name.h5\" is loaded."
)
)
parser.add_argument(
"--initial-epoch",
type=int,
default=0,
help="Epoch at which to start training. Default: 0"
)
parser.add_argument(
"--no-pretrained-encoder",
dest='pretrained_encoder',
action='store_false',
help=(
"Pretrained encoder weights are not loaded."
)
)
parser.add_argument(
"--weights-path",
type=str,
default="./checkpoints/linknet_encoder_weights.h5",
help=(
"HDF5 file where the weights are stored. This setting is ignored "
"if \"--no-pretrained-encoder\" is set. Default: "
"/checkpoints/linknet_encoder_weights.h5"
)
)
# Hyperparameters
parser.add_argument(
"--batch-size",
"-b",
type=int,
default=16,
help="The batch size. Default: 10"
)
parser.add_argument(
"--epochs",
type=int,
default=200,
help="Number of training epochs. Default: 300"
)
parser.add_argument(
"--learning-rate",
"-lr",
type=float,
default=5e-4,
help="The learning rate. Default: 5e-4"
)
parser.add_argument(
"--lr-decay",
type=float,
default=0.1,
help="The learning rate decay factor. Default: 0.1"
)
parser.add_argument(
"--lr-decay-epochs",
type=int,
default=200,
help=(
"The number of epochs before adjusting the learning rate. "
"Default: 100"
)
)
parser.add_argument(
"--dataset-dir",
type=str,
default="../data/ForDataGenTrainTestVal/",
help=(
"Path to the root directory of the selected dataset. "
"Default: data/CamVid"
)
)
# Settings
parser.add_argument(
"--workers",
type=int,
default=24,
help="Number of subprocesses to use for data loading. Default: 4"
)
parser.add_argument(
"--verbose",
choices=[0, 1, 2],
default=1,
help=(
"Verbosity mode: 0 - silent, 1 - progress bar, 2 - one line per "
"epoch. Default: 1"
)
)
# Storage settings
parser.add_argument(
"--name",
type=str,
default='LinkNet',
help="Name given to the model when saving. Default: LinkNet"
)
parser.add_argument(
"--checkpoint-dir",
type=str,
default='edge_point',
help="The directory where models are saved. Default: checkpoints"
)
return parser.parse_args()
|
5385c75524460ed4968def0ab98fc29112d72434
| 3,647,834
|
def twoThreeMove(tri, angle, face_num, perform = True, return_edge = False):
"""Apply a 2-3 move to a taut triangulation, if possible.
If perform = False, returns if the move is possible.
If perform = True, modifies tri, returns (tri, angle) for the performed move"""
face = tri.triangle(face_num)
embed0 = face.embedding(0)
tet0 = embed0.simplex()
tet_num0 = tet0.index()
tet_0_face_num = embed0.face()
vertices0 = embed0.vertices() # Maps vertices (0,1,2) of face to the corresponding vertex numbers of tet0
embed1 = face.embedding(1)
tet1 = embed1.simplex()
tet_num1 = tet1.index()
tet_1_face_num = embed1.face()
vertices1 = embed1.vertices() # Maps vertices (0,1,2) of face to the corresponding vertex numbers of tet1
if tet0 == tet1: ### Cannot perform a 2-3 move across a self-gluing
return False
### taut 2-3 move is valid if the pis are on different edges of face
### this never happens if we start with a veering triangulation.
### for veering, the two-tetrahedron ball is always a continent.
for i in range(3):
j = (i+1) % 3
k = (i+2) % 3
if angle[tet_num0] == unsorted_vert_pair_to_edge_pair[(vertices0[j], vertices0[k])]:
pi_num_0 = i
if angle[tet_num1] == unsorted_vert_pair_to_edge_pair[(vertices1[j], vertices1[k])]:
pi_num_1 = i
if pi_num_0 == pi_num_1:
return False
if perform == False:
return True
### check we do the same as regina...
tri2 = regina.Triangulation3(tri) ## make a copy
tri2.pachner(tri2.triangle(face_num))
### We have to implement twoThreeMove ourselves. e.g. we do a 2-3 move to canonical fig 8 knot complement triangulation.
### All of the original tetrahedra are removed. I don't see any way to carry the angle structure through without knowing
### exactly how Ben's implementation works.
## record the tetrahedra and gluings adjacent to tet0 and tet1
tets = [tet0, tet1]
vertices = [vertices0, vertices1]
# print('2-3 vertices signs')
# print([v.sign() for v in vertices])
gluings = []
for i in range(2):
tet_gluings = []
for j in range(3):
tet_gluings.append( [ tets[i].adjacentTetrahedron(vertices[i][j]), tets[i].adjacentGluing(vertices[i][j])] )
# if tets[i].adjacentTetrahedron(vertices[i][j]) in tets:
# print('self gluing')
gluings.append(tet_gluings)
### add new tetrahedra
new_tets = []
for i in range(3):
new_tets.append(tri.newTetrahedron())
### glue around degree 3 edge
for i in range(3):
new_tets[i].join(2, new_tets[(i+1)%3], regina.Perm4(0,1,3,2))
### replace mapping info with corresponding info for the 3 tet. Self gluings will be annoying...
### write verticesi[j] as vij
### tet0 new_tet0
### _________ _________
### ,'\ /`. ,'\`. ,'/`.
### ,' \ v03 / `. ,' \ `0' / `.
### ,' \ / `. ,' \ | / `.
### / \ \ / / \ /|\ \|/ /|\
### /v02\ * /v01\ / | \ * / | \
### / _\..... | ...../_ \ / | 3\..... | ...../2 | \
### /_--"" / * \ ""--_\ /2 ,' / * \ `. 3\
### \`.v12/ / \ \v11,'/ `. \`.| / /|\ \ |,'/
### \ `./ / \ \,' / ----} \ `./ / | \ \,' /
### \ /`. / v00 \ ,'\ / ,' \|/`. / | \ ,'\|/
### \ `. / \ ,' / \ `. / | \ ,' /
### \ `---------' / \ * 3 | 2 * /
### \ \ / / \ \ | / /
### \ \ v10 / / new_tet1 \ \ | / / new_tet2
### \ \ / / \ \ | / /
### \ \ / / \ \|/ /
### \ * / \ * /
### tet1 \...|.../ \...|.../
### \ | / \`.|.'/
### \v13/ \ 1 /
### \|/ \|/
### * *
# permutations taking the vertices for a face of the 3-tet ball to the
# vertices of the same face for the 2-tet ball
# these should be even in order to preserve orientability.
# exactly one of vertices[0] and vertices[1] is even, but it seems to depend on the face.
# perms = [[regina.Perm4( vertices[0][3], vertices[0][0], vertices[0][1], vertices[0][2] ), ### opposite v00
# regina.Perm4( vertices[0][3], vertices[0][1], vertices[0][2], vertices[0][0] ), ### opposite v01
# regina.Perm4( vertices[0][3], vertices[0][2], vertices[0][0], vertices[0][1] ) ### opposite v02
# ],
# [regina.Perm4( vertices[1][0], vertices[1][3], vertices[1][1], vertices[1][2] ), ### opposite v10
# regina.Perm4( vertices[1][1], vertices[1][3], vertices[1][2], vertices[1][0] ), ### opposite v11
# regina.Perm4( vertices[1][2], vertices[1][3], vertices[1][0], vertices[1][1] ) ### opposite v12
# ]
# ]
perms = [[vertices[0] * regina.Perm4( 3,0,1,2 ), ### opposite v00
vertices[0] * regina.Perm4( 3,1,2,0 ), ### opposite v01
vertices[0] * regina.Perm4( 3,2,0,1 ) ### opposite v02
],
[vertices[1] * regina.Perm4( 0,3,1,2 ), ### opposite v10
vertices[1] * regina.Perm4( 1,3,2,0 ), ### opposite v11
vertices[1] * regina.Perm4( 2,3,0,1 ) ### opposite v12
]
]
flip = perms[0][0].sign() == -1
if flip: #then all of the signs are wrong, switch 0 and 1 on input
perms = [[p * regina.Perm4( 1,0,2,3 ) for p in a] for a in perms]
# print('2-3 perms signs')
# print([[p.sign() for p in a] for a in perms])
for i in range(2):
for j in range(3):
gluing = gluings[i][j]
if gluing != None:
if gluing[0] not in tets: ### not a self gluing
gluing[1] = gluing[1] * perms[i][j]
else:
i_other = tets.index( gluing[0] )
otherfacenum = gluing[1][vertices[i][j]]
j_other = [vertices[i_other][k] for k in range(4)].index(otherfacenum)
assert gluings[i_other][j_other][0] == tets[i]
assert gluings[i_other][j_other][1].inverse() == gluings[i][j][1]
gluings[i_other][j_other] = None ### only do a self gluing from one side
gluing[0] = new_tets[j_other]
gluing[1] = perms[i_other][j_other].inverse() * gluing[1] * perms[i][j]
### unglue two tetrahedra
tet0.isolate()
tet1.isolate()
### remove the tetrahedra
tri.removeSimplex(tet0)
tri.removeSimplex(tet1)
### make the gluings on the boundary of the new ball
for i in range(2):
for j in range(3):
if gluings[i][j] != None:
if flip:
new_tets[j].join(i, gluings[i][j][0], gluings[i][j][1])
else:
new_tets[j].join(1 - i, gluings[i][j][0], gluings[i][j][1])
assert tri.isIsomorphicTo(tri2)
assert tri.isOriented()
### update the angle structure
tet_indices = [tet_num0, tet_num1]
tet_indices.sort()
angle.pop(tet_indices[1])
angle.pop(tet_indices[0]) ## remove from the list in the correct order!
new_angle = [None, None, None]
new_angle[pi_num_0] = 0
new_angle[pi_num_1] = 0 ### these two tetrahedra have their pi's on the new degree three edge
third_index = 3 - (pi_num_0 + pi_num_1)
if (pi_num_0 - third_index) % 3 == 1:
new_angle[third_index] = 1
else:
assert (pi_num_0 - third_index) % 3 == 2
new_angle[third_index] = 2
if flip:
new_angle[third_index] = 3 - new_angle[third_index]
angle.extend(new_angle)
assert is_taut(tri, angle)
if not return_edge:
return [ tri, angle ]
else:
return [ tri, angle, new_tets[0].edge(0).index() ]
|
18abe14b2b8446d39e285f1facda82568b808b60
| 3,647,835
|
import csv
def obterUFEstadoPorNome(estado):
"""
    Return the state's UF code given the state name
    :param estado: State name
    :return codigoDoEstado: The state's UF code
"""
try:
with open("./recursos/estados.csv", newline="") as csvfile:
reader = csv.DictReader(csvfile, delimiter=";")
for state in reader:
if state["Unidade_Federativa"].lower() == estado.strip().lower():
return state["UF"]
except Exception as exc:
print("[ERROR]{0}".format(exc))
|
9b136fe8c557e5f75bca235cf66168f92244a4e6
| 3,647,836
|
import random
def get_random_byte_string(byte_length):
""" Use this function to generate random byte string
"""
byte_list = []
i = 0
while i < byte_length:
byte_list.append(chr(random.getrandbits(8)))
i = i + 1
# Make into a string
byte_string = ''.join(byte_list)
return byte_string
|
0ea923a045beb476501dc3d8983f3fe89efef008
| 3,647,837
|
def find_all_indexes(text, pattern):
"""Return a list of starting indexes of all occurrences of pattern in text,
or an empty list if not found.
Complexity Analysis:
Best case: O(t)
Worst Case: O(t)
In the best case the pattern is the empty string(''). In that scenario
this implementation returns a list of all the index positions present
in the text string, using a list comprehension which requires t
iterations, where t is the length of the text. One iteration is required
for each character in the text string.
In the worst case, there are no occurrences of the pattern present in
the text. In that scenario this implementation would be most impacted by
the worst case scenario of the find_index function, which would be O(t)
as well. The find_index function would require iterating through the
entire length of the text string to discover there are pattern matches.
    In the average case, there are occurrences of the pattern string in the
text string. Since there is no difference between the best and worst
case, the complexity of this function asymptotically approaches O(t) as
well, on the average case.
"""
assert isinstance(text, str), 'text is not a string: {}'.format(text)
assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)
# edge case: return every index if the pattern is an empty str
if len(pattern) == 0:
return [i for i in range(len(text))]
# otherwise find all indices
else:
# set indices to an empty list on the first pass
indices = list()
# now find all the indices
return find_next_index(text, pattern, indices)
|
0101efe77570b5d027928495dc25cb4e02d5c2f5
| 3,647,838
|
def is_igb(request):
"""
Checks the headers for IGB headers.
"""
if 'HTTP_EVE_TRUSTED' in request.META:
return True
return False
|
1e6485614063a9f4eec36407b60154300d38db76
| 3,647,839
|
from collections import OrderedDict
import numpy as np
import theano
import theano.tensor as T
def compile_ADAM_train_function(model, gparams, learning_rate=0.001, b1=0.9, b2=0.999, e=1e-8,
gamma=1 - 1e-8):
"""
ADAM update rules
Default values are taken from [Kingma2014]
References:
[Kingma2014] Kingma, Diederik, and Jimmy Ba.
"Adam: A Method for Stochastic Optimization."
arXiv preprint arXiv:1412.6980 (2014).
http://arxiv.org/pdf/1412.6980v4.pdf
"""
updates = OrderedDict()
all_params = model.params
all_grads = gparams
alpha = learning_rate
t = theano.shared(np.float32(1))
b1_t = b1 * gamma ** (t - 1) # (Decay the first moment running average coefficient)
for theta_previous, g in zip(all_params, all_grads):
m_previous = theano.shared(np.zeros(theta_previous.get_value().shape,
dtype=theano.config.floatX))
v_previous = theano.shared(np.zeros(theta_previous.get_value().shape,
dtype=theano.config.floatX))
m = b1_t * m_previous + (1 - b1_t) * g # (Update biased first moment estimate)
v = b2 * v_previous + (1 - b2) * g ** 2 # (Update biased second raw moment estimate)
m_hat = m / (1 - b1 ** t) # (Compute bias-corrected first moment estimate)
v_hat = v / (1 - b2 ** t) # (Compute bias-corrected second raw moment estimate)
theta = theta_previous - (alpha * m_hat) / (T.sqrt(v_hat) + e) # (Update parameters)
# updates.append((m_previous, m))
# updates.append((v_previous, v))
# updates.append((theta_previous, theta) )
updates[m_previous] = m
updates[v_previous] = v
updates[theta_previous] = theta
updates[t] = t + 1.
return updates
|
a60f27c3b314d3adc2ec2f7bb0f8c92875d7625b
| 3,647,840
|
import subprocess
import os
def Runge_Kutta_Fourth_Order(inputs, coordinate_file, temperature, zeta=-1., **keyword_parameters):
"""
    This function determines the gradient of thermal expansion of a structure between two temperatures using
    a fourth-order Runge-Kutta numerical analysis
:param Method: Gradient Isotropic QHA ('GiQ');
Gradient Isotropic QHA w/ Gruneisen Parameter ('GiQg');
Gradient Anisotropic QHA ('GaQ');
:param Coordinate_file: file containing the lattice parameters (and coordinates)
:param Program: 'Tinker' for Tinker Molecular Modeling
'Test' for a test run
:param Temperature: in Kelvin
:param Pressure: in atm
:param molecules_in_coord: number of molecules in coordinate file
:param Statistical_mechanics: 'Classical' Classical mechanics
'Quantum' Quantum mechanics
:param RK4_stepsize: stepsize for runge-kutta 4th order
:param keyword_parameters: Parameter_file, LocGrd_Vol_FracStep, LocGrd_LatParam_FracStep, Gruneisen,
Wavenumber_Reference, Volume_Reference, Aniso_LocGrad_Type
Optional Parameters
Parameter_file: program specific file containing force field parameters
LocGrd_Vol_FracStep: isotropic volume fractional stepsize for local gradient
LocGrd_LatParam_FracStep: anisotropic crystal matrix fractional stepsize for local gradient
Gruneisen: Gruneisen parameters found with Setup_Isotropic_Gruneisen
Wavenumber_Reference: Reference wavenumbers for Gruneisen parameter
Volume_Reference: Reference volume of structure for Wavenumber_Reference
    Aniso_LocGrad_Type: 73 Hessians to calculate the complete anisotropic gradient
                        25 for d**2G_dhdh only calculating the diagonals and off-diags. of the upper left 3x3 matrix
                        19 for d**2G_dhdh only calculating the upper left 3x3 matrix
                        13 for d**2G_dhdh only calculating the diagonals
                        7 for d**2G_dhdh only calculating the upper left 3x3 matrix diagonals
Crystal_matrix_Reference:
"""
# Setting up program specific file endings and giving parameter files blank names to avoid errors
file_ending = psf.assign_coordinate_file_ending(inputs.program)
# Output of numerical analysis
NO.start_RK(temperature, inputs.gradient_numerical_step)
# Final array for weights on slopes
RK_multiply = np.array([1. / 6., 1. / 3., 1. / 3., 1. / 6.])
# Copying the coordinate file to a separate file to work with
subprocess.call(['cp', coordinate_file, 'RK4' + file_ending])
if inputs.program == 'QE':
print(coordinate_file, 'copying bv file')
os.system('cp ' + coordinate_file + 'bv' + ' RK4' + file_ending + 'bv')
# Setting the different temperature stepsizes
if zeta == -1.:
temperature_steps = np.array([0., inputs.gradient_numerical_step / 2., inputs.gradient_numerical_step / 2.,
inputs.gradient_numerical_step])
zeta_steps = np.zeros(4)
else:
temperature_steps = np.zeros(4)
zeta_steps = np.array([0., inputs.zeta_numerical_step / 2., inputs.zeta_numerical_step / 2.,
inputs.zeta_numerical_step])
    # Setting RK_4 array/matrix and general parameters that aren't required for specific methods
if (inputs.method == 'GiQ') or (inputs.method == 'GiQg'):
# Setting array to save 4 gradients in dV/dT
RK_grad = np.zeros(4)
elif (inputs.method == 'GaQ') or (inputs.method == 'GaQg'):
# Setting array to save 4 gradients for the six different strains d\eta/dT
if inputs.anisotropic_type == '1D' and zeta == -1.:
RK_grad = np.zeros(6)
else:
RK_grad = np.zeros((4, 6))
# Calculating the RK gradients for the overall numerical gradient
for i in range(4):
# Outputting numerical analysis
NO.step_RK(i, temperature + temperature_steps[i], inputs.program, 'RK4' + file_ending)
print(" + Performing Runge-Kutta step " + str(i + 1))
if (inputs.method == 'GiQ') or (inputs.method == 'GiQg'):
# Determining the slope at the current RK step
RK_grad[i], wavenumbers_hold, volume_hold, left_minimum = \
Ex.Call_Expansion(inputs, 'local_gradient', 'RK4' + file_ending, Temperature=temperature +
temperature_steps[i],
LocGrd_dV=keyword_parameters['LocGrd_dV'], Gruneisen=keyword_parameters['Gruneisen'],
Wavenumber_Reference=keyword_parameters['Wavenumber_Reference'],
Volume_Reference=keyword_parameters['Volume_Reference'])
elif (inputs.method == 'GaQ') or (inputs.method == 'GaQg'):
if inputs.anisotropic_type != '1D' or zeta != -1.:
# Determining the slope at the current RK step
RK_grad[i], wavenumbers_hold, left_minimum = \
Ex.Call_Expansion(inputs, 'local_gradient', 'RK4' + file_ending, zeta=zeta + zeta_steps[i],
Temperature=temperature + temperature_steps[i],
LocGrd_dC=keyword_parameters['LocGrd_dC'],
Gruneisen=keyword_parameters['Gruneisen'],
Wavenumber_Reference=keyword_parameters['Wavenumber_Reference'],
ref_crystal_matrix=keyword_parameters['ref_crystal_matrix'])
else:
# Determining the slope at the current RK step
RK_grad[i], wavenumbers_hold, left_minimum = \
Ex.Call_Expansion(inputs, 'local_gradient', 'RK4' + file_ending, Temperature=temperature +
temperature_steps[i],
LocGrd_dLambda=keyword_parameters['LocGrd_dLambda'],
dC_dLambda=keyword_parameters['dC_dLambda'],
Gruneisen=keyword_parameters['Gruneisen'],
Wavenumber_Reference=keyword_parameters['Wavenumber_Reference'],
ref_crystal_matrix=keyword_parameters['ref_crystal_matrix'])
volume_hold = 0.
if i == 0:
            # Saving outputs to be passed to the earlier code (local gradient and wavenumbers of initial structure)
wavenumbers = 1. * wavenumbers_hold
volume = 1. * volume_hold
k1 = 1. * RK_grad[0]
if left_minimum == True:
subprocess.call(['rm', 'RK4' + file_ending])
return np.nan, np.nan, np.nan, np.nan
if i != 3:
if (inputs.method == 'GiQ') or (inputs.method == 'GiQg'):
                # For isotropic expansion, determining the volume fraction change of the input structure (V_new/V_input)
volume_fraction_change = (volume + RK_grad[i] * temperature_steps[i + 1]) / volume
# Expanding the crystal to the next step size
Ex.Call_Expansion(inputs, 'expand', coordinate_file, volume_fraction_change=volume_fraction_change,
output_file='RK4')
elif (inputs.method == 'GaQ') or (inputs.method == 'GaQg'):
if inputs.anisotropic_type != '1D':
                    # For anisotropic expansion, determining the strain of the input structure for the next step
RK_crystal_matrix = Ex.array_to_triangle_crystal_matrix(RK_grad[i] * temperature_steps[i + 1])
elif zeta != -1.:
RK_crystal_matrix = Ex.array_to_triangle_crystal_matrix(RK_grad[i] * zeta_steps[i])
else:
                    # For anisotropic expansion, determining the strain of the input structure for the next step
RK_crystal_matrix = Ex.array_to_triangle_crystal_matrix(RK_grad[i] * temperature_steps[i + 1] *
keyword_parameters['dC_dLambda'])
# Expanding the crystal to the next step size
Ex.Call_Expansion(inputs, 'expand', coordinate_file, dcrystal_matrix=RK_crystal_matrix,
output_file='RK4')
# Multiplying the found gradient by the fraction it will contribute to the overall gradient
RK_grad[i] = RK_grad[i] * RK_multiply[i]
# Summing all RK gradients for the overall numerical gradient
numerical_gradient = np.sum(RK_grad, axis=0)
# Removing excess files
subprocess.call(['rm', 'RK4' + file_ending])
return numerical_gradient, wavenumbers, volume, k1
|
72c539637359f9f4eaef06ac13ae8e24c4516f8d
| 3,647,841
|
import os
def wooqi_conf():
"""
Wooqi configuration file read from specific project which is using wooqi
Return a dictionary containing all configuration attributes
"""
config_file_path = '{}/wooqi_conf.cfg'.format(os.getcwd())
if os.path.isfile(config_file_path):
config = read_cfg(config_file_path)
else:
config = None
return config
|
b24dda5d7c9376102af114d6a692c5be721e7866
| 3,647,842
|
def linear_svr_pred(X_train, Y_train):
"""
Train a linear model with Support Vector Regression
"""
svr_model = LinearSVR(random_state=RANDOM_STATE)
svr_model.fit(X_train, Y_train)
Y_pred = svr_model.predict(X_train)
return Y_pred
|
336325ec53da4d4008c3219aa737365a40263bdf
| 3,647,843
|
import math
def area(rad: float = 1.0) -> float:
"""
    return the area of a circle, treating ``rad`` as the diameter (pi * rad**2 / 4)
>>> area(2.0)
3.141592653589793
>>> area(3.0)
7.0685834705770345
>>> area(4.0)
12.566370614359172
"""
return rad * rad * math.pi / 4
|
702fc4a9fa370804d88d1182f966890bc0634466
| 3,647,844
|
import requests
import json
def check_coverage_running(url, coverage_name):
"""
Check if Navitia coverage is up and running
:param url: Navitia server coverage url
:param coverage_name: the name of the coverage to check
:return: Whether a Navitia coverage is up and running
"""
_log.info("checking if %s is up", coverage_name)
response = requests.get(url)
# Get the status of the coverage as Json
json_data = json.loads(response.text)
if "regions" not in json_data or "running" not in json_data["regions"][0]['status']:
_log.info("%s coverage is down", coverage_name)
return False
else:
_log.info("%s coverage is up", coverage_name)
return True
|
3d3d9b1403c541aa0cdb8867845b21bf387431fb
| 3,647,845
|
import random
def make_random_board(row_count, col_count, density=0.5):
"""create a random chess board with given size and density"""
board = {}
for row_num in range(row_count):
for col_num in range(col_count):
factor = random.random() / density
if factor >= 1:
continue
index = int(factor * len(ChessPiece.class_list))
board[(row_num, col_num)] = ChessPiece.class_list[index].symbol
return board
|
ea40883989675c99aa70af0b180957aa677233a5
| 3,647,846
|
def create_roots(batch_data):
"""
Create root nodes for use in MCTS simulation. Takes as a parameter a list of tuples,
containing data for each game. This data consist of: gametype, state, type of player 1
and type of player 2
"""
root_nodes = []
for data in batch_data:
game = data[0]
state = data[1]
player_1 = data[2]
player_2 = data[3]
player = player_1 if game.player(state) else player_2
root_nodes.append(player.create_root_node(state))
return root_nodes
|
d07b0781605b01d08c9ef78f30dad9254ade9907
| 3,647,847
|
def _parse_crs(crs):
"""Parse a coordinate reference system from a variety of representations.
Parameters
----------
crs : {str, dict, int, CRS}
Must be either a rasterio CRS object, a proj-string, rasterio supported
dictionary, WKT string, or EPSG integer.
Returns
-------
rasterio.crs.CRS
The parsed CRS.
Raises
------
CRSError
Raises an error if the input cannot be parsed.
"""
#
# NOTE: This doesn't currently throw an error if the EPSG code is invalid.
#
parsed = None
if isinstance(crs, CRS):
parsed = crs
elif isinstance(crs, str):
try:
# proj-string or wkt
parsed = CRS.from_string(crs)
except CRSError:
# wkt
parsed = CRS.from_wkt(crs)
elif isinstance(crs, dict):
parsed = CRS(crs)
elif isinstance(crs, int):
parsed = CRS.from_epsg(crs)
elif isinstance(crs, pyproj.Proj):
parsed = CRS.from_proj4(crs.proj4_init)
if parsed is None or not parsed.is_valid:
raise CRSError('Could not parse CRS: {}'.format(crs))
return parsed
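# Hypothetical usage sketch (requires rasterio; the equivalence below is the
# expected behaviour, not a guarantee from the original source):
#   _parse_crs(4326), _parse_crs("EPSG:4326") and _parse_crs({"init": "epsg:4326"})
#   should all resolve to the same rasterio.crs.CRS for WGS84.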
|
559692b146ec99a9fe5407c8bca340c72dddf0a5
| 3,647,848
|
def hs_instance_get_all(context):
"""Get a list of hyperstash instances."""
return IMPL.hs_instance_get_all(context)
|
e09991f71e3713eea96956306a1ab4813bfb8b1a
| 3,647,849
|
import importlib.util
def import_from_file(module_name: str, filepath: str):
"""
Imports a module from file.
Args:
module_name (str): Assigned to the module's __name__ parameter (does not
influence how the module is named outside of this function)
filepath (str): Path to the .py file
Returns:
The module
"""
spec = importlib.util.spec_from_file_location(module_name, filepath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
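# Hypothetical usage sketch: write a tiny module to disk and import it by path.
import pathlib
import tempfile
_tmp = pathlib.Path(tempfile.mkdtemp()) / "plugin.py"
_tmp.write_text("VALUE = 42\n")
plugin = import_from_file("plugin", str(_tmp))
assert plugin.VALUE == 42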
|
89ac082cbc7d3dd5d9158a8cc8eb5ef061c444e6
| 3,647,850
|
def plot_chirp(stim_inten, spike_bins, smooth=True, ax=None):
"""
Plot the response to a chirp stimulus (but could be any repeated stimulus, non-shuffled).
The response is plotted with seaborn's lineplot.
params:
- stim_inten: The whole stimulus intensity
- spike_bins: The cell's response to the whole stimulus
- smooth: Flag to smooth or not the cell's response
- ax: The axis for the plot. If None, a new plot is created
return:
- The axis of the plot
"""
if ax is None:
fig, ax = plt.subplots()
#Getting the number of repeats by convolving a part of the stimulus
conv_res = np.convolve(stim_inten[360:600].astype(float), stim_inten.astype(float), mode="full")
n_repeats = np.sum(conv_res.max()==conv_res)
trace = spike_bins.reshape(n_repeats,-1)
len_ = trace.shape[1]
df = pd.DataFrame(columns=["timepoint","repeat","signal"])
for i, repeat_am in enumerate(trace):
if smooth:
repeat_am = np.convolve([.333]*3, repeat_am, mode="same")
repeat_df = pd.DataFrame(list(zip(np.linspace(0,len_/60,len_),
[str(i)]*len_,
repeat_am)), columns=["timepoint","repeat","signal"])
        df = pd.concat([df, repeat_df], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
    g = sns.lineplot(x="timepoint", y="signal", data=df, ax=ax, n_boot=100) #Small n_boot to speed up plotting
# (default n_boot=10000)
min_val, max_val = ax.get_ylim()
ax.set_ylim(min_val , (max_val-min_val)*6/5)
ax.set(xlabel='', ylabel='')
ax.imshow([stim_inten.reshape(n_repeats,-1)[0]], aspect='auto', cmap="gray", extent=(0,len_/60,(max_val-min_val)*6/5,max_val))
return ax
|
75fe6defcb23a2c59e2241c9a68bf753dc6828b7
| 3,647,851
|
def _cigar_convert(cigar, chromosome, vci_file, strand='+', position=0):
"""
PHASE 1
    Convert each CIGAR element to new mappings and construct an array of NEW cigar elements
For example, depending on the Intervals in the CHAIN file, let's say we have the following
CIGAR string: 35M49N65M
This could get converted into
35M ==> 4M150D31M
49N ==> -1N (remember, surrounding M's are used to find the length of N which is done on next pass)
65M ==> 65M
First pass yields: 35M49N65M => 4M150D31M-1N65M
:param cigar:
:param chromosome:
:param vci_file:
:param strand:
:param position:
:return:
"""
cigar_new = []
current_pos = position
cigar_no = 0
for c in cigar:
cigar_no += 1
LOG.debug("Element #{0}, '{1}{2}' specified, location: {3}".format(cigar_no, c[1], CIGAR_N2C[c[0]], current_pos))
increment = c[1]
if c[0] == CIGAR_m:
new_mappings = vci_file.find_mappings(chromosome, current_pos, current_pos + c[1])
if not new_mappings:
LOG.debug("Mappings: None")
cigar_new.append(Cigar(CIGAR_S, c[1], 0, 0))
elif len(new_mappings) == 1:
LOG.debug("Mappings: Easy: {0}".format(new_mappings[0]))
cigar_new.append(Cigar(CIGAR_M, new_mappings[0].to_end - new_mappings[0].to_start, new_mappings[0].to_start, new_mappings[0].to_end))
else:
# multiple maps, not so easy
last = None
for m in new_mappings:
LOG.debug("Mappings: Multiple: {0}".format(m))
if not last:
last = m
if current_pos < m.from_start:
# special case of first match not in interval, handle accordingly
LOG.debug("Adding 'S', because {0} < {1}".format(current_pos, m.from_start))
cigar_new.append(Cigar(CIGAR_S, m.from_start - current_pos, 0, 0))
else:
if m.from_start != last.from_end:
LOG.debug("Adding 'M' and 'I', because {0} != {1}".format(m.from_start, last.from_end))
cigar_new.append(Cigar(CIGAR_M, last.to_end - last.to_start, last.to_start, last.to_end))
cigar_new.append(Cigar(CIGAR_I, m.from_start - last.from_end, last.to_start, last.to_end))
elif m.to_start != last.to_end:
LOG.debug("Adding 'M' and 'D', because {0} != {1}".format(m.to_start, last.to_end))
cigar_new.append(Cigar(CIGAR_M, last.to_end - last.to_start, last.to_start, last.to_end))
cigar_new.append(Cigar(CIGAR_D, m.to_start - last.to_end, 0, 0))
last = m
LOG.debug("Adding 'M'")
cigar_new.append(Cigar(CIGAR_M, last.to_end - last.to_start, last.to_start, last.to_end))
elif c[0] == CIGAR_i:
LOG.debug("Adding 'I' and 'D'")
cigar_new.append(Cigar(CIGAR_I, c[1], 0, 0))
cigar_new.append(Cigar(CIGAR_D, -1, 0, 0))
increment = 0
elif c[0] == CIGAR_d:
LOG.debug("Adding 'D'")
cigar_new.append(Cigar(CIGAR_D, -1, 0, 0))
elif c[0] == CIGAR_n:
LOG.debug("Adding 'N'")
cigar_new.append(Cigar(CIGAR_N, -1, 0, 0))
elif c[0] in [CIGAR_s, CIGAR_h]:
LOG.debug("Adding '{0}'".format(CIGAR_N2C[c[0]]))
cigar_new.append(Cigar(CIGAR_N2C[c[0]], c[1], 0, 0))
else:
# other
LOG.debug("OTHER CODE '{0}' found, looking at {1} at {2}".format(CIGAR_N2C[c[0]], c, current_pos))
raise exceptions.G2GCigarFormatError("ERROR: Not handling the values in this cigar string: {0}".format(cigar))
#current_pos += c[1]
current_pos += increment
LOG.debug("Current CIGAR: {0}".format(cigar_new))
return cigar_new
|
5c7a0ea83d4959d87c03d0cbfe28d5bdbe02b97e
| 3,647,852
|
import argparse
def get_parser():
"""Creates an ArgumentParser object."""
parser = argparse.ArgumentParser(
"clinker",
description="clinker: Automatic creation of publication-ready"
" gene cluster comparison figures.\n\n"
"clinker generates gene cluster comparison figures from GenBank files."
" It performs pairwise local or global alignments between every sequence"
" in every unique pair of clusters and generates interactive, to-scale comparison figures"
" using the clustermap.js library.",
epilog="Example usage\n-------------\n"
"Align clusters, plot results and print scores to screen:\n"
" $ clinker files/*.gbk\n\n"
"Cameron Gilchrist, 2020",
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("files", help="Gene cluster GenBank files", nargs="+")
alignment = parser.add_argument_group("Alignment options")
alignment.add_argument(
"-na",
"--no_align",
help="Do not align clusters",
action="store_true",
)
alignment.add_argument(
"-i",
"--identity",
help="Minimum alignment sequence identity",
type=float,
default=0.3
)
alignment.add_argument(
"-j",
"--jobs",
help="Number of alignments to run in parallel (0 to use the number of CPUs)",
type=int,
default=0,
)
output = parser.add_argument_group("Output options")
output.add_argument("-s", "--session", help="Path to clinker session")
output.add_argument("-ji", "--json_indent", type=int, help="Number of spaces to indent JSON")
output.add_argument("-f", "--force", help="Overwrite previous output file", action="store_true")
output.add_argument("-o", "--output", help="Save alignments to file")
output.add_argument(
"-p",
"--plot",
nargs="?",
const=True,
default=False,
help="Plot cluster alignments using clustermap.js. If a path is given,"
" clinker will generate a portable HTML file at that path. Otherwise,"
" the plot will be served dynamically using Python's HTTP server."
)
output.add_argument("-dl", "--delimiter", help="Character to delimit output by")
output.add_argument("-dc", "--decimals", help="Number of decimal places in output", default=2)
output.add_argument(
"-hl",
"--hide_link_headers",
help="Hide alignment column headers",
action="store_true",
)
output.add_argument(
"-ha",
"--hide_aln_headers",
help="Hide alignment cluster name headers",
action="store_true",
)
viz = parser.add_argument_group("Visualisation options")
viz.add_argument(
"-ufo",
"--use_file_order",
action="store_true",
help="Display clusters in order of input files"
)
return parser
|
881c7bc495edd37011c07d5db6ac80c816855f4a
| 3,647,853
|
def init_context_processor(app):
"""定义html模板方法"""
@app.context_processor
def pjax_processor():
"""
        pjax processor
"""
def get_template(base, pjax=None):
pjax = pjax or 'pjax.html'
if 'X-PJAX' in request.headers:
return pjax
else:
return base
return dict(pjax=get_template)
@app.context_processor
def pagination_processor():
"""
        pagination processor
"""
def pagination(url, pager, template=None, params={}):
template = template or 'common/pagination.html'
pager._dict['current'] = (pager.offset + pager.limit - 1) // pager.limit
pager._dict['total_page'] = (pager.rows_found + pager.limit - 1) // pager.limit
prev_offset = pager.offset - 2 * pager.limit
pager._dict['prev_offset'] = prev_offset if prev_offset >= 0 else 0
pager._dict['params'] = params
pager._dict['url'] = url
return Markup(render_template(template, data=pager))
return dict(pagination=pagination)
@app.context_processor
def column_order_processor():
"""
        Get the CSS classes for a sortable column
"""
def column_order(column, order, active):
column = 'sorttable-column-%s' % column
if active:
order = 'sorttable-sorted-reverse' if order == 'desc' else 'sorttable-sorted'
return '%s %s' % (column, order)
else:
return column
return dict(column_order=column_order)
@app.context_processor
def try_active_processor():
"""
        Try to mark the navbar item as active
"""
def try_active(page_type):
if g.page_type == page_type:
return 'curr'
else:
return ''
return dict(try_active=try_active)
@app.context_processor
def if_else_processor():
"""
gives t if condition evaluates to True, and f if it evaluates to False
"""
def if_else(condition, t, f):
return t if condition else f
return dict(ifelse=if_else)
@app.context_processor
def present_processor():
u"""
        Map an enum value to its name
        eg:
        >> present(1, {1: 'Android', 2: 'iOS'})
        Android
        >> present(2, {1: 'Android', 2: 'iOS'})
        iOS
"""
def present(enum, dict):
return dict.get(enum, enum)
return dict(present=present)
@app.context_processor
def hostname_processor():
"""
get hostname of url
ex: http://ng.d.cn/xianbian2/news/detail_402586_1.html => ng.d.cn
"""
def hostname(url):
return parse.urlparse(url).netloc
return dict(hostname=hostname)
@app.context_processor
def utility_processor():
def permission(per):
if g.modules == []:
return True
if per in g.modules:
return True
return False
return dict(permission=permission)
@app.context_processor
    def utility_processor():
        """Highlight the current module in the left sidebar navigation."""
def active_cur_menu(per):
if g.uri_path.startswith(per):
# if g.uri_path == per:
return True
return False
return dict(active_cur_menu=active_cur_menu)
|
6b5cf03ec48a1b1324a158388098da5e4884286f
| 3,647,854
|
from functools import partial
def tiered(backup_tier, R):
"""Returns a tier aware checker.
The returned checker ensures that it's possible to construct a set
(of length R) including given set s that will contain exactly one
node from the backup tier.
`backup_tier` is a list of node ids that count as backups.
A typical invocation looks something like:
build_copysets(primary_tier + backup_tier, 6, 2,
checker=tiered(backup_tier, 6))
"""
def _checker(backup_tier, R, copysets, copyset):
num_backups = len(copyset.intersection(set(backup_tier)))
if len(copyset) < R:
return num_backups <= 1
else:
return num_backups == 1
return partial(_checker, backup_tier, R)
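A minimal illustration (added here, not part of the original source) of the returned checker, assuming a six-node pool where nodes 5 and 6 form the backup tier; the copysets argument is unused by the checker and can be passed as an empty list.
checker = tiered(backup_tier=[5, 6], R=3)
print(checker([], {1, 5}))     # True: partial copyset with one backup node
print(checker([], {1, 5, 6}))  # False: full copyset with two backup nodes
print(checker([], {1, 2, 5}))  # True: full copyset with exactly one backup node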
|
ecde647738fad88ea806948a0df7bee22a73abfa
| 3,647,855
|
import numpy as np
from math import log
from scipy.sparse.linalg import aslinearoperator
def ls_chebyshev(A, b, s_max, s_min, tol=1e-8, iter_lim=None):
"""
Chebyshev iteration for linear least squares problems
"""
A = aslinearoperator(A)
m, n = A.shape
d = (s_max*s_max+s_min*s_min)/2.0
c = (s_max*s_max-s_min*s_min)/2.0
theta = (1.0-s_min/s_max)/(1.0+s_min/s_max) # convergence rate
itn_est = np.ceil((log(tol)-log(2))/log(theta))
if (iter_lim is None) or (iter_lim < itn_est) : iter_lim = itn_est
alpha = 0.0
beta = 0.0
r = b.copy()
x = np.zeros( np.int64( n ) )
v = np.zeros( np.int64( n ) )
# print( iter_lim )
for k in range(np.int64(iter_lim)):
if k == 0:
beta = 0.0
alpha = 1.0/d
elif k == 1:
beta = -1.0/2.0*(c*c)/(d*d)
alpha = 1.0*(d-c*c/(2.0*d))
else:
beta = -(c*c)/4.0*(alpha*alpha)
alpha = 1.0/(d-(c*c)/4.0*alpha)
v = A.rmatvec(r) - beta*v
x += alpha*v
r -= alpha*A.matvec(v)
return x
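A usage sketch (added, not from the original source): solve a small, consistent overdetermined system, estimating the extreme singular values with NumPy so the Chebyshev recurrence has the spectral bounds it needs.
rng = np.random.default_rng(0)
A_dense = rng.standard_normal((100, 10))
x_true = rng.standard_normal(10)
b_vec = A_dense @ x_true
s = np.linalg.svd(A_dense, compute_uv=False)  # singular values, descending
x_est = ls_chebyshev(A_dense, b_vec, s_max=s[0], s_min=s[-1], tol=1e-10)
print(np.linalg.norm(x_est - x_true))  # small for a consistent, well-conditioned system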
|
05e50ac0167d1ed03ae3e9fa6876c94a50db7893
| 3,647,856
|
import numpy as np
from sklearn.metrics import confusion_matrix
def compute_confusion_matrix(args, df_inference, strata):
"""From a list of prediction summary (as produced by get_cloud_prediction_summary), compute a confusion matrix."""
y_true = df_inference["vt_" + strata].values
y_predicted = df_inference["pred_" + strata].values
y_true = np.vectorize(get_closest_class_center_index)(y_true)
y_predicted = np.vectorize(get_closest_class_center_index)(y_predicted)
cm = confusion_matrix(
y_true,
y_predicted,
labels=range(len(bins_centers)),
normalize=args.normalize_cm,
)
return cm
|
0662638c4db5ee9e1d94b1e582d9b0824eefd3ff
| 3,647,857
|
def get_metadata(**kwargs):
"""Metadata
Get account metadata
Reference: https://iexcloud.io/docs/api/#metadata
Data Weighting: ``Free``
.. warning:: This endpoint is only available using IEX Cloud. See
:ref:`Migrating` for more information.
"""
return Metadata(**kwargs).fetch()
|
9f4b506bdf978f525e26d7f976a0fdc2f483ae0f
| 3,647,858
|
import torch
def get_resnet50_moco_state_dict() -> dict:
"""
Get weight of ResNet50 trained with MoCo.
Returns:
(dict): Parameters and persistent buffers of ResNet50.
"""
model_path = get_model_root() / "resnet50_moco.pth"
if not model_path.exists():
# TODO download this from remote
raise RuntimeError(
"Please download and put 'resnet50_moco.pth' to models folder"
)
# the original weight is actually a checkpoint
# https://github.com/bl0/moco
    # this includes hyperparameters and everything else we want
checkpoint = torch.load(model_path)
# we need to drop all prefix from this checkpoint
prefix = len("module.")
state_dict = {k[prefix:]: v for k, v in checkpoint["model"].items()}
return state_dict
|
953f5e0fb037f9315173345910b8808cb400b9f4
| 3,647,859
|
from keystoneclient import service_catalog
import json
import six
import base64
def _validate_client(redis_client, url, tenant, token, env, blacklist_ttl,
max_cache_life):
"""Update the env with the access information for the user
:param redis_client: redis.Redis object connected to the redis cache
:param url: Keystone Identity URL to authenticate against
:param tenant: tenant id of user data to retrieve
:param token: auth_token for the tenant_id
:param env: environment variable dictionary for the client connection
:param blacklist_ttl: time in milliseconds for blacklisting failed tokens
:param max_cache_life: time in seconds for the maximum time a cache entry
should remain in the cache of valid data
:returns: True on success, otherwise False
"""
def _management_url(*args, **kwargs):
return url
def patch_management_url():
service_catalog.ServiceCatalog.url_for = _management_url
patch_management_url()
try:
if _is_token_blacklisted(redis_client, token):
return False
# Try to get the client's access information
access_info = _get_access_info(redis_client,
url,
tenant,
token,
blacklist_ttl,
max_cache_life)
if access_info is None:
LOG.debug('Unable to get Access info for {0}'.format(tenant))
return False
# provided data was valid, insert the information into the environment
env['HTTP_X_IDENTITY_STATUS'] = 'Confirmed'
env['HTTP_X_USER_ID'] = access_info.user_id
env['HTTP_X_USER_NAME'] = access_info.username
env['HTTP_X_USER_DOMAIN_ID'] = access_info.user_domain_id
env['HTTP_X_USER_DOMAIN_NAME'] = access_info.user_domain_name
env['HTTP_X_ROLES'] = ','.join(role for role in access_info.role_names)
if access_info.has_service_catalog():
# Convert the service catalog to JSON
service_catalog_data = json.dumps(
access_info.service_catalog.catalog)
# convert service catalog to unicode to try to help
# prevent encode/decode errors under python2
if six.PY2: # pragma: no cover
u_service_catalog_data = service_catalog_data.decode('utf-8')
else: # pragma: no cover
u_service_catalog_data = service_catalog_data
# Convert the JSON string data to strict UTF-8
utf8_data = u_service_catalog_data.encode(
encoding='utf-8', errors='strict')
# Store it as Base64 for transport
env['HTTP_X_SERVICE_CATALOG'] = base64.b64encode(utf8_data)
try:
decode_check = base64.b64decode(env['HTTP_X_SERVICE_CATALOG'])
except Exception:
LOG.debug('Failed to decode the data properly')
return False
if decode_check != utf8_data:
LOG.debug(
'Decode Check: decoded data does not match '
'encoded data'
)
return False
# Project Scoped V3 or Tenant Scoped v2
# This can be assumed since we validated using X_PROJECT_ID
# and therefore have at least a v2 Tenant Scoped Token
if access_info.project_scoped:
env['HTTP_X_PROJECT_ID'] = access_info.project_id
env['HTTP_X_PROJECT_NAME'] = access_info.project_name
# Domain-Scoped V3
if access_info.domain_scoped:
env['HTTP_X_DOMAIN_ID'] = access_info.domain_id
env['HTTP_X_DOMAIN_NAME'] = access_info.domain_name
# Project-Scoped V3 - X_PROJECT_NAME is only unique
# within the domain
if access_info.project_scoped and (
access_info.domain_scoped):
env['HTTP_X_PROJECT_DOMAIN_ID'] = access_info.project_domain_id
env['HTTP_X_PROJECT_DOMAIN_NAME'] = access_info.project_domain_name
return True
except exceptions.RequestEntityTooLarge:
LOG.debug('Request entity too large error from authentication server.')
raise
except Exception as ex:
msg = 'Error while trying to authenticate against {0} - {1}'.format(
url,
str(ex)
)
LOG.debug(msg)
return False
|
9041786ecce14bc0af005d320e8ff9db49a07fc7
| 3,647,860
|
import os
import shutil
def _fetch(data_filename: str) -> str:
"""Fetch a given data file from either the local cache or the repository.
This function provides the path location of the data file given
its name in the histolab repository.
Parameters
----------
data_filename: str
Name of the file in the histolab repository. e.g.
'breast/sample1.svs'.
Returns
-------
resolved_path: str
Path of the local file
Raises
------
KeyError:
If the filename is not known to the histolab distribution.
ModuleNotFoundError:
If the filename is known to the histolab distribution but pooch is not
installed.
ConnectionError:
If the dataset has not been downloaded yet and histolab is unable to connect
to the internet
"""
resolved_path = os.path.join(data_dir, "..", data_filename)
expected_hash = registry[data_filename]
# Case 1:
# The file may already be in the data_dir.
# We may have decided to ship it in the histolab distribution.
if _has_hash(resolved_path, expected_hash):
# Nothing to be done, file is where it is expected to be
return resolved_path
# Case 2:
# The user is using a cloned version of the github repo, which
# contains both the publicly shipped data, and test data.
# In this case, the file would be located relative to the
# histolab_distribution_dir
gh_repository_path = os.path.join(histolab_distribution_dir, data_filename)
if _has_hash(gh_repository_path, expected_hash):
parent = os.path.dirname(resolved_path)
os.makedirs(parent, exist_ok=True)
shutil.copy2(gh_repository_path, resolved_path)
return resolved_path
# Case 3:
# Pooch not found.
if image_fetcher is None:
raise ModuleNotFoundError(
"The requested file is part of the histolab distribution, "
"but requires the installation of an optional dependency, pooch. "
"To install pooch, use your preferred python package manager. "
"Follow installation instruction found at "
"https://www.fatiando.org/pooch/latest/install.html"
)
# Case 4:
# Pooch needs to download the data. Let the image fetcher search for
# our data. A ConnectionError is raised if no internet connection is
# available.
try:
resolved_path = image_fetcher.fetch(data_filename)
except HTTPError as httperror:
raise HTTPError(f"{httperror}")
except ConnectionError: # pragma: no cover
# If we decide in the future to suppress the underlying 'requests'
# error, change this to `raise ... from None`. See PEP 3134.
raise ConnectionError(
"Tried to download a histolab dataset, but no internet "
"connection is available."
)
return resolved_path
|
0eadbd2e2c47a9f4ce3b8992c8795d69281a9ee4
| 3,647,861
|
import os
def subset_and_group_svs(input_dataset, sample_subset, sample_remap, sample_type, ignore_missing_samples, write_subsetted_bed=False):
"""
Parses raw SV calls from the input file into the desired SV output format for samples in the given subset
:param input_dataset: file path for the raw SV calls
:param sample_subset: optional list of samples to subset to
:param sample_remap: optional mapping of raw sample ids to seqr sample ids
:param sample_type: sample type (WES/WGS)
:param ignore_missing_samples: whether or not to fail if samples in the subset have no raw data
:param write_subsetted_bed: whether or not to write a bed file with only the subsetted samples
:return: dictionary of parsed SVs keyed by ID
"""
parsed_svs_by_name = {}
found_samples = set()
skipped_samples = set()
out_file_path = None
if write_subsetted_bed:
file_name = 'subset_{}'.format(os.path.basename(input_dataset))
out_file_path = os.path.join(os.path.dirname(input_dataset), file_name)
def _parse_row(row, header_indices):
sample_id = get_field_val(row, SAMPLE_COL, header_indices, format_kwargs={'sample_type': sample_type})
if sample_remap and sample_id in sample_remap:
sample_id = sample_remap[sample_id]
if sample_subset is None or sample_id in sample_subset:
parse_sv_row(row, parsed_svs_by_name, header_indices, sample_id)
found_samples.add(sample_id)
return True
else:
skipped_samples.add(sample_id)
return False
load_file(input_dataset, _parse_row, out_file_path=out_file_path)
logger.info('Found {} sample ids'.format(len(found_samples)))
if sample_subset:
if len(found_samples) != len(sample_subset):
missed_samples = sample_subset - found_samples
missing_sample_error = 'Missing the following {} samples:\n{}'.format(
len(missed_samples), ', '.join(sorted(missed_samples))
)
if ignore_missing_samples:
logger.info(missing_sample_error)
else:
logger.info('Samples in callset but skipped:\n{}'.format(', '.join(sorted(skipped_samples))))
raise Exception(missing_sample_error)
return parsed_svs_by_name
|
9397afcbbfa93155b4ae197f44c27267e6334aca
| 3,647,862
|
import numpy as np
import pandas as pd
def load_data():
    """
    Load the iris dataset
    :return: the loaded data as a matrix
    """
data = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data", header=None)
    # use only the first two classes
data = data[:100]
    # map the class labels to 0 and 1
data[4] = np.where(data.iloc[:, -1] == 'Iris-setosa', 0, 1)
data = np.asmatrix(data, dtype='float64')
return data
|
fe2a1a999406f23676e58f75f1d5999e9f0697e8
| 3,647,863
|
from datetime import datetime
from fastapi import Depends
from sqlmodel import Session, select
async def activate_clients(
*,
client_id: int,
session: Session = Depends(get_session),
):
"""
Activate a client using its id as a key.
Parameters
----------
client_id : int
ID of the client to be activated.
session : Session
SQL session that is to be used to activate a client.
Defaults to creating a dependency on the running SQL model session.
"""
statement = select(Client).where(Client.id == client_id)
client_to_update = session.exec(statement).one()
client_to_update.is_active = True
client_to_update.updated_at = datetime.now()
session.add(client_to_update)
session.commit()
session.refresh(client_to_update)
return client_to_update
|
bdd679d94fc68d4c4c75f410d1ed3eec193f868b
| 3,647,864
|
from sys import stdout
def group(spanins, prob_threshold, scope, valid_relation_set, mode):
"""
    for each unary instance that is classified as being in a relation, get the other argument that is also classified as being in the same relation but with a different role
ner1/2: list of unary instances
"""
assert scope in ['intra', 'cross']
# Grouping requirement
    # two could be in the same relation if all of the following requirements are satisfied:
# 1. both are assigned with pos pred_relation_label
# 2. one must be subj and one must be obj
# 3. relation must be the same
# 4. ner1, ner2, relation exists in the valid_relation_set
if scope == 'intra':
print("getting sent2entities")
sent2entities = get_sent2entities(spanins)
rels = []
num_sents = len(sent2entities)
for i, sentid in enumerate(sent2entities):
stdout.write(f"\rgrouping, {i}/{num_sents}")
stdout.flush()
rels.extend(group_for_sent(sent2entities[sentid], valid_relation_set, prob_threshold, mode))
print()
return rels
|
d76a02d341be1e836cfba560b11acfcc3f1527b9
| 3,647,865
|
def simulate_until_target_substate_or_max_t(
_simulate_until_attractor_or_target_substate_or_max_t, initial_state, perturbed_nodes_by_t,
predecessor_node_lists, truth_tables):
"""
Perform simulation to figure whether it reaches target substate.
Does not return states of simulations that don't reach target substate.
Target substate is not considered as reached until all the
perturbations are carried out. Initial state can be considered as
reached target substate if no perturbations are present.
:param _simulate_until_attractor_or_target_substate_or_max_t: [function] to perform simulation
:param initial_state: initial state of the network
:param perturbed_nodes_by_t: dict (by time steps) of dicts (by nodes) of node states
:param predecessor_node_lists: list of predecessor node lists
:param truth_tables: list of dicts (key: tuple of predecessor node states, value: resulting node state)
:return: list of states where last state contains target substate,
or None if target substate was not reached
"""
states, *_, target_substate_is_reached, _ = _simulate_until_attractor_or_target_substate_or_max_t(
initial_state, perturbed_nodes_by_t, predecessor_node_lists, truth_tables)
return states if target_substate_is_reached else None
|
526ef8085dcbe4bcbc112c3bd4626ec5247e2f97
| 3,647,866
|
import requests
from bs4 import BeautifulSoup
def query_snpedia_online(rsid):
"""
    Query SNPedia online for the given rsid and return the parsed genotype table.
    @param rsid: SNP identifier (rsID) to look up
"""
rsid = rsid.capitalize()
url = "https://bots.snpedia.com/index.php"
rsid_url = f"{url}/{rsid}"
page = requests.get(rsid_url)
soup = BeautifulSoup(page.content, "html.parser")
columns, genotypes = parse_snpedia_online(soup, rsid)
return columns, genotypes
|
138b252917b027564826212cfe96abafef3071b3
| 3,647,867
|
import os
import json
from datetime import timedelta
from urllib.parse import parse_qs
import aiohttp
import arrow
from oauthlib.oauth2 import WebApplicationClient
async def get_session(client_id: str, client_secret: str) -> AuthToken:
"""
Use the Authorization Code Grant flow to get a token.
This opens a browser tab.
"""
refresh_token_file = os.path.join(config.config_dir(), '.refresh.token')
base_url = 'https://bitbucket.org/site/oauth2'
# If we have a refresh token, use that
existing_token = None
if os.path.isfile(refresh_token_file):
with open(refresh_token_file) as f:
existing_token = json.load(f)
now = arrow.utcnow()
if existing_token and arrow.get(existing_token['expires_at']) - now > timedelta(minutes=5):
log.info('Found existing token')
return existing_token
# Otherwise, send the user to the browser flow
redirect_uri = 'https://localhost:8888'
client = WebApplicationClient(client_id)
auth_url = client.prepare_request_uri(f'{base_url}/authorize', redirect_uri=redirect_uri)
print(f'Please go to the following link, then copy the redirected URL back here.\n\n\t{auth_url}\n')
code = client.parse_request_uri_response(input('URL: '))['code']
    token_request_params = parse_qs(client.prepare_request_body(code=code, redirect_uri=redirect_uri))
async with aiohttp.ClientSession() as session:
resp = await session.post(
f'{base_url}/access_token',
headers={'Authorization': aiohttp.BasicAuth(client_id, client_secret).encode()},
            data=token_request_params
)
if resp.status != 200:
log.error(await resp.text())
raise Exception('Could not authenticate with the Bitbucket API')
token: AuthToken = await resp.json()
token['expires_at'] = now.shift(seconds=token['expires_in']).format(arrow.FORMAT_RFC3339)
with open(refresh_token_file, 'w') as f:
json.dump(token, f)
return token
|
e92625219a3c48f805c0e18a172d1e7a75fceca1
| 3,647,868
|
def lower(value: str): # Only one argument.
"""Converts a string into all lowercase"""
return value.lower()
|
59da46b7df5a2afdb106703568635b94174ea57c
| 3,647,869
|
import pprint
from flask import jsonify, request
def validate_oidc():
"""Demonstrates how an access token is validated"""
token = request.headers['Authorization'].split(' ')[1]
message = check_oidc_token(token)
pprint.pprint(message)
return jsonify({
'success': message['success']
})
|
d76d510d1b53a10e12ac9a5c085c0650bc8fb965
| 3,647,870
|
def merge(a, b, path=None):
"""From https://stackoverflow.com/a/7205107"""
if path is None:
path = []
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge(a[key], b[key], path + [str(key)])
elif a[key] == b[key]:
pass # same leaf value
else:
pass # ignore conflicts, left dict wins.
else:
a[key] = b[key]
return a
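A small example (added): nested keys are merged recursively, and on a scalar conflict the left dictionary's value is kept.
left = {"db": {"host": "localhost", "port": 5432}, "debug": True}
right = {"db": {"port": 5433, "user": "admin"}, "debug": False}
print(merge(left, right))
# {'db': {'host': 'localhost', 'port': 5432, 'user': 'admin'}, 'debug': True}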
|
8f7990f28168fe0e3eaca790baddc0088baedf65
| 3,647,871
|
import numpy as np
def norm_sq(f, alpha, n, L_mat_long, step):
    r""" This function is the negative log-likelihood functional with the squared L2 norm
    of \hat{f_\beta} as the regularization term.
    """
L_mat=L_mat_long.reshape(n,len(f))
f[f <=0] = 1e-6
val=np.log(np.dot(L_mat,f))
return -sum(val)/n+ alpha*step**2*sum(f**2)
|
11a2b0fbd296b344b94cd3d5509bb0d4a12ab5fc
| 3,647,872
|
def get_applications(device_id: str = None, rpc_channel: InstrumentServer = None):
"""
    Get the list of applications installed on the device
    :param device_id: identifier of the target device
    :param rpc_channel: an existing InstrumentServer channel, or None to create a temporary one
    :return: the parsed application list
"""
if not rpc_channel:
_rpc_channel = init(device_id)
else:
_rpc_channel = rpc_channel
application_list = _rpc_channel.call(
"com.apple.instruments.server.services.device.applictionListing",
"installedApplicationsMatching:registerUpdateToken:",
{}, "").parsed
if not rpc_channel:
_rpc_channel.stop()
return application_list
|
150884e18349003e33011477603e2a6462bd8492
| 3,647,873
|
import xarray as xr
def open_mfdataset(files, use_cftime=True, parallel=True, data_vars='minimal', chunks={'time':1},
coords='minimal', compat='override', drop=None, **kwargs):
"""optimized function for opening large cf datasets.
based on https://github.com/pydata/xarray/issues/1385#issuecomment-561920115
"""
def drop_all_coords(ds):
return ds.reset_coords(drop=True)
ds = xr.open_mfdataset(files, parallel=parallel, decode_times=False, combine='by_coords',
preprocess=drop_all_coords, decode_cf=False, chunks=chunks,
data_vars=data_vars, coords=coords, compat=compat, **kwargs)
return xr.decode_cf(ds, use_cftime=use_cftime)
|
ef31c732919f6b3cda0c6e5d9114fac7c39f40f7
| 3,647,874
|
import numpy as np
import scipy.sparse as sp
from scipy.sparse import linalg as ln
def wls_sparse(X, y, w=1., calc_cov=False, verbose=False, **kwargs):
    """
    Weighted least squares solved with scipy's sparse LSQR.
    Parameters
    ----------
    X : design matrix (dense array or scipy sparse matrix)
    y : observation vector
    w : weights (scalar or array); None or 1. gives an unweighted fit
    calc_cov : if True, also compute and return the full parameter covariance
    verbose : passed to lsqr as its `show` argument
    kwargs : additional keyword arguments passed to lsqr
    Returns
    -------
    p_sol, p_var (and p_cov when calc_cov is True)
    """
# The var returned by ln.lsqr is normalized by the variance of the error. To
# obtain the correct variance, it needs to be scaled by the variance of the error.
if w is None: # gracefully default to unweighted
w = 1.
w_std = np.asarray(np.sqrt(w))
wy = np.asarray(w_std * y)
w_std = np.broadcast_to(
np.atleast_2d(np.squeeze(w_std)).T, (X.shape[0], 1))
if not sp.issparse(X):
wX = w_std * X
else:
wX = X.multiply(w_std)
# noinspection PyTypeChecker
out_sol = ln.lsqr(wX, wy, show=verbose, calc_var=True, **kwargs)
p_sol = out_sol[0]
# The residual degree of freedom, defined as the number of observations
# minus the rank of the regressor matrix.
nobs = len(y)
npar = X.shape[1] # ==rank
degrees_of_freedom_err = nobs - npar
# wresid = np.exp(wy) - np.exp(wX.dot(p_sol)) # this option is better.
# difference is small
wresid = wy - wX.dot(p_sol) # this option is done by statsmodel
err_var = np.dot(wresid, wresid) / degrees_of_freedom_err
if calc_cov:
# assert np.any()
arg = wX.T.dot(wX)
if sp.issparse(arg):
# arg is square of size double: 1 + nt + no; single: 2 : nt
# arg_inv = np.linalg.inv(arg.toarray())
arg_inv = np.linalg.lstsq(
arg.todense(), np.eye(npar), rcond=None)[0]
else:
# arg_inv = np.linalg.inv(arg)
arg_inv = np.linalg.lstsq(
arg, np.eye(npar), rcond=None)[0]
# for tall systems pinv (approximate) is recommended above inv
# https://vene.ro/blog/inverses-pseudoinverses-numerical-issues-spee
# d-symmetry.html
# but better to solve with eye
# p_cov = np.array(np.linalg.pinv(arg) * err_var)
# arg_inv = np.linalg.pinv(arg)
# else:
# try:
# arg_inv = np.linalg.lstsq(arg, np.eye(nobs), rcond=None)[0]
#
# except MemoryError:
# print('Try calc_cov = False and p_cov = np.diag(p_var); '
# 'And neglect the covariances.')
# arg_inv = np.linalg.lstsq(arg, np.eye(nobs), rcond=None)[0]
p_cov = np.array(arg_inv * err_var)
p_var = np.diagonal(p_cov)
assert np.all(p_var >= 0), 'Unable to invert the matrix' + str(p_var)
return p_sol, p_var, p_cov
else:
p_var = out_sol[-1] * err_var # normalized covariance
return p_sol, p_var
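A usage sketch (added, not part of the original source): a small weighted fit with a dense design matrix, taking the weights as inverse error variances.
rng = np.random.default_rng(42)
X_demo = rng.standard_normal((50, 3))
beta_true = np.array([1.0, -2.0, 0.5])
y_demo = X_demo @ beta_true + 0.01 * rng.standard_normal(50)
w_demo = np.full(50, 1.0 / 0.01 ** 2)
p_sol, p_var = wls_sparse(X_demo, y_demo, w=w_demo)
print(p_sol)  # close to [1.0, -2.0, 0.5]
print(p_var)  # per-parameter variance estimates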
|
ff0bec6d6cdcee85506514348e8a812926427dee
| 3,647,875
|
import os
def register_module():
"""Registers this module for use."""
def on_module_disable():
tags.Registry.remove_tag_binding(MathTag.binding_name)
def on_module_enable():
tags.Registry.add_tag_binding(MathTag.binding_name, MathTag)
global_routes = [
(RESOURCES_URI + '/.*', tags.ResourcesHandler),
(MATHJAX_URI + '/(fonts/.*)', sites.make_zip_handler(os.path.join(
appengine_config.BUNDLE_ROOT, 'lib', 'mathjax-fonts-2.3.0.zip'))),
(MATHJAX_URI + '/(.*)', sites.make_zip_handler(os.path.join(
appengine_config.BUNDLE_ROOT, 'lib', 'mathjax-2.3.0.zip')))]
namespaced_routes = []
global custom_module # pylint: disable=global-statement
custom_module = custom_modules.Module(
'Mathematical Formula Display',
'Provides a custom tag to embed mathematical formulas using TeX or MML.'
, global_routes, namespaced_routes,
notify_module_disabled=on_module_disable,
notify_module_enabled=on_module_enable)
return custom_module
|
6cdba610a18d4893b13bdf34a30c2fe7b05ff970
| 3,647,876
|
from typing import Tuple
import numpy as np
def sobel_gradients(source: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Computes partial derivations to detect angle gradients.
"""
grad_x = generic_filter(source, np.matrix([
[1, 0, -1],
[2, 0, -2],
[1, 0, -1]]
))
grad_y = generic_filter(source, np.matrix([
[1, 2, 1],
[0, 0, 0],
[-1, -2, -1]]
))
def normalize_angle(x: float) -> int:
x = round(x % 180)
if x >= 0 and x <= 22.5:
return 0
elif x > 22.5 and x <= 67.5:
return 45
elif x > 67.5 and x <= 112.5:
return 90
elif x > 112.5 and x <= 157.5:
return 135
elif x > 157.5 and x <= 180:
return 0
thetas = np.arctan2(grad_y, grad_x)
thetas = np.vectorize(normalize_angle)(thetas)
grads = np.hypot(grad_y, grad_x)
return grads, thetas
|
19c3e3eec46bee738b1e80dd73c5477f72dcf73c
| 3,647,877
|
from typing import Mapping
def flat_dict(d, prefix=""):
"""
Loop through dictionary d
    Add any key, val pairs to the returned dict ret
Add the prefix to any key param
Recurse if encountered value is a nested dictionary.
"""
if not isinstance(d, Mapping):
return d
ret = {}
for key, val in d.items():
if isinstance(val, Mapping):
ret = {**ret, **flat_dict(val, prefix=prefix + str(key) + "_")}
else:
ret[prefix + str(key)] = val
return ret
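Example (added): nested keys are joined with underscores into a single flat dictionary.
cfg = {"model": {"lr": 0.01, "layers": {"hidden": 64}}, "seed": 7}
print(flat_dict(cfg))
# {'model_lr': 0.01, 'model_layers_hidden': 64, 'seed': 7}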
|
f0c1f519126dea89c25ee38a9b0dd788c40d2088
| 3,647,878
|
import logging
def _get_filehandler_with_formatter(logname, formatter=None):
""" Return a logging FileHandler for given logname using a given
logging formatter
    :param logname: Name of the file where logs will be stored
:param formatter: An instance of logging.Formatter or None if the default
should be used
:return:
"""
handler = logging.FileHandler(logname)
if formatter is not None:
handler.setFormatter(formatter)
return handler
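Usage sketch (added): attach the returned handler to a logger; the file name and format string here are arbitrary examples.
fmt = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
logger = logging.getLogger("example")
logger.addHandler(_get_filehandler_with_formatter("example.log", fmt))
logger.warning("this line is written to example.log")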
|
1cc6f83480e691c4c54c359deabd6364da65f320
| 3,647,879
|
import pandas as pd
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
def gen_data_tensors(
df: pd.DataFrame,
lag: int = 6,
batch_size: int = 32,
validation_ratio: float = 0.2
) -> (DataLoader, DataLoader, TensorDataset, TensorDataset):
"""
Primary goal: create dataloader object.
"""
x_train, y_train = generate_supervised(df, lag=lag)
# Transform DataFrame to NumpyArray.
x_train, y_train = map(lambda x: x.values, (x_train, y_train))
# Generating Validation Set.
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=validation_ratio, shuffle=True
)
# Transform to Tensor
x_train, y_train, x_val, y_val = map(
torch.tensor, (x_train, y_train, x_val, y_val)
)
assert batch_size <= x_train.shape[0] and batch_size <= x_val.shape[0],\
"Batch size cannot be greater than number of training instances."
train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
val_ds = TensorDataset(x_val, y_val)
val_dl = DataLoader(val_ds, batch_size=batch_size, shuffle=True)
return train_dl, val_dl, train_ds, val_ds
|
1451d38bd695163d84784f5a6b9b791c3987d56b
| 3,647,880
|
import json
def staff_for_site():
"""
Used by the Req/Req/Create page
- note that this returns Person IDs
"""
try:
site_id = request.args[0]
except:
result = current.xml.json_message(False, 400, "No Site provided!")
else:
table = s3db.hrm_human_resource
ptable = db.pr_person
query = (table.site_id == site_id) & \
(table.deleted == False) & \
(table.status == 1) & \
((table.end_date == None) | \
(table.end_date > request.utcnow)) & \
(ptable.id == table.person_id)
rows = db(query).select(ptable.id,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
orderby=ptable.first_name)
result = []
append = result.append
for row in rows:
append({"id" : row.id,
"name" : s3_fullname(row)
})
result = json.dumps(result)
response.headers["Content-Type"] = "application/json"
return result
|
d8890f31ae67abf72cdfbd14dd2af08762131e90
| 3,647,881
|
def element_z(sym_or_name):
"""Convert element symbol or name into a valid element atomic number Z.
Args:
sym_or_name: string type representing an element symbol or name.
Returns:
Integer z that is a valid atomic number matching the symbol or name.
Raises:
ElementZError: if the symbol or name cannot be converted.
"""
try:
return _Z_FROM_SYMBOL[validated_symbol(sym_or_name)]
except ElementSymbolError:
pass
try:
return _Z_FROM_NAME[validated_name(sym_or_name)]
except ElementNameError:
raise ElementZError("Must supply either the element symbol or name")
|
b79fec9062539f98ad8c96cdc41a52f7e9c67fd9
| 3,647,882
|
from typing import Tuple
def to_int(s: str) -> Tuple[bool, int]:
"""Convert a string s to an int, if possible."""
try:
n = int(s)
return True, n
except Exception:
return False, 0
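Example (added): the boolean flags whether the conversion succeeded.
print(to_int("42"))   # (True, 42)
print(to_int("4.2"))  # (False, 0)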
|
27d24b881f5987037f750a1cee022f7b1daa7c33
| 3,647,883
|
def simple_computation(maximum_terms:int=None, configuration_of=None):
"""
Simple 4-operand computations
    Fraction terms are removed because division operations are already represented as fractions
    Parentheses (the expression argument of random_term) are disabled because they can cause overflow
:return: Problem object
"""
if not configuration_of: configuration_of = 'simple_computation'
func_config = combine_configurations(type_config[configuration_of], global_config)
if maximum_terms:
func_config['maximum_terms'] = maximum_terms
func_config['symbol'] = process_raw_symbol(func_config['symbol'])
number_of_terms = randint(2, func_config['maximum_terms'])
random_term_kwargs = {'interval':func_config['interval'],
'denominator_interval': func_config['denominator_interval'],
'float_precision': func_config['float_precision'],
'frac': False,
'expression': False,
'symbol': func_config['symbol']}
str_question = str(random_term(**random_term_kwargs))
for term_number in range(number_of_terms):
# operand term
str_question += choice(['+', '-', '*', '/'])+str(random_term(**random_term_kwargs))
answer = sympify(str_question) if func_config['symbol'] else sympify(str_question).round(func_config['float_precision'])
question = sympify(str_question, evaluate=False)
problem = Problem(question, answer)
return problem
|
0dae4396f74b9a254d0c882c022018c7a69d69cd
| 3,647,884
|
def add_torrents_from_folder(path, transmission_url, torrentleech_username, torrentleech_password, torrentleech_rss_key):
"""Console script for media_server_utils."""
core.add_torrents_from_folder(path, transmission_url, torrentleech_username, torrentleech_password, torrentleech_rss_key)
return 0
|
ac058ff014c76a3af4054076961b52011bc7329c
| 3,647,885
|
import networkx as nx
def cut_graph(G, w):
"""
Cut a graph down to a given depth
Inputs: - G Input graph
- w Depth to cut to
Output: - cut_G Cut graph
"""
# Copy the initial graph and get the number of nodes
cut_G = G.copy()
N = len(G.nodes)
# Check all nodes
for i in range(N):
# If the depth is greater than w, remove the node
if nx.shortest_path_length(G, source=0, target=i) > w:
cut_G.remove_node(i)
return cut_G
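Example (added): on a path graph 0-1-2-3-4, cutting at depth 2 from node 0 keeps only the first three nodes.
G_demo = nx.path_graph(5)
print(sorted(cut_graph(G_demo, 2).nodes))  # [0, 1, 2]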
|
314f804d2d42146e1dcfb1f4a5373f92a4fe2f17
| 3,647,886
|
def archive_filter_search(articles_qs):
"""
gets the qs and filters and sends back to the hook for rendering.
"""
return articles_qs.exclude(updates__article__stage=STAGE_PUBLISHED)
|
2f5783de98110b9b74cabcf571a5919b409d1479
| 3,647,887
|
from typing import List
def init_plotscript(config, markets: List, startup_candles: int = 0):
"""
Initialize objects needed for plotting
:return: Dict with candle (OHLCV) data, trades and pairs
"""
if "pairs" in config:
pairs = expand_pairlist(config['pairs'], markets)
else:
pairs = expand_pairlist(config['exchange']['pair_whitelist'], markets)
# Set timerange to use
timerange = TimeRange.parse_timerange(config.get('timerange'))
data = load_data(
datadir=config.get('datadir'),
pairs=pairs,
timeframe=config['timeframe'],
timerange=timerange,
startup_candles=startup_candles,
data_format=config.get('dataformat_ohlcv', 'json'),
)
if startup_candles and data:
min_date, max_date = get_timerange(data)
logger.info(f"Loading data from {min_date} to {max_date}")
timerange.adjust_start_if_necessary(timeframe_to_seconds(config['timeframe']),
startup_candles, min_date)
no_trades = False
filename = config.get('exportfilename')
if config.get('no_trades', False):
no_trades = True
elif config['trade_source'] == 'file':
if not filename.is_dir() and not filename.is_file():
logger.warning("Backtest file is missing skipping trades.")
no_trades = True
try:
trades = load_trades(
config['trade_source'],
db_url=config.get('db_url'),
exportfilename=filename,
no_trades=no_trades,
strategy=config.get('strategy'),
)
except ValueError as e:
raise OperationalException(e) from e
if not trades.empty:
trades = trim_dataframe(trades, timerange, 'open_date')
return {"ohlcv": data,
"trades": trades,
"pairs": pairs,
"timerange": timerange,
}
|
220118a5d438227932ba9473471d59ff03b44412
| 3,647,888
|
from typing import List
from typing import Callable
def get_one(data: List[LogEntry], filterfun: Callable) -> LogEntry:
"""Get a single entry and assert that after filtering only a single entry
remains."""
filtered = list(filter(filterfun, data))
if len(filtered) != 1:
raise ValueError(f"Entries not unique after filtering: {filtered}")
return filtered[0]
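Example (added): LogEntry is whatever record type the surrounding module defines; plain dicts stand in for it here.
entries = [{"id": 1, "level": "INFO"}, {"id": 2, "level": "ERROR"}]
print(get_one(entries, lambda e: e["level"] == "ERROR"))  # {'id': 2, 'level': 'ERROR'}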
|
ece1b0b9c654f85eda89e2fc8736c84b4a2ca9ca
| 3,647,889
|
import os
from collections import defaultdict
from typing import IO, Optional, Set, Text, Tuple
def parse_ignorelist(f):
# type: (IO[Text]) -> Tuple[Ignorelist, Set[Text]]
"""
Parse the ignorelist file given by `f`, and return the parsed structure.
:returns: a tuple of an Ignorelist and a set of files that are completely
skipped by the linter (i.e. have a '*' entry).
"""
data = defaultdict(lambda:defaultdict(set)) # type: Ignorelist
skipped_files = set() # type: Set[Text]
for line in f:
line = line.strip()
if not line or line.startswith("#"):
continue
parts = [item.strip() for item in line.split(":")]
if len(parts) == 2:
error_types_s, file_match = parts
line_number = None # type: Optional[int]
else:
error_types_s, file_match, line_number_s = parts
line_number = int(line_number_s)
error_types = {item.strip() for item in error_types_s.split(",")}
file_match = os.path.normcase(file_match)
if "*" in error_types:
skipped_files.add(file_match)
else:
for error_type in error_types:
data[error_type][file_match].add(line_number)
return data, skipped_files
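Usage sketch (added): parse a small in-memory ignorelist; the error types and paths below are made-up examples.
import io
sample = io.StringIO(
    "# comment lines are skipped\n"
    "TRAILING WHITESPACE: css/fonts.css: 12\n"
    "*: resources/testharness.js\n"
)
data, skipped = parse_ignorelist(sample)
print(skipped)                            # {'resources/testharness.js'}
print(dict(data["TRAILING WHITESPACE"]))  # {'css/fonts.css': {12}}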
|
17fb3af2a4cd00e93f89023928493cc2cc2fc33e
| 3,647,890
|
from typing import Optional
def create(env_name: str,
episode_length: int = 1000,
action_repeat: int = 1,
auto_reset: bool = True,
batch_size: Optional[int] = None,
**kwargs) -> Env:
"""Creates an Env with a specified brax system."""
env = _envs[env_name](**kwargs)
if episode_length is not None:
env = wrappers.EpisodeWrapper(env, episode_length, action_repeat)
if batch_size:
env = wrappers.VectorWrapper(env, batch_size)
if auto_reset:
env = wrappers.AutoResetWrapper(env)
return env # type: ignore
|
1d7e8bf147843799f01e7a894b86fce74ca86da7
| 3,647,891
|
def from_software_version(software_version):
"""
    Returns the product version dependent limits_constants. These are based on
    the software version running on the product, and can change depending on
    when you ask a cluster that is in the middle of an upgrade.
Args:
software_version: (str) software version ex "3.1.2.0" or "2.2.7"
"""
return _get_limits(software_version=software_version)
|
fc978610a6aa55a956bb849cae107bc134934f55
| 3,647,892
|
def _from_parse_feature(parse_feature):
"""Convert a single feature spec to a ColumnSchema."""
# FixedLenFeature
if isinstance(parse_feature, tf.FixedLenFeature):
representation = FixedColumnRepresentation(parse_feature.default_value)
return ColumnSchema(parse_feature.dtype, parse_feature.shape,
representation)
# FixedLenSequenceFeature
if isinstance(parse_feature, tf.FixedLenSequenceFeature):
raise ValueError('DatasetSchema does not support '
'FixedLenSequenceFeature yet.')
# VarLenFeature
if isinstance(parse_feature, tf.VarLenFeature):
representation = ListColumnRepresentation()
return ColumnSchema(parse_feature.dtype, [None], representation)
# SparseFeature
if isinstance(parse_feature, tf.SparseFeature):
index_field = SparseIndexField(name=parse_feature.index_key,
is_sorted=parse_feature.already_sorted)
representation = SparseColumnRepresentation(
value_field_name=parse_feature.value_key,
index_fields=[index_field])
return ColumnSchema(parse_feature.dtype, [parse_feature.size],
representation)
raise ValueError('Cannot interpret feature spec {} with type {}'.format(
parse_feature, type(parse_feature)))
|
9c7034c3b7663a0c49dc69dbf8507f90f4cacf83
| 3,647,893
|
import json
def conditional_patch_resource(
service_account_json, base_url, project_id, cloud_region, dataset_id, fhir_store_id
):
"""
If a resource is found based on the search criteria specified in
the query parameters, updates part of that resource by
applying the operations specified in a JSON Patch document.
"""
url = "{}/projects/{}/locations/{}".format(base_url, project_id, cloud_region)
# The search query in this request updates all Observations
# if the subject of the Observation is a particular patient.
resource_path = "{}/datasets/{}/fhirStores/{}/fhir/Observation".format(
url, dataset_id, fhir_store_id
)
# Make an authenticated API request
session = get_session(service_account_json)
headers = {"Content-Type": "application/json-patch+json"}
body = json.dumps(
[
{
"op": "replace",
"path": "/valueQuantity/value",
# Sets the BPM for all matching Observations to 80. This
# is the portion of the request being patched.
"value": 80,
}
]
)
# The search query is passed in as a query string parameter.
params = {"identifier": "my-code-system|ABC-12345"}
response = session.patch(resource_path, headers=headers, params=params, data=body)
response.raise_for_status()
print(response.url)
resource = response.json()
print(
"Conditionally patched all Observations with the "
"identifier 'my-code-system|ABC-12345' to use a BPM of 80."
)
print(json.dumps(resource, indent=2))
return resource
|
e04eada0184d38c8d0b1ec4620fd5ef1f0bc90d5
| 3,647,894
|
import numpy as np
def cal_big_F(p, f):
"""
calculate finite strain big F for linearized form
not fully tested
:param p: pressure
:param f: small f
:return: big F
"""
return p / (3. * f * np.power((1. + 2. * f), 2.5))
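A quick numerical check (added): big F for a pressure of 10 (in whatever units p carries) at a finite strain of f = 0.02.
print(cal_big_F(10.0, 0.02))  # ~151.1, same units as p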
|
2d98b9e525837cd8d6dd17266f92f959743ad8f2
| 3,647,895
|
from typing import List
def multiply_aug(data_aug: List[str], factor: int) -> List[str]:
"""
    The original idea here was to use this to speed up some VASP calculations for
    supercells by initializing the entire CHGCAR file.
    The current code does not deal with transformation of the Augmentation charges after regridding.
    This is a naive way to multiply the Augmentation data in the CHGCAR;
    a real working implementation will require analysis of the PAW projection operators.
    However, even with such an implementation, the speed up will be minimal due to VASP's internal
    minimization algorithms.
Args:
data_aug: The original augmentation data from a CHGCAR
factor: The multiplication factor (some integer number of times it gets repeated)
Returns:
List of strings for each line of the Augmentation data.
"""
res = [] # type: List[str]
cur_block = [] # type: List[str]
cnt = 0
for ll in data_aug:
if "augmentation" in ll:
if cur_block:
for j in range(factor):
cnt += 1
cur_block[
0
] = f"augmentation occupancies{cnt:>4}{cur_block[0].split()[-1]:>4}\n"
res.extend(cur_block)
cur_block = [ll]
else:
cur_block.append(ll)
else:
for j in range(factor):
cnt += 1
cur_block[
0
] = f"augmentation occupancies{cnt:>4}{cur_block[0].split()[-1]:>4}\n"
res.extend(cur_block)
return res
|
2baef4c98dbb83f1a08f11e58f3c4cf82ad8ea64
| 3,647,896
|
def _parse_instance_chain(chain_str):
    """Return the instance object resolved from the attribute chain string (e.g. 'obj.attr.subattr')."""
chain = chain_str.split('.')
instance_name = chain.pop(0)
attr = session['instances'][instance_name]
for attr_name in chain:
attr = getattr(attr, attr_name)
return attr
|
531b78ee80f3b6437b885ef89b7f285e6cf6a8a5
| 3,647,897
|
import math
import hypothesis.strategies as st
def epochs_lists(
draw,
start_time=math.inf,
max_epochs=5,
min_deme_size=FLOAT_EPS,
max_deme_size=FLOAT_MAX,
):
"""
A hypothesis strategy for creating lists of Epochs for a deme.
:param float start_time: The start time of the deme.
:param int max_epochs: The maximum number of epochs in the list.
"""
assert max_epochs >= 2
times = draw(
st.lists(
st.floats(
min_value=0,
max_value=min(FLOAT_MAX, start_time),
exclude_max=True,
width=32,
),
unique=True,
min_size=1,
max_size=max_epochs,
)
)
times.sort(reverse=True)
epochs = []
for i, end_time in enumerate(times):
start_size = draw(st.floats(min_value=min_deme_size, max_value=max_deme_size))
if i == 0 and math.isinf(start_time):
end_size = start_size
else:
end_size = draw(st.floats(min_value=min_deme_size, max_value=max_deme_size))
cloning_rate = draw(st.floats(min_value=0, max_value=1))
selfing_rate = draw(st.floats(min_value=0, max_value=prec32(1 - cloning_rate)))
epochs.append(
dict(
end_time=end_time,
start_size=start_size,
end_size=end_size,
cloning_rate=cloning_rate,
selfing_rate=selfing_rate,
)
)
return epochs
|
9eebece7ac1dc2f9ad6d13f7368de62a6db9433c
| 3,647,898
|
import torch
def mlp_layers(nch_input, nch_layers, b_shared=True, bn_momentum=0.1, dropout=0.0):
""" [B, Cin, N] -> [B, Cout, N] or
[B, Cin] -> [B, Cout]
"""
layers = []
last = nch_input
for i, outp in enumerate(nch_layers):
if b_shared:
weights = torch.nn.Conv1d(last, outp, 1)
else:
weights = torch.nn.Linear(last, outp)
layers.append(weights)
layers.append(torch.nn.BatchNorm1d(outp, momentum=bn_momentum))
layers.append(torch.nn.ReLU())
        if not b_shared and dropout > 0.0:
layers.append(torch.nn.Dropout(dropout))
last = outp
return layers
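Usage sketch (added): wrap the returned layer list in a Sequential to get a shared (pointwise Conv1d) MLP mapping [B, 3, N] to [B, 64, N].
net = torch.nn.Sequential(*mlp_layers(3, [64, 64], b_shared=True))
x = torch.randn(8, 3, 1024)
print(net(x).shape)  # torch.Size([8, 64, 1024])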
|
8085b99b828fcbadee191d90737d582f7dd9ce73
| 3,647,899
|