content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def list_rbd_volumes(pool):
    """Return the names of all volumes in a given ceph pool.

    :param pool: ceph pool name
    :return: list of volume names (empty when the pool has no volumes)
    """
    try:
        out, _err = _run_rbd('rbd', '-p', pool, 'ls')
    except processutils.ProcessExecutionError:
        # ``rbd ls`` fails on an empty pool; treat that as "no volumes".
        return []
    return [name.strip() for name in out.splitlines()]
from typing import Optional
from datetime import date, datetime
def parse_date(string: Optional[str]) -> Optional[date]:
    """
    Parse a string to a date.

    Only the first 10 characters are considered.  ISO-8601 parsing via
    ``isoparse`` is attempted first, then a strict ``YYYY-MM-DD`` parse.

    :param string: value to parse; non-strings and empty values yield None
    :return: the parsed date, or None when nothing could be parsed
    """
    # Fix: ``date`` was used in the return annotation but never imported,
    # raising NameError when the function was defined.
    if not string or not isinstance(string, str):
        return None
    try:
        return isoparse(string[:10]).date()
    except ValueError:
        pass
    try:
        return datetime.strptime(string[:10], "%Y-%m-%d").date()
    except ValueError:
        pass
    # Make the "unparseable" outcome explicit instead of falling off the end.
    return None
def butter_bandpass(lowcut, highcut, fs, order=5):
    """
    Design a Butterworth band-pass filter.

    Adapted from
    https://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
    Builds an order-``order`` Butterworth filter passing the frequency
    band [lowcut, highcut].

    :param lowcut: Lowcut frequency in Hz
    :param highcut: Highcut frequency in Hz
    :param fs: Sampling frequency in Hz
    :param order: width of influence in points
    :return: ``(b, a)`` polynomials to feed to butter_bandpass_filter
    """
    # scipy expects cutoffs normalised by the Nyquist frequency.
    nyquist = 0.5 * fs
    band = [lowcut / nyquist, highcut / nyquist]
    return sig.butter(order, band, btype='band')
import subprocess
def get_current_SSID():
    """Helper function to find the WiFi SSID name.

    Returns:
        str: WiFi SSID reported by ``iwgetid -r``, or "" on any failure
        (no wireless tools installed, no active interface, ...).
    """
    try:
        proc = subprocess.Popen(
            ["iwgetid", "-r"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        stdout, _stderr = proc.communicate()
        return stdout.strip().decode("utf-8")
    except Exception:
        return ""
def get_archives_to_prune(archives, hook_data):
    """Return list of archive keys to delete.

    The 15 newest archives are kept, as is any archive named by
    ``hook_data['current_archive_filename']`` or
    ``hook_data['old_archive_filename']``.
    """
    protected = set()
    for key in ('current_archive_filename', 'old_archive_filename'):
        if hook_data.get(key):
            protected.add(hook_data[key])
    # Sort in place from oldest to newest (same side effect as before).
    archives.sort(key=itemgetter('LastModified'), reverse=False)
    # Everything except the newest 15 entries is a pruning candidate.
    return [entry['Key'] for entry in archives[:-15]
            if entry['Key'] not in protected]
def tf_pad(tensor, paddings, mode):
    """
    Pads a tensor according to paddings.

    mode can be 'ZERO' or 'EDGE' (just use tf.pad for other modes).
    'EDGE' padding is achieved by repeated symmetric padding with all
    pads at most 1, which replicates the border values.

    Args:
        tensor (Tensor).
        paddings (list of list of non-negative ints).
        mode (str): 'ZERO' or 'EDGE'.

    Returns:
        Padded tensor.
    """
    remaining = np.array(paddings, dtype=int)
    assert np.all(remaining >= 0)
    # Peel off one layer of padding per iteration until none remains.
    while remaining.any():
        step = (remaining > 0).astype(int)
        remaining -= step
        step_t = tf.constant(step)
        if mode == 'ZERO':
            tensor = tf.pad(tensor, step_t, 'CONSTANT', constant_values=0)
        elif mode == 'EDGE':
            tensor = tf.pad(tensor, step_t, 'SYMMETRIC')
        else:
            raise Exception('pad type {} not recognized'.format(mode))
    return tensor
def compute_reference_gradient_siemens(duration_ms, bandwidth, csa=0):
    """Compute the reference gradient for exporting RF pulses to SIEMENS format.

    The reference gradient is the gradient for which a 1 cm slice is
    excited by a 5.12 ms pulse.  Demanding the product
    ``slice_thickness * gamma * gradient * duration`` be equal for the
    reference and the current pulse, and using
    ``gamma * grad * thickness = bandwidth``, gives::

        ref_grad [mT/m] = (duration_ms [ms] * bandwidth [kHz])
                          / (gamma [kHz/mT] * ref_thickness [m] * ref_duration [ms])

    Args:
        duration_ms: pulse duration in ms.
        bandwidth:   bandwidth of the current pulse in kHz.
        csa:         chemical-shift-artifact "immunity" in kHz (optional,
            default 0).  Different chemical shifts shift the excitation
            region; exciting a band widened by +-csa makes all shifts in
            that range feel the pulse equally, at the cost of a larger
            excited region.  E.g. csa=0.1 covers spins within +-100 Hz of
            resonance.  The effective bandwidth used is
            ``bandwidth - 2*csa``.

    Returns:
        Reference gradient in mT/m.
    """
    REF_SLICE_THICKNESS_M = 0.01   # 1 cm reference slice
    REF_DURATION_MS = 5.12         # reference pulse duration
    GAMMA_KHZ_PER_MT = 42.57       # gyromagnetic ratio
    effective_bw = bandwidth - 2 * csa
    return (effective_bw * duration_ms) / (
        GAMMA_KHZ_PER_MT * REF_SLICE_THICKNESS_M * REF_DURATION_MS)
def schedule_conv2d_NHWC_quantized_native(cfg, outs):
    """Interface for the native quantized NHWC conv2d schedule.

    :param cfg: autotuning configuration entity
    :param outs: output tensors of the conv2d compute
    :return: schedule from ``_schedule_conv2d_NHWC_quantized`` with the
        last flag False -- presumably "not interleaved"; confirm against
        the helper's signature.
    """
    return _schedule_conv2d_NHWC_quantized(cfg, outs, False) | 450b6fb914c8b0c971604319417d56c7148b5737 | 27,607 |
def calc_square_dist(a, b, norm=True):
    """
    Compute pairwise (squared) distances between two point sets.

    a: [bs, npoint, c]
    b: [bs, ndataset, c]
    Returns [bs, npoint, ndataset].  With ``norm`` True, the square root
    of the squared distance divided by the channel count is returned.
    """
    a_exp = tf.expand_dims(a, axis=2)   # [bs, npoint, 1, c]
    b_exp = tf.expand_dims(b, axis=1)   # [bs, 1, ndataset, c]
    a_sq = tf.reduce_sum(tf.square(a_exp), axis=-1)  # [bs, npoint, 1]
    b_sq = tf.reduce_sum(tf.square(b_exp), axis=-1)  # [bs, 1, ndataset]
    a = tf.squeeze(a_exp, axis=2)       # [bs, npoint, c]
    b = tf.squeeze(b_exp, axis=1)       # [bs, ndataset, c]
    cross = tf.matmul(a, tf.transpose(b, [0, 2, 1]))
    squared = a_sq + b_sq - 2 * cross   # [bs, npoint, ndataset]
    if norm:
        # NOTE(review): dividing the *root* by the channel count mirrors
        # the original implementation; kept verbatim.
        return tf.sqrt(squared) / tf.cast(tf.shape(a)[-1], tf.float32)
    return squared
def read_config(lines):
    """Read INI-style config lines into a nested dictionary.

    ``[section]`` lines start a new section; ``key=value`` lines are
    stored under the current section (keys and values are NOT stripped).
    Repeated keys within a section are collected into a list.  Blank
    lines and lines starting with ';' are ignored.

    :param lines: iterable of config lines
    :return: dict mapping section name -> {key: value or [values]}
    :raises ValueError: for a key/value line without '=', or a key/value
        line appearing before any section header (previously this crashed
        with an obscure ``KeyError: None``)
    """
    config = {}
    current_section = None
    for line in lines:
        line = line.strip()
        if not line or line.startswith(";"):
            continue
        if line.startswith("[") and line.endswith("]"):
            current_section = line[1:-1]
            config[current_section] = {}
            continue
        if "=" not in line:
            raise ValueError("No = in line: {}".format(line))
        if current_section is None:
            raise ValueError(
                "Key/value line before any [section]: {}".format(line))
        key, val = line.split("=", maxsplit=1)
        section = config[current_section]
        if key in section:
            old_val = section[key]
            if isinstance(old_val, list):
                old_val.append(val)
            else:
                section[key] = [old_val, val]
        else:
            section[key] = val
    return config
def get_cert_subject_hash(cert):
    """
    Get the hash value of the cert's subject DN
    :param cert: the certificate to get subject from
    :return: The hash value of the cert's subject DN
    :raises exception.SysinvException: when the hash cannot be computed
    """
    try:
        public_bytes = cert.public_bytes(encoding=serialization.Encoding.PEM)
        cert_c = crypto.load_certificate(crypto.FILETYPE_PEM, public_bytes)
        hash_subject = cert_c.get_subject().hash()
    except Exception:
        # Fix: Logger.exception() requires a message argument; calling it
        # bare raised a TypeError that masked the real failure.
        LOG.exception("Failed to get certificate subject hash.")
        raise exception.SysinvException(_(
            "Failed to get certificate subject hash."))
    return hash_subject
import requests
def icon_from_url(url: str):
    """
    Best-effort mapping from a game URL to a representing icon URL.

    User-configured overrides are checked first, then a table of
    known-good icons, and finally the site's /favicon.ico is probed.
    Returns None when no icon could be determined.
    """
    if not url:
        return
    # Allow the user to override icons in the configuration
    for fragment, icon in configuration.get("icons").items():
        if fragment in url:
            return icon
    # Try some known-good icons
    for fragment, icon in default_icon_urls.items():
        if fragment in url:
            return icon
    # Try the site's favicon
    parsed = urlparse(url)
    if parsed.netloc:
        favicon = f"{parsed.scheme}://{parsed.netloc}/favicon.ico"
        response = requests.get(favicon)
        try:
            response.raise_for_status()
            return favicon
        except requests.HTTPError:
            LOGGER.warning(
                "Invalid icon URL (return code %s): %s",
                response.status_code, favicon,
            )
        except requests.ConnectionError as err:
            LOGGER.warning("Error while connecting to %s: %s", favicon, err)
    return None
import numpy
def extract_spikes(hd5_file, neuron_num=0):
    """Extract spike timestamps for one neuron from an HDF5 file.

    Keyword arguments:
    neuron_num -- the index of the neuron you would like to access.

    Returns an array of spike times.
    """
    with h5py.File(hd5_file, "r+") as f:
        neurons = f['NF']['value']['neurons']['value']
        # Neuron group names are zero-padded to two digits once there are
        # more than ten neurons (e.g. "_07" instead of "_7").
        if len(neurons) <= 10:
            key = "_{}".format(neuron_num)
        else:
            key = "_{:02d}".format(neuron_num)
        return numpy.array(neurons[key]['value']['timestamps']['value'][0])
def problem_kinked(x):
    """Return sqrt(|x|) -- a function with a kink at zero."""
    return np.abs(x) ** 0.5
def read_dicom(filename):
    """Read DICOM file and convert it to a decent quality uint8 image.

    Parameters
    ----------
    filename: str
        Existing DICOM file filename.

    Returns
    -------
    tuple of (image ndarray, pixel spacing) on success, or None when the
    file cannot be read or lacks the required attributes.
    """
    try:
        data = dicom.read_file(filename)
        img = np.frombuffer(data.PixelData, dtype=np.uint16).copy()
        # MONOCHROME1 stores inverted intensities; flip to the usual sense.
        if data.PhotometricInterpretation == 'MONOCHROME1':
            img = img.max() - img
        img = img.reshape((data.Rows, data.Columns))
        return img, data.ImagerPixelSpacing[0]
    except Exception:
        # Narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt and SystemExit.
        return None
import logging
def _filter_all_warnings(record) -> bool:
"""Filter out credential error messages."""
if record.name.startswith("azure.identity") and record.levelno == logging.WARNING:
message = record.getMessage()
if ".get_token" in message:
return not message
return True | f16490ef39f9e3a63c791bddcba1c31176b925b7 | 27,615 |
import copy
def _to_minor_allele_frequency(genotype):
    """Return a deep copy of *genotype* re-coded so allele 1 is the minor allele.

    For every variant whose allele-1 frequency exceeds 0.5, the allele
    labels are swapped in the metadata, the frequency is flipped to
    ``1 - f``, and the dosage vector is mirrored as ``2 - dosage``.
    Use at your own risk.

    :param genotype: object exposing ``metadata`` (pandas-DataFrame-like)
        and ``variants`` (sequence of dosage vectors) -- TODO confirm the
        exact type against callers.
    :return: converted deep copy; the input object is left untouched.
    """
    g_ = copy.deepcopy(genotype)
    m_ = g_.metadata
    # Variants where allele 1 is currently the *major* allele.
    clause_ = m_.allele_1_frequency > 0.5
    F = MetadataTF
    # Swap allele labels for the flagged variants.
    m_.loc[clause_,[F.K_ALLELE_0, F.K_ALLELE_1]] = m_.loc[clause_,[F.K_ALLELE_1, F.K_ALLELE_0]].values
    m_.loc[clause_, F.K_ALLELE_1_FREQUENCY] = 1 - m_.loc[clause_, F.K_ALLELE_1_FREQUENCY].values
    variants = g_.variants
    for i,swap in enumerate(clause_):
        if swap:
            # Mirror dosages (0 <-> 2) to match the swapped alleles.
            variants[i] = 2 - variants[i]
    return g_ | 424bf40e29f103abea3dd30ee662c3c695545a10 | 27,616 |
def ioka(z=0, slope=950, std=None, spread_dist='normal'):
    """Calculate the contribution of the igm to the dispersion measure.

    Follows Ioka (2003) and Inoue (2004); the default slope sits between
    the Cordes and Petroff review values.

    Args:
        z (array): Redshifts.
        slope (float): Slope of the DM-z relationship.
        std (float): Spread around the DM-z relationship
            (defaults to 0.2*slope*z).
        spread_dist (str): Spread function option, one of
            ('normal', 'lognormal', 'log10normal').

    Returns:
        dm_igm (array): Dispersion measure of intergalactic medium [pc/cm^3]

    Raises:
        ValueError: for an unrecognised ``spread_dist``.
    """
    if std is None:
        std = 0.2 * slope * z
    mean = slope * z
    # Pick the sampling function for the requested spread distribution.
    if spread_dist == 'normal':
        sample = np.random.normal
    elif spread_dist == 'lognormal':
        sample = gd.lognormal
    elif spread_dist == 'log10normal':
        sample = gd.log10normal
    else:
        raise ValueError('spread_dist input not recognised')
    return sample(mean, std).astype(np.float32)
def nn_policy(state_input, policy_arch, dim_action, **kwargs):
    """
    Fully-connected agent policy network.

    Stacks tanh Dense layers of the sizes in ``policy_arch`` on top of
    ``state_input``, then adds a tanh action head of width ``dim_action``.
    Returns (action_out, last_hidden_layer_output).
    """
    with tf.variable_scope('policy_net', reuse=tf.AUTO_REUSE):
        hidden = state_input
        for idx, width in enumerate(policy_arch):
            hidden = layer.Dense(width, activation='tanh',  # dtype='float64',
                                 name="fc_{}".format(idx))(hidden)
        action_out = layer.Dense(dim_action,
                                 activation='tanh',  # dtype='float64',
                                 name="fc_action_out")(hidden)
    return action_out, hidden
def _check_geom(geom):
    """Coerce *geom* to a shapely geometry.

    Returns the geometry if it's a shapely geometry object. If it's a wkt
    string or a two-element coordinate list, convert it to a shapely
    geometry.

    :raises ValueError: for any other input (previously the function
        silently returned None here, hiding bad data).
    """
    if isinstance(geom, BaseGeometry):
        return geom
    elif isinstance(geom, str):  # assume it's a wkt
        return loads(geom)
    elif isinstance(geom, list) and len(geom) == 2:  # coordinates
        return Point(geom)
    raise ValueError("Unrecognized geometry input: {!r}".format(geom))
from typing import List
import glob
def read_csvs_of_program(program: str) -> List[pd.DataFrame]:
    """
    Collect the CSV benchmark files recorded for a given algorithm program.

    :param program: name of the program whose benchmarks should be retrieved
    :return: list of benchmark dataframes, one per matching CSV file
    """
    # Benchmarks live in the working directory as '<program>_*.csv'.
    return [
        pd.read_csv(path, sep=';', decimal='.', encoding='utf-8')
        for path in glob.glob(f'./{program}_*.csv')
    ]
from typing import List
from typing import Tuple
def print_best_metric_found(
        tuning_status: TuningStatus,
        metric_names: List[str],
        mode: str
) -> Tuple[int, float]:
    """
    Prints trial status summary and the best metric found.

    :param tuning_status: aggregated tuning statistics, exposing
        ``overall_metric_statistics`` and ``trial_metric_statistics``.
    :param metric_names: metric names; only the first one is summarised
        (multi-objective reporting would require the Pareto set).
    :param mode: 'min' selects the smallest value as best, anything else
        selects the largest.
    :return: trial-id and value of the best metric found, or ``None``
        (not a tuple) when no results have been reported yet.
    """
    if tuning_status.overall_metric_statistics.count == 0:
        # No results reported yet -- note the bare return yields None.
        return
    # only plot results of the best first metric for now in summary, plotting the optimal metrics for multiple
    # objectives would require to display the Pareto set.
    metric_name = metric_names[0]
    print("-" * 20)
    print(f"Resource summary (last result is reported):\n{str(tuning_status)}")
    if mode == 'min':
        # Trials missing the metric sort last thanks to the +/-inf defaults.
        metric_per_trial = [
            (trial_id, stats.min_metrics.get(metric_name, np.inf))
            for trial_id, stats in tuning_status.trial_metric_statistics.items()
        ]
        metric_per_trial = sorted(metric_per_trial, key=lambda x: x[1])
    else:
        metric_per_trial = [
            (trial_id, stats.max_metrics.get(metric_name, -np.inf))
            for trial_id, stats in tuning_status.trial_metric_statistics.items()
        ]
        metric_per_trial = sorted(metric_per_trial, key=lambda x: -x[1])
    best_trialid, best_metric = metric_per_trial[0]
    print(f"{metric_name}: best {best_metric} for trial-id {best_trialid}")
    print("-" * 20)
    return best_trialid, best_metric | e91c3222e66ded7ce3ab4ddcf52a7ae77fe84e9f | 27,621 |
def get_svn_revision(path = '.', branch = 'HEAD'):
    """ Returns the SVN revision associated with the specified path and git
    branch/tag/hash.

    :param path: working directory for the git invocation
    :param branch: git branch/tag/hash to inspect
    :return: the SVN revision as a string, or "None" when not found
    """
    svn_rev = "None"
    cmd = "git log --grep=^git-svn-id: -n 1 %s" % (branch)
    result = exec_cmd(cmd, path)
    if result['err'] == '':
        for line in result['out'].split('\n'):
            # Fix: ``find(...) > 0`` missed a match at column 0; use the
            # standard "found" test instead.
            if line.find("git-svn-id") != -1:
                svn_rev = line.split("@")[1].split()[0]
                break
    return svn_rev
def coerce_str_to_bool(val: t.Union[str, int, bool, None], strict: bool = False) -> bool:
    """
    Converts a given ``val`` into a boolean.

    :param val: any string representation of boolean
    :param strict: raise ``ValueError`` if ``val`` does not look like a
        boolean-like object
    :return: ``True`` if ``val`` is truthy, ``False`` otherwise.
    :raises ValueError: if ``strict`` specified and ``val`` got anything
        except ``['', 0, 1, true, false, on, off, True, False]``
    """
    # Strings are matched case-insensitively against the coercion map.
    lowered = val.lower() if isinstance(val, str) else val
    flag = ENV_STR_BOOL_COERCE_MAP.get(lowered, None)
    if flag is not None:
        return flag
    if strict:
        raise ValueError('Unsupported value for boolean flag: `%s`' % lowered)
    return bool(lowered)
import csv
def print_labels_from_csv(request):
    """
    Generates a PDF with labels from an uploaded CSV.

    Expects the CSV under the 'labels' upload; the first row is a header
    and is skipped.  Column 0 becomes the (upper-cased) name, the
    remaining columns form the address.  Without an upload, renders the
    upload form instead.
    """
    if not request.FILES:
        return render(request, 'print_labels_from_csv.html')
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename=labels-from-csv.pdf'
    canvas = Canvas(response, pagesize=(Roll.width, Roll.height))
    hoja = Roll(LogisticsLabel, canvas)
    if request.POST.get('96x30'):
        # Alternate 96x30 roll format requested by the form.
        canvas = Canvas(response, pagesize=(Roll96x30.width, Roll96x30.height))
        hoja = Roll96x30(LogisticsLabel96x30, canvas)
    iterator = hoja.iterator()
    label_list = csv.reader(request.FILES.get('labels'))
    next(label_list)  # skip header; ``.next()`` was Python 2 only
    for row in label_list:
        label = next(iterator)  # same Python 3 fix as above
        label.name = row[0].upper()
        label.address = '\n'.join(row[1:])
        label.draw()
    hoja.flush()
    canvas.save()
    return response
def get_ucs_node_list():
    """
    Return the RackHD nodes whose first OBM entry is the UCS OBM service.
    """
    api_data = fit_common.rackhdapi('/api/2.0/nodes')
    return [
        node for node in api_data['json']
        if node["obms"] != [] and node["obms"][0]["service"] == "ucs-obm-service"
    ]
import itertools
import platform
def grid_search(fn, grd, fmin=True, nproc=None):
    """Grid search for optimal parameters of a specified function.

    ``fn`` is evaluated at every point of the Cartesian product of the
    sample arrays in ``grd``.  If ``fn`` returns a single float, one
    optimum is located; if it returns a tuple of floats, each component
    is treated as a separate function on the grid and an optimum is
    located for each.  On every platform except Windows (where
    ``mp.Pool`` usage has some limitations) the evaluations run in
    parallel.

    **Warning:** this function will hang if ``fn`` uses :mod:`pyfftw`
    with multi-threading enabled (see pyFFTW issue #135).  When using the
    FFT functions in :mod:`sporco.linalg`, disable multi-threading via
    ``sporco.linalg.pyfftw_threads = 1``.

    Parameters
    ----------
    fn : function
        Takes a tuple of parameter values as argument; returns a float or
        a tuple of floats.
    grd : tuple of array_like
        One array of sample points per axis of the search grid.
    fmin : bool, optional (default True)
        Select minima when True, maxima otherwise.
    nproc : int or None, optional (default None)
        Number of parallel processes (None: number of CPUs).

    Returns
    -------
    sprm : ndarray
        Optimal parameter values on each axis (for multi-valued ``fn``,
        rows are parameters and columns are function values).
    sfvl : float or ndarray
        Optimum function value or values.
    fvmx : ndarray
        Function value(s) on the search grid.
    sidx : tuple of int or tuple of ndarray
        Indices of optimal values on the parameter grid.
    """
    selector = np.argmin if fmin else np.argmax
    grid_points = itertools.product(*grd)
    if platform.system() == 'Windows':
        # mp.Pool is problematic on Windows; evaluate serially.
        fval = list(map(fn, grid_points))
    else:
        if nproc is None:
            nproc = mp.cpu_count()
        pool = mp.Pool(processes=nproc)
        fval = pool.map(fn, grid_points)
        pool.close()
        pool.join()
    if isinstance(fval[0], (tuple, list, np.ndarray)):
        # Multi-valued fn: optimise each component independently.
        nfnv = len(fval[0])
        fvmx = np.reshape(fval, [a.size for a in grd] + [nfnv])
        sidx = np.unravel_index(
            selector(fvmx.reshape((-1, nfnv)), axis=0),
            fvmx.shape[0:-1]) + (np.array(range(nfnv)),)
        sprm = np.array([grd[k][sidx[k]] for k in range(len(grd))])
        sfvl = tuple(fvmx[sidx])
    else:
        fvmx = np.reshape(fval, [a.size for a in grd])
        sidx = np.unravel_index(selector(fvmx), fvmx.shape)
        sprm = np.array([grd[k][sidx[k]] for k in range(len(grd))])
        sfvl = fvmx[sidx]
    return sprm, sfvl, fvmx, sidx
def wysiwyg_form_fields(context):
    """Return activity data as field/value pairs for the context's app/model.

    Reads 'app_title' and 'entity_title' from the template context and
    looks up the wysiwyg config; a failed lookup yields None.
    """
    app_name = context['app_title']
    model_name = context['entity_title']
    try:
        return wysiwyg_config(app_name, model_name)
    except (KeyError, AttributeError):
        return None
def thetagrids(angles=None, labels=None, fmt=None, **kwargs):
    """
    Get or set the theta gridlines on the current polar plot.

    Call signatures::

        lines, labels = thetagrids()
        lines, labels = thetagrids(angles, labels=None, fmt=None, **kwargs)

    With no arguments, simply returns the current ``(lines, labels)``
    tuple; otherwise the labels are placed at the specified angles.

    Parameters
    ----------
    angles : tuple with floats, degrees
        The angles of the theta gridlines.
    labels : tuple with strings or None
        The labels to use at each radial gridline; the
        `.projections.polar.ThetaFormatter` is used if None.
    fmt : str or None
        Format string used in `matplotlib.ticker.FormatStrFormatter`,
        e.g. '%f'.  Note that the angle in radians will be used.

    Returns
    -------
    lines : list of `.lines.Line2D`
        The theta gridlines.
    labels : list of `.text.Text`
        The tick labels.

    Other Parameters
    ----------------
    **kwargs
        Optional `~.Text` properties for the labels.

    See Also
    --------
    .pyplot.rgrids
    .projections.polar.PolarAxes.set_thetagrids
    .Axis.get_gridlines
    .Axis.get_ticklabels

    Examples
    --------
    ::

        # set the locations of the angular gridlines
        lines, labels = thetagrids(range(45, 360, 90))

        # set the locations and labels of the angular gridlines
        lines, labels = thetagrids(range(45, 360, 90), ('NE', 'NW', 'SW', 'SE'))
    """
    ax = gca()
    if not isinstance(ax, PolarAxes):
        raise RuntimeError('thetagrids only defined for polar axes')
    # Pure getter when nothing was passed at all.
    if angles is None and labels is None and fmt is None and not kwargs:
        return ax.xaxis.get_ticklines(), ax.xaxis.get_ticklabels()
    return ax.set_thetagrids(angles, labels=labels, fmt=fmt, **kwargs)
def get_ordered_patterns(order, patterns):
    """
    Flatten the pattern dictionary into the order the patterns were added.

    Input:
    order: a list of pattern ids providing the order
    patterns: a dictionary of patterns
    Output: numpy string array with columns (site, 1-based order, pattern id)
    """
    rank_of = {pid: idx + 1 for idx, pid in enumerate(order)}
    rows = [
        [site.strip(), rank_of[pid], pid]
        for pid, loci in patterns.items()
        for data in loci.values()
        for site in data['sites']
    ]
    return np.array(rows, dtype=str)
def extract_pphour(file, fecha, llat, llon):
    """Extract hourly precipitation from a forecast dataset.

    Accumulated precipitation ('apcpsfc') is differenced between
    consecutive time steps over an 8-day window starting at the second
    timestamp, restricted to the lat/lon box (llat, llon).

    :param file: open dataset exposing ``variables['apcpsfc']`` --
        presumably a netCDF4 Dataset; confirm against callers.
    :param fecha: forecast date used to select times and coordinates
    :param llat: latitude bounds
    :param llon: longitude bounds (degrees; converted to [0, 360))
    :return: (lon, lat, hourly precipitation array, list of datetimes)
    """
    # NOTE(review): ``l_lat``/``l_lon`` are computed but never used below.
    l_lat = llat
    l_lon = np.array(llon) % 360
    i_lat, i_lon, lat, lon = get_index_lat(fecha, file, llat, llon)
    tiempos = get_index_time(file, fecha)
    # Window start: second timestamp; end: 8 days later at midnight.
    di = tiempos[1]
    di_f = (di + dt.timedelta(days=8)).replace(hour=0)
    i_t1 = [i for i in range(len(tiempos)) if tiempos[i] == di][0]
    i_t2 = [i for i in range(len(tiempos)) if tiempos[i] == di_f][0]
    # Create an auxiliary variable (NOTE(review): ``res`` is never used).
    res = np.empty([i_t2 - i_t1 + 1, len(lat), len(lon)])
    res[:] = np.nan
    fdates = [datef for datef in tiempos if datef>=di and datef<=di_f]
    # Hourly precipitation = difference of consecutive accumulated fields.
    pp1 = file.variables['apcpsfc'][i_t1:i_t2, i_lat[0]:i_lat[1]+1,
                                    i_lon[0]:i_lon[1]+1]
    pp2 = file.variables['apcpsfc'][i_t1+1:i_t2+1, i_lat[0]:i_lat[1]+1,
                                    i_lon[0]:i_lon[1]+1]
    pphour = pp2 - pp1
    return lon, lat, pphour, fdates | e5e2a2271e85a6d5a9f6fc4a189b1119f62d471b | 27,630 |
def bubble_sort2(array):
    """Classic bubble sort with early exit.

    Sorts ``array`` in place in ascending order and also returns it.
    A pass that performs no swap means the list is sorted and the loop
    stops early.
    """
    # Right endpoint of each pass shrinks from len-1 down to 1.
    end = len(array) - 1
    done = False
    while end > 0 and not done:
        done = True
        pos = 0
        # Bubble the largest remaining element to position ``end``.
        while pos < end:
            if array[pos] > array[pos + 1]:
                array[pos], array[pos + 1] = array[pos + 1], array[pos]
                done = False
            pos += 1
        end -= 1
    return array
def encriptar(texto):
    """Encrypt *texto* with a columnar grid transposition.

    Whitespace is removed, the text is written top-to-bottom and
    left-to-right into the smallest n x n grid that fits it, and the
    grid rows are then read back, joined by single spaces.

    :param texto: plaintext string
    :return: the encrypted text ("" for empty input)
    """
    # Remove spaces before laying the text out on the grid.
    texto = texto.replace(" ", "")
    # Smallest square grid able to hold the whole text.
    n = ceil(sqrt(len(texto)))
    grade = [["" for _ in range(n)] for _ in range(n)]
    # Fill column-major: down each column, then move right.
    for j in range(n):          # column
        for i in range(n):      # row
            try:
                elemento, texto = texto[0], texto[1:]
            except IndexError:
                # Text exhausted; leave the remaining cells empty.
                # (Narrowed from a bare ``except``.)
                elemento = ""
            grade[i][j] = elemento
    # Read the grid row by row to produce the ciphertext.
    return " ".join("".join(row) for row in grade)
from typing import Union
from typing import IO
import os
from typing import Optional
from typing import List
from typing import Type
from typing import Dict
def load_spn_json(f: Union[IO, os.PathLike, str], leaves: Optional[List[Type[Leaf]]] = None) -> Node:
    """
    Load SPN from file by using the JSON format.

    :param f: A file-like object or a filepath of the input JSON file.
    :param leaves: An optional list of custom leaf classes. Useful when dealing with user-defined leaves.
    :return: The loaded SPN with initialized ids for each node.
    :raises ValueError: If multiple custom leaf classes with the same name are defined.
    """
    # Set the default leaf classes map (built-in leaf distributions, keyed
    # by class name so the JSON can reference them).
    leaf_map: Dict[str, Type[Leaf]] = {
        cls.__name__: cls
        for cls in [
            Bernoulli, Categorical, Isotonic, Uniform, Gaussian, BinaryCLT
        ]
    }
    # Augment the leaf mapper dictionary, if custom leaf classes are defined
    if leaves is not None:
        for cls in leaves:
            name = cls.__name__
            if name in leaf_map:
                # Refuse ambiguous names rather than silently overriding.
                raise ValueError("Custom leaf class {} already defined".format(name))
            leaf_map[name] = cls
    # Load the NetworkX graph
    graph = load_digraph_json(f)
    # Convert the NetworkX graph to a SPN
    return digraph_to_spn(graph, leaf_map) | 05ba2aaf73fbb918d0290ed1f01aa08579d0082d | 27,633 |
from enum import Enum
def extend_enum(*inherited_enums: type[Enum]):
    """EXPERIMENTAL

    Join multiple enums into one.
    Modified version from: https://stackoverflow.com/a/64045773/14748231

    Used as a decorator factory: the decorated enum is rebuilt with the
    members of ``inherited_enums`` prepended; duplicate member names
    raise ``ValueError``.
    """
    # name -> value for every member that ends up in the result enum.
    joined_members = {}

    def _add_item_if_not_exist(item) -> None:
        # Record the member, refusing silent overrides of duplicate names.
        if item.name in joined_members:
            raise ValueError(f"{item.name} key already in joined_members")
        joined_members[item.name] = item.value

    def wrapper(applied_enum: type[Enum]):
        @wraps(applied_enum)
        def inner():
            # Members from the inherited enums first...
            for base in inherited_enums:
                for member in base:
                    _add_item_if_not_exist(member)
            # ...then the decorated enum's own members.
            for member in applied_enum:
                _add_item_if_not_exist(member)
            # Build the merged enum, keeping the decorated enum's identity.
            merged = Enum(applied_enum.__name__, joined_members)
            merged.__doc__ = applied_enum.__doc__
            return merged
        return inner()
    return wrapper
import requests
import csv
import io
def tsv_reader():
    """Fetch register-like data from the government-form-data TSV.

    Returns a ``csv.DictReader`` over the downloaded text, using the
    module-level ``url`` and ``sep`` settings.
    """
    response = requests.get(url=url)
    response.raise_for_status()
    return csv.DictReader(io.StringIO(response.text), delimiter=sep)
import time
def GetMonotime():
    """Return a monotonic-ish time function.

    Older tornado doesn't have monotime(); fall back to ``time.time`` to
    stay compatible.
    """
    return getattr(tornado.util, 'monotime_impl', time.time)
def test_multiplica():
    """
    Target function returns the sum of the element-wise product of two
    given vectors.  Expects the output to be an np.float64 object.
    """
    dict_1 = file_read.read_file('../../data/10mM_2,7-AQDS_1M_KOH_25mVs_0.5step_2.txt')
    data = file_read.data_frame(dict_1, 1)
    col_x1, col_x2 = baseline.split(data.Potential)
    col_y1, col_y2 = baseline.split(data.Current)
    a_val = baseline.multiplica(col_x1, col_y1)
    # Fix: ``isinstance(a_val == np.float64)`` called isinstance with a
    # single argument, raising TypeError before the check ever ran.
    assert isinstance(a_val, np.float64), (
        "Output should be float object, but function is returning {}".format(type(a_val)))
    b_val = np.multiply(col_x1, col_y1).sum()
    # Fix: the comparison was wrapped in a tuple literal; call it directly
    # and pass the message through err_msg.
    np.testing.assert_almost_equal(a_val, b_val, decimal=3,
                                   err_msg="Calculation is incorrect")
    return "Test Passed for multiplica function!"
def last_name_first(n):
    """Returns: copy of n in form 'last-name, first-name'

    Precondition: n string in form 'first-name last-name',
    n has only one space, separating first and last."""
    assert type(n) == str, str(n) + " is not a string"
    assert is_two_words(n), n + ' has the wrong form'
    # Split on the single separating space and swap the halves.
    first, _, last = n.partition(' ')
    return last + ', ' + first
import logging
def by_count(logger: logging.Logger, once_every: int) -> logging.Logger:
    """Return a logger-like wrapper that rate-limits by call count.

    The returned logger permits at most one emission per ``once_every``
    logging calls issued from the code line this function was called from.

    Usage example::

        for i in range(100):
            log_throttling.by_count(logger, once_every=10).info(
                "This line will only log values that are multiples of 10: %s", i
            )

    **Notes**:

    1. Throttling is configured per code line that calls this function;
       changing ``once_every`` for a line resets that line's counter.
    2. Throttling does not nest, e.g.
       ``log_throttling.by_time(log_throttling.by_count(logger, 10), 1).info("...")``
       simply ignores the nested ``by_count``.

    :param logger: the ``logging.Logger`` object to wrap; the return value
        can be used just like a normal logger.
    :param once_every: number of logging calls per single emitted record.
    :return: a throttled ``logging.Logger``-like object.
    """
    return _by_custom_strategy(logger, _ThrottlingByCount(once_every))
import os
import subprocess
import platform
def has_new_code(is_git):
    """Tests if there are any newer versions of files on the server.

    Args:
    - is_git: True if we are working in a git repository.
    """
    # Work from the repository root (parent of this script's directory).
    os.chdir(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    status_cmd = ['git', 'status'] if is_git else ['svn', 'st']
    results, _ = run_cmd(status_cmd)
    for line in results.split('\n'):
        if is_git:
            if ('Changes to be committed' in line or
                    'Changes not staged for commit:' in line):
                return True
        elif not line.strip().startswith('?') and line != '':
            return True
    if is_git:
        # A latest commit without git-svn-id info also counts as new code.
        p = subprocess.Popen(['git', 'log', '-1'],
                             stdout=subprocess.PIPE,
                             shell=(platform.system() == 'Windows'))
        output, _ = p.communicate()
        if find_git_info(output) is None:
            return True
    return False
def load_ps_label(frame_id):
    """
    :param frame_id: file name of pseudo label
    :return gt_box: loaded gt boxes (N, 9) [x, y, z, w, l, h, ry, label, scores]
    :raises ValueError: when no pseudo label exists for the frame
    """
    if frame_id not in PSEUDO_LABELS:
        raise ValueError('Cannot find pseudo label for frame: %s' % frame_id)
    return PSEUDO_LABELS[frame_id]['gt_boxes']
import json
async def execute(
    url_path: str,
    body: dict,
    codef,
    service_type: ServiceType,
) -> dict:
    """Issue one product API request against the CODEF endpoint.

    Intended to be called from the user-facing wrapper functions rather
    than invoked directly.

    :param url_path: request URL path, appended to the service domain
    :param body: POST request body (serialized to JSON before sending)
    :param codef: codef client instance holding credentials and session
    :param service_type: which CODEF service environment to target
    :return: decoded response payload
    """
    domain = get_codef_domain(service_type)
    client_id, client_secret = codef.get_client_info(service_type)
    # Ensure a valid access token exists before making the call.
    await set_token(client_id, client_secret, codef, service_type)
    # ensure_ascii=False keeps non-ASCII (e.g. Korean) text intact.
    payload = json.dumps(body, ensure_ascii=False)
    return await request_product(
        domain + url_path,
        codef.get_access_token(service_type),
        payload,
        codef.get_session(),
    )
def main() -> int:
    """Entry point: execute all tasks.

    Returns:
        int: process exit status, always 0 on success.
    """
    # Currently the only task is fetching the googletest sources.
    get_googletest()
    return 0
import asyncio
async def async_unload_entry(
    hass: HomeAssistantType,
    config_entry: config_entries.ConfigEntry,
) -> bool:
    """Unload Energosbyt Plus entry.

    Unloads every platform that was forwarded during setup and, only if
    all of them unload cleanly, removes the entry's cached state from
    ``hass.data`` and cancels its update listener.

    :param hass: Home Assistant core object.
    :param config_entry: the config entry being unloaded.
    :return: True when every forwarded platform unloaded successfully.
    """
    # Prefix used to tag every log line emitted for this entry.
    log_prefix = _make_log_prefix(config_entry, "setup")
    entry_id = config_entry.entry_id
    # Remove this entry's update delegators; their keys are the platform
    # domains that were forwarded on setup.
    update_delegators: UpdateDelegatorsDataType = hass.data[DATA_UPDATE_DELEGATORS].pop(
        entry_id
    )
    # Ask Home Assistant to unload each previously forwarded platform.
    tasks = [
        hass.config_entries.async_forward_entry_unload(config_entry, domain)
        for domain in update_delegators.keys()
    ]
    unload_ok = all(await asyncio.gather(*tasks))
    if unload_ok:
        # Only drop the remaining per-entry state after every platform
        # unloaded cleanly; then cancel the entry's update listener.
        hass.data[DATA_API_OBJECTS].pop(entry_id)
        hass.data[DATA_FINAL_CONFIG].pop(entry_id)
        cancel_listener = hass.data[DATA_UPDATE_LISTENERS].pop(entry_id)
        cancel_listener()
        _LOGGER.info(
            log_prefix
            + (
                "Интеграция выгружена"
                if IS_IN_RUSSIA
                else "Unloaded configuration entry"
            )
        )
    else:
        _LOGGER.warning(
            log_prefix
            + (
                "При выгрузке конфигурации произошла ошибка"
                if IS_IN_RUSSIA
                else "Failed to unload configuration entry"
            )
        )
    return unload_ok
def euler_vec(z, y, x, n):
    """
    Return an (n, 3, 3) tensor where each (3, 3) block is the Euler
    rotation with angles z, y, x. Each of z, y, x may be a scalar or a
    vector of length n. The result is the batched product
    Rx(x) @ Ry(y) @ Rz(z).
    """
    # Rotation about the x axis.
    cx, sx = np.cos(x), np.sin(x)
    rot_x = np.zeros((n, 3, 3), "d")
    rot_x[:, 0, 0] = 1
    rot_x[:, 1, 1] = cx
    rot_x[:, 2, 2] = cx
    rot_x[:, 1, 2] = -sx
    rot_x[:, 2, 1] = sx
    # Rotation about the y axis.
    cy, sy = np.cos(y), np.sin(y)
    rot_y = np.zeros((n, 3, 3), "d")
    rot_y[:, 0, 0] = cy
    rot_y[:, 2, 2] = cy
    rot_y[:, 1, 1] = 1
    rot_y[:, 0, 2] = sy
    rot_y[:, 2, 0] = -sy
    # Rotation about the z axis.
    cz, sz = np.cos(z), np.sin(z)
    rot_z = np.zeros((n, 3, 3), "d")
    rot_z[:, 0, 0] = cz
    rot_z[:, 1, 1] = cz
    rot_z[:, 0, 1] = -sz
    rot_z[:, 1, 0] = sz
    rot_z[:, 2, 2] = 1
    # Batched matrix multiplication (equivalent to einsum "ijk,ikl->ijl").
    return rot_x @ rot_y @ rot_z
def ReadBenderEllipse( filename, dataFrame=False, headerLine=None, useDefaultColumnNames=True ):
    """Read in an ellipse fit generated by Bender/Saglia code and store it
    in a dictionary (or, optionally, a ListDataFrame object). Columns are
    converted to 1-D numpy arrays.

    headerLine indicates which line contains the column titles (first line = 1,
    etc.); the actual data is assumed to start immediately after.
    Normally, the function attempts to locate the header line automatically
    (first line in file with same number of elements [excluding any initial "#"]
    as last line in file). The headerLine keyword is mainly useful for perverse
    situations (e.g., there is a line in the header that happens to have 12
    words in it).

    Because we (currently) don't know how the Bender code handles position
    angles, we don't attempt to "correct" the PA.

    Returns None (after printing a message) when the header or first data
    line cannot be located.
    """
    # Use a context manager so the file handle is always closed
    # (the original leaked it via open(filename).readlines()).
    with open(filename) as infile:
        lines = infile.readlines()
    nTotalLines = len(lines)
    lastLine = lines[-1]
    nCols = len(lastLine.split())
    # find the header line -- should be first line which has same number of elements
    # as the last line in the file
    if headerLine is None:
        headerString = None
        for i in range(nTotalLines):
            tmpLine = lines[i].lstrip("#")
            if len(tmpLine.split()) == nCols:
                headerString = tmpLine
                headerLineIndex = i
                break
        if headerString is None:
            print("Unable to find header line!\n")
            return None
    else:
        headerLineIndex = headerLine - 1
        headerString = lines[headerLineIndex]
    if useDefaultColumnNames:
        # Copy the module-level default list: the original aliased it and
        # then mutated it below (strip + append), so repeated calls kept
        # appending "r_eq" to the shared constant.
        colheaders = list(DEFAULT_BENDER_COLUMNS)
    else:
        colheaders = headerString.split()
    # get rid of excess space at end, if any
    colheaders[-1] = colheaders[-1].strip()
    # find first data line:
    firstDataLine = None
    for j in range(headerLineIndex + 1, nTotalLines):
        tmpLine = lines[j]
        if len(tmpLine.split()) == nCols:
            firstDataLine = j
            break
    if firstDataLine is None:
        print("Unable to find first data line!\n")
        return None
    dataLines = [line for line in lines[firstDataLine:] if line[0] != "#"]
    # Split each data line once up front (the original re-split every
    # line for every column, O(rows * cols^2)).
    splitData = [line.split() for line in dataLines]
    dataDict = {}
    for i in range(nCols):
        cname = colheaders[i]
        dataDict[cname] = np.array([ float(fields[i]) for fields in splitData ])
    # Derived column: equivalent radius computed from the fitted ellipse.
    dataDict["r_eq"] = EquivRadius(dataDict)
    colheaders.append("r_eq")
    # Convert to dataFrame, if requested:
    if dataFrame is True:
        frameList = [dataDict[cname] for cname in colheaders]
        result = du.ListDataFrame(frameList, colheaders)
        # add meta-data
        result.tableFile = filename
    else:
        result = dataDict
    return result
def getQuote(symbolStringCSV, detailFlag = 'ALL' ):
    """
    Return the live quote of one or many companies.

    symbolStringCSV <str> is a comma separated value of tickers
    detailFlag <'ALL' or 'INTRADAY'> specifies whether all data is returned or just a subset with intraday

    sample usage:
        getQuote('TVIX, GOOG', detailFlag = 'INTRADAY')
    """
    # Build the endpoint URL, then append the detail flag as a query arg.
    base_url = urlRoot().format('market', 'quote/' + symbolStringCSV)
    full_url = '{}?detailFlag={}'.format(base_url, detailFlag)
    return accessMethod(full_url)
def get_user_resources_permissions_dict(user, request, resource_types=None, resource_ids=None,
                                        inherit_groups_permissions=True, resolve_groups_permissions=False):
    # type: (models.User, Request, Optional[List[Str]], Optional[List[int]], bool, bool) -> ResourcePermissionMap
    """
    Creates a dictionary of resources ID with corresponding permissions of the user.

    .. seealso::
        :func:`regroup_permissions_by_resource`

    :param user: user for which to find resources permissions
    :param request: request with database session connection
    :param resource_types: filter the search query with only the specified resource types
    :param resource_ids: filter the search query to only the specified resource IDs
    :param inherit_groups_permissions:
        Whether to include group inherited permissions from user memberships or not.
        If ``False``, return only user-specific resource permissions.
        Otherwise, resolve inherited permissions using all groups the user is member of.
    :param resolve_groups_permissions: whether to combine corresponding user/group permissions into one or not.
    :return:
        Only resources which the user has permissions on, or including all :term:`Inherited Permissions`, according to
        :paramref:`inherit_groups_permissions` argument.
    :raises HTTPNotFound: when ``user`` is ``None``.
    """
    # Fail fast with a 404 when the user could not be resolved upstream.
    ax.verify_param(user, not_none=True, http_error=HTTPNotFound,
                    msg_on_fail=s.UserResourcePermissions_GET_NotFoundResponseSchema.description)
    # full list of user/groups permissions, filter afterwards according to flags
    res_perm_tuple_list = UserService.resources_with_possible_perms(
        user, resource_ids=resource_ids, resource_types=resource_types, db_session=request.db)
    # With neither inheritance nor resolution requested, keep only the
    # permissions assigned directly to the user (drop group-sourced ones).
    if not inherit_groups_permissions and not resolve_groups_permissions:
        res_perm_tuple_list = filter_user_permission(res_perm_tuple_list, user)
    return regroup_permissions_by_resource(res_perm_tuple_list, resolve=resolve_groups_permissions)
import json
import requests
def create_new_index(index_name: str):
    """
    Build the next version of an Elasticsearch index, step by step:
    bump the version in the index name, merge any pending settings
    updates, refresh the mapping, and (when ``REALLY`` is set) PUT the
    new index to the cluster.

    :param index_name: str: name of the current index version
    :return: new_index_name: str: name of the updated index
    """
    print("*" * 10)
    # derive the next version of the index name
    new_index_name = up_index_version(index_name)
    print(index_name, "->", new_index_name)
    # the new index is created with the old index's settings
    new_index_url = f"{ES_URL}/{new_index_name}"
    # read the settings of the existing index
    with open(f"{dir_name_settings}/{index_name}.json", "r+") as f:
        settings = json.load(f)
    # check whether there are settings changes to apply
    new_settings = get_new_settings()
    # "sort" cannot be carried over as-is; drop it before re-creating.
    settings["settings"]["index"].pop("sort", None)
    if new_settings:
        print(f"[INFO] settings update: {new_settings}")
        merge_settings(settings, new_settings)
        print("[INFO] settings updated")
    # persist the new settings for the index
    with open(f"{dir_name}/new_settings/{new_index_name}.json", "w") as f:
        json.dump(settings, f, **JSON_DUMP_PARAMS)
    # read the mapping of the existing index
    with open(f"{dir_name_mappings}/{index_name}.json", "r+") as f:
        mappings = json.load(f)
    # check for mapping updates BEFORE the PUT
    if RECOURSIVE_MAPPING_UPDATE:
        check_mappings_for_updates(mappings, index_name)
    # persist the new mapping for the index
    with open(f"{dir_name_new_mappings}/{new_index_name}.json", "w") as fw:
        json.dump(mappings, fw, **JSON_DUMP_PARAMS)
    # PUT the new settings and mapping
    if REALLY:
        print(f"request PUT {new_index_url} settings")
        resp = requests.put(new_index_url, json={**settings, **mappings})
        # NOTE(review): both branches print the same message; presumably
        # the success branch was meant to differ — confirm intent.
        if resp.status_code == 200:
            print(f"[WARN] {resp.text}")
        else:
            print(f"[WARN] {resp.text}")
    print("*" * 10)
    return new_index_name
def _parse_slice_str(slice_str):
"""Parses the given string as a multidimensional array slice and returns a
list of slice objects and integer indices."""
is_valid = False
if len(slice_str) > 2:
is_valid = slice_str[0] == "[" and slice_str[-1] == "]"
sliced_inds = []
if is_valid:
slice_str_list = [x.strip() for x in slice_str[1:-1].split(",")]
for s in slice_str_list:
parts = s.split(":")
if len(parts) > 3:
is_valid = False
break
if len(parts) == 1:
try:
sliced_inds.append(int(s))
except:
is_valid = False
break
else:
try:
start = int(parts[0]) if len(parts[0]) > 0 else None
stop = int(parts[1]) if len(parts[1]) > 0 else None
if len(parts) == 3:
step = int(parts[2]) if len(parts[2]) > 0 else None
else:
step = None
except:
is_valid = False
break
sliced_inds.append(slice(start, stop, step))
if not is_valid:
raise ValueError("Invalid slice specified: %s" % slice_str)
return sliced_inds | 6eb7a6b5d1dc2ee57e878b37be70e1e75d7d6ecc | 27,650 |
def AverageZComparison(x, y):
    """ Compare two sequences by the sum of their second and third
    elements (proportional to their average). Returns -1, 0 or 1 in the
    style of a classic cmp() function, for use with sorting. """
    left = x[1] + x[2]
    right = y[1] + y[2]
    # Boolean subtraction yields -1/0/1 without explicit branching.
    return (left > right) - (left < right)
def convblock(in_channels, out_channels, kernel_size, stride=1, padding=0, use_bn=True):
    """
    Build a Conv2d -> (optional BatchNorm2d) -> ReLU block, returned as a
    plain list of modules.
    """
    layers = [nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding)]
    if use_bn:
        layers.append(nn.BatchNorm2d(out_channels))
    # In-place ReLU, matching the original block.
    layers.append(nn.ReLU(True))
    return layers
def get_caffe_op_type(layer, input_channels=1, output_channels=1):
    """
    Gets the relevant Toolkit Enum for the corresponding Caffe layer stage type.
    :param layer:
        The particular layer field of the caffe Net msg that we want to discover the type.
    :param input_channels: number of input channels (used for the depthwise test).
    :param output_channels: number of output channels (used for the depthwise test).
    :return: StageType Enum

    Calls throw_error (does not return) for unsupported layer types.
    The predicate chain is order-sensitive: each layer is classified by
    the FIRST matching test below.
    """
    # NOTE(review): isConvolution is tested before isDepthwiseConvolution —
    # confirm depthwise layers are not plain "Convolution" types, otherwise
    # the depthwise branch near the end may be unreachable.
    if isConvolution(layer.type):
        return StageType.convolution
    if isFCL(layer.type):
        return StageType.fully_connected_layer
    if isSoftMax(layer.type):
        return StageType.soft_max
    if isPooling(layer.type):
        # Caffe PoolingParameter enum: 0=MAX, 1=AVE, 2=STOCHASTIC.
        pooling_type = layer.pooling_param.pool
        if pooling_type == 0:  # Max
            return StageType.max_pooling
        if pooling_type == 1:  # Average
            return StageType.average_pooling
        if pooling_type == 2:  # Stochastic
            throw_error(ErrorTable.StageTypeNotSupported, "Stochastic Pooling")
        return StageType.stochastic_pooling
    if isLRN(layer.type):
        return StageType.LRN
    if isEltwise(layer.type):
        # Eltwise operation enum: 0=PROD, 1=SUM (default), 2=MAX.
        # Bias layers are mapped onto element-wise sum.
        if layer.type == 'Bias':
            return StageType.eltwise_sum
        elif layer.eltwise_param.operation == 0:
            return StageType.eltwise_prod
        elif layer.eltwise_param.operation == 2:
            return StageType.eltwise_max
        else:
            return StageType.eltwise_sum
    if isBatchNorm(layer.type) or isScale(layer.type):
        return StageType.scale
    if isPReLU(layer.type):
        return StageType.prelu
    if isSigmoid(layer.type):
        return StageType.sigmoid
    if isTanH(layer.type):
        return StageType.tanh
    if isDeconvolution(layer.type):
        return StageType.deconvolution
    if isReshape(layer.type):
        return StageType.reshape
    if isFlatten(layer.type):
        return StageType.toplanemajor
    if isPower(layer.type):
        return StageType.power
    if isCrop(layer.type):
        return StageType.crop
    if isDepthwiseConvolution(layer, output_channels, input_channels):
        return StageType.depthwise_convolution
    if isPermute(layer.type):
        return StageType.permute
    if isNormalize(layer.type):
        return StageType.normalize
    if isPriorBox(layer.type):
        return StageType.prior_box
    if isDetectionOutput(layer.type):
        return StageType.detection_output
    # Nothing matched: report an unsupported stage type.
    throw_error(ErrorTable.StageTypeNotSupported, layer.type)
def comp_volumes(self):
    """Compute the Lamination volumes (Vlam, Vvent, Vslot, Vwind)

    Parameters
    ----------
    self : LamSlotWind
        A LamSlotWind object

    Returns
    -------
    V_dict: dict
        Lamination volume dictionnary (Vlam, Vvent, Vslot, Vwind) [m**3]
    """
    # Start from the parent class volumes (Vlam, Vvent, Vslot).
    V_dict = LamSlot.comp_volumes(self)
    Lf = self.comp_length()  # Include radial ventilation ducts
    # Winding volume = effective length * slot count * winding surface per slot.
    V_dict["Vwind"] = Lf * self.slot.Zs * self.slot.comp_surface_wind()
    return V_dict
def elliptical_orbit():
    """Draw an example of a planet with an elliptical orbit around its star.

    Returns the matplotlib Axes holding the drawing. Note that artists
    are added in back-to-front order (orbit, annotations, planet, star).
    """
    fig, axes = plt.subplots(1, 1)
    # The orbit itself: an ellipse centered at the origin.
    orbit = Ellipse(xy=(0, 0), width=2, height=1.5, facecolor='lightblue')
    axes.add_artist(orbit)
    # Line segment marking the semi-major axis from the edge to the center.
    axes.plot([-1, 0], [0, 0])
    axes.annotate(
        'semi-major axis',
        xy=(-0.5, 0),
        xytext=(-0.8, -0.2),
        arrowprops=dict(arrowstyle='wedge')
    )
    axes.annotate(
        'orbit center',
        xy=(0, 0),
        xytext=(-0.21, 0.115),
        arrowprops=dict(arrowstyle='wedge')
    )
    # Planet marker on the orbit and star marker at the center.
    plt.plot(
        [-.75], [0.5],
        marker='o', markersize=4,
        color='green', label='planet'
    )
    plt.plot(
        [0], [0],
        marker='o', markersize=10,
        color='orange', label='star'
    )
    # formatting
    plt.xlim(-1.25, 1.25)
    plt.ylim(-1.1, 0.75)
    plt.legend(loc='lower center', ncol=2)
    # remove axes
    axes.xaxis.set_visible(False)
    axes.yaxis.set_visible(False)
    # remove box around image
    for spine in axes.spines:
        axes.spines[spine].set_visible(False)
    return axes
from datetime import datetime
def get_date_range_for_date(date_str, interval):
    """
    Given a date string, parse it and derive a range based on the given
    interval. The interval is inclusive on the lower end, and exclusve on the
    higher end. For example, given a date str of 2019-03-10 and a 'month'
    interval, this will return a range of 2019-03-01 -- 2019-03-31.
    :param date_str: Any ISO date or partial date. 2019, 2019-03,
    2019-03-01, 2019-12-18 21:00:00
    :param interval: Any interval defined in
    globus_portal_framework.constants.FILTER_DATE_RANGES. Examples include:
    'year', 'month', 'day', 'hour'
    :return:
    A date range dict. Example:
    {
        'from': '2019-12-18 21:00:00'
        'to': '2019-12-18 21:00:01'
    }
    :raises GlobusPortalException: when ``interval`` is not a recognized
        filter constant.
    """
    dt = parse_date_filter(date_str)['datetime']
    # If filtering on a month or year, chop off the extra part of the
    # datetime so we don't accidentally search on the previous month
    # or next month
    day = datetime.timedelta(days=1)
    if interval == FILTER_SECOND:
        # NOTE(review): this yields a +/- 1 s window around the instant,
        # which is wider than the inclusive-lower/exclusive-upper contract
        # described above — confirm this is intentional.
        second = datetime.timedelta(seconds=1)
        from_d, to_d = dt - second, dt + second
    elif interval == FILTER_MINUTE:
        from_d = dt.replace(second=0)
        to_d = from_d + datetime.timedelta(seconds=59)
    elif interval == FILTER_HOUR:
        from_d = dt.replace(minute=0, second=0)
        to_d = from_d + datetime.timedelta(minutes=59, seconds=59)
    elif interval == FILTER_DAY:
        dt = dt.replace(hour=0, minute=0, second=0)
        from_d, to_d = dt, dt + day
    elif interval == FILTER_MONTH:
        # First day of this month through the last day of this month
        # (month increment wraps December into January of the next year).
        from_d = dt.replace(day=1, hour=0, minute=0, second=0)
        inc_month = 1 if dt.month == 12 else dt.month + 1
        inc_year = dt.year + 1 if inc_month == 1 else dt.year
        to_d = from_d.replace(month=inc_month, year=inc_year) - day
    elif interval == FILTER_YEAR:
        # NOTE(review): a fixed 365-day span ignores leap years, so the
        # range for a leap year ends one day early — confirm acceptable.
        dt = dt.replace(day=1, month=1, hour=0, minute=0, second=0)
        year = datetime.timedelta(days=365)
        from_d, to_d = dt, dt + year
    else:
        raise exc.GlobusPortalException('Invalid date type {}'
                                        ''.format(interval))
    # Globus search can handle any time format, so using the most precise will
    # work every time.
    dt_format_type = DATETIME_PARTIAL_FORMATS['time']
    return {
        'from': from_d.strftime(dt_format_type),
        'to': to_d.strftime(dt_format_type)
    }
def sigmaStarDFA(sigma=None):
    """
    Given a alphabet S returns the minimal DFA for S*: a single state that
    is both initial and final, with a self-loop on every symbol.

    :param sigma: set of symbols
    :return: DFA
    :raises ValueError: if no alphabet is given

    .. versionadded:: 1.2"""
    if sigma is None:
        # The original used a bare `raise` here, which outside an except
        # block only produces RuntimeError("No active exception to
        # re-raise"); raise an explicit, meaningful error instead.
        raise ValueError("sigmaStarDFA: an alphabet must be provided")
    d = DFA()
    d.setSigma(sigma)
    i = d.addState()
    d.setInitial(i)
    d.addFinal(i)
    # Self-loop on every alphabet symbol keeps the single state accepting.
    for a in d.Sigma:
        d.addTransition(i, a, i)
    return d
def feature_engineer(train, test, bureau, bureau_balance, credit_card_balance,
                     installments_payments, pos_cash_balance, previous_application):
    """
    This function read all the data from the competition and do manual feature engineer to it.

    For each auxiliary table it builds categorical count features and
    numeric aggregation features, merges them onto train/test by client id
    (SK_ID_CURR), drops columns with > 90% missing values and one column of
    every pair correlated above 0.8, and writes four CSV snapshots
    (train/test before and after correlation pruning) as a side effect.

    :param train: application-level training frame, keyed by SK_ID_CURR
    :param test: application-level test frame, keyed by SK_ID_CURR
    :param bureau: credit-bureau loans, keyed by SK_ID_CURR/SK_ID_BUREAU
    :param bureau_balance: monthly bureau records, keyed by SK_ID_BUREAU
    :param credit_card_balance: monthly credit-card records per client
    :param installments_payments: installment payment history per client
    :param pos_cash_balance: POS/cash loan monthly records per client
    :param previous_application: previous applications per client
    :return: (Dataframe) train
             (Datafarme) test
    """
    # --- Build count/aggregation features for each auxiliary table. ---
    bureau_counts = count_categorical(bureau, group_var='SK_ID_CURR', df_name='bureau')
    bureau_agg = agg_numeric(bureau.drop(columns=['SK_ID_BUREAU']), group_var='SK_ID_CURR', df_name='bureau')
    bureau_balance_counts = count_categorical(bureau_balance, group_var='SK_ID_BUREAU', df_name='bureau_balance')
    bureau_balance_agg = agg_numeric(bureau_balance, group_var='SK_ID_BUREAU', df_name='bureau_balance')
    credit_card_balance_counts = count_categorical(credit_card_balance,
                                                   group_var='SK_ID_CURR', df_name='credit_card_balance')
    credit_card_balance_agg = agg_numeric(credit_card_balance.drop(columns=['SK_ID_PREV']),
                                          group_var='SK_ID_CURR', df_name='credit_card_balance')
    # Reason: Installments_payments_counts table contains no object value.
    # installments_payments_counts = count_categorical(installments_payments,
    #                                                 group_var='SK_ID_CURR', df_name='installments_payments')
    installments_payments_agg = agg_numeric(installments_payments.drop(columns=['SK_ID_PREV']),
                                            group_var='SK_ID_CURR', df_name='installments_payments')
    pos_cash_balance_counts = count_categorical(pos_cash_balance, group_var='SK_ID_CURR', df_name='pos_cash_balance')
    pos_cash_balance_agg = agg_numeric(pos_cash_balance.drop(columns=['SK_ID_PREV']),
                                       group_var='SK_ID_CURR', df_name='pos_cash_balance')
    previous_application_counts = count_categorical(previous_application,
                                                    group_var='SK_ID_CURR', df_name='previous_application_counts')
    previous_application_agg = agg_numeric(previous_application.drop(columns=['SK_ID_PREV']),
                                           group_var='SK_ID_CURR', df_name='previous_application')
    # Dataframe grouped by the loan
    bureau_by_loan = bureau_balance_agg.merge(bureau_balance_counts,
                                              right_index=True, left_on='SK_ID_BUREAU', how='outer')
    # Merge to include the SK_ID_CURR
    bureau_by_loan = bureau[['SK_ID_BUREAU', 'SK_ID_CURR']].merge(bureau_by_loan, on='SK_ID_BUREAU', how='left')
    # Aggregate the stats for each client
    bureau_balance_by_client = agg_numeric(bureau_by_loan.drop(columns=['SK_ID_BUREAU']),
                                           group_var='SK_ID_CURR', df_name='client')
    original_features = list(train.columns)
    print('Original Number of Features: ', len(original_features))
    # --- Merge every engineered table onto the training frame. ---
    # TODO: We can also first deal with pos_cash_balance and credit card balance before merge.
    # Merge with the value counts of bureau
    train = train.merge(bureau_counts, on='SK_ID_CURR', how='left')
    # Merge with the stats of bureau
    train = train.merge(bureau_agg, on='SK_ID_CURR', how='left')
    # Merge with the monthly information grouped by client
    train = train.merge(bureau_balance_by_client, on='SK_ID_CURR', how='left')
    # Merge with credit card balance counts
    train = train.merge(credit_card_balance_counts, on='SK_ID_CURR', how='left')
    # Merge with credit card balance agg
    train = train.merge(credit_card_balance_agg, on='SK_ID_CURR', how='left')
    # Merge with installments payments agg
    train = train.merge(installments_payments_agg, on='SK_ID_CURR', how='left')
    # Merge with pos_cash_balance counts
    train = train.merge(pos_cash_balance_counts, on='SK_ID_CURR', how='left')
    # Merge with pos_cash_balance agg
    train = train.merge(pos_cash_balance_agg, on='SK_ID_CURR', how='left')
    # Merge with previous_application counts
    train = train.merge(previous_application_counts, on='SK_ID_CURR', how='left')
    # Merge with previous application agg
    train = train.merge(previous_application_agg, on='SK_ID_CURR', how='left')
    new_features = list(train.columns)
    print('Number of features using previous loans from other institutions data: ', len(new_features))
    missing_train = missing_values_table(train)
    missing_train_vars = list(missing_train.index[missing_train['% of Total Values'] > 90])
    # Test
    # --- Same merge sequence for the test frame. ---
    # Merge with the value counts of bureau
    test = test.merge(bureau_counts, on='SK_ID_CURR', how='left')
    # Merge with the stats of bureau
    test = test.merge(bureau_agg, on='SK_ID_CURR', how='left')
    # Merge with the monthly information grouped by client
    test = test.merge(bureau_balance_by_client, on='SK_ID_CURR', how='left')
    # Merge with credit card balance counts
    test = test.merge(credit_card_balance_counts, on='SK_ID_CURR', how='left')
    # Merge with credit card balance agg
    test = test.merge(credit_card_balance_agg, on='SK_ID_CURR', how='left')
    # Merge with installments payments agg
    test = test.merge(installments_payments_agg, on='SK_ID_CURR', how='left')
    # Merge with pos_cash_balance counts
    test = test.merge(pos_cash_balance_counts, on='SK_ID_CURR', how='left')
    # Merge with pos_cash_balance agg
    test = test.merge(pos_cash_balance_agg, on='SK_ID_CURR', how='left')
    # Merge with previous_application counts
    test = test.merge(previous_application_counts, on='SK_ID_CURR', how='left')
    # Merge with previous application agg
    test = test.merge(previous_application_agg, on='SK_ID_CURR', how='left')
    print('Shape of Training Data: ', train.shape)
    print('Shape of Testing Data: ', test.shape)
    train_labels = train['TARGET']
    # Align the dataframes, this will remove the 'TARGET' column
    train, test = train.align(test, join='inner', axis=1)
    train['TARGET'] = train_labels
    print('Training Data Shape: ', train.shape)
    print('Testing Data Shape ', test.shape)
    missing_test = missing_values_table(test)
    missing_test_vars = list(missing_test.index[missing_test['% of Total Values'] > 90])
    # NOTE(review): the next expression's result is unused.
    len(missing_test_vars)
    # Drop any column missing > 90% in either the train or test frame.
    missing_columns = list(set(missing_test_vars + missing_train_vars))
    print('There are %d columns with more than 90%% missing in either the training or testing data.'
          % len(missing_columns))
    # Drop the missing columns
    train = train.drop(columns=missing_columns)
    test = test.drop(columns=missing_columns)
    train.to_csv('train_all_raw.csv', index=False)
    test.to_csv('test_all_raw.csv', index=False)
    # Calculate all correlations in dataframe
    corrs = train.corr()
    corrs = corrs.sort_values('TARGET', ascending=False)
    # Set the threshold
    threshold = 0.8
    # Empty dictionary to hold correlated variables
    above_threshold_vars = {}
    # For each column, record the variables that are above the threshold
    for col in corrs:
        above_threshold_vars[col] = list(corrs.index[corrs[col] > threshold])
    # Track columns to remove and columns already examined
    cols_to_remove = []
    cols_seen = []
    cols_to_remove_paire = []
    # Iterate through columns and correlated columns
    for key, value in above_threshold_vars.items():
        # Keep track of columns already examined
        cols_seen.append(key)
        for x in value:
            if x == key:
                next
            else:
                # Only want to remove on in a pair
                if x not in cols_seen:
                    cols_to_remove.append(x)
                    cols_to_remove_paire.append(key)
    cols_to_remove = list(set(cols_to_remove))
    print('Number of columns to remove: ', len(cols_to_remove))
    train_corrs_removed = train.drop(columns=cols_to_remove)
    test_corrs_removed = test.drop(columns=cols_to_remove)
    print('Training Corrs Removed Shape: ', train_corrs_removed.shape)
    print('Test Corrs Removed ShapeL ', test_corrs_removed.shape)
    train_corrs_removed.to_csv('train_all_corrs_removed.csv', index=False)
    test_corrs_removed.to_csv('test_all_corrs_removed.csv', index=False)
    return train_corrs_removed, test_corrs_removed
from datetime import datetime
def timestamp_to_datetime(seconds, tz=None):
    """Return a timezone-aware datetime.datetime of `seconds` in UTC.

    :param seconds: timestamp relative to the epoch
    :param tz: timezone of the timestamp; defaults to UTC when omitted
    """
    source_tz = timezone.utc if tz is None else tz
    # Interpret the timestamp in the source timezone, then normalize to UTC.
    return datetime.fromtimestamp(seconds, source_tz).astimezone(timezone.utc)
import re
def initialize(plugins, exclude_regex=None, rootdir='.'):
    """Scans the entire codebase for high entropy strings, and returns a
    SecretsCollection object.

    :type plugins: tuple of detect_secrets.plugins.base.BasePlugin
    :param plugins: rules to initialize the SecretsCollection with.

    :type exclude_regex: str|None
    :type rootdir: str

    :rtype: SecretsCollection
    """
    collection = SecretsCollection(plugins, exclude_regex)
    tracked_files = _get_git_tracked_files(rootdir)
    # Nothing tracked by git means nothing to scan.
    if not tracked_files:
        return collection
    if exclude_regex:
        pattern = re.compile(exclude_regex, re.IGNORECASE)
        tracked_files = [f for f in tracked_files if not pattern.search(f)]
    for filename in tracked_files:
        collection.scan_file(filename)
    return collection
def decompose_job_id(job_id):
    """Thin wrapper around generic decompose_job_id to use our local SPACER.

    :param job_id: composed job identifier string to split
    :return: whatever ``utils_decompose_job_id`` yields for ``job_id``,
        using the module-level ``SPACER`` as the separator
    """
    return utils_decompose_job_id(job_id, spacer=SPACER)
from re import U
from re import T
def build(P , word_size , first_hidden_size , encoding_size) :
    """
    create entity and relation encoding

    Registers three weight matrices in the parameter store ``P`` and
    returns two closures (batched and single-vector) that encode a
    (left entity, relation, right entity) triplet into a fixed-size
    vector via two tanh hidden layers.

    NOTE(review): ``U`` and ``T`` are presumably a utils module and a
    theano-like tensor module (U.initial_weights, T.tanh, T.dot); the
    ``from re import U`` / ``from re import T`` lines above this function
    cannot supply them — confirm the intended imports.
    """
    # Left/right input projections: each sees a (word, relation) pair.
    P["W_word_left_input"] = U.initial_weights(2*word_size , first_hidden_size)
    P["W_word_right_input"] = U.initial_weights(2*word_size , first_hidden_size)
    # Final projection from the concatenated hidden pair to the encoding.
    P["W_encoding_output"] = U.initial_weights(2*first_hidden_size , encoding_size)
    def batched_triplet_encoding(e_left , relation , e_right) :
        # Inputs are (batch, word_size); concatenate along the feature axis.
        left_input = T.concatenate([e_left , relation] , axis=1) #batched version
        right_input = T.concatenate([relation , e_right] , axis=1) #batched version
        left_hidden = T.tanh(T.dot(left_input , P["W_word_left_input"]))
        right_hidden = T.tanh(T.dot(right_input , P["W_word_right_input"]))
        all_hidden = T.concatenate([left_hidden , right_hidden] , axis = 1) #batched version
        encoding = T.tanh(T.dot(all_hidden , P["W_encoding_output"]))
        return encoding
    def vector_triplet_encoding(e_left , relation , e_right) :
        # Same computation for single (word_size,) vectors: axis 0 concat.
        left_input = T.concatenate([e_left , relation] , axis=0) #batched version
        right_input = T.concatenate([relation , e_right] , axis=0) #batched version
        left_hidden = T.tanh(T.dot(left_input , P["W_word_left_input"]))
        right_hidden = T.tanh(T.dot(right_input , P["W_word_right_input"]))
        all_hidden = T.concatenate([left_hidden , right_hidden] , axis = 0) #batched version
        encoding = T.tanh(T.dot(all_hidden , P["W_encoding_output"]))
        return encoding
    return batched_triplet_encoding , vector_triplet_encoding
def resample_to_wet_dry_medians(ds, wet_month=None, dry_month=None, inplace=True):
    """
    Takes a xarray dataset/array and a list of wet, dry months which
    to resample to. An annualised wet and dry season median image for
    given wet, dry months will be created. For example: one wet, one
    dry image for 2018, one wet, one dry image for 2019, etc. Time
    dimension required, or error occurs.

    Parameters
    ----------
    ds: xarray dataset/array
        A dataset with x, y and time dims.
    wet_month : int or list
        An int or a list representing the month(s) that represent
        the wet season months. Example [1, 2, 3] for Jan, Feb,
        Mar.
    dry_month : int or list
        An int or a list representing the month(s) that represent
        the dry season months. Example [9, 10, 11] for Sep, Oct,
        Nov.
    inplace : bool
        Create a copy of the dataset in memory to preserve original
        outside of function. Default is True.

    Returns
    ----------
    ds : xarray dataset or array.
    """
    # notify
    print('Resampling dataset to annual wet and dry medians.')
    # check xr type, dims
    if not isinstance(ds, (xr.Dataset, xr.DataArray)):
        raise TypeError('Dataset not an xarray type.')
    elif 'time' not in list(ds.dims):
        raise ValueError('No time dimension in dataset.')
    # check wet, dry month if none given
    if wet_month is None or dry_month is None:
        raise ValueError('Must provide at least one wet and dry month.')
    # normalize wet/dry months to lists so ints are accepted too
    wet_months = wet_month if isinstance(wet_month, list) else [wet_month]
    dry_months = dry_month if isinstance(dry_month, list) else [dry_month]
    # create copy ds if not inplace
    if not inplace:
        ds = ds.copy(deep=True)
    # we need a dataset, try and convert from array
    was_da = False
    if isinstance(ds, xr.DataArray):
        try:
            was_da = True
            ds = ds.to_dataset(dim='variable')
        except:
            raise TypeError('Failed to convert xarray DataArray to Dataset.')
    # if dask, must compute for resample median
    # note, there seems to be a dask-resample bug where nan is returned
    # randomly when dask resampled. leaving this here in case bug occurs
    #was_dask = False
    #if bool(ds.chunks):
        #print('Dask detected, not supported here. Computing, please wait.')
        #was_dask = True
        #ds = ds.compute()
    # split into wet, dry
    ds_wet = ds.where(ds['time.month'].isin(wet_months), drop=True)
    ds_dry = ds.where(ds['time.month'].isin(dry_months), drop=True)
    # create month map
    month_map = {
        1: 'JAN',
        2: 'FEB',
        3: 'MAR',
        4: 'APR',
        5: 'MAY',
        6: 'JUN',
        7: 'JUL',
        8: 'AUG',
        9: 'SEP',
        10: 'OCT',
        11: 'NOV',
        12: 'DEC'
    }
    # get wet, dry start month as string. use the normalized lists here:
    # the original indexed wet_month[0]/dry_month[0], which raises
    # TypeError whenever an int is passed (allowed by the docstring).
    wet_start_month = month_map.get(wet_months[0])
    dry_start_month = month_map.get(dry_months[0])
    # resample wet, dry into annual wet, dry medians anchored at the
    # first month of each season
    ds_wet = ds_wet.resample(time='AS-' + wet_start_month).median(keep_attrs=True)
    ds_dry = ds_dry.resample(time='AS-' + dry_start_month).median(keep_attrs=True)
    # concat wet, dry datasets back together
    ds = xr.concat([ds_wet, ds_dry], dim='time').sortby('time')
    # if was dask, make dask again
    # related to above comment on dask-resample issue
    #if was_dask:
        #ds = ds.chunk({'time': 1})
    # notify and return
    print('Resampled dataset to annual wet and dry medians successfully.')
    if was_da:
        ds = ds.to_array()
    return ds
def validate_path_for_get_public_key(path: list, slip44_id: int) -> bool:
    """
    Checks if path has at least three hardened items and slip44 id matches.
    The path is allowed to have more than three items, but all the following
    items have to be non-hardened.
    """
    depth = len(path)
    # Paths shorter than 3 or longer than 5 components are rejected.
    if depth < 3 or depth > 5:
        return False
    # First two components: purpose 44' and the coin's SLIP-44 id, hardened.
    if path[0] != 44 | HARDENED or path[1] != slip44_id | HARDENED:
        return False
    # Account index: hardened, at most 20'.
    if not (HARDENED <= path[2] <= 20 | HARDENED):
        return False
    # Any remaining components must be non-hardened.
    return all(not is_hardened(item) for item in path[3:])
import base64
from datetime import datetime
def generate_result(pic_type, img_path, predicted_breed=''):
    """
    Generate a result of predicted dog breed for display in Jupyter Notebook.

    Args:
        pic_type (str): Type of picture, either 'dog', 'human' or 'error'
        img_path (str): Path to the image provided by the user
        predicted_breed (str): The breed that was predicted based on the image

    Returns:
        str: A URI of the base64 encoded result in HTML
    """
    # Read the image and base64-encode it for inline embedding.
    img_content = None
    with open(img_path, 'rb') as img:
        img_content = base64.b64encode(img.read())
    # NOTE(review): datetime.datetime.now() requires the `datetime`
    # *module* in scope; a `from datetime import datetime` import would
    # break this call — confirm the module-level import.
    ts = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Render the mako template; output_encoding='utf-8' makes render()
    # return bytes, which b64encode below expects.
    lookup = TemplateLookup(['templates/'], output_encoding='utf-8',
                            strict_undefined=True)
    template = lookup.get_template('dog.html.mako')
    raw_html = template.render(
        pic_type=pic_type,
        img_content=img_content.decode(),
        timestamp=ts,
        breed=predicted_breed
    )
    return f'data:text/html;base64,{base64.b64encode(raw_html).decode()}'
def concatSeriesMovies(dataFrame1, dataFrame2):
    """
    Concatenate two dataframes into one, re-numbering the index.

    Parameters:
    dataFrame1: pandas.DataFrame
        First dataframe to concatenate.
    dataFrame2: pandas.DataFrame
        Second dataframe to concatenate.

    Returns:
        pandas.DataFrame: a new dataframe with both inputs stacked and a
        fresh 0..n-1 index.
    """
    return pd.concat([dataFrame1, dataFrame2], ignore_index=True)
def Pattern7(s):
    """ Compute the correlator for this pattern:
        ↓ ○
        ↑ ↑
       and symmetry-equivalent patterns

    For every lattice position the products of four spin components arranged
    as the pattern above are accumulated, for 16 rotated/reflected/
    component-swapped variants of the pattern.

    Parameters:
        s: array of shape (3, H, W) -- three spin-component planes indexed
           s[0], s[1], s[2].  # assumes first axis is the component axis -- TODO confirm

    Returns:
        float: accumulated correlator value over the whole lattice.
    """
    res = 0.0
    # pad the two spatial axes by 2 on each side so shifted products near
    # the boundary stay in bounds (padded entries are zero and add nothing)
    s = np.pad(s, ((0, 0), (2, 2), (2, 2)))
    L = s.shape[-1]
    for i in range(L-2):
        for j in range(L-2):
            # 16 symmetry-equivalent four-site products per lattice site
            res += s[0, i, j] * s[0, i+1, j] * s[1, i+1, j+1] * s[2, i+2, j+1]
            res += s[0, i+1, j] * s[0, i+1, j+1] * s[1, i, j+1] * s[2, i, j+2]
            res += s[2, i, j] * s[1, i+1, j] * s[0, i+1, j+1] * s[0, i+2, j+1]
            res += s[2, i+1, j] * s[1, i+1, j+1] * s[0, i, j+1] * s[0, i, j+2]
            res += s[2, i, j] * s[1, i, j+1] * s[0, i+1, j+1] * s[0, i+1, j+2]
            res += s[0, i, j+1] * s[0, i+1, j+1] * s[1, i+1, j] * s[2, i+2, j]
            res += s[0, i, j] * s[0, i, j+1] * s[1, i+1, j+1] * s[2, i+1, j+2]
            res += s[2, i, j+1] * s[0, i+1, j] * s[1, i+1, j+1] * s[0, i+2, j]
            res += s[1, i, j] * s[1, i+1, j] * s[0, i+1, j+1] * s[2, i+2, j+1]
            res += s[1, i+1, j] * s[1, i+1, j+1] * s[0, i, j+1] * s[2, i, j+2]
            res += s[2, i, j] * s[0, i+1, j] * s[1, i+1, j+1] * s[1, i+2, j+1]
            res += s[2, i+1, j] * s[0, i+1, j+1] * s[1, i, j+1] * s[1, i, j+2]
            res += s[2, i, j] * s[0, i, j+1] * s[1, i+1, j+1] * s[1, i+1, j+2]
            res += s[1, i, j+1] * s[1, i+1, j+1] * s[0, i+1, j] * s[2, i+2, j]
            res += s[1, i, j] * s[1, i, j+1] * s[0, i+1, j+1] * s[2, i+1, j+2]
            res += s[2, i, j+1] * s[1, i+1, j] * s[0, i+1, j+1] * s[1, i+2, j]
    return res
def list_camera_ports():
    """
    Probe camera device ports with OpenCV and classify them.

    Ports are probed sequentially starting at 0 until more than three ports
    in total have failed to open.

    Returns:
        tuple: (available_ports, working_ports, non_working_ports) where
            working_ports opened and produced frames,
            available_ports opened but produced no frame, and
            non_working_ports could not be opened at all.
    """
    non_working_ports = []
    working_ports = []
    available_ports = []
    dev_port = 0
    while len(non_working_ports) <= 3:  # If there are more than 3 non working ports stop the testing.
        camera = cv2.VideoCapture(dev_port)
        if not camera.isOpened():
            non_working_ports.append(dev_port)
        else:
            is_reading, img = camera.read()
            w = camera.get(cv2.CAP_PROP_FRAME_WIDTH)
            h = camera.get(cv2.CAP_PROP_FRAME_HEIGHT)
            if is_reading:
                print("Port %s is working and reads images (%s x %s)" % (dev_port, w, h))
                working_ports.append(dev_port)
            else:
                available_ports.append(dev_port)
            # BUG FIX: release the device handle so the camera is not left
            # locked by this process after probing.
            camera.release()
        dev_port += 1
    return available_ports, working_ports, non_working_ports
from pathlib import Path
def parse_taxid_names(file_path):
    """
    Parse an NCBI names.dmp file into two lookup dictionaries.

    Parameters
    ----------
    file_path : str
        The path to the names.dmp file.

    Returns
    -------
    name2taxid : dict
        Maps every name variant (any name class) to its taxid.
    taxid2name : dict
        Maps each taxid to its scientific name.
    """
    name2taxid = {}
    taxid2name = {}
    names_path = Path(file_path)
    with names_path.open() as handle:
        for line_no, line in enumerate(handle, start=1):
            # progress indicator for very large files
            if line_no % 1000000 == 0:
                print('processing line', str(line_no))
            fields = [field.strip() for field in line.split('|')]
            name2taxid[fields[1]] = fields[0]
            if 'scientific name' in line:
                taxid2name[fields[0]] = fields[1]
    return name2taxid, taxid2name
def spec_to_in_spec_list(spec: np.array):
    """
    Split a full spectrogram into a list of sub-spectrograms of ``seq_len``
    frames each (the trailing remainder shorter than ``seq_len`` is dropped).

    :param spec: 2D numpy array spectrogram (frequency bins x time frames)
    :return: list of 2D numpy arrays, each ``seq_len`` frames wide
    """
    in_spec_list = []
    no_seq = int(np.floor(spec.shape[1] / seq_len))
    for n in range(no_seq):
        # BUG FIX: the slice width was hard-coded to 400 while the snippet
        # count above used seq_len; use seq_len for both so each snippet has
        # exactly seq_len columns and matches the computed count.
        in_spec_list.append(spec[:, n * seq_len:(n + 1) * seq_len])
    return in_spec_list
import os
def cache_trial_timing_bins(trials, trials_timing, bin_dur=.010, data_path='.'):
    """
    Build (and incrementally cache on disk) per-trial left/right event-count
    histograms over time bins of width ``bin_dur`` seconds.

    Parameters:
        trials: DataFrame of trials; must provide 'uid', 'dur', 'session',
            'idx', 'side', 'outcome' columns.
        trials_timing: DataFrame of per-event timing rows with 'time' and
            'side' (0 appears to mean left, 1 right -- TODO confirm) columns.
        bin_dur: histogram bin width in seconds.
        data_path: root directory containing the 'caches/density' pickle.

    Returns:
        DataFrame with one row per trial: trial metadata columns joined with
        'L_<t>' / 'R_<t>' bin-count columns (t = bin center in seconds).

    todo: reinclude a check for bilaterals?
    """
    cpath = os.path.join(data_path, 'caches', 'density')
    tt = trials_timing
    tt = add_uid(tt)
    ttgb = tt.groupby('uid')
    # load already cached trials and exclude from the current caching process all
    # trials that have been cached already
    if os.path.exists(cpath):
        ctrials = pd.read_pickle(cpath)
        trials = trials[~trials.uid.isin(ctrials.uid.unique())]
    else:
        ctrials = None
    if len(trials) > 0:
        # bin edges span the longest trial; column labels are bin centers
        maxdur = trials.dur.max()
        bins = np.arange(0, maxdur+1e-10, bin_dur)
        bin_labs = (bins[1:]+bins[:-1])/2
        bin_labs_l = ['L_{:0.3f}'.format(i) for i in bin_labs]
        bin_labs_r = ['R_{:0.3f}'.format(i) for i in bin_labs]
        bin_labs = bin_labs_l + bin_labs_r
        density = np.empty([len(trials), 2, len(bins)-1]) # trials x LR x bins
        print('Caching new trials...')
        for ti,tuid in enumerate(trials.uid.values):
            if ti%250==0: print(ti,'/',len(trials))
            tim = ttgb.get_group(tuid)
            assert all(tim.time >= 0)
            assert all(tim.time <= maxdur)
            # separate histograms for side==0 and side==1 events
            binl,_ = np.histogram(tim[tim.side==0].time, bins=bins)
            binr,_ = np.histogram(tim[tim.side==1].time, bins=bins)
            assert len(tim) == np.sum(binl) + np.sum(binr)
            density[ti, 0, :] = binl
            density[ti, 1, :] = binr
        # stack the L bins then R bins side by side: rows=trials, cols=L+R bins
        density = np.concatenate(density.transpose([1,0,2]), axis=1)
        density_df = pd.DataFrame(density, columns=list(bin_labs))
        # add trial info onto result
        df = trials[['session','idx','uid','dur','side','outcome']].reset_index(drop=True)
        result = df.join(density_df)
        # combine cached and new results
        result = pd.concat([ctrials, result])
        result = result.reset_index(drop=True)
        pd.to_pickle(result, cpath)
    else:
        result = ctrials
    return result
def dn_outfunc(e1, e2, W, V1=None, V2=None, b=None):
    """Apply a bilinear (Neural Tensor Network) transformation.

    Computes, per mini-batch row i,

    .. math::
        y_{il} = \\sum_{jk} e^1_{ij} e^2_{ik} W_{jkl} + \\
            \\sum_{j} e^1_{ij} V^1_{jl} + \\sum_{k} e^2_{ik} V^2_{kl} + b_{l}

    where the last three (linear) terms are included only when ``V1``,
    ``V2`` and ``b`` are all supplied.  Non-matrix inputs are treated with
    the leading axis as the batch axis and remaining axes flattened.

    Args:
        e1 (~chainer.Variable): Left input variable.
        e2 (~chainer.Variable): Right input variable.
        W (~chainer.Variable): Quadratic weight variable.
        V1 (~chainer.Variable): Left coefficient variable (optional).
        V2 (~chainer.Variable): Right coefficient variable (optional).
        b (~chainer.Variable): Bias variable (optional).

    Returns:
        ~chainer.Variable: Output variable.

    Raises:
        ValueError: if only some of ``V1``, ``V2``, ``b`` are given.

    See:
        `Reasoning With Neural Tensor Networks for Knowledge Base Completion
        <http://papers.nips.cc/paper/5028-reasoning-with-neural-tensor-
        networks-for-knowledge-base-completion>`_ [Socher+, NIPS2013].
    """
    linear_params = (V1, V2, b)
    if all(p is None for p in linear_params):
        # purely bilinear form
        return DN_outFunction()(e1, e2, W)
    if any(p is None for p in linear_params):
        raise ValueError('All coefficients and bias for bilinear() must '
                         'be None, if at least one of them is None.')
    return DN_outFunction()(e1, e2, W, V1, V2, b)
def action_can_be_queued(action_type):
    """
    Test whether the given action type supports queuing.

    Inputs: action_type, int
    Outputs: True if the action's argument spec contains a 'queued' argument
    """
    arg_specs = actions.RAW_FUNCTIONS[action_type].args
    return any(spec.name == 'queued' for spec in arg_specs)
def bench4(x):
    """A benchmark function for test purposes.

    f(x) = float(x) ** 2

    where x is a string.  It has a single minimum with f(x*) = 0 at x* = "0".
    This benchmark is used for checking support of categorical variables.
    """
    value = float(x[0])
    return value ** 2
def drop_columns_from_dataframe_if_all_elements_are_nan(df, elements_list=None):
    """ Drop (in place) every column whose elements are all NaN or contained
    in ``elements_list``; the modified dataframe is also returned.

    Takes two parameters:
    df: DataFrame to modify in place.
    elements_list: values treated as missing in addition to np.nan.
        Defaults to [''] (empty string).  BUG FIX: the previous mutable
        default argument ['', ''] (with a duplicate element) is replaced by
        None to avoid sharing one list object across calls.
    """
    if elements_list is None:
        elements_list = ['']
    # map listed sentinel values to NaN, then find all-null columns
    mask = df.applymap(lambda v: v if v not in elements_list else np.nan).apply(
        lambda col: col.isnull().all())
    columns_to_drop = df.columns[mask]
    df.drop(columns_to_drop, axis=1, inplace=True)
    return df
def drop_db(code, confirm_by_typing_db_code_again=None):
    """
    Delete a history database.

    Parameters
    ----------
    code : str, required
        the database code
    confirm_by_typing_db_code_again : str, required
        enter the db code again to confirm you want to drop the database, its config,
        and all its data

    Returns
    -------
    dict
        status message
    """
    response = houston.delete(
        "/history/databases/{0}".format(code),
        params={"confirm_by_typing_db_code_again": confirm_by_typing_db_code_again},
    )
    houston.raise_for_status_with_json(response)
    return response.json()
def _nex_group(seq, core, spatial, c_num=4, s_num=4):
    """Build spatial stream index

    Given per-entry sequence numbers and core/spatial-stream labels, find
    runs of equal sequence numbers containing exactly c_num * s_num entries,
    then reorder each run by (core, spatial) and reshape the indices to
    (runs, c_num, s_num).

    Args:
        seq: 1-D array of sequence numbers.
        core: 1-D array of core indices, aligned with seq.
        spatial: 1-D array of spatial-stream indices, aligned with seq.
        c_num: expected number of cores per group.
        s_num: expected number of spatial streams per core.

    Returns:
        np.ndarray of shape (n_groups, c_num, s_num): indices into the
        original arrays.
    """
    # step 1: locate the start of each run of identical sequence numbers and
    # keep only runs that contain exactly ant_num entries
    ant_num = c_num * s_num
    seq_diff = np.diff(seq)
    offset = np.where(seq_diff != 0)[0]
    offset = np.r_[0, offset + 1]
    count = np.diff(np.r_[offset, len(seq)])
    offset = offset[count == ant_num]
    offset = offset[:, None] + np.r_[:ant_num]
    # step 2: within each run, sort entries by the combined (core, spatial) key
    core = core[offset]
    spatial = spatial[offset]
    p = core * s_num + spatial
    p = np.argsort(p, axis=-1)
    offset = offset[:, :1] + p
    offset = offset.reshape(-1, c_num, s_num)
    return offset
import asyncio
async def test_async_init_timeout(circuit):
    """Verify that a slow poll function still initializes before init_timeout."""
    async def slow_three():
        await asyncio.sleep(0.1)
        return 3

    logger = TimeLogger('logger')
    out = edzed.ValuePoll(
        'out',
        func=slow_three,
        interval=10,            # irrelevant here, only init timing matters
        init_timeout=0.2,
        on_output=edzed.Event(logger),
        initdef='DEFAULT')
    asyncio.create_task(circuit.run_forever())
    await circuit.wait_init()
    await circuit.shutdown()
    # the polled value (3) must arrive at ~100 ms, before the 200 ms timeout
    logger.compare([(100, 3)])
def compute_diffraction(
    bundle,
    key,
    s,
    eta,
    L,
    nlebedev=74,
    nomega=12,
    mode="xray",
    form="raw",
    anisotropy="cos2",
    print_level=False,
):
    """Compute the I(s, eta) elastic scattering signal for a Bundle.

    See aimsprop/notes/ued for details on this property.

    Notes:
        * All frames for each initial condition (IC) in bundle should be aligned so
          that the transition dipole moment from S0 -> Sex at t=0 is on z. This
          is required for proper computation of anisotropy.
        * All frames should be weighted by geometric considerations at the IC
          (e.g., conformational wells, Wigner weights, etc), by the cross
          section for the optical transition at the IC (e.g., oscillator
          strength and excitation energy window), and by the frame weight due
          to non-adiabatic dynamics.

    Params:
        bundle (Bundle) - the Bundle object to compute the property for (modified in
            place)
        key (str) - the name of the property.
        s (np.ndarray) - list of scattering vector norms in Angstrom^-1. The
            relationship between s and theta (scattering angle) is given as,
            s = 4 pi / L * sin(theta / 2).
        eta (np.ndarray) - list of azimuthal scattering angles in radians.
        L (float) - effective wavelength of scattering particle (x-ray
            wavelength or UED deBroglie wavelength) in Angstrom. Used to
            convert through scattering angle theta.
        nlebedev (int) - Lebedev number to use for solid angle orientation
            quadrature.
        nomega (int) - number of uniform quadrature points to use for plane
            orientation quadrature.
        mode (str) - 'xray' or 'ued' for selection of form factors
        form (str) - 'raw' or 'mod' for modified/raw diffraction intensities
            I(s) or M(s).
        anisotropy (str) - 'none' or 'cos2' for isotropic of cos^2 (z)
            anisotropty.
        print_level (bool) - print progress if true (useful to track long
            property computations)
    Result/Return:
        bundle - reference to the input Bundle object. The computed I(s, eta)
            array is stored in each frame's ``properties[key]``.
    """
    # Validity checks
    if mode not in ["xray", "ued"]:
        raise ValueError("Unknown mode: %s" % mode)
    if form not in ["raw", "mod"]:
        raise ValueError("Unknown form: %s" % form)
    if anisotropy not in ["none", "cos2"]:
        raise ValueError("Unknown anisotropy: %s" % anisotropy)
    # Compute scattering angles via Bragg equation
    theta = 2.0 * np.arcsin(s * L / (4.0 * np.pi))
    tt, ee = np.meshgrid(theta, eta, indexing="ij")
    ss, ee = np.meshgrid(s, eta, indexing="ij")
    # Compute scattering vectors (Cartesian components on the detector grid)
    sx = ss * np.cos(tt / 2.0) * np.sin(ee)
    sy = ss * np.sin(tt / 2.0)
    sz = ss * np.cos(tt / 2.0) * np.cos(ee)
    # Get a rotation quadrature for the orientations of the frames
    if nlebedev == 1 and nomega == 1:
        # Fixed orientation
        Rs = [np.eye(3)]
        ws = [1.0]
    else:
        # Rotation quadrature
        Rs, ws = rotation.rotation_quadrature(nlebedev=nlebedev, nomega=nomega)
    # Get atomic form factors for appropriate x-ray/ued mode
    factors = formfactor.AtomicFormFactor.build_factors(bundle.frames[0], mode=mode)
    # Compute atomic scattering Iat (incoherent single-atom reference signal)
    D = np.zeros_like(sx)
    for A, factor in enumerate(factors):
        F = factor.evaluate_N(qx=sx, qy=sy, qz=sz, x=0.0, y=0.0, z=0.0)
        D += (np.abs(F) ** 2).real
    # Compute IAM scattering, integrating over all orientation angles
    for find, frame in enumerate(bundle.frames):
        if print_level:
            print(("Frame %5d of %5d" % (find, len(bundle.frames))))
        I = np.zeros_like(sx)
        for R, w in zip(Rs, ws):
            # cos(z)^2 pump anisotropy
            cos2 = R[2, 2] ** 2 if anisotropy == "cos2" else 1.0
            # Rotated molecule
            xyz = np.dot(frame.xyz, R)
            # Compute diffraction (coherent sum of per-atom amplitudes)
            N = np.zeros_like(I, dtype=complex)
            for A, factor in enumerate(factors):
                x = xyz[A, 0]
                y = xyz[A, 1]
                z = xyz[A, 2]
                N += factor.evaluate_N(qx=sx, qy=sy, qz=sz, x=x, y=y, z=z)
            F = (np.abs(N) ** 2).real
            if form == "mod":
                F = (F - D) / D
            I += w * cos2 * F
        frame.properties[key] = I
    return bundle
def convert_dict_id_values_to_strings(dict_list):
    """This function ensures that the ``id`` keys in a list of dictionaries use string values.

    :param dict_list: List (or tuple) of dictionaries (or a single dictionary) containing API object data
    :type dict_list: list, tuple, dict, None
    :returns: A new dictionary list with properly formatted ``id`` values (empty list for ``None`` input)
    :raises: :py:exc:`TypeError`
    """
    # BUG FIX: the docstring accepts None, but iterating None raised an
    # unhelpful "'NoneType' object is not iterable"; treat it as empty input.
    if dict_list is None:
        return []
    dict_list = [dict_list] if isinstance(dict_list, dict) else dict_list
    new_dict_list = []
    for single_dict in dict_list:
        if not isinstance(single_dict, dict):
            raise TypeError("The 'dict_list' argument must be a dictionary or a list of dictionaries.")
        if 'id' in single_dict and not isinstance(single_dict.get('id'), str):
            single_dict['id'] = str(single_dict.get('id'))
        new_dict_list.append(single_dict)
    return new_dict_list
def delete_images(request):
    """
    Deletes images which are passed via HTTP query.
    """
    image_pks = request.POST.getlist("images")
    Image.objects.filter(pk__in=image_pks).delete()
    return HttpResponseRedirect(reverse("lfs_manage_global_images"))
import fnmatch
def is_requirements_file(location):
    """
    Return True if the ``location`` is likely for a pip requirements file.

    For example::

    >>> is_requirements_file('dev-requirements.txt')
    True
    >>> is_requirements_file('requirements.txt')
    True
    >>> is_requirements_file('requirements.in')
    True
    >>> is_requirements_file('requirements.pip')
    True
    >>> is_requirements_file('requirements-dev.txt')
    True
    >>> is_requirements_file('some-requirements-dev.txt')
    True
    >>> is_requirements_file('reqs.txt')
    False
    >>> is_requirements_file('requires.txt')
    True
    """
    patterns = (
        '*requirements*.txt',
        '*requirements*.pip',
        '*requirements*.in',
        'requires.txt',
    )
    base_name = fileutils.file_name(location)
    for pattern in patterns:
        if fnmatch.fnmatchcase(base_name, pattern):
            return True
    return False
def bisect_steps_remaining():
    """Estimate of remaining steps, including the current one.

    This is an approximation."""
    # https://github.com/git/git/blob/566a1439f6f56c2171b8853ddbca0ad3f5098770/bisect.c#L1043
    revisions = bisect_revisions()
    return floor(log(revisions, 2))
def create_lstm_model(fingerprint_input, model_settings, model_size_info,
                      is_training):
  """Builds a model with a lstm layer (with output projection layer and
     peep-hole connections)

  Based on model described in https://arxiv.org/abs/1705.02411

  Args:
    fingerprint_input: 2-D input tensor of flattened audio features
      (batch x time*frequency).
    model_settings: dict providing 'dct_coefficient_count',
      'spectrogram_length' and 'label_count'.
    model_size_info: [projection size, memory cells in LSTM]
    is_training: when True, additionally returns the dropout placeholder.

  Returns:
    logits tensor, plus the dropout_prob placeholder when is_training.
  """
  if is_training:
    dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')
  input_frequency_size = model_settings['dct_coefficient_count']
  input_time_size = model_settings['spectrogram_length']
  # reshape flat features back into (batch, time, frequency) for the RNN
  fingerprint_4d = tf.reshape(fingerprint_input,
                              [-1, input_time_size, input_frequency_size])
  num_classes = model_settings['label_count']
  projection_units = model_size_info[0]
  LSTM_units = model_size_info[1]
  with tf.name_scope('LSTM-Layer'):
    with tf.variable_scope("lstm"):
      lstmcell = tf.contrib.rnn.LSTMCell(LSTM_units, use_peepholes=True,
                                         num_proj=projection_units)
      _, last = tf.nn.dynamic_rnn(cell=lstmcell, inputs=fingerprint_4d,
                                  dtype=tf.float32)
      # use the final LSTM state as the utterance embedding
      flow = last[-1]
  with tf.name_scope('Output-Layer'):
    W_o = tf.get_variable('W_o', shape=[projection_units, num_classes],
                          initializer=tf.contrib.layers.xavier_initializer())
    b_o = tf.get_variable('b_o', shape=[num_classes])
    logits = tf.matmul(flow, W_o) + b_o
  if is_training:
    return logits, dropout_prob
  else:
    return logits
def tile(x: Tensor, count: int, dim=0) -> Tensor:
    """
    Tile ``x`` ``count`` times along dimension ``dim``, repeating each slice
    consecutively.  From OpenNMT; used for beam search.  An (h, c) state
    tuple is tiled element-wise.

    :param x: tensor (or tuple of two tensors) to tile
    :param count: number of repetitions
    :param dim: dimension along which the tensor is tiled
    :return: tiled tensor (or tuple of tiled tensors)
    """
    if isinstance(x, tuple):
        hidden, cell = x
        return tile(hidden, count, dim=dim), tile(cell, count, dim=dim)

    # bring `dim` to the front so the repeat logic only deals with axis 0
    perm = list(range(x.dim()))
    if dim != 0:
        perm[0], perm[dim] = perm[dim], perm[0]
    x = x.permute(perm).contiguous()
    new_size = list(x.size())
    new_size[0] *= count
    lead = x.size(0)
    x = (x.view(lead, -1)
          .transpose(0, 1)
          .repeat(count, 1)
          .transpose(0, 1)
          .contiguous()
          .view(*new_size))
    if dim != 0:
        # undo the initial axis swap (perm is its own inverse)
        x = x.permute(perm).contiguous()
    return x
from typing import Callable
from typing import Coroutine
from typing import Any
from typing import Optional
import asyncio
def cancellable_request(handler: Callable[..., Coroutine[Any, Any, Optional[Any]]]):
    """this decorator periodically checks if the client disconnected and then will cancel the request and return a 499 code (a la nginx).

    Usage:

    decorate the cancellable route and add request: Request as an argument

    @cancellable_request
    async def route(
        _request: Request,
        ...
    )

    The wrapped route MUST declare a keyword argument named ``_request``;
    the decorator reads it to watch for client disconnects.
    """

    @wraps(handler)
    async def decorator(*args, **kwargs) -> Optional[Any]:
        request = kwargs["_request"]
        # run the real handler and, concurrently, a watchdog that cancels the
        # handler task as soon as the client disconnects
        handler_task = asyncio.create_task(
            handler(*args, **kwargs), name="cancellable_request/handler"
        )
        auto_cancel_task = asyncio.create_task(
            _cancel_task_if_client_disconnected(request, handler_task),
            name="cancellable_request/auto_cancel",
        )
        try:
            return await handler_task
        except CancelledError:
            # handler was cancelled by the watchdog: reply with nginx-style 499
            logger.warning(
                "request %s was cancelled by client %s!", request.url, request.client
            )
            return Response("Oh No!", status_code=499)
        finally:
            # always stop the watchdog, whether the handler finished or not
            auto_cancel_task.cancel()

    return decorator
import time
def add_central_server_member_delete_global_error_cert(case,
                                                       client, ss2_host,
                                                       ss2_username, ss2_password):
    """
    Returns a UI test-step function that adds a new X-Road member in the
    central server, waits for global configuration synchronization, then logs
    into security server 2 and deletes the certificate showing a "global
    error" status.

    :param case: test case / MainController object driving the browser
    :param client: dict - client (member) info to add in the central server
    :param ss2_host: str - security server 2 host
    :param ss2_username: str - security server 2 username
    :param ss2_password: str - security server 2 password
    :return: function performing the steps described above
    """
    self = case
    # seconds to wait for the central server -> security server global
    # configuration synchronization
    sync_timeout = 120
    def add_cs_member_delete_global_error_cert():
        self.log('Open members page')
        self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar.MEMBERS_CSS).click()
        # MEMBER_10 Add new X-Road Member
        self.log('MEMBER_10 Add new X-Road Member')
        add_member_to_cs(self, member=client)
        self.log('Wait until servers have synchronized')
        time.sleep(sync_timeout)
        self.log('Open security server, which was deleted from central server')
        self.reload_webdriver(url=ss2_host, username=ss2_username, password=ss2_password)
        self.log('Open keys and certificates')
        self.wait_until_visible(type=By.CSS_SELECTOR, element=sidebar.KEYSANDCERTIFICATES_BTN_CSS).click()
        self.wait_until_visible(type=By.ID, element=keys_and_certificates_table.KEYS_AND_CERTIFICATES_TABLE_ID)
        self.wait_jquery()
        self.log('Click on the certificate, which has global error status and delete it')
        self.by_xpath(keys_and_certificates_table.GLOBAL_ERROR_CERTIFICATE_ROW_XPATH).click()
        self.by_id(keys_and_certificates_table.DELETE_BTN_ID).click()
        popups.confirm_dialog_click(self)
        self.wait_jquery()
    return add_cs_member_delete_global_error_cert
def get_country_models(model):
    """
    Get all valid domain-specific models for a given model.

    :param str model: base model identifier
    :return: list of '<model><domain>-K9' strings, one per valid domain
    """
    result = []
    for domain in get_domain_for(model, country=None):
        result.append('{}{}-K9'.format(model, domain))
    return result
import os
def FileExtensionMatch(filePath, supportedFileTypeList):
    """
    Return True when the extension of ``filePath`` appears in
    ``supportedFileTypeList``.

    Parameters
    ----------
    filePath : string
        File path
    supportedFileTypeList : list
        List of supported file extensions (including the leading dot)
    """
    _, extension = os.path.splitext(filePath)
    return extension in supportedFileTypeList
def _cprint_bad_contrast3(fgcolor, bgcolor, bold, underlined):
    """Returns 1 if one of the conditions of poor contrast is matched """
    # black (8) foreground on black (0) background with a LIGHT BG configured
    black_on_black_light = (
        fgcolor == 8 and bgcolor == 0 and CPRINT_PAR["light_background"]
    )
    return 1 if black_on_black_light else 0
def download_from_mongo(context, sel_filter, projection):
    """
    Download panda DataFrame from a mongoDB server

    :param context: execution context providing resources.mongo_warehouse and a logger
    :param sel_filter: a SON object specifying elements which must be present for a document to be included in the
                      result set
    :param projection: a list of field names that should be returned in the result set or a dict specifying the fields
                       to include or exclude. If projection is a list “_id” will always be returned.
                       Use a dict to exclude fields from the result (e.g. projection={‘_id’: False}).
    :return: panda DataFrame, or None when no connection could be obtained
    :rtype: panda.DataFrame
    """
    df = None
    client = context.resources.mongo_warehouse.get_connection(context)
    if client is not None:
        # get database collection
        collection = client.get_collection()
        # retrieve a cursor for required records
        # https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find
        context.log.info(f'Document retrieval in progress')
        cursor = collection.find(filter=sel_filter, projection=projection)
        # materialize the cursor before building the DataFrame
        entries = list(cursor)
        context.log.info(f'{len(entries)} documents retrieved')
        context.log.info(f'DataFrame loading in progress')
        df = pd.DataFrame.from_dict(entries)
        # tidy up
        cursor.close()
        client.close_connection()
        context.log.info(f'Loaded {len(df)} records')
    return df
def create_group(current_session, groupname, description):
    """
    Creates a group and returns it
    """
    new_group = gp.create_group(current_session, groupname, description)
    return new_group
import requests
def call_api(endpoint):
    """
    Build the API URL and request data.

    :param str endpoint: specific api endpoint to hit
    :return response: server's response to the request, passed through
        handle_response (decoded JSON, decoded text, or the raised exception)
    """
    url = BASE_URL + endpoint
    try:
        # BUG FIX: fetch once and reuse; the previous code issued a second
        # network request when the JSON decode of the first one failed.
        resp = requests.get(url)
        try:
            response = resp.json()
        except ValueError:  # not JSON; fall back to decoded text
            response = resp.content.decode('utf-8')
    except Exception as e:
        response = e
    return handle_response(response)
import inspect
def test_callable_args(func, args):
    """
    Return True when this function can be called with the given arguments.

    :param func: callable to probe
    :param args: positional arguments (list or tuple) to test against
    """
    assert isinstance(args, (list, tuple))

    # inspect.signature only exists on Python 3; probe for it to choose path
    signature = getattr(inspect, 'signature', None)
    if signature is not None:
        # For Python 3, use inspect.signature.
        # NOTE(review): `_signatures_cache` is assumed to be a module-level
        # dict defined elsewhere in this file -- confirm.
        try:
            sig = _signatures_cache[func]
        except KeyError:
            sig = signature(func)
            _signatures_cache[func] = sig
        try:
            # bind() raises TypeError exactly when the call would be invalid
            sig.bind(*args)
        except TypeError:
            return False
        else:
            return True
    else:
        # For older Python versions, fall back to using getargspec.
        spec = inspect.getargspec(func)

        # Drop the 'self'
        def drop_self(spec):
            args, varargs, varkw, defaults = spec
            if args[0:1] == ['self']:
                args = args[1:]
            return inspect.ArgSpec(args, varargs, varkw, defaults)

        spec = drop_self(spec)

        # When taking *args, always return True.
        if spec.varargs is not None:
            return True

        # Test whether the given amount of args is between the min and max
        # accepted argument counts.
        return len(spec.args) - len(spec.defaults or []) <= len(args) <= len(spec.args)
def get_kde_caseduration_json(df, parameters=None):
    """
    Estimate the KDE density of the case durations of the log/dataframe and
    return it encoded as JSON graph points.

    Parameters
    --------------
    df
        Pandas dataframe
    parameters
        Possible parameters of the algorithm, including:
        Parameters.GRAPH_POINTS -> number of points to include in the graph
        Parameters.CASE_ID_KEY -> Column hosting the Case ID

    Returns
    --------------
    json
        JSON representing the graph points
    """
    cases_desc = get_cases_description(df, parameters=parameters)
    durations = [case["caseDuration"] for case in cases_desc.values()]
    return case_duration_commons.get_kde_caseduration_json(
        durations, parameters=parameters)
def render_text(name: str,
                data: str,
                font_size: int = 10,
                fig_width_per_char: float = 0.1,
                fig_height: float = 0.4,
                img_height: int = None,
                img_width: int = None,
                **kwargs):
    """Render a text string.

    Args:
        name: name of the text
        data: the string to be rendered
        font_size: text font size
        fig_width_per_char: the width of each character measured by ``figsize``
            of ``plt.subplots()``.
        fig_height: the height of the text label measured by ``figsize`` of
            ``plt.subplots()``.
        img_height (int): height of the output image
        img_width (int): width of the output image
        **kwargs: extra arguments forwarded to ``ax.text``.
    """
    # figure width scales with the number of characters to render
    fig, ax = plt.subplots(
        figsize=(len(data) * fig_width_per_char, fig_height))
    kwargs['fontsize'] = font_size
    ax.text(0, 0, data, **kwargs)
    ax.axis('off')
    # NOTE(review): `dpi` is neither a parameter nor a local variable here --
    # presumably a module-level global; confirm it is defined in this module.
    return _convert_to_image(name, fig, dpi, img_height, img_width)
def index_closed(client, index_name):
    """Return True if index is closed"""
    try:
        # Elasticsearch >= 1.0 signature
        state = client.cluster.state(
            index=index_name,
            metric='metadata',
        )
    except TypeError:
        # Elasticsearch 0.90 signature
        state = client.cluster.state(
            filter_blocks=True,
            filter_index_templates=True,
            filter_indices=index_name,
            filter_nodes=True,
            filter_routing_table=True,
        )
    return state['metadata']['indices'][index_name]['state'] == 'close'
import os
def calculate_partial_volume(dirname, label, _isAccumulated=True):
    """
    Compute the physical volume covered by the posterior probability mask for
    ``label`` under ``dirname`` (voxel-value sum times voxel spacing product,
    in the image's spacing units).

    For the special label "ICV" the volumes of all ``accumulated`` sub-labels
    (except "background_total") are summed recursively.

    :param dirname: directory containing the configured results folders
    :param label: label name (upper-cased internally); "ICV" triggers recursion
    :param _isAccumulated: read from the "accumulated" results folder when
        True, otherwise from the "partials" folder
    :return: float volume
    """
    label = label.upper()
    maskSum = 0.0
    if _isAccumulated:
        fileDir = _config.get("Results", "accumulated")
    else:
        fileDir = _config.get("Results", "partials")
    if label == "ICV":
        # intracranial volume = sum of all accumulated labels except background
        for sublabel in accumulated:
            if sublabel == "background_total":
                continue
            else:
                # print "sublabel: ", sublabel, calculate_partial_volume(dirname, sublabel, True)
                maskSum += calculate_partial_volume(dirname, sublabel, True)
        return maskSum
    labelFile = os.path.join(dirname, fileDir, "POSTERIOR_" + label + ".nii.gz")
    assert os.path.exists(labelFile), "File not found: %s" % labelFile
    image = sitk.ReadImage(labelFile)
    nda = sitk.GetArrayFromImage(image)
    maskSum = nda.sum()
    ## print maskSum
    size = image.GetSpacing()
    ## print size
    # convert the probability-mass sum to a physical volume
    return maskSum * size[0] * size[1] * size[2]
def get_contributions_with_user_as_submitter(event, user):
    """Get a list of contributions in which the `user` has submission rights"""
    query = _query_contributions_with_user_as_submitter(event, user)
    query = query.options(joinedload('acl_entries'))
    query = query.order_by(db.func.lower(Contribution.title))
    return query.all()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.