content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def ci(v):
    """
    Calculate the chemotaxis index from a vector of counts.

    Computed as ((v[0] + v[3]) - (v[1] + v[2])) / v[6].
    """
    score = (v[0] + v[3]) - (v[1] + v[2])
    return score / float(v[6])
def list_all_regions(session):
    """Return all regions where Lambda is currently supported.

    :param session: boto3-style session exposing ``get_available_regions``
    """
    regions = session.get_available_regions("lambda")
    return regions
def get_openid_client(client):
    """
    Build a Keycloak OpenID Connect client for the given Django client.

    :param django_keycloak.models.Client client:
    :rtype: keycloak.openid_connect.KeycloakOpenidConnect
    """
    openid = client.realm.realm_api_client.open_id_connect(
        client_id=client.client_id,
        client_secret=client.secret
    )
    if client.realm._well_known_oidc:
        # Reuse the realm's cached well-known OIDC document so the client
        # does not have to fetch it over HTTP.
        # NOTE(review): relies on the private ``_well_known_oidc`` cache
        # attribute -- confirm against django_keycloak's Realm model.
        openid.well_known.contents = client.realm.well_known_oidc
    return openid
def sieve_of_eratosthenes():
    """Count the primes up to 8192 with the Sieve of Eratosthenes.

    This one is from an infamous benchmark, "The Great Computer
    Language Shootout" (http://www.bagley.org/~doug/shootout/).

    Returns:
        int: the number of primes <= 8192.
    """
    limit = 8192
    is_prime = [True] * (limit + 1)
    primes_found = 0
    for candidate in range(2, limit + 1):
        if not is_prime[candidate]:
            continue
        primes_found += 1
        # Cross off every multiple of the newly found prime.
        for multiple in range(candidate * 2, limit + 1, candidate):
            is_prime[multiple] = False
    return primes_found
import copy
def formatLocalization(row, media, default_object):
    """Given a localization row, format it for uploading to the tator system.

    Merges ``row`` into a shallow copy of ``default_object``, attaches the
    media id, and normalizes pixel coordinates to fractions of the media's
    width/height.

    :param row: pandas-style row exposing ``to_dict()``
    :param media: object with ``id``, ``width`` and ``height`` attributes
    :param default_object: dict of default attribute values (not mutated)
    :return: dict ready for upload
    """
    new_obj = copy.copy(default_object)
    new_obj.update(row.to_dict())
    new_obj["media_id"] = media.id
    # Normalize horizontal coordinates to [0, 1].
    for width_comp in ['x', 'width', 'x0', 'x1']:
        if width_comp in new_obj:
            new_obj[width_comp] /= media.width
    # Normalize vertical coordinates to [0, 1].
    for height_comp in ['y', 'height', 'y0', 'y1']:
        if height_comp in new_obj:
            new_obj[height_comp] /= media.height
    # FIX: removed a leftover debug print of every formatted object.
    return new_obj
def should_preserve_falsy_metadata_value(value):
    """There are falsy values we want to keep as metadata."""
    # Membership compares with ==, so any zero-valued number matches.
    falsy_keepers = (0, 0.0, False)
    return any(value == keeper for keeper in falsy_keepers)
import numpy as np
def Delbouille73(ini, endi, atlasdir=None):
    """
    Extract spectral data from the original disk-center
    intensity atlas recorded at the Jungfraujoch Observatory:
    Delbouille, Neven, Roland (1973)
    Wavelength range: 3000 - 10.000 A
    Wavelength step (visible): 0.002 A
    CALL: atlas,xlam = Delbouille73(ini = waveIni ,endi = waveEndi)
    Downloaded from:
    http://bass2000.obspm.fr/solar_spect.php
    Args:
        ini (int): Initial wavelength
        endi (int): Final wavelength
        atlasdir (string, optional): Atlas directory
    Returns:
        list: [intensity array scaled by 1e-4, wavelength array]
    """
    # Atlas directory
    if atlasdir is None:
        # Default to the package's data/ directory two levels up from
        # this module.  NOTE(review): '/'-based splitting assumes a POSIX
        # path; this default will not resolve correctly on Windows.
        atlasdir = str(__file__).split('/')
        sdir = '/'.join(atlasdir[0:-2])+'/data'
    else:
        sdir = atlasdir
    file0 = np.load(sdir + '/Delbouille73.npy')
    # Wavelength grid matching the atlas sampling (0.002 A steps).
    lmbda0 = np.arange(3000., 10000., 0.002)
    # Grid indices closest to the requested start/end wavelengths.
    iniI = np.argmin(abs(ini - lmbda0))
    endiI = np.argmin(abs(endi - lmbda0))
    lmbda = lmbda0[iniI:endiI]
    varFinal = file0[iniI:endiI]
    # Intensities appear to be stored scaled by 1e4 in the .npy file --
    # presumably; verify against the data source.
    return [varFinal / 1e4, lmbda]
def customise_csc_DQM(process):
    """Do nothing special. May need some adjustments for unganged ME11.

    :param process: CMSSW process object, returned unchanged
    :return: the same process object
    """
    return process
def get_center_indices(data, centers):
    """
    Return the index in ``data`` of each centroid array in ``centers``.

    When several equal rows exist in the dataset, the first match is
    returned.  Required since the K++ initializer outputs the actual
    centroid arrays while the kmedoids implementation needs indices.

    data: input matrix, list of arrays
    centers: list of centroid arrays
    """
    indices = []
    for center in centers:
        matches = [idx for idx, row in enumerate(data) if list(row) == list(center)]
        indices.append(matches[0])
    return indices
async def get_pr_for_commit(gh, sha):
    """Find the PR containing the specific commit hash.

    :param gh: gidgethub-style async GitHub client exposing ``getitem``
    :param sha: commit hash to search for in python/cpython
    :return: the first matching PR search item, or None when no PR
        references the commit
    """
    prs_for_commit = await gh.getitem(
        f"/search/issues?q=type:pr+repo:python/cpython+sha:{sha}"
    )
    if prs_for_commit["total_count"] > 0:  # there should only be one
        return prs_for_commit["items"][0]
    return None
def underlying_function(thing):
    """Return the original function underlying a distribution wrapper.

    Unwrapped objects are returned unchanged.
    """
    unwrapped = getattr(thing, '__wrapped__', thing)
    return unwrapped
def get_factor(units, unit_id):
    """
    Return the conversion factor of a unit config.

    Prefers the unit's ``to_base_function`` when set, otherwise its plain
    ``factor``.  Returns ``None`` when ``units`` is empty/None or when no
    unit matches ``unit_id``.
    """
    if not units:
        return None
    for unit in units:
        if unit.id != unit_id:
            continue
        return unit.to_base_function if unit.to_base_function else unit.factor
    return None
def write(scope, filename, lines, mode=('a',)):
    """
    Writes the given lines into the given file.
    The following modes are supported:
      - 'a': Append to the file if it already exists.
      - 'w': Replace the file if it already exists.
    :type filename: sequence of string
    :param filename: The filename (first element is used).
    :type lines: iterable of string
    :param lines: The data that is written into the file; each line is
        right-stripped and written with a trailing newline.
    :type mode: sequence of string
    :param mode: Any of the above listed modes (first element is used).
    :rtype: bool
    :return: Always True.
    """
    # FIX: the default used to be the mutable list ['a']; an immutable
    # tuple avoids the shared-mutable-default pitfall.
    with open(filename[0], mode[0]) as fp:
        fp.writelines(['%s\n' % line.rstrip() for line in lines])
    return True
from typing import Iterable
from typing import Callable
def _get_filter(only_prefix: Iterable[str], ignore_prefix: Iterable[str]) -> Callable[[str], bool]:
"""Create filter for members to extract.
:param only_prefix: Extract only internal paths starting with these prefixes
:param ignore_prefix: Ignore internal paths starting with these prefixes
"""
if only_prefix:
def _filter(name):
return any(name.startswith(prefix) for prefix in only_prefix
) and all(not name.startswith(prefix) for prefix in ignore_prefix)
else:
def _filter(name):
return all(not name.startswith(prefix) for prefix in ignore_prefix)
return _filter | c9d609a62f4eaad68334a2e7003a2f91fa8d9146 | 40,975 |
def get_direction_per_spike(df, cells, value_query, threshold=1):
    """
    For a list of cells in a dataframe, return the queried values that are
    associated with spiking activity, together with their weights.

    :param df: Dataframe containing the spiking activity and the values
        to be queried
    :param cells: List of cells (dataframe column headings)
    :param value_query: Dataframe column heading containing values
    :param threshold: Spiking threshold to be reached. Default: 1 spike
        per bin
    :return: List of value Series, and their weights (spike counts per
        temporal bin)
    """
    value_list = []
    weights = []
    for cell in cells:
        spiking = df[cell] >= threshold
        value_list.append(df[value_query][spiking])
        weights.append(df[cell][spiking])
    return value_list, weights
def get_highlightjs_stub():
    """
    Build an inline <script>/<style> HTML snippet embedding Prism.

    Reads ``prism.js`` and ``prism-onedark.css`` from the current working
    directory.

    Returns
    -------
    str
        Raw HTML containing the script and style tags.
    """
    raw_html = '<script>'
    with open('prism.js') as f:
        text = f.read()
    raw_html = raw_html + text + '</script>'
    with open('prism-onedark.css') as f:
        text = f.read()
    # Append the theme CSS plus a fixed 12px font size for code blocks.
    raw_html = raw_html + '<style>' + text + ' code[class*="language-"], pre[class*="language-"] {font-size: 12px}</style>'
    return raw_html
def calculate(power):
    """Return the sum of the decimal digits of 2 to the given power.

    :param power: exponent (non-negative int)
    :return: digit sum of 2**power
    """
    # Iterate the digit characters directly; the intermediate list the
    # original built around map() was unnecessary.
    return sum(int(digit) for digit in str(2 ** power))
import re
import os
def get_sample_names(args):
    """Return sample names from --name (comma list) or --list (file).

    :param args: parsed arguments with ``names`` (comma-separated string)
        and ``list`` (path to a file with one name per line)
    :return: list of sample names; empty when neither source is usable
    """
    if len(args.names) > 0:
        return re.split(r'\s*,\s*', args.names)
    if len(args.list) > 0 and os.path.isfile(args.list):
        # FIX: use a context manager so the file handle is always closed.
        with open(args.list, 'r') as files_fh:
            return files_fh.read().splitlines()
    return []
import pandas
def load_star_as_dataframe(star_filename):
    """Generate a pandas dataframe from a star file with one single data loop.

    Written for high efficiency. Star headers have the leading _ stripped
    and are turned into the pandas header.  Extra comment lines after the
    header loop_ are currently unsupported and should be removed before
    loading.

    Args:
        star_filename (str): Filename of the star file from Warp (relion style)
    Returns:
        :py:class:`pandas.DataFrame`: dataframe from the star file.
    """
    with open(star_filename) as f:
        pos = 0
        columns = []
        # Skip everything up to (and including) the "loop_" marker.
        cur_line = f.readline()
        while not cur_line.startswith("loop_"):
            cur_line = f.readline()
        cur_line = f.readline()
        # Collect "_name #N" header lines, remembering where data starts.
        while cur_line.startswith("_"):
            pos = f.tell()
            columns.append(cur_line.split()[0][1:])
            cur_line = f.readline()
        # Rewind to the first data row and let pandas parse the rest.
        f.seek(pos)
        # FIX: ``delim_whitespace=True`` is deprecated (removed in pandas
        # 3.0); the regex separator is the supported equivalent.
        df = pandas.read_csv(f, sep=r"\s+", names=columns)
    return df
import base64
def isbase64(input_file):
    """
    Check whether a value is valid base64.

    Works for values in either format:
    1) bytes
    2) str
    # Arguments
        input_file - Required : value to verify (bytes | str)
    # Returns
        tuple: (is_base64, input_as_bytes); (False, None) when the input
        is neither str nor bytes or decoding fails
    """
    try:
        # Check whether the input is a string.
        if isinstance(input_file, str):
            # Convert the string to bytes.
            input_file_bytes = bytes(input_file, 'ascii')
        # Check whether the input is already in bytes format.
        elif isinstance(input_file, bytes):
            # Keep it as bytes.
            input_file_bytes = input_file
        else:
            raise ValueError("Argument must be string or bytes")
        # A value is base64 iff decoding then re-encoding round-trips.
        return base64.b64encode(base64.b64decode(input_file_bytes)) == input_file_bytes, input_file_bytes
    except Exception:
        # Any failure (bad padding, non-ascii input, wrong type) means
        # "not base64".
        return False, None
from bs4 import BeautifulSoup
def is_html_text(txt):
    """
    Check if the input text is html or not
    Args:
        txt (str): input text
    Returns:
        bool: Returns True if html else False.
    """
    # BeautifulSoup.find() returns the first parsed tag (or None), so any
    # recognised markup makes the result truthy.
    return bool(
        BeautifulSoup(txt, "html.parser").find()
    )
import random
def mod_hosts_map(cloud_map, n, **kwargs):
    """
    Select n random hosts from the given stack and modify/add
    the map entry for those hosts with the kwargs.

    :param cloud_map: dict mapping host name -> host entry dict (mutated)
    :param n: number of hosts to modify
    :return: the mutated cloud_map
    """
    # FIX: random.sample requires a sequence; dict key views are not one
    # on Python 3 (sampling from them raises TypeError on 3.11+).
    population = list(cloud_map)
    # randomly select n hosts
    hosts = random.sample(population, n)
    # modify the hosts
    for host in hosts:
        cloud_map[host].update(kwargs)
    return cloud_map
import time
def get_ts():
    """Return the current Unix timestamp in whole seconds."""
    now = time.time()
    return int(now)
def dist(v1, v2):
    """Euclidean distance between two vectors with ``.x``/``.y`` attributes."""
    dx = v2.x - v1.x
    dy = v2.y - v1.y
    return (dx**2 + dy**2) ** 0.5
def array_shape(x) -> tuple[int, ...]:
    """Return the shape of 'x' as a tuple of ints.

    Raises TypeError when ``x`` has no ``shape`` attribute.
    """
    try:
        return tuple(int(dim) for dim in x.shape)
    except AttributeError:
        raise TypeError(f"No array shape defined for type {type(x)}")
def reset_slider(modal_open, selected_confidence):
    """
    Reset the confidence slider range value to [0, 60] after closing the
    modal component.

    Parameters
    ----------
    modal_open : bool
        Whether the modal component is open.
    selected_confidence : list of float
        The selected minimum and maximum values of the confidence slider.

    Returns
    -------
    list of float
        The current slider values while the modal is open, otherwise the
        default [0, 60] range.
    """
    if modal_open:
        # Modal still open: keep whatever the user selected.
        return selected_confidence
    # Modal closed: restore the default range.
    return [0, 60]
def get_type_check(expected_type):
    """
    Any -> (Any -> bool)
    :param expected_type: type that will be used in the generated check
    :return: a function doing an exact-type boolean check on its argument
    """
    def _check(candidate):
        # Exact type match only -- subclasses do not pass.
        return type(candidate) is expected_type
    return _check
def fmt(x, pos):
    """
    Format a color bar tick label as LaTeX math text.

    Very large/small magnitudes use scientific notation; integers are
    printed bare; mid-range values get one or two decimals.
    """
    magnitude = abs(x)
    if magnitude > 1e4 or 0 < magnitude < 1e-2:
        mantissa, exponent = f"{x:.2e}".split("e")
        return fr"${mantissa} \cdot 10^{{{int(exponent)}}}$"
    if magnitude > 1e2 or float(magnitude).is_integer():
        return fr"${int(x):d}$"
    if magnitude > 1e1:
        return fr"${x:.1f}$"
    if magnitude == 0.0:
        return fr"${x:.1f}$"
    return fr"${x:.2f}$"
def is_binarystring(s):
    """Return True if the object is a binary string (bytes, not unicode)."""
    result = isinstance(s, bytes)
    return result
def is_superset(token, tokens):
    """Return True when ``token`` contains another token as a substring.

    A token equal to itself in ``tokens`` is ignored; supersets of other
    tokens should not be included.
    """
    return any(other in token for other in tokens if other != token)
def train(svi, loader, use_cuda=False):
    """
    Per-epoch training function.

    :param svi: pyro SVI module providing ``step``
    :param loader: data loader from loader_function (iterable of batches
        with a ``dataset`` attribute)
    :param use_cuda: move each batch to the GPU before stepping
    :return: average loss over the dataset for this epoch
    """
    running_loss = 0.
    for batch in loader:
        if use_cuda:
            batch = batch.cuda()
        running_loss += svi.step(batch)
    # Normalise by the number of samples, not the number of batches.
    return running_loss / len(loader.dataset)
import torch
def mu_inverse(y):
    """Invert the mu-law companding transform for 16-bit integers."""
    assert y.min() >= -1 and y.max() <= 1
    mu = 32768.
    magnitude = ((1 + mu) ** torch.abs(y) - 1) / mu
    return torch.sign(y) * magnitude
def standardise_satellite(satellite_code):
    """
    Normalise a satellite code to upper case with underscores.

    :type satellite_code: str
    :rtype: str
    >>> standardise_satellite('LANDSAT-5')
    'LANDSAT_5'
    """
    if satellite_code:
        return satellite_code.upper().replace('-', '_')
    return None
def spherical(h, r, sill, nugget=0):
    """
    Spherical variogram model function.

    Parameters
    ----------
    h : float
        The lag at which the dependent variable is calculated.
    r : float
        Effective range of autocorrelation.
    sill : float
        The sill of the variogram, where the semivariance saturates.
    nugget : float, default=0
        Value of the independent variable at a distance of zero.

    Returns
    -------
    float
        Semivariance at lag ``h``.
    """
    if h > r:
        # Beyond the effective range the model saturates at nugget + sill.
        return nugget + sill
    ratio = h / r
    return nugget + sill * (1.5 * ratio - 0.5 * ratio ** 3.0)
import os
def falcon_dir():
    """Return the CrowdStrike directory path on this system.

    Prefers the v6 location and falls back to the legacy ``/Library/CS/``
    path when the v6 directory does not exist.

    :return: Full path to the CrowdStrike directory.
    :rtype: str

    .. code-block:: bash
        salt '*' crowdstrike.falconctl_path
    """
    v6_path = '/Library/Application Support/CrowdStrike/Falcon/'
    legacy_path = '/Library/CS/'
    if os.path.exists(v6_path):
        return v6_path
    return legacy_path
def array11(arr, index):
    """
    Recursively count occurrences of 11 in ``arr`` from ``index`` onward.

    :return: number of times 11 occurs in arr[index:]
    """
    if index >= len(arr):
        return 0
    found = 1 if arr[index] == 11 else 0
    return found + array11(arr, index + 1)
def get_from_session(request):
    """
    Return the user stored in the request's beaker session (None if absent).
    """
    session = request.environ['beaker.session']
    return session.get('user')
import torch
def quadratic_expansion(x):
    """
    Example :math:`z(x_1, x_2) = (x_1^2, x_1x_2, x_2^2)`

    Cubic or higher polynomial expansions can be constructed by iterating
    this function.

    :param torch.tensor x: input time series of shape (dims, timesteps)
    :returns: tensor of z with shape (dims', timesteps)
    :rtype: torch.tensor
    """
    # Pairwise products of dimension d with every dimension >= d.
    terms = [x[d:d + 1, :] * x[d:, :] for d in range(x.shape[0])]
    return torch.cat(tuple(terms), dim=0)
import socket
def get_checksum(source):
    """
    Compute an RFC 1071 internet checksum for a packet.

    :param source: packet bytes to checksum
    :return: 16-bit checksum with its bytes swapped (host -> network order)
    """
    check_sum = 0
    count = 0
    # Accumulate 16-bit little-endian words into a 32-bit complement sum.
    while count < len(source) - 1:
        one_step = source[count + 1] * 256 + source[count]
        check_sum += one_step
        check_sum &= 0xFFFFFFFF
        count += 2
    # Add the trailing byte of an odd-length packet.
    if len(source) % 2:
        check_sum += source[len(source) - 1]
        check_sum &= 0xFFFFFFFF
    # Fold the sum into 16 bits by adding its upper and lower halves.
    check_sum = (check_sum >> 16) + (check_sum & 0xFFFF)
    # One's complement.
    check_sum = ~check_sum & 0xFFFF
    # Swap the bytes of the checksum.
    # FIX: removed the original's trailing ``socket.htons(check_sum)``
    # call whose result was discarded (dead code).
    check_sum = (check_sum >> 8 & 0x00FF) | (check_sum << 8 & 0xFF00)
    return check_sum
import itertools
def lowpass(img):
    """
    A simple pure-python low pass (antialiasing) filter.

    Applies a 3x3 gaussian-like blur to a 2D list of floats; border
    pixels are copied through unchanged.
    """
    width = len(img[0])
    height = len(img)
    # Start from a copy so the borders keep their original values.
    result = [row[:] for row in img]
    kernel = [1, 4, 1,
              4, 12, 4,
              1, 4, 1]
    for col, row in itertools.product(range(1, width - 1), range(1, height - 1)):
        window = (img[row - 1][col - 1:col + 2] +
                  img[row][col - 1:col + 2] +
                  img[row + 1][col - 1:col + 2])
        # Kernel weights sum to 32, so divide by 32 to normalise.
        result[row][col] = sum(p * k for p, k in zip(window, kernel)) / 32.0
    return result
import subprocess
def gitGetVersionIDs(directory, filename):
    """
    Return the commit IDs (newest first) for every version of a file.

    :param directory: repository working directory to run git in
    :param filename: path of the file, relative to ``directory``
    :return: list of commit hashes as bytes
    """
    # FIX: ``directory`` was previously accepted but silently ignored;
    # run git inside it so the lookup targets the intended repository.
    resp = subprocess.check_output(
        ["git", "rev-list", "--all", "--", filename], cwd=directory)
    return resp.splitlines()
def S_convolute_values(_data_list, _transformer):
    """
    Return new data samples where each value has the matching transformer
    value added.  Returns [] when the two inputs differ in length.
    """
    if len(_data_list) != len(_transformer):
        return []
    return [sample + shift for sample, shift in zip(_data_list, _transformer)]
def get_valid_entries(board, row, col):
    """Return the digits that may legally be placed at a sudoku cell.

    params : board : list (sudoku 9 X 9)
           : row   : int
           : col   : int
    returns : list (list of valid entries)
    """
    seen = set()
    # Digits already used in the same row and column.
    for k in range(9):
        seen.add(board[k][col])
        seen.add(board[row][k])
    # Digits already used in the cell's 3x3 block.
    top = (row // 3) * 3
    left = (col // 3) * 3
    for r in range(top, top + 3):
        for c in range(left, left + 3):
            seen.add(board[r][c])
    return [digit for digit in range(1, 10) if digit not in seen]
import json
def get_error_msg(exception) -> str:
    """
    Parse the HTTP response body of an exception and return its
    "message" field.
    """
    return json.loads(exception.body.decode("utf-8"))["message"]
import six
def _add_simplify(SingleActionType, BulkActionType):
    """
    Add .simplify method to "Bulk" actions, which returns None for no rows, non-Bulk version for a
    single row, and the original action otherwise.
    """
    # Actions with fewer than 3 fields carry no column payload, so the
    # single-row form is just (table_id, row_id).
    if len(SingleActionType._fields) < 3:
        def get_first(self):
            return SingleActionType(self.table_id, self.row_ids[0])
    else:
        # Otherwise also project every column down to its first value.
        def get_first(self):
            return SingleActionType(self.table_id, self.row_ids[0],
                { key: col[0] for key, col in six.iteritems(self.columns)})
    def simplify(self):
        # None when empty, the single form for one row, unchanged otherwise.
        return None if not self.row_ids else (get_first(self) if len(self.row_ids) == 1 else self)
    BulkActionType.simplify = simplify
import json
def read_inputs(jpath):
    """
    Read the input JSON file and return its contents as a dictionary.

    Parameters
    ----------
    jpath : string
        path of the input JSON file

    Returns
    -------
    dict
        the parsed JSON document
    """
    with open(jpath) as handle:
        return json.load(handle)
def same_type(arg1, *args):
    """Return True when every argument shares arg1's class (or type)."""
    expected = getattr(arg1, '__class__', type(arg1))
    return all(getattr(arg, '__class__', type(arg)) is expected for arg in args)
def get_author_name(pypi_pkg):
    """Return the author's name from a package's PyPI metadata dict."""
    info = pypi_pkg["pypi_data"]["info"]
    return info["author"]
def cpp_options(deps):
    """Build -optP command-line flags from the immediate C++ deps."""
    flags = []
    for flag in deps.cpp_flags:
        flags.append("-optP" + flag)
    return flags
def ema(data_1m, ema_short, ema_long):
    """
    Calculate short and long EMAs for 1-minute candlestick data.

    Inputs:
        data_1m   : dict of candlestick DataFrames (1 minute cycle);
                    column 2 is read as the price and columns 3/4 receive
                    the short/long EMA -- assumed layout, TODO confirm
        ema_short : EMA period of the shorter cycle
        ema_long  : EMA period of the longer cycle
    Outputs:
        data_1m   : the same dict with 'ema_short'/'ema_long' columns added
    Raises:
        Exception when the periods are not positive with ema_long > ema_short.
    """
    # inspect the parameters
    if not (ema_long>0 and ema_short>0 and ema_long>ema_short):
        raise Exception('ema_long should be larger than ema_short,please reenter')
    # calculate indicators
    for item in data_1m.keys():
        # initialize ema
        data_1m[item]['ema_short'] = 0
        data_1m[item]['ema_long'] = 0
        # Seed both EMAs with the first price value (column 2).
        data_1m[item].iloc[0,3] = data_1m[item].iloc[0,2]
        data_1m[item].iloc[0,4] = data_1m[item].iloc[0,2]
        for i in range(1,len(data_1m[item])):
            # Recursive EMA update: weight (n-1)/(n+1) on the previous EMA
            # plus 2/(n+1) on the current price.
            data_1m[item].iloc[i,3] = ((ema_short-1)/(ema_short+1))*data_1m[item].iloc[i-1,3] + (2/(ema_short+1))*data_1m[item].iloc[i,2]
            data_1m[item].iloc[i,4] = ((ema_long-1)/(ema_long+1))*data_1m[item].iloc[i-1,4] + (2/(ema_long+1))*data_1m[item].iloc[i,2]
    return data_1m
def calculate_pvi(row, rowNumberR, rowNumberD, rowNumberTotal, weight=1.0):
    """Return the weighted PVI (percent R minus percent D) for a row.

    Non-numeric cells are treated as 0; a zero total yields the sentinel
    value 1000.

    :param row: indexable row of string/number cells
    :param rowNumberR: index of the Republican count cell
    :param rowNumberD: index of the Democratic count cell
    :param rowNumberTotal: index of the total count cell
    :param weight: multiplier applied to the computed PVI
    """
    def _as_float(cell):
        # Treat unparsable cells as zero, matching the original behaviour.
        try:
            return float(cell)
        except ValueError:
            return 0
    # IMPROVED: the three identical try/except blocks collapsed into one
    # helper.
    rNumber = _as_float(row[rowNumberR])
    dNumber = _as_float(row[rowNumberD])
    tNumber = _as_float(row[rowNumberTotal])
    try:
        percentR = 100.0 * rNumber / tNumber
        percentD = 100.0 * dNumber / tNumber
        pvi = percentR - percentD
        pvi *= weight
    except ZeroDivisionError:
        # No votes counted: return the sentinel used by callers.
        pvi = 1000
    return pvi
def _cleanup_frame(frame):
"""Rename and re-order columns."""
frame = frame.rename(columns={'Non- Hispanic white': 'White'})
frame = frame.reindex(['Asian', 'Black', 'Hispanic', 'White'],
axis=1)
return frame | 7a25ee47e726314de57dc80d103a0189d3583024 | 41,037 |
import os
def listdir(path):
    """List files at the specified path excluding hidden (dot) files."""
    def _visible(name):
        return not name.startswith('.')
    return filter(_visible, os.listdir(path))
def frmt_db_lctn(location):
    """ Format the database location into a nicer, more readable style

    :param location: sms deposit location
    :returns: title-cased location with '_'/'-' turned into spaces, or
        None for falsy input
    """
    if not location:
        return None
    readable = location.replace("_", " ")
    readable = readable.replace('-', ' ')
    return readable.title()
import pkg_resources
def riptide_assets_dir() -> str:
    """ Path to the assets directory of riptide_lib. """
    # NOTE(review): pkg_resources is deprecated in favour of
    # importlib.resources; switching would change the runtime dependency,
    # so it is only flagged here.
    return pkg_resources.resource_filename('riptide', 'assets')
def insert_column(table, col_data, col_name, filename=''):
    """Insert column data into ``table``; optionally persist to ``filename``."""
    table[col_name] = col_data
    if filename != '':
        # Only write out when a destination filename was provided.
        table.write(filename, overwrite=True)
    return table
def _rfr_getitem_ ( self , key ) :
    """Get fit-parameter through the key/name
    >>> fit_result = ...
    >>> sigma = fit_result['sigma']

    :param key: parameter name to look up
    :raises KeyError: when no floating or constant parameter matches
    """
    ##
    # Search the floating parameters first...
    pars = self.floatParsFinal()
    for p in pars :
        if key == p.GetName() : return p
    #
    # ...then fall back to the constant parameters.
    pars = self.constPars()
    for p in pars :
        if key == p.GetName() : return p
    raise KeyError ( 'RooFitResult: invalid key %s ' % key )
def updated_def_variables(variables, provided_var_dict, params_to_add):
    """Add CFN parameters to template based on the specified lists.

    Mutates and returns ``variables``.

    Example params_to_add list:
    params_to_add = [
        {'var_name': 'OtherTags',
         'var_type': CFNString,
         'description': 'Extra tag value to apply to the instances'},
        {'var_name': 'OtherSGs',
         'var_type': CFNString,
         'description': 'Extra security group to apply to the instances'}
    ]
    """
    for param_to_add in params_to_add:
        if param_to_add['var_name'] in provided_var_dict:
            # NOTE(review): ``.iteritems()`` is Python 2 only -- under
            # Python 3 this line raises AttributeError; confirm the target
            # interpreter or switch to ``.items()``.
            for key, _value in provided_var_dict[param_to_add['var_name']].value.iteritems():  # noqa pylint: disable=C0301
                variables[key] = {
                    'type': param_to_add['var_type'],
                    'description': param_to_add['description']
                }
    return variables
def drugbank_compa(prob, treat, drug_to_id, id_to_info, scale_fac=1):
    """
    Return +scale_fac when any treatment word maps to a drug whose info
    set overlaps the problem words, else -scale_fac.

    :param prob: list of words
    :param treat: list of words
    :param drug_to_id: word -> drug id mapping
    :param id_to_info: id_to_indication or id_to_adr (drug id -> word set)
    :param scale_fac: magnitude of the returned score
    """
    prob_words = set(prob)
    for word in treat:
        if word in drug_to_id and prob_words & id_to_info[drug_to_id[word]]:
            return 1.*scale_fac
    return -1.*scale_fac
def signal_to_noise_limit_tag_from_signal_to_noise_limit(signal_to_noise_limit):
    """Generate a signal-to-noise limit tag used to customize phase names.

    This changes the phase name 'phase_name' as follows:
    signal_to_noise_limit = None -> phase_name
    signal_to_noise_limit = 2 -> phase_name__snr_2
    signal_to_noise_limit = 10 -> phase_name__snr_10
    """
    if signal_to_noise_limit is None:
        return ""
    return f"__snr_{signal_to_noise_limit}"
import re
def check_rules(rules, text_list, current_labels, fallback_label):
    """Find rule matches in the texts and return the labels to attach.

    When no rule matches, the fallback label is attached instead.

    Args:
        rules (list): List of rules ({"pattern": ..., "label": ...})
        text_list (list): List of strings to search in
        current_labels (list): List of already attached labels
        fallback_label (str): Label to attach if no rule matches
    Returns:
        tuple: (match, labels)
            match (bool): True if any rule matches, False otherwise
            labels (set): Labels to attach (excludes already-attached ones)
    """
    labels = set()
    matched = False
    for rule in rules:
        rule_hit = any(re.search(rule["pattern"], text) for text in text_list)
        if rule_hit:
            matched = True
            if rule["label"] not in current_labels:
                labels.add(rule["label"])
    # Attach the fallback label only when nothing matched at all.
    if not matched and fallback_label not in current_labels:
        labels.add(fallback_label)
    return matched, labels
def Base_setTextValue(self, param):
    """Set an element's value via JavaScript from a YAML-style param dict.

    Example::

        - name: input username
          setTextValue:
            text: |
              multi line text1
              multi line text2
            id: elementid1

    :raises Exception: when ``param`` carries no text value
    """
    txt = self.getvalue(param)
    if txt is None:
        raise Exception("text not set: param=%s" % (param))
    elem = self.findmany2one(param)
    # Assign through the DOM directly so multi-line text is set verbatim
    # (send_keys would interpret newlines as Enter presses).
    self.driver.execute_script("arguments[0].value = arguments[1];", elem, txt)
    return self.return_element(param, elem)
def table_to_list(cells, pgs):
    """Convert sparse (i, j, u, v, pg, value) cells into nested lists.

    Output is indexed as ``tab[pg][j][i]``; unset positions hold "".
    """
    max_i = max_j = max_pg = 0
    for (i, j, u, v, pg, value) in cells:
        max_i = max(max_i, i)
        max_j = max(max_j, j)
        max_pg = max(max_pg, pg)
    tab = [
        [["" for _ in range(max_i + 1)] for _ in range(max_j + 1)]
        for _ in range(max_pg + 1)
    ]
    for (i, j, u, v, pg, value) in cells:
        tab[pg][j][i] = value
    return tab
from datetime import datetime
def get_Mk_global(date):
    """
    Based on the script BackRuns_OneSite_ByDay.ksh, calculating the
    globalMetMk value.

    :param date: naive datetime object, interpreted as local time
    :return: integer representing the globalMetMk value
    """
    # FIX: strftime("%s") is a non-portable glibc extension (it fails on
    # Windows and ignores the datetime's fields on some platforms);
    # datetime.timestamp() is the supported equivalent.
    seconds = int(date.timestamp())
    # Epoch-second cut-over dates between Met Mk versions.
    thresholds = (
        (1136073600, 0),
        (1230768000, 3),
        (1257811200, 4),
        (1268092800, 5),
        (1367280000, 6),
        (1405382400, 7),
        (1440460800, 8),
        (1499731200, 9),
    )
    for limit, mk in thresholds:
        if seconds < limit:
            return mk
    return 10
def tf_b(tf, _):
    """Boolean term frequency: 1.0 for any positive count, else 0.0."""
    if tf > 0.0:
        return 1.0
    return 0.0
def session_validate(self, now_s):
    """
    This gets called during session clean up loop, it will be passed
    a session object, if it returns False, the session object will
    be deleted. Overwrite if needed

    :param now_s: current time in seconds (unused by the default check)
    :return: True -- the default implementation keeps every session
    """
    return True
def dump_netrc(self):
    """Dump the class data in the format of a .netrc file."""
    parts = []
    for host, attrs in self.hosts.items():
        parts.append('machine ' + host + '\n\tlogin ' + str(attrs[0]) + '\n')
        if attrs[1]:
            # The optional account entry runs into the password line.
            parts.append('account ' + str(attrs[1]))
        parts.append('\tpassword ' + str(attrs[2]) + '\n')
    for macro, macro_lines in self.macros.items():
        parts.append('macdef ' + macro + '\n')
        parts.extend(macro_lines)
        parts.append('\n')
    return ''.join(parts)
import torch
def image_positional_encoding(shape):
    """Generate *per-channel* positional encodings for 2d images.

    The positional encoding is a Tensor of shape (N, 2*C, H, W) of (x, y)
    pixel coordinates scaled to be between -.5 and .5.

    Args:
        shape: NCHW shape of image for which to generate positional encodings.
    Returns:
        The positional encodings.
    """
    n, c, h, w = shape
    base = torch.zeros(n, c, h, w)
    # Broadcast a row-coordinate ramp over H and a column ramp over W.
    rows = torch.arange(-.5, .5, 1 / h)[None, None, :, None] + base
    cols = torch.arange(-.5, .5, 1 / w)[None, None, None, :] + base
    return torch.cat((rows, cols), dim=1)
def list_keys(client, bucket, prefix, token=None):
    """
    Recursive function used to retrieve all the object keys that match with a given prefix in the given S3 bucket.
    :param client: Client for the Amazon S3 service.
    :param bucket: The S3 bucket name.
    :param prefix: The prefix used for filtering.
    :param token: The continuation token returned by a previous call.
    :return: The found keys matching the prefix.
    """
    keys = list()
    # Pass the continuation token only when one was supplied; the API
    # rejects a ContinuationToken of None.
    response = client.list_objects_v2(
        Bucket=bucket,
        Prefix=prefix,
        ContinuationToken=token
    ) if token else client.list_objects_v2(
        Bucket=bucket,
        Prefix=prefix
    )
    if 'Contents' in response:
        for item in response['Contents']:
            keys.append(item['Key'])
    # Recurse while S3 reports that more pages remain.
    if 'NextContinuationToken' in response:
        keys += list_keys(client, bucket, prefix, response['NextContinuationToken'])
    return keys
def RepairMissingData(time_series, first_value):
    """Replace missing ('') values in a time series, in place.

    A missing first point becomes ``first_value`` (this should be 1 if a
    log transform will be taken, 0 otherwise).  Later missing points are
    carried forward from the previous observation.  The list is mutated
    and returned.
    """
    if time_series[0] == '':
        time_series[0] = first_value
    for idx in range(1, len(time_series)):
        if time_series[idx] == '':
            time_series[idx] = time_series[idx - 1]
    return time_series
def replace_stop(sequence):
    """
    Replace every '_' (stop) character in a string with 'X'.

    Args:
        sequence (string)
    Returns:
        string: sequence with '_' characters replaced by 'X's
    """
    return sequence.translate(str.maketrans('_', 'X'))
from pathlib import Path
def tmpfile(tmpdir, request):
    """Create and return the path to an empty temporary file.

    The file is named ``temp.<request.param>`` inside ``tmpdir``.
    """
    # FIX: the original evaluated ``tmpdir / 'temp.' + request.param``,
    # which relies on py.path.local's string ``+`` operator and raises
    # TypeError for str/Path directories.  Joining the complete name
    # works for any path-like tmpdir.
    path = Path(tmpdir) / ('temp.' + request.param)
    path.touch()
    return path
def test_function(a:int, b:int) -> int:
    """Add two numbers together.

    Args:
        a (int): Number 1
        b (int): Number 2
    Returns:
        int: Sum of both numbers
    """
    total = a + b
    return total
def check_abc_lec_status(output):
    """
    Read abc LEC output lines and report equivalence and error status.

    Returns (equivalent, errored); ``equivalent`` is None when the LEC
    status could not be determined from the output.
    """
    eq_status = None
    saw_latch_error = False
    for text in output:
        if "Error: The network has no latches." in text:
            saw_latch_error = True
        if text.startswith("Networks are NOT EQUIVALENT"):
            eq_status = False
        elif text.startswith("Networks are equivalent"):
            eq_status = True
    return eq_status, saw_latch_error
def createVocabularyList(wordsList):
    """Build the list of distinct words across all documents.

    Each element of *wordsList* is an iterable of tokens; the union of
    all tokens (duplicates removed) is returned as a list, usable as the
    feature vocabulary for classifying text fragments.

    :param wordsList: iterable of token iterables
    :return: list of unique tokens
    """
    vocabulary = set()
    for tokens in wordsList:
        vocabulary.update(tokens)
    return list(vocabulary)
import os
def queue_jsons(path):
    """Collect the response files under *path*, sorted by name.

    path -- (str) directory prefix, absolute or relative, ending with a
    separator, e.g. "../data/<subdir>/". Each returned entry is the
    prefix concatenated with a file name, ready to feed into
    rekognition_json_to_df.
    """
    return sorted(path + name for name in os.listdir(path))
from typing import List
from typing import Any
import random
def get_random_sample(items: List[Any], k: int) -> List[Any]:
    """Sample up to ``k`` distinct elements from ``items``.

    Wraps ``random.sample`` but clamps the sample size to
    ``len(items)``, so an oversized ``k`` never raises ``ValueError``.
    """
    sample_size = min(k, len(items))
    return random.sample(items, sample_size)
def parameters_builder(name,sensitive,value="",description=""):
    """Assemble a parameter payload.

    Returns a dict of the shape
    ``{"parameter": {"name", "sensitive", "value", "description"}}``
    populated from the arguments.
    """
    return {
        "parameter": {
            "name": name,
            "sensitive": sensitive,
            "value": value,
            "description": description,
        }
    }
import math
def projectionVolume(R1, R2, y1, y2):
    """Projected volume of a shell of radius R1->R2 onto an annulus on
    the sky of y1->y2.

    Evaluates in closed form the integral
        Int(y=y1,y2) 2*pi*y*( sqrt(R2^2-y^2) - sqrt(R1^2-y^2) ) dy
    which is half the total volume (the front half only).
    """
    def clipped_sqrt(value):
        # sqrt that treats negative arguments (annulus outside the shell) as 0
        return math.sqrt(value) if value > 0 else 0.
    inner_far = clipped_sqrt(R1 ** 2 - y2 ** 2)
    inner_near = clipped_sqrt(R1 ** 2 - y1 ** 2)
    outer_far = clipped_sqrt(R2 ** 2 - y2 ** 2)
    outer_near = clipped_sqrt(R2 ** 2 - y1 ** 2)
    return (2. / 3.) * math.pi * (
        (inner_far ** 3 - inner_near ** 3) + (outer_near ** 3 - outer_far ** 3))
def read_log_entry(file_name):
    """Return the most recent entry from a log file.

    Scans to the final line and strips its first 25 characters (the
    default date format plus a [space][delimiter][space] prefix) and any
    trailing whitespace.

    :param str file_name:
        Path to the log file
    """
    final_line = ''  # most recent entry seen so far
    with open(file_name, 'r') as log:
        for entry in log:
            final_line = entry
    return final_line[25:].rstrip()
def prepend(value: str, char="_"):
    """Ensure *value* starts with *char*, prefixing it when absent.

    A blank (whitespace-only) value collapses to the empty string.

    Examples:
        >>> prepend('a')
        '_a'
        >>> prepend('_a')
        '_a'
        >>> prepend('my_str', '--')
        '--my_str'
        >>> prepend('---my_str', '-')
        '---my_str'
    """
    if not value.strip():
        return ""
    if value.startswith(char):
        return value
    return f"{char}{value}"
import pickle
def load_binary(binary_path="dictionary.dat"):
    """Load the dictionary pickle file to a python dictionary (dict).

    Keyword Arguments:
        binary_path {str} -- path to the pickle dictionary file
            (default: {"dictionary.dat"})

    Returns:
        dict -- the unpickled dictionary, or an empty dict when the file
        is missing, unreadable, truncated, or not a valid pickle.
    """
    try:
        with open(binary_path, 'rb') as file:
            # pickle.load(file) is equivalent to Unpickler(file).load().
            return pickle.load(file)
    # Catch only the failures this best-effort loader should absorb;
    # the original bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit. The non-obvious members are the extra exception types
    # the pickle docs say unpickling may raise on corrupt input.
    except (OSError, EOFError, pickle.UnpicklingError, AttributeError,
            ImportError, IndexError):
        return {}
def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number via the fast-doubling method.

    Walks the bits of ``n`` from the most significant downwards,
    maintaining the pair (F(k), F(k-1)) and applying the identities
        F(2k)   = F(k) * (2*F(k-1) + F(k))
        F(2k-1) = F(k-1)**2 + F(k)**2
    so the loop performs only O(log n) big-integer multiplications.

    NOTE(review): requires n >= 1 -- ``(0).bit_length() - 1`` is -1, so
    ``fibonacci(0)`` raises ValueError on the shift; confirm callers
    never pass 0.
    """
    m = 1 << (n.bit_length() - 1)  # mask for the highest set bit of n
    Fn = 0    # F(k) for the bits consumed so far (k starts at 0)
    Fnm1 = 1  # F(k-1); F(-1) == 1 seeds the recurrence
    while m:
        # Doubling step: (F(k), F(k-1)) -> (F(2k), F(2k-1)).
        Fn2 = Fn * Fn
        Fn = 2 * Fnm1 * Fn + Fn2
        Fnm1 = Fnm1 * Fnm1 + Fn2
        if n & m:
            # Current bit of n is 1: advance one index, k -> k+1.
            Fnm1, Fn = Fn, Fnm1 + Fn
        m >>= 1
    return Fn
def create_tables():
    """Create relations in the database.

    Builds the DDL for every relation used by the application and
    returns the statements as a list of SQL strings, ordered so each
    table is created only after the tables its foreign keys reference.
    """
    # Registered users; boolean flags mark admin / politician roles.
    users = """
    CREATE TABLE IF NOT EXISTS users (
        id SERIAL PRIMARY KEY,
        firstname VARCHAR(20) NOT NULL,
        lastname VARCHAR(20) NOT NULL,
        othername VARCHAR(20),
        email VARCHAR(30) NOT NULL,
        phonenumber VARCHAR(24) NOT NULL,
        password VARCHAR(128) NOT NULL,
        passportUrl VARCHAR(256) NOT NULL,
        isAdmin BOOLEAN DEFAULT FALSE,
        isPolitician BOOLEAN DEFAULT FALSE
    );
    """
    # Revoked auth tokens, recorded at logout.
    blacklist = """
    CREATE TABLE IF NOT EXISTS blacklist (
        id SERIAL PRIMARY KEY,
        token TEXT NOT NULL,
        blacklisted_on TIMESTAMPTZ NOT NULL
    );
    """
    # Political parties candidates may run under.
    parties = """
    CREATE TABLE IF NOT EXISTS parties (
        id SERIAL PRIMARY KEY,
        party_name VARCHAR(50) NOT NULL,
        hq_address VARCHAR(100) NOT NULL,
        logo_url VARCHAR(256) NOT NULL
    );
    """
    # Elective offices that can be contested.
    offices = """
    CREATE TABLE IF NOT EXISTS offices (
        id SERIAL PRIMARY KEY,
        office_name VARCHAR(50) NOT NULL,
        office_type VARCHAR(50) NOT NULL,
        is_occupied BOOLEAN DEFAULT FALSE
    );
    """
    # Links a registered user to an office/party as a candidate.
    candidates = """
    CREATE TABLE IF NOT EXISTS candidates (
        office_id INTEGER NOT NULL,
        candidate_id INTEGER NOT NULL,
        party_id INTEGER NOT NULL,
        created_on TIMESTAMPTZ NOT NULL,
        -- define the FK constraints - to ensure the candidate is a a registered
        -- user and the office also exists
        FOREIGN KEY (office_id) REFERENCES offices (id) ON DELETE CASCADE,
        FOREIGN KEY (candidate_id) REFERENCES users (id) ON DELETE CASCADE,
        FOREIGN KEY (party_id) REFERENCES parties (id) ON DELETE CASCADE,
        -- define a composite primary key based on 2 Fields to ensure a candidate
        -- is not registered twice
        CONSTRAINT id PRIMARY KEY (office_id, candidate_id)
    );
    """
    # One vote per voter per office, cast for a registered candidate.
    votes = """
    CREATE TABLE IF NOT EXISTS votes(
        office_id INTEGER NOT NULL,
        candidate_id INTEGER NOT NULL,
        created_on TIMESTAMPTZ NOT NULL,
        created_by INTEGER NOT NULL,
        -- define the FK constraints - to ensure the candidate referenced and
        -- office exists in the candidates table
        FOREIGN KEY (office_id, candidate_id) REFERENCES
        candidates (office_id, candidate_id) ON DELETE CASCADE,
        FOREIGN KEY (created_by) REFERENCES users (id) ON DELETE CASCADE,
        -- composite primary key made up 2 fields ensuring a voters vote is not
        -- registered twice
        CONSTRAINT vote_id PRIMARY KEY (office_id, created_by)
    );
    """
    # Petitions disputing an election result for a given office/candidate.
    petitions = """
    CREATE TABLE IF NOT EXISTS petitions(
        id SERIAL PRIMARY KEY,
        office_id INTEGER NOT NULL,
        contested_by INTEGER NOT NULL,
        created_by INTEGER NOT NULL,
        body VARCHAR NOT NULL,
        evidence VARCHAR NOT NULL,
        created_on TIMESTAMPTZ NOT NULL,
        -- FK to ensure referential integrity is maintained
        -- disallow deleting a user if they have created a petition
        FOREIGN KEY (office_id, contested_by) REFERENCES
        candidates (office_id, candidate_id) ON DELETE CASCADE,
        FOREIGN KEY (created_by) REFERENCES users (id) ON DELETE RESTRICT
    );
    """
    # A user's request to run for an office, pending admin approval.
    applications = """
    CREATE TABLE IF NOT EXISTS applications(
        applicant_id INTEGER PRIMARY KEY,
        office_id INTEGER NOT NULL,
        party_id INTEGER NOT NULL,
        approved BOOLEAN DEFAULT FALSE,
        requested_on TIMESTAMPTZ NOT NULL,
        -- FK for referential integrity
        FOREIGN KEY (applicant_id) REFERENCES users (id) ON DELETE CASCADE,
        FOREIGN KEY (office_id) REFERENCES offices (id) ON DELETE CASCADE,
        FOREIGN KEY (party_id) REFERENCES parties (id) ON DELETE CASCADE
    );
    """
    # Dependency order: referenced tables first.
    return [users, blacklist, parties, offices, candidates, applications,
            votes, petitions]
from pathlib import Path
def is_local_file(filename, root_dir):
    """Tell whether *filename* lives somewhere under *root_dir*.

    Both paths are user-expanded (``~``) and resolved before comparison,
    so symlinks and relative segments are normalised first.

    See https://github.com/IDSIA/sacred/blob/master/sacred/dependencies.py

    Parameters
    ----------
    filename
    root_dir

    Returns
    -------
    bool
    """
    resolved_file = Path(filename).expanduser().resolve()
    resolved_root = Path(root_dir).expanduser().resolve()
    return resolved_root in resolved_file.parents
def _is_right(a, b, p):
"""given a line (defined by points a and b) and a point (p),
return true if p is to the right of the line and false otherwise
raises a ValueError if p lies is colinear with a and b
"""
ax, ay = a[0], a[1]
bx, by = b[0], b[1]
px, py = p[0], p[1]
value = (bx - ax) * (py - ay) - (by - ay) * (px - ax)
if value == 0:
raise ValueError(
"p is colinear with a and b, 'tis neither right nor left.")
return value < 0 | 6d630eadc77587de60ef6aefff3cac01d9ba3191 | 41,085 |
def rename(record, rename_page_id):
    """Rename page_id and normalise Wikipedia-API field names in place.

    Moves ``revid``/``user``/``userid``/``*`` to the internal names
    ``rev_id``/``user_text``/``user_id``/``text``, overwrites
    ``page_id`` with *rename_page_id*, and fills a few fields that the
    Wikipedia API omits (but downstream code may expect) with a
    placeholder so test and production records share one format.
    """
    field_map = {
        'revid': 'rev_id',
        'user': 'user_text',
        'userid': 'user_id',
        '*': 'text',
    }
    for old_name, new_name in field_map.items():
        record[new_name] = record.pop(old_name)
    record['page_id'] = rename_page_id
    for extra in ('format', 'model', 'user_ip', 'page_title', 'page_namespace'):
        record[extra] = 'placeholder'
    return record
def boundary(info, error, otype, oslots, rank):
    """Computes boundary data.

    For each slot, the nodes that start at that slot and the nodes that end
    at that slot are collected.

    Boundary data is used by the API functions
    `tf.core.locality.Locality.p`.
    and
    `tf.core.locality.Locality.n`.

    Parameters
    ----------
    info: function
        Method to write informational messages to the console.
    error: function
        Method to write error messages to the console.
    otype: iterable
        The data of the *otype* feature.
    oslots: iterable
        The data of the *oslots* feature.
    rank: tuple
        The data of the *rank* precompute step.

    Returns
    -------
    tuple
        * first: tuple of tuple
            The *n*-th member is the tuple of nodes that start at slot *n*,
            ordered in *reversed* canonical order (`tf.core.nodes`);
        * last: tuple of tuple
            The *n*-th member is the tuple of nodes that end at slot *n*,
            ordered in canonical order;

    Notes
    -----
    !!! hint "why reversed canonical order?"
        Just for symmetry.
    """
    (otype, maxSlot, maxNode, slotType) = otype
    oslots = oslots[0]
    firstSlotsD = {}
    lastSlotsD = {}
    for (node, slots) in enumerate(oslots):
        # oslots holds only non-slot nodes: entry 0 corresponds to node
        # maxSlot + 1, hence the offset.
        realNode = node + 1 + maxSlot
        # slots is assumed non-empty and ordered: slots[0] is where the
        # node starts, slots[-1] where it ends.
        firstSlotsD.setdefault(slots[0], []).append(realNode)
        lastSlotsD.setdefault(slots[-1], []).append(realNode)
    # rank is 0-based over all nodes, hence the `node - 1` index; negating
    # the rank key yields reversed canonical order for the starting nodes.
    firstSlots = tuple(
        tuple(sorted(firstSlotsD.get(n, []), key=lambda node: -rank[node - 1]))
        # array("I", sorted(firstSlotsD.get(n, []), key=lambda node: -rank[node - 1]))
        for n in range(1, maxSlot + 1)
    )
    lastSlots = tuple(
        tuple(sorted(lastSlotsD.get(n, []), key=lambda node: rank[node - 1]))
        # array("I", sorted(lastSlotsD.get(n, []), key=lambda node: rank[node - 1]))
        for n in range(1, maxSlot + 1)
    )
    return (firstSlots, lastSlots)
def standard_x(x_train, x_test=None):
    """
    Data normalization (centering and variance normalization).

    Subtracts the column means of ``x_train`` and divides by its column
    standard deviations; the same training statistics are applied to
    ``x_test`` when given. (Inputs are expected to expose pandas-style
    ``.mean``/``.std``/``.values`` -- presumably DataFrames despite the
    ``np.ndarray`` type notes below.)

    :param x_train: training data
    :type x_train: np.ndarray
    :param x_test: testing data
    :type x_test: np.ndarray | None
    :return: normalized data
    :rtype: np.ndarray | (np.ndarray, np.ndarray)
    """
    center = x_train.mean(axis=0)
    spread = x_train.std(axis=0)
    scaled_train = ((x_train - center) / spread).values
    if x_test is None:
        return scaled_train
    scaled_test = ((x_test - center) / spread).values
    return scaled_train, scaled_test
import argparse
def parse_args():
    """Parse command-line arguments for the Cas13 model-selection run.

    Returns:
        argparse.Namespace with dataset/task options, cross-validation
        settings and the random seed (also echoed to stdout).
    """
    # Parse arguments
    parser = argparse.ArgumentParser()
    # --- Dataset and subset selection ---
    parser.add_argument('--dataset',
            choices=['cas13'],
            default='cas13',
            help=("Dataset to use."))
    parser.add_argument('--cas13-subset',
            choices=['exp', 'pos', 'neg', 'exp-and-pos'],
            help=("Use a subset of the Cas13 data. See parse_data module "
                  "for descriptions of the subsets. To use all data, do not "
                  "set."))
    # --- Task configuration: classification vs. regression variants ---
    parser.add_argument('--cas13-classify',
            action='store_true',
            help=("If set, only classify Cas13 activity into inactive/active"))
    parser.add_argument('--cas13-regress-on-all',
            action='store_true',
            help=("If set, perform regression for Cas13 data on all data "
                  "(this can be reduced using --cas13-subset)"))
    parser.add_argument('--cas13-regress-only-on-active',
            action='store_true',
            help=("If set, perform regression for Cas13 data only on the "
                  "active class"))
    # --- Activity normalization options ---
    parser.add_argument('--cas13-normalize-crrna-activity',
            action='store_true',
            help=("If set, normalize the activity of each crRNA (guide) "
                  "across its targets to have mean 0 and stdev 1; this means "
                  "prediction is performed based on target differences (e.g., "
                  "mismatches) rather than inherent sequence of the crRNA"))
    parser.add_argument('--cas13-use-difference-from-wildtype-activity',
            action='store_true',
            help=("If set, use the activity value of a guide g and target t "
                  "pair to be the difference between the measured activity of "
                  "g-t and the mean activity between g and all wildtype "
                  "(matching) targets of g; this means prediction is "
                  "performed based on targeted differences (e.g., mismatches) "
                  "rather than inherent sequence of the crRNA"))
    # --- Feature construction and scoring ---
    parser.add_argument('--context-nt',
            type=int,
            default=10,
            help=("nt of target sequence context to include alongside each "
                  "guide"))
    parser.add_argument('--regression-scoring-method',
            choices=['mse', 'rho'],
            default='rho',
            help=("Method to use for scoring regression results; 'mse' for "
                  "mean squared error, 'rho' for Spearman rank correlation"))
    # --- Train/test split and model selection ---
    parser.add_argument('--test-split-frac',
            type=float,
            default=0.3,
            help=("Fraction of the dataset to use for testing the final "
                  "model"))
    parser.add_argument('--models-to-use',
            nargs='+',
            help=("List of model names to use. If not set, use all."))
    # --- Nested cross-validation settings ---
    parser.add_argument('--nested-cross-val',
            action='store_true',
            help=("If set, perform nested cross-validation to evaluate "
                  "model selection, rather than just cross-validation to "
                  "select a single model"))
    parser.add_argument('--nested-cross-val-outer-num-splits',
            type=int,
            default=5,
            help=("Number of outer folds to use for nested cross-validation"))
    parser.add_argument('--nested-cross-val-out-tsv',
            help=("Path to output TSV at which to write metrics on the "
                  "validation data for each outer fold of nested "
                  "cross-validation (one row per outer fold; each column "
                  "gives a metric)"))
    parser.add_argument('--nested-cross-val-feat-coeffs-out-tsv',
            help=("Path to output TSV at which to write a coefficient for "
                  "each feature (only linear models) for each outer fold "
                  "of nested cross-validation"))
    parser.add_argument('--nested-cross-val-run-for',
            nargs='+',
            type=int,
            help=("If set, only run the given outer splits (0-based). If "
                  "not set, run for all."))
    parser.add_argument('--seed',
            type=int,
            default=1,
            help=("Random seed"))
    args = parser.parse_args()
    # Print the arguments provided
    print(args)
    return args
def _is_pyfar_type(obj):
""" True if object is a Pyfar-type.
"""
type_str = obj if isinstance(obj, str) else type(obj).__name__
return type_str in [
'Orientations',
'Coordinates',
'Signal',
'Filter',
'FilterFIR',
'FilterIIR',
'FilterSOS',
'SphericalVoronoi',
'TimeData',
'FrequencyData'] | 429f81ddd2492814febd337d071dcd591163742a | 41,090 |
def _offset(offset, size):
"""Calculate the start of a member of `size` after `offset` within a
struct."""
return ((size - (offset % size)) % size) + offset | 97e277def96dab568d6f2cbe1fd7111d0cac6427 | 41,091 |
def precision_to_string(precision):
    """Translates a precision number (represented as Python string) into a
    descriptive string.

    Args:
        precision: one of "16", "32", "64", "3232", "6464".

    Returns:
        The human-readable precision name.

    Raises:
        ValueError: for an unrecognised precision string. (The original
        code did `raise("Unknown precision: ...")`, which raises a plain
        str and therefore fails with "TypeError: exceptions must derive
        from BaseException" instead of a meaningful error.)
    """
    names = {
        "16": "Half",
        "32": "Single",
        "64": "Double",
        "3232": "ComplexSingle",
        "6464": "ComplexDouble",
    }
    try:
        return names[precision]
    except KeyError:
        raise ValueError("Unknown precision: " + precision) from None
def show_getter_setter_method(input_object):
    """
    Batch-generate Python source for the @property getter and setter of
    every attribute in *input_object*'s instance dict.

    Attributes whose name contains "logger" are skipped, and a single
    leading underscore is stripped so `_name` becomes property `name`
    backed by `self._name`.

    :param input_object: instance whose __dict__ drives the generation
    :return: the generated getter/setter blocks joined with newlines
    """
    template = '''
    @property
    def {key}(self):
        return self._{key}
    @{key}.setter
    def {key}(self, value):
        self._{key} = value
    '''
    ret = []
    for key in input_object.__dict__:
        # logging handles are implementation detail, not data attributes
        if "logger" in key: continue
        input_key = key
        if "_" == key[0]:
            # drop the leading underscore so the property name is public
            input_key = key[1:]
        ret.append( template.format(key=input_key))
    return "\n".join(ret)
import os
def files_list(path):
    """
    Return the directory entries found in `path` (files and
    subdirectories alike), in the order the OS reports them.
    """
    entries = os.listdir(path)
    return entries
def get_ideological_topic_means(objective_topic_loc,
                                objective_topic_scale,
                                ideological_topic_loc,
                                ideological_topic_scale):
    """Returns neutral and ideological topics from variational parameters.

    For each (k,v) we want E[beta_kv], E[beta_kv * exp(eta_kv)] and
    E[beta_kv * exp(-eta_kv)] under the variational distributions, where
    beta is the objective topic (lognormal with location mu_b, scale
    sigma_b) and eta the ideological topic (Gaussian with location mu_e,
    scale sigma_e). By the lognormal mean and the mean-field assumption:

        E[beta]             = exp(mu_b + sigma_b^2 / 2)
        E[beta * exp(eta)]  = exp(mu_b + mu_e + (sigma_b^2 + sigma_e^2) / 2)
        E[beta * exp(-eta)] = exp(mu_b - mu_e + (sigma_b^2 + sigma_e^2) / 2)

    Only the orderings of topics matter, so the exponent arguments are
    returned directly (the exp is dropped).

    Args:
        objective_topic_loc: Lognormal location for beta,
            shape [num_topics, num_words].
        objective_topic_scale: Lognormal scale for beta (positive),
            shape [num_topics, num_words].
        ideological_topic_loc: Gaussian location for eta,
            shape [num_topics, num_words].
        ideological_topic_scale: Gaussian scale for eta (positive),
            shape [num_topics, num_words].

    Returns:
        neutral_mean: [num_topics, num_words] variational mean for the
            neutral topics.
        positive_mean: [num_topics, num_words] variational mean for the
            ideological topics at ideal point +1.
        negative_mean: [num_topics, num_words] variational mean for the
            ideological topics at ideal point -1.
    """
    # (sigma_b^2 + sigma_e^2) / 2 is shared by both ideological means.
    combined_variance = (objective_topic_scale ** 2 +
                         ideological_topic_scale ** 2) / 2
    neutral_mean = objective_topic_loc + objective_topic_scale ** 2 / 2
    positive_mean = (objective_topic_loc + ideological_topic_loc +
                     combined_variance)
    negative_mean = (objective_topic_loc - ideological_topic_loc +
                     combined_variance)
    return neutral_mean, positive_mean, negative_mean
def get_gamma_function(gamma):
    """
    :param gamma: desired factor gamma
    :return: a callable mapping a channel value in [0, 255] through the
        gamma curve ``255 * (x / 255) ** gamma``
    """
    def adjust(x):
        return pow(x / 255, gamma) * 255
    return adjust
import re
def replace_initials(s):
    """
    For a string s, collapse every initial of the form 'A.' (capital
    letter followed by a period) into the bare letter.

    :param s:
    :return: string with replacements made
    """
    return re.sub(r'[A-Z]\.', lambda match: match.group(0)[0], s)
def me_check(pr, fa, ma, ref_count=False):
    """
    Simplest possible Mendelian-error check on sample GT fields.

    Returns -1 for skipped sites ('.' calls anywhere, or hom-ref in all
    three samples unless ref_count is True), 0 when the proband's two
    alleles can each be inherited from father and mother respectively
    (in either orientation), and 1 for an inconsistency.
    """
    def alleles(sample):
        # GT is the first colon-separated field; alleles are '/'-separated
        return sample.split(':')[0].split('/')
    child = alleles(pr)
    father = alleles(fa)
    mother = alleles(ma)
    if '.' in child + father + mother:
        return -1
    hom_ref = ["0", "0"]
    if not ref_count and child == father == mother == hom_ref:
        return -1
    if child[0] in father and child[1] in mother:
        return 0
    if child[1] in father and child[0] in mother:
        return 0
    return 1
def get_model_queue_data(job):
    """
    Formats the queued model data to return to the server.

    :return: (id:int, model_id:int, title:str, progress:int, max_progress:int)
    """
    return (
        job.get_id(),
        job.get_info().get("model_id"),
        job.get_title(),
        job.get_progress(),
        job.get_max_progress(),
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.