| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
def diff_smf(mstar_arr, volume, h1_bool, colour_flag=False):
"""
Calculates differential stellar mass function in units of h=1.0
Parameters
----------
mstar_arr: numpy array
Array of stellar masses
volume: float
Volume of survey or simulation
h1_bool: boolean
        True if masses are already in h=1 units, False otherwise
    colour_flag (optional): string or boolean
        'R' if galaxy masses correspond to red galaxies, 'B' if galaxy masses
        correspond to blue galaxies. Defaults to False (all galaxies).
Returns
---------
maxis: array
Array of x-axis mass values
phi: array
Array of y-axis values
err_tot: array
Array of error values per bin
bins: array
Array of bin edge values
counts: array
        Array of the number of galaxies in each bin
"""
if not h1_bool:
# changing from h=0.7 to h=1 assuming h^-2 dependence
logmstar_arr = np.log10((10**mstar_arr) / 2.041)
else:
logmstar_arr = np.log10(mstar_arr)
if survey == 'eco' or survey == 'resolvea':
bin_min = np.round(np.log10((10**8.9) / 2.041), 1)
if survey == 'eco' and colour_flag == 'R':
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 6
elif survey == 'eco' and colour_flag == 'B':
bin_max = np.round(np.log10((10**11) / 2.041), 1)
bin_num = 6
elif survey == 'resolvea':
# different to avoid nan in inverse corr mat
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 7
else:
# For eco total
bin_max = np.round(np.log10((10**11.5) / 2.041), 1)
bin_num = 7
bins = np.linspace(bin_min, bin_max, bin_num)
elif survey == 'resolveb':
bin_min = np.round(np.log10((10**8.7) / 2.041), 1)
bin_max = np.round(np.log10((10**11.8) / 2.041), 1)
bins = np.linspace(bin_min, bin_max, 7)
# Unnormalized histogram and bin edges
counts, edg = np.histogram(logmstar_arr, bins=bins) # paper used 17 bins
dm = edg[1] - edg[0] # Bin width
maxis = 0.5 * (edg[1:] + edg[:-1]) # Mass axis i.e. bin centers
# Normalized to volume and bin width
err_poiss = np.sqrt(counts) / (volume * dm)
err_tot = err_poiss
phi = counts / (volume * dm) # not a log quantity
phi = np.log10(phi)
return maxis, phi, err_tot, bins, counts
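
# Usage sketch (not part of the original snippet). diff_smf reads a module-level
# `survey` global ('eco', 'resolvea' or 'resolveb') and expects numpy as np; the
# volume below is a placeholder, not a real survey volume.
import numpy as np

survey = 'eco'  # assumed visible to diff_smf (i.e. defined in the same module)
log_mstar = np.random.uniform(9.0, 11.4, size=5000)  # log10 stellar masses, h=0.7 units
maxis, phi, err_tot, bins, counts = diff_smf(log_mstar, volume=1.0e5, h1_bool=False)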
|
9152a86023c78e47ae0813c489c897800140f174
| 3,644,300
|
def get_parameter(dbutils, parameter_name: str, default_value='') -> str:
"""Creates a text widget and gets parameter value. If ran from ADF, the value is taken from there."""
dbutils.widgets.text(parameter_name, default_value)
return dbutils.widgets.get(parameter_name)
|
cf8359e6acea68ea26e24cc656847e5560019bd1
| 3,644,301
|
def single_init(cfg: GenomeConfig):
"""Random initialized floating GRU value, calculated via a normal distribution."""
return clip(gauss(cfg.gru_init_mean, cfg.gru_init_stdev), a_min=cfg.gru_min_value, a_max=cfg.gru_max_value)
|
a72e534259a0d3e0fa3f3081b049bc8c5c316686
| 3,644,302
|
def get_recent_articles(request):
"""
    Get recently updated articles.
"""
user = get_login_user(request)
recommend = request.POST.get('recommend', 'recommend')
if recommend == 'unrecommend':
articles = Article.objects.raw(get_other_articles_sql)
elif recommend == 'recommend':
articles = Article.objects.raw(get_recommend_articles_sql)
    else:
        logger.warning(f'Unknown recommend type: {recommend}')
        articles = []
user_sub_feeds = []
if user:
user_sub_feeds = get_user_sub_feeds(user.oauth_id)
context = dict()
context['articles'] = articles
context['user'] = user
context['user_sub_feeds'] = user_sub_feeds
return render(request, 'explore/recent_articles.html', context=context)
|
9ea03f931d67c669f99a87a27ec88bb78a7cd7e2
| 3,644,303
|
import copy
def add_close_export_to_cell(cell):
"""
Adds an HTML comment to close question export for PDF filtering to the top of ``cell``. ``cell``
should be a Markdown cell. This adds ``<!-- END QUESTION-->`` as the first line of the cell.
Args:
cell (``nbformat.NotebookNode``): the cell to add the close export to
Returns:
``nbformat.NotebookNode``: the cell with the close export comment at the top
"""
cell = copy.deepcopy(cell)
source = get_source(cell)
source = ["<!-- END QUESTION -->\n", "\n"] + source
cell['source'] = "\n".join(source)
return cell
|
4fa7d83a8c262979b2d3ef95ddc7c0c50c7e68f7
| 3,644,304
|
def get_ram_list_linux():
"""Get RAM list using dmidecode."""
cmd = ['sudo', 'dmidecode', '--type', 'memory']
dimm_list = []
manufacturer = 'Unknown'
size = 0
# Get DMI data
proc = run_program(cmd)
dmi_data = proc.stdout.splitlines()
# Parse data
for line in dmi_data:
line = line.strip()
if line == 'Memory Device':
# Reset vars
manufacturer = 'Unknown'
size = 0
elif line.startswith('Size:'):
size = line.replace('Size: ', '')
try:
size = string_to_bytes(size, assume_binary=True)
except ValueError:
# Assuming empty module
size = 0
elif line.startswith('Manufacturer:'):
manufacturer = line.replace('Manufacturer: ', '')
dimm_list.append([size, manufacturer])
# Save details
return dimm_list
|
3511ea9f5e09ae467c7d9cb7c42f83741a431eda
| 3,644,305
|
def get_capability_list(capability=esdl.Producer):
"""Returns a list of all subtypes of the specified capability.
Used to get a list of e.g. all producers in ESDL
The list is automatically generated based on the ESDL meta model"""
subtype_list = list()
for eclassifier in esdl.eClass.eClassifiers:
if isinstance(eclassifier, EClass):
if capability.eClass in eclassifier.eAllSuperTypes() and not eclassifier.abstract:
subtype_list.append(eclassifier.name)
subtype_list.sort()
return subtype_list
|
700dec944e8a1185c5763a6b32c08fe553f35459
| 3,644,306
|
import errno
def _get_exec_binary(binary, kw):
"""
On win32, the subprocess module can only reliably resolve the
target binary if it's actually a binary; as for a Node.js script
it seems to only work iff shell=True was specified, presenting
    a security risk. Resolve the target manually through ``which``,
    which will account for that.
The kw argument is the keyword arguments that will be passed into
whatever respective subprocess.Popen family of methods. The PATH
environment variable will be used if available.
"""
binary = which(binary, path=kw.get('env', {}).get('PATH'))
if binary is None:
raise_os_error(errno.ENOENT)
return binary
|
654d8f01419712ac774e0f7c5d4b02b9219d3153
| 3,644,307
|
# note: `site(x, y, Lx, Ly)` below is a lattice-indexing helper defined elsewhere, not the stdlib `site` module
def init_SSE_square(Lx, Ly):
"""Initialize a starting configuration on a 2D square lattice."""
n_sites = Lx*Ly
# initialize spins randomly with numbers +1 or -1, but the average magnetization is 0
spins = 2*np.mod(np.random.permutation(n_sites), 2) - 1
op_string = -1 * np.ones(10, np.intp) # initialize with identities
bonds = []
for x0 in range(Lx):
for y0 in range(Ly):
s0 = site(x0, y0, Lx, Ly)
s1 = site(np.mod(x0+1, Lx), y0, Lx, Ly) # bond to the right
bonds.append([s0, s1])
s2 = site(x0, np.mod(y0+1, Ly), Lx, Ly) # bond to the top
bonds.append([s0, s2])
bonds = np.array(bonds, dtype=np.intp)
return spins, op_string, bonds
|
0c0681b3a28680ed4acf6e2d7ed5719031df948b
| 3,644,308
|
from scipy import signal
def filter_signal(eeg_df, iqrs, dic_filt_opts):
"""
Filter signal
"""
all_labels = list(eeg_df.columns)
# check the order of labels
label_grouped = False
if all_labels[0].split('.')[-1] == all_labels[1].split('.')[-1]:
label_grouped = True
data_labels = all_pow_nodes
meta_labels = [lab for lab in all_labels if lab not in data_labels]
eeg_pow_filt = []
for phase in eeg_df.phase.unique():
print('\t',phase)
sub = eeg_df.loc[ (eeg_df.phase == phase), :].copy()
sub = sub.reset_index(drop=True)
meta = sub[meta_labels].values # [N, ]
data = sub[data_labels].values # always [N,70]
if dic_filt_opts['per_phases']:
th_up_all = iqrs[(dic_filt_opts['datafiltset'], phase)] # OLDER ORDER
else:
th_up_all = iqrs[(dic_filt_opts['datafiltset'], dic_filt_opts['setphase'])] # OLDER ORDER
if label_grouped:
th_up_all = iqr_by_group(th_up_all) # group iqrs
print('\tFiltering --> nodes are grouped')
m_thresh = np.repeat([np.array(th_up_all)], data.shape[0], axis=0)
mask = data > m_thresh
data[mask] = m_thresh[mask] / 2.
# median filter applying
        for rr in range(data.shape[1]):  # by columns (70 cols = 14 channels * 5 waves)
data[:, rr] = signal.medfilt(data[:, rr], kernel_size=3)
df = pd.DataFrame(np.concatenate((data, meta), axis=1), columns=data_labels + meta_labels)
eeg_pow_filt.append(df)
del df
eeg_pow_filt = pd.concat(eeg_pow_filt, axis=0, ignore_index=True)
return eeg_pow_filt
|
f9dd9108d3c17a59eaae9fe509a88a6c3be55db2
| 3,644,309
|
from copy import deepcopy
from typing import List
def get_sym_inequiv_components(
components: List[Component], spg_analyzer: SpacegroupAnalyzer
) -> List[Component]:
"""Gets and counts the symmetrically inequivalent components.
Component data has to have been generated with ``inc_site_ids=True``.
Args:
components: A list of structure components, generated using
:obj:`pymatgen.analysis.dimensionality.get_structure_components`,
with ``inc_site_ids=True``.
spg_analyzer: A `pymatgen.symmetry.analyzer.SpacegroupAnalyzer` analyzer
object for the structure containing the components.
Returns:
A list of the symmetrically inequivalent components. Any duplicate
components will only be returned once. The component objects are in the
same format is given by
:obj:`pymatgen.analysis.dimensionality.get_structure_components` but
the additional property:
- ``"count"`` (:obj:`int`): The number of times this component appears
in the structure.
"""
components = deepcopy(components)
sym_inequiv_components = {}
equivalent_atoms = spg_analyzer.get_symmetry_dataset()["equivalent_atoms"]
for component in components:
sym_indices = frozenset(equivalent_atoms[x] for x in component["site_ids"])
# if two components are composed of atoms that are symmetrically
# equivalent they are the same.
if sym_indices in sym_inequiv_components:
sym_inequiv_components[sym_indices]["count"] += 1
continue
component["count"] = 1
sym_inequiv_components[sym_indices] = component
return list(sym_inequiv_components.values())
|
5ce83345712ac336a6af2b02421bfef9a62bbf0f
| 3,644,310
|
def aic(llf, nobs, df_modelwc):
"""
Akaike information criterion
Parameters
----------
llf : {float, array_like}
value of the loglikelihood
nobs : int
number of observations
df_modelwc : int
number of parameters including constant
Returns
-------
aic : float
information criterion
References
----------
https://en.wikipedia.org/wiki/Akaike_information_criterion
"""
return -2.0 * llf + 2.0 * df_modelwc
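
# Quick numeric check of the formula with made-up values (not from any real fit):
# a log-likelihood of -134.2 and 3 parameters (including the constant) give
# -2*(-134.2) + 2*3 = 274.4; nobs is accepted but not used by this variant.
print(aic(llf=-134.2, nobs=50, df_modelwc=3))  # 274.4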
|
3940c1c86325630248fdf4a50c2aa19b4f4df623
| 3,644,311
|
def summarize_logs(df, wells, cat, props, sr=0.5):
"""
Function to calculate petrophysical summaries based on well and categorical data. All logs averaged with simple
arithmetic means (maybe supply log permeability to have a better averaged estimation)
Parameters:
        df (pd.DataFrame): dataframe containing well log data; apply appropriate filters in advance to provide net logs
wells (string): column with well names in the logs dataframe
cat (string): column with filtering discrete property in the logs dataframe
props (list:string): list of properties (logs) to be summarized
sr (float): log sampling rate in project units for net thickness calculations
Returns:
summ (pd.DataFrame): dataframe with summarized data
"""
col_list = []
col_list.append(wells)
col_list.append(cat)
[col_list.append(i) for i in props]
df1 = df[col_list].dropna(axis=0, how='any')
col_list.append('NetH')
summ = pd.DataFrame(columns=col_list)
idx = 0
for well in df1[wells].unique():
for cat_ in df1[cat].unique():
summ.loc[idx, [wells, cat]] = [well, cat_]
summ.loc[idx, props] = df1[(df1[wells]==well)&(df1[cat]==cat_)][props].mean()
summ.loc[idx, 'NetH'] = df1[(df1[wells]==well)&(df1[cat]==cat_)][props[0]].count() * sr
idx += 1
for col in summ.columns:
if col not in [wells, cat]:
summ[col] = pd.to_numeric(summ[col], errors='ignore')
return summ
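
# Usage sketch with a toy log table (column names are invented for illustration);
# assumes pandas is available as pd in this module.
import pandas as pd

logs = pd.DataFrame({
    'WELL':   ['W1', 'W1', 'W1', 'W1', 'W2', 'W2', 'W2', 'W2'],
    'FACIES': ['sand', 'sand', 'shale', 'sand', 'sand', 'shale', 'shale', 'sand'],
    'PHI':    [0.21, 0.19, 0.05, 0.22, 0.18, 0.04, 0.06, 0.17],
})
summary = summarize_logs(logs, wells='WELL', cat='FACIES', props=['PHI'], sr=0.5)
print(summary)  # mean PHI and net thickness (sample count * sr) per well and facies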
|
3a48fa7d1efc83a8216f01505a645d37b173ecbb
| 3,644,312
|
import math
def run_trap_harvesting(prev_values = [], selected_harvest= 0, radius= default_radius, height= default_height, slope= default_slope, delta= default_delta, constant_population= True):
"""Runs the model for one harvesting cycle. Where a harvesting cycle is period of time ending in the next low tide in which the trap is closed with fish inside.
Args:
prev_values is an array of arrays with:
            [0]: The total number of harvested fish at the hour indexed
            [1]: The total number of fish in the trap at the hour indexed
            [2]: The total number of fish outside the trap at the hour indexed
            [3]: List of the sizes of all harvests
        The values in this array are the history of the model. If the model is being run from the start, pass in [].
selected_harvest: how many fish will be harvested this cycle. This is to be user selected
radius: the radius of the semi-circular trap created
height: the height of the trap
slope: slope of the beach
delta: how far down the y axis the "center" of the semi-circle is from the origin
constant_population: if true the population will reset to max_fish after every harvest, else it will decrease by the number of harvested fish
Returns:
        A 2D array containing:
            [0]: The total number of harvested fish at the hour indexed
            [1]: The total number of fish in the trap at the hour indexed
            [2]: The total number of fish outside the trap at the hour indexed
            [3]: List of the sizes of all harvests
            [4]: A boolean showing whether the model is completed
        This returned array shows one more cycle of harvesting than the input one.
Throws:
        ValueError if selected_harvest is not a positive integer <= the number of fish in the trap
"""
movement_rate = 0.025
max_fish = 1000
perimeter_ratio = (np.pi * radius) / (np.pi * 25)
tide_values = get_tide_values()
perimeter = get_perimeter(radius, height, delta, slope)
height_adjustment =1 / min(1, height / 4)
#TODO
#if allowing users to input arbitrary values check that all the user inputs are within reasonable bounds or throw an error if they are not
if(len(prev_values) == 0):
#if the model is just starting
current_free_fish = max_fish
current_caught_fish = 0
total_harvested = [0]
in_trap = [0]
out_trap = [max_fish]
catches = []
else:
#update the model with the harvest the user selected
total_harvested = prev_values[0]
in_trap = prev_values[1]
out_trap = prev_values[2]
catches = prev_values[3]
current_free_fish = out_trap[-1]
current_caught_fish = in_trap[-1]
try:
selected_harvest = int(selected_harvest)
except ValueError:
raise ValueError("selected_harvest must be a positive integer not larger than the number of fish in the trap")
if(selected_harvest > current_caught_fish or selected_harvest < 0):
raise ValueError("selected_harvest must be a positive integer not larger than the number of fish in the trap")
catches.append(selected_harvest)
level = tide_values[len(in_trap) - 1]
coverage = get_ratio_of_perimeter_covered(level, perimeter, radius)
free_to_caught = current_free_fish * coverage * movement_rate * perimeter_ratio
caught_to_free = current_caught_fish * coverage * movement_rate * perimeter_ratio * height_adjustment
current_caught_fish = current_caught_fish - caught_to_free + free_to_caught
current_free_fish = current_free_fish + caught_to_free - free_to_caught
if(constant_population):
current_free_fish = max_fish
else:
current_free_fish = current_free_fish + (current_caught_fish - selected_harvest)
total_harvested.append(total_harvested[-1] + selected_harvest)
#empty the traps and record the step after the selected harvest
current_caught_fish = 0
in_trap.append(current_caught_fish)
out_trap.append(current_free_fish)
#drop tide values already ran
tide_values = tide_values[len(in_trap) - 1 : len(tide_values)]
for level in tide_values:
coverage = get_ratio_of_perimeter_covered(level, perimeter, radius)
if(math.floor(current_caught_fish) != 0 and coverage == 0):
return [total_harvested, in_trap, out_trap, catches, False]
free_to_caught = current_free_fish * coverage * movement_rate * perimeter_ratio
caught_to_free = current_caught_fish * coverage * movement_rate * perimeter_ratio
current_caught_fish = current_caught_fish - caught_to_free + free_to_caught
current_free_fish = current_free_fish + caught_to_free - free_to_caught
total_harvested.append(total_harvested[-1])
in_trap.append(current_caught_fish)
out_trap.append(current_free_fish)
return [total_harvested, in_trap, out_trap, catches, True]
|
ed153837e4e3d538fa483ca2bba6eb68f98319a2
| 3,644,313
|
def get_tfpn_mean(targets, predictions):
"""
    Given targets and predictions, return TP, FN, FP, TN aggregated over all classes.
    :param targets: ground-truth labels
    :param predictions: predicted labels
    :return: (TP, FN, FP, TN)
"""
cm = confusion_matrix(targets, predictions)
total = np.array(cm).sum()
TP = cm.diagonal().sum()
FN = total - TP
FP = FN
TN = total * len(cm) - TP - FN - FP
return TP, FN, FP, TN
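
# Toy sketch, assuming `confusion_matrix` above is sklearn's and `np` is numpy;
# note the returned values are totals summed over classes, not per-class means.
targets     = [0, 0, 1, 1, 2, 2]
predictions = [0, 1, 1, 1, 2, 0]
print(get_tfpn_mean(targets, predictions))  # expected (4, 2, 2, 10) for this toy case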
|
06c3b184c1b35a22eb3594e934c3aef8e278ebaa
| 3,644,314
|
def cal_deltaE00_from_LCh(LCh_1, Lab_2):
"""
Calculate the color difference :math:`\Delta E_{00}` between two given colorspace arrays.
:param LCh_1: array-like
:param Lab_2: array-like
:return: numeric or ndarray
"""
Lab_1 = LCh2Lab(LCh_1)
return deltaE00(Lab_1, Lab_2)
|
b774be88ad2ca7032f0471963b2720b0e6ecf5f7
| 3,644,315
|
def get_var_type_glue(vtype):
"""Get glue module from variable's type.
Parameters
----------
vtype: data type
Returns
-------
Glue Module if glue exists, otherwise None.
"""
global DTYPE_TO_GLUE, PKG_NAME_TO_GLUE_ARGS
glue_mod = DTYPE_TO_GLUE.get(vtype, None)
if glue_mod is not None:
return glue_mod
pkg_name = vtype.__module__.split('.')[0]
if pkg_name not in PKG_NAME_TO_GLUE_ARGS:
return None
# try to register glue_mod
_register_glue_real(*PKG_NAME_TO_GLUE_ARGS[pkg_name])
return DTYPE_TO_GLUE.get(vtype, None)
|
d7ba7798286142b70e0dbd8e938f2e3a4ae0423e
| 3,644,316
|
def contract_TRG(state, svd_option_1st=None, svd_option_rem=None):
"""
Contract the PEPS using Tensor Renormalization Group.
Parameters
----------
svd_option_1st: tensorbackends.interface.Option, optional
Parameters for the first SVD in TRG. Will default to tensorbackends.interface.ReducedSVD() if not given.
svd_option_rem: tensorbackends.interface.Option, optional
Parameters for the remaining SVD truncations. Will perform SVD if given.
Returns
-------
output: state.backend.tensor or scalar
The contraction result.
References
----------
https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.99.120601
https://journals.aps.org/prb/abstract/10.1103/PhysRevB.78.205116
"""
# base case
if state.shape <= (2, 2):
return contract_BMPS(state, svd_option_rem)
# SVD each tensor into two
tn = np.empty(state.shape + (2,), dtype=object)
for (i, j), tsr in np.ndenumerate(state.grid):
str_uv = 'abi,icdpq' if (i+j) % 2 == 0 else 'aidpq,bci'
tn[i,j,0], _, tn[i,j,1] = state.backend.einsumsvd(
'abcdpq->' + str_uv, tsr,
option=svd_option_1st or ReducedSVD(),
absorb_s='even'
)
tn[i,j,(i+j)%2] = tn[i,j,(i+j)%2].reshape(*(tn[i,j,(i+j)%2].shape + (1, 1)))
return _contract_TRG(state, tn, svd_option_rem)
|
0e6876c778e6a2df552a6de8b3253d7a860e1987
| 3,644,317
|
def riccati_3(nmax,x):
"""Riccati bessel function of the 3rd kind
    returns (r3, r3') for n=1,...,nmax"""
x = np.asarray(x)
result = np.zeros((2,nmax) + x.shape, dtype=complex)
for n in range(nmax):
yn = special.spherical_yn(n+1,x)
ynp = special.spherical_yn(n+1,x, derivative=True)
result[0,n] = x*yn
result[1,n] = yn + x*ynp
return result
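
# Usage sketch (assumes `special` is scipy.special and `np` is numpy in this module):
x = np.array([1.0, 2.5])
r3 = riccati_3(3, x)
print(r3.shape)  # (2, 3, 2): [value, derivative] x orders n=1..3 x the two x points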
|
32d344e8ac4e1f01bbe5605dd4c9a6563497ac71
| 3,644,318
|
def conv_batch_relu_forward(x, w, b, gamma, beta, conv_param, bn_param):
"""
Convenience layer that performs a convolution, a batch, and a ReLU.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
- gamma, beta, bn_param : batch norm parameters
Returns a tuple of:
- out: Output from the pooling layer
- cache: Object to give to the backward pass
"""
convOut, conv_cache = layers.conv_forward(x, w, b, conv_param)
normOut, norm_cache = layers.spatial_batchnorm_forward(convOut, gamma, beta, bn_param)
out, relu_cache = layers.relu_forward(normOut)
cache = (conv_cache, norm_cache, relu_cache)
return out, cache
|
8c306a2337307ec68aa5a536b4aef0dc4f34cf39
| 3,644,319
|
def hex_layout(npos, width, rotate=None):
"""Compute positions in a hexagon layout.
Place the given number of positions in a hexagonal layout projected on
the sphere and centered at z axis. The width specifies the angular
extent from vertex to vertex along the "X" axis. For example::
Y ^ O O O
| O O O O
| O O + O O
+--> X O O O O
O O O
Each position is numbered 0..npos-1. The first position is at the center,
and then the positions are numbered moving outward in rings.
Args:
npos (int): The number of positions packed onto wafer.
width (float): The angle (in degrees) subtended by the width along
the X axis.
rotate (array, optional): Optional array of rotation angles in degrees
to apply to each position.
Returns:
(array): Array of quaternions for the positions.
"""
zaxis = np.array([0, 0, 1], dtype=np.float64)
nullquat = np.array([0, 0, 0, 1], dtype=np.float64)
sixty = np.pi/3.0
thirty = np.pi/6.0
rtthree = np.sqrt(3.0)
rtthreebytwo = 0.5 * rtthree
angdiameter = width * np.pi / 180.0
# find the angular packing size of one detector
nrings = hex_nring(npos)
posdiam = angdiameter / (2 * nrings - 2)
result = np.zeros((npos, 4), dtype=np.float64)
for pos in range(npos):
if pos == 0:
# center position has no offset
posrot = nullquat
else:
# Not at the center, find ring for this position
test = pos - 1
ring = 1
while (test - 6 * ring) >= 0:
test -= 6 * ring
ring += 1
sectors = int(test / ring)
sectorsteps = np.mod(test, ring)
# Convert angular steps around the ring into the angle and distance
# in polar coordinates. Each "sector" of 60 degrees is essentially
# an equilateral triangle, and each step is equally spaced along
# the edge opposite the vertex:
#
# O
# O O (step 2)
# O O (step 1)
# X O O O (step 0)
#
# For a given ring, "R" (center is R=0), there are R steps along
# the sector edge. The line from the origin to the opposite edge
# that bisects this triangle has length R*sqrt(3)/2. For each
# equally-spaced step, we use the right triangle formed with this
# bisection line to compute the angle and radius within this
# sector.
# The distance from the origin to the midpoint of the opposite
# side.
midline = rtthreebytwo * float(ring)
# the distance along the opposite edge from the midpoint (positive
# or negative)
edgedist = float(sectorsteps) - 0.5 * float(ring)
# the angle relative to the midpoint line (positive or negative)
relang = np.arctan2(edgedist, midline)
# total angle is based on number of sectors we have and the angle
# within the final sector.
posang = sectors * sixty + thirty + relang
posdist = rtthreebytwo * posdiam * float(ring) / np.cos(relang)
posx = np.sin(posdist) * np.cos(posang)
posy = np.sin(posdist) * np.sin(posang)
posz = np.cos(posdist)
posdir = np.array([posx, posy, posz], dtype=np.float64)
norm = np.sqrt(np.dot(posdir, posdir))
posdir /= norm
posrot = qa.from_vectors(zaxis, posdir)
if rotate is None:
result[pos] = posrot
else:
prerot = qa.rotation(zaxis, rotate[pos] * np.pi / 180.0)
result[pos] = qa.mult(posrot, prerot)
return result
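
# Hedged usage sketch; hex_nring and the quaternion helpers `qa` are assumed to be
# provided by the surrounding package.
quats = hex_layout(npos=7, width=5.0)  # 1 centre position plus one ring of 6
print(quats.shape)  # expected (7, 4), one quaternion per position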
|
85141e08c8a75a54953ba78c520b87d377aad3fb
| 3,644,320
|
def dup_zz_hensel_step(m, f, g, h, s, t, K):
"""
One step in Hensel lifting in `Z[x]`.
Given positive integer `m` and `Z[x]` polynomials `f`, `g`, `h`, `s`
and `t` such that::
f == g*h (mod m)
s*g + t*h == 1 (mod m)
lc(f) is not a zero divisor (mod m)
lc(h) == 1
deg(f) == deg(g) + deg(h)
deg(s) < deg(h)
deg(t) < deg(g)
returns polynomials `G`, `H`, `S` and `T`, such that::
f == G*H (mod m**2)
        S*G + T*H == 1 (mod m**2)
References
==========
1. [Gathen99]_
"""
M = m**2
e = dup_sub_mul(f, g, h, K)
e = dup_trunc(e, M, K)
q, r = dup_div(dup_mul(s, e, K), h, K)
q = dup_trunc(q, M, K)
r = dup_trunc(r, M, K)
u = dup_add(dup_mul(t, e, K), dup_mul(q, g, K), K)
G = dup_trunc(dup_add(g, u, K), M, K)
H = dup_trunc(dup_add(h, r, K), M, K)
u = dup_add(dup_mul(s, G, K), dup_mul(t, H, K), K)
b = dup_trunc(dup_sub(u, [K.one], K), M, K)
c, d = dup_div(dup_mul(s, b, K), H, K)
c = dup_trunc(c, M, K)
d = dup_trunc(d, M, K)
u = dup_add(dup_mul(t, b, K), dup_mul(c, G, K), K)
S = dup_trunc(dup_sub(s, d, K), M, K)
T = dup_trunc(dup_sub(t, u, K), M, K)
return G, H, S, T
|
4ce2c7e9aaf52a9ef3e7ce68d164c82959b22ebb
| 3,644,321
|
def generate_sobol_index_sample_sets(samplesA, samplesB, index):
"""
    Given two sample sets A and B, generate the sets :math:`A_B^{I}` from them.
The rows of A_B^I are all from A except for the rows with non zero entries
in the index I. When A and B are QMC samples it is best to change as few
rows as possible
See
Variance based sensitivity analysis of model output. Design and estimator
for the total sensitivity index
"""
nvars = samplesA.shape[0]
I = np.arange(nvars)
mask = np.asarray(index, dtype=bool)
samples = np.vstack([samplesA[~mask], samplesB[mask]])
J = np.hstack([I[~mask], I[mask]])
samples = samples[np.argsort(J), :]
return samples
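
# Small sketch of the row swap: with three variables and index (0, 1, 0), rows 0 and 2
# come from A and row 1 comes from B.
import numpy as np

A = np.zeros((3, 5))  # 3 variables, 5 samples per variable
B = np.ones((3, 5))
A_B_I = generate_sobol_index_sample_sets(A, B, [0, 1, 0])
print(A_B_I[:, 0])  # expected [0. 1. 0.]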
|
15b02e1995bc922b33d6e0e32bdde1559f0a762e
| 3,644,322
|
import logging
from logging.handlers import WatchedFileHandler
def setup_logfile_logger(log_path, log_level=None, log_format=None, date_format=None):
"""
Set up logging to a file.
"""
# Create the handler
handler = WatchedFileHandler(log_path, mode='a', encoding='utf-8', delay=0)
if log_level:
# Grab and set the level
level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)
handler.setLevel(level)
    # Set the default file log formatter config
if not log_format:
log_format = '%(asctime)s [%(name)s][%(levelname)s] %(message)s'
if not date_format:
date_format = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(log_format, datefmt=date_format)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
return handler
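
# Usage sketch; LOG_LEVELS and root_logger are module globals assumed to be defined
# alongside this function, and the log path is an arbitrary example.
handler = setup_logfile_logger('/tmp/app.log', log_level='info')
logging.getLogger(__name__).info('file logging is now configured')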
|
cfc22a2e334aad5d4aa573014af8a8bac4a7e6b1
| 3,644,323
|
import warnings
def fix_encoding_and_explain(text):
"""
Deprecated copy of `ftfy.fix_encoding_and_explain()`.
"""
warnings.warn(
"`fix_encoding_and_explain()` has moved to the main module of ftfy.",
DeprecationWarning,
)
return ftfy.fix_encoding_and_explain(text)
|
3a76fefcbc68b6cf68f3262b90ff277424bf1eba
| 3,644,324
|
def parse_16bit_color(color16):
"""解析16位的颜色
:param color16: 16位的颜色值
"""
r = int(gamma5[int((color16 >> 11) & 0x1F)])
g = int(gamma6[int((color16 >> 5) & 0x3F)])
b = int(gamma5[int(color16 & 0x1F)])
return (r, g, b)
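
# Sketch of the RGB565 bit layout this function decodes; gamma5 and gamma6 are assumed
# to be 32- and 64-entry gamma-correction lookup tables defined elsewhere.
color16 = 0xF81F                 # magenta in RGB565
r5 = (color16 >> 11) & 0x1F      # 31 (5 red bits)
g6 = (color16 >> 5) & 0x3F       # 0  (6 green bits)
b5 = color16 & 0x1F              # 31 (5 blue bits)
# parse_16bit_color(color16) maps each field through the gamma tables and returns (r, g, b)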
|
c66448c9e886db1e696afa706577d02b3411cd92
| 3,644,325
|
def orders():
"""
List all orders
"""
orders = Order.query.filter_by(user_id=current_user.id).all()
return render_template('customer/orders.html', orders=orders, title="Orders")
|
921552a41ef673cb9c8f6c414ba4a12b3643617a
| 3,644,326
|
def packpeeklist1(n1, n2, n3, n4, n5):
"""
Packs and returns 5 item list
"""
listp = [n1, n2, n3, n4, n5]
return listp
|
4b781ff3e8eb4a1bd51f8e834fab5462371a85c5
| 3,644,327
|
from typing import List
def valid_commands(commands: List[str]) -> List[str]:
"""
Get list of valid commands from list of commands.
:param (list) commands: User-supplied commands.
:return:
"""
return [command for command in commands if command in available_commands()]
|
25054d8acb8bee16855adba25e846bc128fb9f23
| 3,644,328
|
def duck_list(request):
""" lists all ducks """
ducks = Duck.objects.all()
return render(request, 'duck/list.html', {'duck_list': ducks})
|
206e586c2709d4c5526e26ff50cabdfe440125bc
| 3,644,329
|
def get_debian_version(file_path):
"""
Get the version of a debian file
:param file_path: the path of the debian file
:return: the version of the debian file
"""
cmd_args = ["dpkg-deb", "-f", file_path, "Version"]
debian_version = run_command(cmd_args)
return debian_version
|
61c8779d4b235a1d74bf819299f95077e5ff001a
| 3,644,330
|
from typing import Optional
def hash_type(
draw, hash_type_strategy: Optional[SearchStrategy[HashType]] = None
) -> HashType:
"""Composite strategy for fetching a :class:`~modist.package.hasher.HashType`."""
return draw(HashType_strategy if not hash_type_strategy else hash_type_strategy)
|
79152dda823dcd227545ed2bb117229344fc341a
| 3,644,331
|
def get_initializer(initializer_name):
"""Get the corresponding initializer function based on the initializer string.
API of an initializer:
init_fn, hparams = get_initializer(init)
new_params, final_l = init_fn(loss, init_params, hps,
num_outputs, input_shape)
Args:
initializer_name: (str) e.g. default.
Returns:
initializer
Raises:
    ValueError if the initializer is unrecognized.
"""
try:
return _ALL_INITIALIZERS[initializer_name][0]
except KeyError:
raise ValueError('Unrecognized initializer: {}'.format(initializer_name))
|
778941a5e7937600cca2a48371d2540cab6476ab
| 3,644,332
|
import logging
from subprocess import call
def sh(cmd, grid=False, infile=None, outfile=None, errfile=None,
background=False):
"""
simple wrapper for system calls
"""
if grid:
return 0 # A fake retcode
else:
if infile:
cmd += " < {0} ".format(infile)
if outfile and outfile != "stdout":
cmd += " > {0} ".format(outfile)
if errfile:
cmd += " 2> {0} ".format(errfile)
if background:
cmd += " & "
logging.debug(cmd)
return call(cmd, shell=True)
|
0520e05c30d127bcc3c8e4057c4d839432c51334
| 3,644,333
|
from .model_store import get_model_file
import os
def get_voca(base_persons,
vertices,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create VOCA model with specific parameters.
Parameters:
----------
base_persons : int
Number of base persons (subjects).
vertices : int
Number of 3D geometry vertices.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
net = VOCA(
base_persons=base_persons,
vertices=vertices,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
|
9c7fa8c52bd634f5a1b6511607976515d55202fd
| 3,644,334
|
def _l2_project_reference(z_p, p, z_q):
"""Projects distribution (z_p, p) onto support z_q under L2-metric over CDFs.
The supports z_p and z_q are specified as tensors of distinct atoms (given
in ascending order).
Let Kq be len(z_q) and Kp be len(z_p). This projection works for any
support z_q, in particular Kq need not be equal to Kp.
Args:
z_p: Tensor holding support of distribution p, shape `[batch_size, Kp]`.
p: Tensor holding probability values p(z_p[i]), shape `[batch_size, Kp]`.
z_q: Tensor holding support to project onto, shape `[Kq]`.
Returns:
Projection of (z_p, p) onto support z_q under Cramer distance.
"""
# Broadcasting of tensors is used extensively in the code below. To avoid
# accidental broadcasting along unintended dimensions, tensors are defensively
# reshaped to have equal number of dimensions (3) throughout and intended
# shapes are indicated alongside tensor definitions. To reduce verbosity,
# extra dimensions of size 1 are inserted by indexing with `None` instead of
# `tf.expand_dims()` (e.g., `x[:, None, :]` reshapes a tensor of shape
# `[k, l]' to one of shape `[k, 1, l]`).
# Extract vmin and vmax and construct helper tensors from z_q
vmin, vmax = z_q[0], z_q[-1]
d_pos = tf.concat([z_q, vmin[None]], 0)[1:] # 1 x Kq x 1
d_neg = tf.concat([vmax[None], z_q], 0)[:-1] # 1 x Kq x 1
# Clip z_p to be in new support range (vmin, vmax).
z_p = tf.clip_by_value(z_p, vmin, vmax)[:, None, :] # B x 1 x Kp
# Get the distance between atom values in support.
d_pos = (d_pos - z_q)[None, :, None] # z_q[i+1] - z_q[i]. 1 x B x 1
d_neg = (z_q - d_neg)[None, :, None] # z_q[i] - z_q[i-1]. 1 x B x 1
z_q = z_q[None, :, None] # 1 x Kq x 1
# Ensure that we do not divide by zero, in case of atoms of identical value.
d_neg = tf.where(d_neg > 0, 1./d_neg, tf.zeros_like(d_neg)) # 1 x Kq x 1
d_pos = tf.where(d_pos > 0, 1./d_pos, tf.zeros_like(d_pos)) # 1 x Kq x 1
delta_qp = z_p - z_q # clip(z_p)[j] - z_q[i]. B x Kq x Kp
d_sign = tf.cast(delta_qp >= 0., dtype=p.dtype) # B x Kq x Kp
# Matrix of entries sgn(a_ij) * |a_ij|, with a_ij = clip(z_p)[j] - z_q[i].
# Shape B x Kq x Kp.
delta_hat = (d_sign * delta_qp * d_pos) - ((1. - d_sign) * delta_qp * d_neg)
p = p[:, None, :] # B x 1 x Kp.
return tf.reduce_sum(tf.clip_by_value(1. - delta_hat, 0., 1.) * p, 2)
|
e6c43d1d05237410ff94d3a3b76bc705f2064d46
| 3,644,335
|
from collections import namedtuple
def _make_blocksizes(bricksize, surveysize, nlods, dtype, factor=(1,1,1), verbose=None):
"""
CURRENTLY NOT USED.
Calculate the minimum blocksize to read at each lod level. Clip to
the survey size. Also compute the memory needed to hold one buffer
for each lod. Note that the genlod algorithm currently assumes
that the block size is the same for all levels except for the
clipping. And it currently handles clipping itself and might not
like us to do so. Currently this function is not very useful.
"""
blocksizes = np.zeros((nlods, 3), dtype=np.int64)
ss = np.array(surveysize, dtype=np.int64)
bs = np.array([2*factor[0]*bricksize[0],
2*factor[1]*bricksize[1],
ss[2]], dtype=np.int64)
iterations = 0
for lod in range(nlods):
bs = np.minimum(bs, ss)
blocksizes[lod] = bs
iterations += np.product((ss+bs-1) // bs)
ss = (ss + 1) // 2
bytesused = np.sum(np.product(blocksizes, axis=1)) * int(np.dtype(dtype).itemsize)
returntype = namedtuple("BlockSizeInfo", "blocksizes bytesused iterations")
result = returntype(blocksizes, bytesused, iterations)
print(result)
return result
|
defc1f11d8f3684ccce4a7df7db6c011848187af
| 3,644,336
|
from ._backend import _check_backend
from ._kit2fiff_gui import Kit2FiffFrame
def kit2fiff():
"""Convert KIT files to the fiff format.
The recommended way to use the GUI is through bash with::
$ mne kit2fiff
"""
_check_mayavi_version()
_check_backend()
gui = Kit2FiffFrame()
gui.configure_traits()
return gui
|
b57b74d036378b1265e991b8c49d55bce41807c0
| 3,644,337
|
from tqdm import tqdm
def sweep_dec_given_x(full_model, z_dec_model, sample1, sample2, sample_layer_name,
sweep_z_samples=False,
nb_samples=10,
nargout=1,
tqdm=tqdm):
"""
sweep the latent space given two samples in the original space
    Specifically, get z_mu = enc(x) for both samples, and sweep between those z_mus.
"sweep_z_samples" does a sweep between two samples, rather than between two z_mus.
Example:
sample_layer_name='img-img-dense-vae_ae_dense_sample'
"""
# get a model that also outputs the samples z
full_output = [*full_model.outputs,
full_model.get_layer(sample_layer_name).get_output_at(1)]
full_model_plus = keras.models.Model(full_model.inputs, full_output)
# get full predictions for these samples
pred1 = full_model_plus.predict(sample1[0])
pred2 = full_model_plus.predict(sample2[0])
img1 = sample1[0]
img2 = sample2[0]
# sweep range
x_range = np.linspace(0, 1, nb_samples)
# prepare outputs
outs = [None] * nb_samples
for xi, x in enumerate(tqdm(x_range)):
if sweep_z_samples:
z = x * pred1[3] + (1-x) * pred2[3]
else:
z = x * pred1[1] + (1-x) * pred2[1]
if isinstance(sample1[0], (list, tuple)): # assuming prior or something like that
outs[xi] = z_dec_model.predict([z, *sample1[0][1:]])
else:
outs[xi] = z_dec_model.predict(z)
if nargout == 1:
return outs
else:
return (outs, [pred1, pred2])
|
9a5bccfa85f0bdda5953b8a72c66238e0ff5d548
| 3,644,338
|
def process(observation, current_game_state):
"""
Args:
observation: An observation, which agents get as an input from kaggle environment.
current_game_state: An object provided by kaggle to simplify game info extraction.
Returns:
processed_observations: A prepared observation to save to the buffer.
"""
global units_actions_dict
player = current_game_state.players[observation.player]
opponent = current_game_state.players[(observation.player + 1) % 2]
width, height = current_game_state.map.width, current_game_state.map.height
shift = int((MAX_MAP_SIDE - width) / 2) # to make all feature maps 32x32
turn = current_game_state.turn
player_units_coords = {}
player_city_tiles_coords = {}
player_research_points = player.research_points
player_city_tiles_count = player.city_tile_count
player_cities_count = len(player.cities)
player_units_count = len(player.units)
player_workers_count = 0
player_carts_count = 0
for unit in player.units:
if unit.is_worker():
player_workers_count += 1
elif unit.is_cart():
player_carts_count += 1
else:
raise ValueError
opponent_research_points = opponent.research_points
opponent_city_tiles_count = opponent.city_tile_count
opponent_cities_count = len(opponent.cities)
opponent_units_count = len(opponent.units)
opponent_workers_count = 0
opponent_carts_count = 0
for unit in opponent.units:
if unit.is_worker():
opponent_workers_count += 1
elif unit.is_cart():
opponent_carts_count += 1
else:
raise ValueError
current_cycle, to_next_day, to_next_night, is_night = get_timing(turn)
# map data, define resources and roads, 0 or 1 for bool, 0 to around 1 for float;
# layers:
# 0 - a resource
# 1 - is wood
# 2 - wood amount
# 3 - is coal
# 4 - coal amount
# 5 - is uranium
# 6 - uranium amount
# 7 - fuel equivalent
# 8 - if a resource is available for the player, 1 when ready
# 9 - a road lvl
# 10 - 19 for coordinates
# number_of_resources_layers = 20
# A1 = np.zeros((number_of_resources_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
# for yy in range(height):
# for xx in range(width):
# cell = current_game_state.map.get_cell(xx, yy)
# x, y = yy + shift, xx + shift
# if cell.has_resource():
# A1[0, x, y] = 1 # a resource at the point
# resource = cell.resource
# if resource.type == "wood":
# A1[1, x, y] = 1
# wood_amount = resource.amount
# A1[2, x, y] = wood_amount / WOOD_BOUND
# fuel = wood_amount * WOOD_FUEL_VALUE
# A1[8, x, y] = 1 # wood is always available
# elif resource.type == "coal":
# A1[3, x, y] = 1
# coal_amount = resource.amount
# A1[4, x, y] = coal_amount / COAL_BOUND
# fuel = coal_amount * COAL_FUEL_VALUE
# A1[8, x, y] = min(player_research_points / COAL_RESEARCH_POINTS, 1)
# elif resource.type == "uranium":
# A1[5, x, y] = 1
# uran_amount = resource.amount
# A1[6, x, y] = uran_amount / URAN_BOUND
# fuel = uran_amount * URAN_FUEL_VALUE
# A1[8, x, y] = min(player_research_points / URAN_RESEARCH_POINTS, 1)
# else:
# raise ValueError
# A1[7, x, y] = fuel / FUEL_BOUND
# A1[9, x, y] = cell.road / MAX_ROAD
# A1[10:15, x, y] = to_binary(np.asarray((x,), dtype=np.uint8), m=5)
# A1[15:20, x, y] = to_binary(np.asarray((y,), dtype=np.uint8), m=5)
# map data, define resources and roads, 0 or 1 for bool, 0 to around 1 for float;
# layers:
# 0 - a resource
# 1 - is available
# 2 - amount
# 3 - fuel equivalent
# 4 - a road lvl
# 5 - 14 for coordinates
# 15 - next available resource
number_of_resources_layers = 16
A1 = np.zeros((number_of_resources_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
for yy in range(height):
for xx in range(width):
cell = current_game_state.map.get_cell(xx, yy)
x, y = yy + shift, xx + shift
if cell.has_resource():
A1[0, x, y] = 1 # a resource at the point
resource = cell.resource
fuel = 0
if resource.type == "wood":
A1[1, x, y] = 1
wood_amount = resource.amount
A1[2, x, y] = wood_amount / WOOD_BOUND
fuel = wood_amount * WOOD_FUEL_VALUE
elif resource.type == "coal":
if player_research_points >= COAL_RESEARCH_POINTS:
A1[1, x, y] = 1
coal_amount = resource.amount
A1[2, x, y] = coal_amount / COAL_BOUND
fuel = coal_amount * COAL_FUEL_VALUE
else:
A1[15, x, y] = 1
elif resource.type == "uranium":
if player_research_points >= URAN_RESEARCH_POINTS:
A1[1, x, y] = 1
uran_amount = resource.amount
A1[2, x, y] = uran_amount / URAN_BOUND
fuel = uran_amount * URAN_FUEL_VALUE
elif player_research_points >= URAN_RESEARCH_POINTS - 50:
A1[15, x, y] = 1
else:
raise ValueError
A1[3, x, y] = fuel / FUEL_BOUND
A1[4, x, y] = cell.road / MAX_ROAD
A1[5:10, x, y] = to_binary(np.asarray((x,), dtype=np.uint8), m=5)
A1[10:15, x, y] = to_binary(np.asarray((y,), dtype=np.uint8), m=5)
# define city tiles, 0 or 1 for bool, 0 to around 1 for float;
# layers:
number_of_main_layers = 39
A2 = np.zeros((number_of_main_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
# 0 - a unit
# 1 - is player
# 2 - is opponent
# 3 - at the city tile
# 4 - first place in the city tile is occupied by the unit - fill later (in get_separate_outputs)
# 5 - second place is occupied by the unit, and the first was occupied before - fill later
# 6 - third place is occupied - fill later
# 7 - forth place is occupied - fill later
# 8 - the place number is more than 4th - fill later
# 9 - is worker - X0
# 10 - is cart - X1
# 11 - can act - X2
# 12 - can build - X3
# 13 - cargo wood - X4
# 14 - cargo coal - X5
# 15 - cargo uranium - X6
# 16 - cargo space left - X7
# 17 - fuel equivalent - X8
# 18 - is city tile
# 19 - is player
# 20 - is opponent
# 21 - can act
# 22 - amount of city tiles in the city, which the city tile belongs to
# 23 - current city upkeep
# 24 - fuel amount
# 25 - ratio if city can survive, 1 and more means it can
# 26 - amount of all friendly city tiles
# 27 - amount of cities
# 28 - units build limit reached (workers + carts == city tiles)
# 29 - number of workers
# 30 - number of carts
# 31 - number of friendly units
# 32 - research progress for coal
# 33 - research progress for uranium
# 34 - progress (from 0 to 1) until next day
# 35 - progress until next night
# 36 - progress until finish
# 37 - is night
# 38 - current cycle
# start with city tiles to know their positions to fill units cells
for k, city in list(player.cities.items()) + list(opponent.cities.items()):
if city.team == player.team:
city_tiles_count = player_city_tiles_count
cities_count = player_cities_count
units_count = player_units_count
workers_count = player_workers_count
carts_count = player_carts_count
research_points = player_research_points
elif city.team == opponent.team:
city_tiles_count = opponent_city_tiles_count
cities_count = opponent_cities_count
units_count = opponent_units_count
workers_count = opponent_workers_count
carts_count = opponent_carts_count
research_points = opponent_research_points
else:
raise ValueError
current_light_upkeep = city.get_light_upkeep()
current_fuel = city.fuel
current_city_tiles_count = 0
for _ in city.citytiles:
current_city_tiles_count += 1
for city_tile in city.citytiles:
# city tile group
y, x = city_tile.pos.x + shift, city_tile.pos.y + shift
A2[18, x, y] = 1
if city_tile.team == player.team:
A2[19, x, y] = 1
elif city_tile.team == opponent.team:
A2[20, x, y] = 1
else:
raise ValueError
if city_tile.can_act():
A2[21, x, y] = 1
if city_tile.team == player.team:
player_city_tiles_coords[f"ct_{x}_{y}"] = (x, y) # to save only the operable units
A2[22, x, y] = current_city_tiles_count / CITY_TILES_IN_CITY_BOUND
A2[23, x, y] = UPKEEP_BOUND_PER_TILE / current_light_upkeep
A2[24, x, y] = current_fuel / FUEL_BOUND
A2[25, x, y] = min(1, current_fuel / (min(10, to_next_day) * current_light_upkeep)) # ratio to survive
# common group
A2[26, x, y] = city_tiles_count / CITY_TILES_BOUND
A2[27, x, y] = cities_count / CITIES_BOUND
if units_count == city_tiles_count:
A2[28, x, y] = 1
A2[29, x, y] = workers_count / WORKERS_BOUND
A2[30, x, y] = carts_count / CARTS_BOUND
A2[31, x, y] = units_count / UNITS_BOUND
A2[32, x, y] = min(research_points / COAL_RESEARCH_POINTS, 1)
A2[33, x, y] = min(research_points / URAN_RESEARCH_POINTS, 1)
A2[34, x, y] = 1 - to_next_day / CYCLE_LENGTH
A2[35, x, y] = 1 - to_next_night / CYCLE_LENGTH
A2[36, x, y] = turn / MAX_DAYS
A2[37, x, y] = is_night
A2[38, x, y] = current_cycle / TOTAL_CYCLES
for unit in player.units + opponent.units:
# unit group
if unit.team == player.team:
city_tiles_count = player_city_tiles_count
cities_count = player_cities_count
units_count = player_units_count
workers_count = player_workers_count
carts_count = player_carts_count
research_points = player_research_points
elif unit.team == opponent.team:
city_tiles_count = opponent_city_tiles_count
cities_count = opponent_cities_count
units_count = opponent_units_count
workers_count = opponent_workers_count
carts_count = opponent_carts_count
research_points = opponent_research_points
else:
raise ValueError
y, x = unit.pos.x + shift, unit.pos.y + shift
A2[0, x, y] = 1
if unit.team == player.team:
A2[1, x, y] = 1
elif unit.team == opponent.team:
A2[2, x, y] = 1
else:
raise ValueError
is_unit_at_home = 1 if A2[18, x, y] == 1 else 0
A2[3, x, y] = is_unit_at_home
X = np.zeros(9, dtype=np.half)
if unit.is_worker():
X[0] = 1
elif unit.is_cart():
X[1] = 1
else:
raise ValueError
if unit.can_act():
X[2] = 1
if unit.can_build(current_game_state.map):
X[3] = 1
X[4] = unit.cargo.wood / WORKERS_CARGO
X[5] = unit.cargo.coal / WORKERS_CARGO
X[6] = unit.cargo.uranium / WORKERS_CARGO
X[7] = unit.get_cargo_space_left() / WORKERS_CARGO
X[8] = (unit.cargo.wood * WOOD_FUEL_VALUE +
unit.cargo.coal * COAL_FUEL_VALUE +
unit.cargo.uranium * URAN_FUEL_VALUE) / FUEL_BOUND
        # many units can share the same position at home
# so save unique unit parameters in X array and store it in dictionary if unit is at home
# if unit is not at home so it has a unique position, put it inside A2 array
if is_unit_at_home:
if unit.can_act() and unit.team == player.team:
player_units_coords[unit.id] = ((x, y), (X, unit.is_worker()))
else:
if unit.can_act() and unit.team == player.team:
player_units_coords[unit.id] = ((x, y), (None, unit.is_worker()))
A2[9:18, x, y] = X
# common group
A2[26, x, y] = city_tiles_count / CITY_TILES_BOUND
A2[27, x, y] = cities_count / CITIES_BOUND
if units_count == city_tiles_count:
A2[28, x, y] = 1
A2[29, x, y] = workers_count / WORKERS_BOUND
A2[30, x, y] = carts_count / CARTS_BOUND
A2[31, x, y] = units_count / UNITS_BOUND
A2[32, x, y] = min(research_points / COAL_RESEARCH_POINTS, 1)
A2[33, x, y] = min(research_points / URAN_RESEARCH_POINTS, 1)
A2[34, x, y] = 1 - to_next_day / CYCLE_LENGTH
A2[35, x, y] = 1 - to_next_night / CYCLE_LENGTH
A2[36, x, y] = turn / MAX_DAYS
A2[37, x, y] = is_night
A2[38, x, y] = current_cycle / TOTAL_CYCLES
A = np.concatenate((A2, A1), axis=0)
# define headers
# layers:
# 0 - an operable one
# 1 - is worker
# 2 - is cart
# 3 - is city tile
# 4 - prev pos for units
# 5 - prev prev pos for units
number_of_header_layers = 6
units_headers = {}
if player_units_coords:
for k, ((x, y), (X, is_worker)) in player_units_coords.items():
head = np.zeros((number_of_header_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
worker = np.array([1, 1, 0, 0], dtype=np.half)
cart = np.array([1, 0, 1, 0], dtype=np.half)
head[:4, x, y] = worker if is_worker else cart
if k in units_actions_dict.keys():
units_actions_dict[k].append((x, y))
unit_prev_pos = units_actions_dict[k][-2]
if len(units_actions_dict[k]) > 2:
unit_prev_prev_pos = units_actions_dict[k][-3]
else:
unit_prev_prev_pos = units_actions_dict[k][-2]
else:
units_actions_dict[k] = []
units_actions_dict[k].append((x, y))
unit_prev_pos = (x, y)
unit_prev_prev_pos = (x, y)
head[4, unit_prev_pos[0], unit_prev_pos[1]] = 1
head[5, unit_prev_prev_pos[0], unit_prev_prev_pos[1]] = 1
head = np.moveaxis(head, 0, -1)
units_headers[k] = (head, (x, y), X, is_worker)
city_tiles_headers = {}
if player_city_tiles_coords:
for k, (x, y) in player_city_tiles_coords.items():
head = np.zeros((number_of_header_layers, MAX_MAP_SIDE, MAX_MAP_SIDE), dtype=np.half)
head[:4, x, y] = np.array([1, 0, 0, 1], dtype=np.half)
head = np.moveaxis(head, 0, -1)
city_tiles_headers[k] = head
B = np.moveaxis(A, 0, -1)
outputs = {"stem": B,
"units_headers": units_headers,
"city_tiles_headers": city_tiles_headers}
return outputs
|
3a54ad62fa341ca57528c5ee32b45d749982f286
| 3,644,339
|
def source_files(goto, wkdir, srcdir=None):
"""Source files appearing in symbol table.
Source file path names in symbol table are absolute or relative to
wkdir. If srcdir is given, return only files under srcdir.
"""
wkdir = srcloct.abspath(wkdir)
srcs = [dfn['file']
for dfn in parse_symbol_table(symbol_table(goto), wkdir)]
srcs = [src for src in srcs if src and not srcloct.is_builtin(src)]
if srcdir:
srcdir = srcloct.abspath(srcdir)
srcs = [src for src in srcs if src.startswith(srcdir)]
return sorted(set(srcs))
|
bacb86942b5f82ecc902699b81de5d92868ddd57
| 3,644,340
|
def textBlurBackground(img, text, font, fontScale, textPos, textThickness=1, textColor=(0, 255, 0), kneral=(33, 33),
pad_x=3, pad_y=3):
"""
    Draw text with a blurred background; control the blur amount with kneral (odd, odd).
    @param img: (mat) image on which you want to draw text
    @param text: (string) text you want to draw
    @param font: font face, like FONT_HERSHEY_COMPLEX, FONT_HERSHEY_PLAIN etc.
    @param fontScale: (double) the size of the text, how big it should be.
    @param textPos: tuple(x,y) position where you want to draw the text
    @param textThickness: (int) font weight, how bold it should be.
    @param textColor: tuple(BGR), values 0 to 255 each
    @param kneral: tuple(3,3) of odd ints: the higher the values, the blurrier the background
    @param pad_x: int(pixels) padding in the x direction
    @param pad_y: int(pixels) padding in the y direction
    @return: img mat, with text drawn over a blurred background
call the function:
img =textBlurBackground(img, 'Blured Background Text', cv2.FONT_HERSHEY_COMPLEX, 0.9, (20, 60),2, (0,255, 0), (49,49), 13, 13 )
"""
(t_w, t_h), _ = cv.getTextSize(text, font, fontScale, textThickness) # getting the text size
x, y = textPos
blur_roi = img[y - pad_y - t_h: y + pad_y, x - pad_x:x + t_w + pad_x] # croping Text Background
img[y - pad_y - t_h: y + pad_y, x - pad_x:x + t_w + pad_x] = cv.blur(blur_roi,
kneral) # merging the blured background to img
cv.putText(img, text, textPos, font, fontScale, textColor, textThickness)
# cv.imshow('blur roi', blur_roi)
# cv.imshow('blured', img)
return img
|
dd4c49a7cf15af4273e1b3689fa6caabe8242ea0
| 3,644,341
|
import theano.tensor as T
def local_response_normalization_2d_v2(in_vw, alpha, k, beta, n):
"""
cross-channel local response normalization for 2D feature maps
- input is bc01
output[i]
= value of the i-th channel
= input[i] / (k + alpha * sum(input[j]^2 for j) ** beta)
- where j is over neighboring channels (from i - n // 2 to i + n // 2)
This code is adapted from pylearn2.
https://github.com/lisa-lab/pylearn2/blob/master/LICENSE.txt
"""
assert n % 2 == 1, "n must be odd"
in_var = in_vw.variable
b, ch, r, c = in_vw.symbolic_shape()
half_n = n // 2
input_sqr = T.sqr(in_var)
extra_channels = T.zeros((b, ch + 2 * half_n, r, c))
input_sqr = T.set_subtensor(extra_channels[:, half_n:half_n + ch, :, :],
input_sqr)
scale = k + alpha * treeano.utils.smart_sum([input_sqr[:, i:i + ch, :, :]
for i in range(n)])
scale = scale ** beta
return in_var / scale
|
23f810dbd4f36d1817c57ceeabbacad1cf8e0239
| 3,644,342
|
import os
import sqlite3 as sql
import numpy as np
def mhc_datasets(table='mhc_data', path='./iedb/', remove_c=False,
remove_u=False, remove_modes=False):
"""
    Parameters: 'table' is the table from which the data is retrieved
    - must be 'mhc_data', 'mhc_test1', 'mhc_test2', or 'mhc_train'
    'path' is where the database is stored
    'remove_c': remove every sequence with a 'c'
    'remove_u': remove every sequence with a 'u'
    'remove_modes': remove the unusual modes of the dataset
    If the table name is 'mhc_data' the entire remaining dataset is returned; otherwise,
    returns (in order): the amino acid sequences, the -log10 of binding affinities, and the alleles
"""
if table != 'mhc_data' and table != 'mhc_train' and table != 'mhc_test1' and table != 'mhc_test2':
raise Exception('table name ' + table + ' does not exist')
selection = '*'
if table != 'mhc_data':
selection = 'sequence, meas, mhc'
conn = sql.connect(os.path.join(path, 'mhc.db'))
c = conn.cursor()
c.execute(_create_query(selection, table, remove_c, remove_u, remove_modes))
dataset = np.array(c.fetchall())
conn.close()
if table == 'mhc_data':
return dataset
if table == 'mhc_train':
# Temporary solution to remove benchmark overlaps from train set:
off_limits = np.loadtxt(os.path.join(path, 'benchmark_ic50_sequences.csv'),
delimiter=',', dtype=str)
idx = ~np.array([(seq in off_limits) for seq in dataset[:, 0]]).astype(bool)
dataset = dataset[idx, :]
return dataset.T[0], -np.log10(dataset.T[1].astype(float)), dataset.T[2]
|
8f05454a9362f8115834a2caee40e28a48877741
| 3,644,343
|
def add_new_user():
"""
This function adds a new user
:return: Response Code
"""
newuser = {}
if request.method == "POST":
try:
newuser['username'] = str(request.data.get('username').strip())
newuser['first_name'] = str(request.data.get('first_name').strip())
newuser['last_name'] = str(request.data.get('last_name').strip())
newuser['email'] = str(request.data.get('email').strip())
newuser['password'] = str(request.data.get('password').strip())
newuser['verification_code'] = str(request.data.get(
'verification_code').strip())
except Exception as e:
print(e)
abort(500)
user = User(**newuser)
user.save()
    return make_response(jsonify(status=201, msg="User {} successfully added ".format(user.username) +
                                 "to database"), 201)
|
32abdd61ff4ad574a0e097553d3332f6f67d57dd
| 3,644,344
|
def has_wildcard(url) -> bool:
"""
Check if the url contains a wildcard in last subdomain.
:param url: The url to check
:type url: str
:return: True if the url contains a wildcard in the last subdomain, False otherwise
:rtype: bool
"""
subdomain = extract(url).subdomain
return subdomain.split(".")[0] == "*"
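
# Usage sketch, assuming `extract` above is tldextract.extract:
print(has_wildcard("*.api.example.com"))  # expected True
print(has_wildcard("www.example.com"))    # expected False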
|
5dbf1a0220ad6c4af3bfe344a3aaa97473918995
| 3,644,345
|
def tmle_calculator(y, ystar1, ystar0, ystara, h1w, h0w, haw, splits,
measure='ate', lower_bound=None, upper_bound=None):
"""Function to calculate TMLE estimates for SingleCrossfitTMLE, and DoubleCrossfitTMLE
"""
if measure in ["ate", "risk_difference"]:
# Unbounding if continuous outcome (ate)
if measure == "ate":
# Unbounding continuous outcomes
y = tmle_unit_unbound(y, mini=lower_bound, maxi=upper_bound)
ystar1 = tmle_unit_unbound(ystar1, mini=lower_bound, maxi=upper_bound)
ystar0 = tmle_unit_unbound(ystar0, mini=lower_bound, maxi=upper_bound)
ystara = tmle_unit_unbound(ystara, mini=lower_bound, maxi=upper_bound)
# Point Estimate
estimate = np.mean(ystar1 - ystar0)
# Variance estimate
variance = []
for s in set(splits):
ys = y[splits == s]
ystar1s = ystar1[splits == s]
ystar0s = ystar0[splits == s]
ystaras = ystara[splits == s]
haws = haw[splits == s]
ic = haws * (ys - ystaras) + (ystar1s - ystar0s) - estimate
variance.append(np.var(ic, ddof=1))
return estimate, (np.mean(variance) / y.shape[0])
elif measure == 'risk_ratio':
# Point Estimate
estimate = np.mean(ystar1) / np.mean(ystar0)
variance = []
for s in set(splits):
ys = y[splits == s]
ystar1s = ystar1[splits == s]
ystar0s = ystar0[splits == s]
ystaras = ystara[splits == s]
h1ws = h1w[splits == s]
h0ws = h0w[splits == s]
ic = (1/np.mean(ystar1s) * (h1ws * (ys - ystaras)) + ystar1s - np.mean(ystar1s) -
(1/np.mean(ystar0s) * (-1 * h0ws * (ys - ystaras)) + ystar0s - np.mean(ystar0s)))
variance.append(np.var(ic, ddof=1))
return estimate, (np.mean(variance) / y.shape[0])
elif measure == 'odds_ratio':
# Point Estimate
estimate = (np.mean(ystar1) / (1-np.mean(ystar1))) / (np.mean(ystar0) / (1-np.mean(ystar0)))
variance = []
for s in set(splits):
ys = y[splits == s]
ystar1s = ystar1[splits == s]
ystar0s = ystar0[splits == s]
ystaras = ystara[splits == s]
h1ws = h1w[splits == s]
h0ws = h0w[splits == s]
ic = ((1-np.mean(ystar1s))/np.mean(ystar1s)*(h1ws*(ys - ystaras) + ystar1s) -
(1-np.mean(ystar0s))/np.mean(ystar0s)*(-1*h0ws*(ys - ystaras) + ystar0s))
variance.append(np.var(ic, ddof=1))
return estimate, (np.mean(variance) / y.shape[0])
else:
raise ValueError("Invalid measure requested within function: tmle_calculator. Input measure is " +
str(measure) + " but only 'ate', 'risk_difference', 'risk_ratio', and "
"'odds_ratio' are accepted.")
|
36f6b131044bd3b53044a4bfe0954eff1325bb59
| 3,644,346
|
def gen_gap(Pn, T, Q):
"""Runs the generalization gap test. This test
simply checks the difference between the likelihood
assigned to the training set versus that assigned to
a held out test set.
Inputs:
Pn: (n X d) np array containing the held out test sample
of dimension d
T: (l X d) np array containing the training sample of
dimension d
    Q: trained model of type sklearn.neighbors.KernelDensity
Outputs:
log_lik_gap: scalar representing the difference of the log
likelihoods of Pn and T
"""
return Q.score(T) - Q.score(Pn)
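
# Usage sketch with scikit-learn's KernelDensity (the docstring's "scipy.neighbors"
# appears to mean sklearn.neighbors). KernelDensity.score returns a total
# log-likelihood, so equally sized samples are used here.
import numpy as np
from sklearn.neighbors import KernelDensity

rng = np.random.default_rng(0)
T = rng.standard_normal((300, 2))    # training sample
Pn = rng.standard_normal((300, 2))   # held-out sample of the same size
Q = KernelDensity(bandwidth=0.5).fit(T)
print(gen_gap(Pn, T, Q))  # clearly positive values suggest overfitting to T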
|
d57d16c06d05cea86e6f6ea89484574f20500170
| 3,644,347
|
def get_shapes(node, intermediate=False, exclusive=False):
"""Get the shapes of given node.
Args:
node (str): Node to query its shapes
intermediate (bool): Get intermediate shapes when True.
exclusive (bool): Only return the intermediate shapes if True.
Please note that the intermediate flag must be True as well.
Returns:
list: The shapes found below given node.
"""
# if given node is a list, assume first element
if isinstance(node, list):
node = node[0]
LOG.info("Given node is a list. Using first element.")
# return as list if given node is already a shape
if cmds.objectType(node, isAType="shape"):
return [node]
# query shapes
shapes = cmds.listRelatives(
node, shapes=True, type="deformableShape", path=True
)
shapes = shapes or []
# separate shapes orig
orig = []
for each in list(shapes): # duplicated `shapes` object to remove safely
if cmds.ls(each, intermediateObjects=True):
orig.append(each)
shapes.remove(each)
if not intermediate:
return shapes
if exclusive:
return orig
return shapes + orig
|
9e6d1c3e9030d1ce2804953cc7316d53840d3195
| 3,644,348
|
def solve_mip_mlp_elided(verif_instance):
"""Compute optimal attack loss for MLPs, via exactly solving MIP."""
assert MIP_SOLVERS, 'No MIP solvers installed with cvxpy.'
assert verif_instance.type == utils.VerifInstanceTypes.MLP_ELIDED
params, bounds, obj, obj_const = (
verif_instance.params, verif_instance.bounds, verif_instance.obj,
verif_instance.const)
layer_sizes = utils.mlp_layer_sizes(params)
on_state = []
post_activations = [cp.Variable((1, layer_sizes[0]))]
pre_activations = []
constraints = []
for (i, param) in enumerate(params):
W, b = param
b = jnp.reshape(b, (1, b.size))
on_state.append(cp.Variable((1, b.size), boolean=True))
pre_activations.append(cp.Variable((1, b.size)))
post_activations.append(cp.Variable((1, b.size)))
# Linear relaxation of ReLU constraints
constraints += [pre_activations[-1] == post_activations[-2]@W + b]
constraints += [post_activations[-1] >= pre_activations[-1]]
constraints += [post_activations[-1] >= 0]
# If ReLU is off, post activation is non-positive. Otherwise <= ub
constraints += [post_activations[-1] <= cp.multiply(on_state[-1],
bounds[i+1].ub)]
# If ReLU is off, pre-activation is non-positive. Otherwise <= ub_pre
constraints += [pre_activations[-1] <= cp.multiply(on_state[-1],
bounds[i+1].ub_pre)]
# If ReLU is on, post-activation == pre-activation
# Define <= here, >= constraint added above.
constraints += [post_activations[-1]-pre_activations[-1] <=
cp.multiply(1-on_state[-1],
bounds[i+1].ub-bounds[i+1].lb_pre)]
# Optionally, include IBP bounds to speed up MIP solving
# Post activations are within bounds
# i=0 case encodes input constraint
for (i, post) in enumerate(post_activations):
constraints += [post <= bounds[i].ub]
constraints += [post >= bounds[i].lb]
# # Pre activations are within bounds
for (i, pre) in enumerate(pre_activations):
constraints += [pre <= bounds[i+1].ub_pre]
constraints += [pre >= bounds[i+1].lb_pre]
# Set objective over final post-activations
obj_cp = cp.sum(cp.multiply(obj, post_activations[-1]))
# Define and solve problem
problem = cp.Problem(cp.Maximize(obj_cp), constraints)
# NB: Originally, we used cp.ECOS_BB here, but cvxpy 1.1 drops support,
# so we just use the first available MIP solver (which is dependent on user
# installation).
problem.solve(solver=MIP_SOLVERS[0])
# Report results
info = {
'problem': problem,
'post': post_activations,
'pre': pre_activations,
}
return obj_cp.value + obj_const, info
|
89ef1f133598feaf73d265411b5ed6597736ddf5
| 3,644,349
|
def compute_kullback_leibler_check_statistic(n=100, prngstate=None):
"""Compute the lowest of the survival function and the CDF of the exact KL
divergence KL(N(mu1,s1)||N(mu2,s2)) w.r.t. the sample distribution of the
KL divergence drawn by computing log(P(x|N(mu1,s1)))-log(P(x|N(mu2,s2)))
over a sample x~N(mu1,s1). If we are computing the KL divergence
accurately, the exact value should fall squarely in the sample, and the
tail probabilities should be relatively large.
"""
if prngstate is None:
raise TypeError('Must explicitly specify numpy.random.RandomState')
mu1 = mu2 = 0
s1 = 1
s2 = 2
exact = gaussian_kl_divergence(mu1, s1, mu2, s2)
sample = prngstate.normal(mu1, s1, n)
lpdf1 = gaussian_log_pdf(mu1, s1)
lpdf2 = gaussian_log_pdf(mu2, s2)
estimate, std = kl.kullback_leibler(sample, lpdf1, lpdf2)
# This computes the minimum of the left and right tail probabilities of the
# exact KL divergence vs a gaussian fit to the sample estimate. There is a
# distinct negative skew to the samples used to compute `estimate`, so this
# statistic is not uniform. Nonetheless, we do not expect it to get too
# small.
return erfc(abs(exact - estimate) / std) / 2
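
# A minimal usage sketch, assuming the module-level helpers used above
# (gaussian_kl_divergence, gaussian_log_pdf, kl.kullback_leibler, erfc) are importable:
import numpy as np
stat = compute_kullback_leibler_check_statistic(n=200, prngstate=np.random.RandomState(17))
assert 0.0 <= stat <= 0.5  # the statistic is a (half) tail probability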
|
8c7036e89a3bfd347b613efac76c1b8dffde2cfa
| 3,644,350
|
def build_signature(inputs, outputs):
"""Build the signature for use when exporting the graph.
Args:
inputs: a dictionary from tensor name to tensor
outputs: a dictionary from tensor name to tensor
Returns:
The signature, a SignatureDef proto, specifies the input/output tensors
to bind when running prediction.
"""
signature_inputs = {
key: saved_model_utils.build_tensor_info(tensor)
for key, tensor in inputs.items()
}
signature_outputs = {
key: saved_model_utils.build_tensor_info(tensor)
for key, tensor in outputs.items()
}
signature_def = signature_def_utils.build_signature_def(
signature_inputs, signature_outputs,
signature_constants.PREDICT_METHOD_NAME)
return signature_def
|
ac760f7efcb27cf985aa9048015ba3be77230bc4
| 3,644,351
|
def fuse_depthwise_conv2d(input_graph_def):
"""Modifies the provided graph by fusing a set of ops into a single
_FusedDepthwiseConv2d op.
DepthwiseConv2dNative + BiasAdd + Activation => _FusedDepthwiseConv2dNative
Args:
input_graph_def: A GraphDef containing a model.
Returns:
Modified graph with FusedDepthwiseConv2dNative ops generated, and modified
weights.
Raises:
ValueError: If the graph is badly formed with duplicate node names.
"""
# Two passes approach, first find pattern of
# DepthwiseConv2dNative + BiasAdd + Activation
# Then find pattern of
# DepthwiseConv2dNative + BiasAdd
graph_def = _fuse_depthwise_conv2d_with_match_function(
input_graph_def, _find_contraction_with_bias_and_activation)
graph_def = _fuse_depthwise_conv2d_with_match_function(
graph_def, _find_contraction_with_bias)
graph_def = _fuse_depthwise_conv2d_with_match_function(
graph_def, _find_contraction_with_activation)
return graph_def
|
bbfdfbc02debfcad5e1c965c09c9039c3f15faac
| 3,644,352
|
def pandas_to_example_str(obj, *, local_data_model=None) -> str:
"""
Convert data frame to a Python source code string.
:param obj: data frame to convert.
:param local_data_model: data model to use.
:return: Python source code representation of obj.
"""
if local_data_model is None:
local_data_model = data_algebra.default_data_model
pd_module_name = local_data_model.presentation_model_name
if not local_data_model.is_appropriate_data_instance(obj):
raise TypeError("Expect obj to be local_data_model.pd.DataFrame")
obj = obj.reset_index(drop=True, inplace=False)
nrow = obj.shape[0]
pandas_string = pd_module_name + ".DataFrame({"
for k in obj.columns:
col = obj[k]
nulls = local_data_model.bad_column_positions(col)
cells = ["None" if nulls[i] else col[i].__repr__() for i in range(nrow)]
pandas_string = (
pandas_string + "\n " + k.__repr__() + ": [" + ", ".join(cells) + "],"
)
pandas_string = pandas_string + "\n })"
return pandas_string
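
# A minimal usage sketch with the default data model (assumes data_algebra and pandas are installed):
import pandas
df = pandas.DataFrame({"x": [1, 2, None], "y": ["a", "b", "c"]})
print(pandas_to_example_str(df))  # prints Python source that reconstructs df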
|
a0c1bd23797413b739c496e621c43a4f43293c17
| 3,644,353
|
from typing import Counter
def get_results_object_model(target_node, paths_dict, name_to_description, q1_doid_to_disease, probs=False):
"""
Returns pathway results as an object model
:param target_node: target_node DOID:1234
:param paths_dict: a dictionary (keys OMIM id's) with values (path_name,path_type)
:param name_to_description: a dictionary to translate between source_node and genetic condition name
:param q1_doid_to_disease: a dictionary to translate between target_node and disease name
:param probs: optional probability of the OMIM being the right one
:return: ``dict``
"""
ret_obj = dict()
source_node_list = paths_dict.keys()
if len(source_node_list) > 0:
if target_node in q1_doid_to_disease:
doid_name = q1_doid_to_disease[target_node]
else:
doid_name = target_node
ret_obj['target_disease'] = doid_name
ret_source_nodes_dict = dict()
ret_obj['source_genetic_conditions'] = ret_source_nodes_dict
source_node_names = []
for source_node in source_node_list:
if source_node in name_to_description:
source_node_names.append(name_to_description[source_node])
else:
source_node_names.append(source_node)
for source_node in source_node_list:
source_node_dict = {}
path_names, path_types = paths_dict[source_node]
if len(path_names) == 1:
path_list = []
path_list.append({'type': 'node',
'name': source_node,
'desc': name_to_description.get(source_node, '')})
path_names = path_names[0]
path_types = path_types[0]
for index in range(1, len(path_names) - 1):
if index % 2 == 1:
path_list.append({'type': 'rel',
'name': path_types[index]})
else:
path_list.append({'type': 'node',
'name': path_names[index],
'desc': get_node_property(path_names[index], 'name')})
path_list.append({'type': 'node',
'name': target_node,
'desc': q1_doid_to_disease.get(target_node, '')})
if probs:
if source_node in probs:
source_node_dict['conf'] = probs[source_node]
source_node_dict['path'] = path_list
else:
# print(to_print)
if probs:
if source_node in probs:
source_node_dict['conf'] = probs[source_node]
relationships_and_counts_dict = Counter(map(tuple, path_types))
relationships = list(relationships_and_counts_dict.keys())
counts = []
for rel in relationships:
counts.append(relationships_and_counts_dict[rel])
relationships_and_counts = []
for i in range(len(counts)):
relationships_and_counts.append((relationships[i], counts[i]))
relationships_and_counts_sorted = sorted(relationships_and_counts, key=lambda tup: tup[1])
count_list = []
for index in range(len(relationships_and_counts_sorted)):
relationship = relationships_and_counts_sorted[index][0]
count = relationships_and_counts_sorted[index][1]
count_list.append({'count': count,
'reltype': str(relationship)})
source_node_dict['counts'] = count_list
ret_source_nodes_dict[source_node] = source_node_dict
return ret_obj
|
5ac0ba140a80edf12005112330191d910673e34a
| 3,644,354
|
def MaxLonSep( maxarc, baselat ):
"""Calculates the maximum separation in longitude that a point can have
from a reference point at latitude baselat and still be within a given
great circle arc length, maxarc, of the reference point. All quantities
in radians."""
if abs(baselat) + maxarc <= 0.5 * pi:
#result = asin( abs( sin(maxarc) ) / cos( baselat ) )
#result = acos(sqrt(cos(baselat)**2 - sin(maxarc)**2)/cos(baselat))
c = cos( baselat )
s = abs( sin( maxarc ) )
y = s
x = sqrt( ( c + s ) * ( c - s ) )
result = atan2( y, x )
else:
result = pi
return result
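
# A quick check, assuming the math names used above (pi, sin, cos, sqrt, atan2) come from a
# module-level math import: the same arc length allows more longitude near the pole.
from math import radians, degrees
print(degrees(MaxLonSep(radians(1.0), radians(0.0))))   # ~1.0 degree at the equator
print(degrees(MaxLonSep(radians(1.0), radians(80.0))))  # ~5.8 degrees at 80 N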
|
cffd6639441f548682e47c9af22c194a18d0f9fe
| 3,644,355
|
def auto_read(filename):
"""Automatically determine the format of filename and open accordingly"""
#XXX: this won't work correctly on pipes
#would be better to use file magic
f = open(filename, 'r')
firstchar = f.read(1)
f.close()
if firstchar == '#':
return gnucap_read(filename)
else:
return spice_read(filename)
|
0485626e6305aa43ece6b6cf36a924f7526af26c
| 3,644,356
|
import time
def WaitForOperation(client, messages, operation_name,
operation_description=None,
project=None, timeout=180):
"""Wait for an operation to complete.
Polls the operation requested approximately every second, showing a
progress indicator. Returns when the operation has completed.
Args:
client: The API client to use.
messages: The API message to use.
operation_name: The name of the operation to wait on, as returned by
operations.list.
operation_description: A short description of the operation to wait on,
such as 'create' or 'delete'. Will be displayed to the user.
project: The name of the project that this operation belongs to.
timeout: Number of seconds to wait for. Defaults to 3 minutes.
Returns:
The operation when it is done.
Raises:
HttpException: A http error response was received while executing api
request. Will be raised if the operation cannot be found.
OperationError: The operation finished with error(s).
Error: The operation the timeout without completing.
"""
tick_increment = 1 # every second(s)
ticks = 0
message = ('Waiting for {0}[{1}]'.format(
operation_description + ' ' if operation_description else '',
operation_name))
request = messages.DeploymentmanagerOperationsGetRequest(
project=project, operation=operation_name)
with progress_tracker.ProgressTracker(message, autotick=False) as ticker:
while ticks < timeout:
operation = client.operations.Get(request)
# Operation status is one of PENDING, RUNNING, DONE
if operation.status == 'DONE':
if operation.error:
raise exceptions.OperationError(
'Error in Operation [{0}]: {1}'.format(
operation_name, dm_util.RenderMessageAsYaml(operation.error)))
else: # Operation succeeded
return operation
ticks += tick_increment
ticker.Tick()
time.sleep(tick_increment)
# Timeout exceeded
raise exceptions.Error(
'Wait for Operation [{0}] exceeded timeout [{1}].'.format(
operation_name, str(timeout)))
|
e63b3951dd98762d28050ebff753f78e88cd0231
| 3,644,357
|
def get_unstaged_files(gitobj):
"""
ref:
http://gitpython.readthedocs.io/en/stable/tutorial.html#obtaining-diff-information
"""
diff = []
diff.extend(gitobj.index.diff(gitobj.head.commit))
diff.extend(gitobj.index.diff(None))
return {"changed": diff, "untracked": gitobj.untracked_files}
|
623a2706bb0d2c428df0f44fe10a473e7d740938
| 3,644,358
|
from typing import Optional
from typing import Union
from typing import Tuple
def conv2d(
inp: Tensor,
weight: Tensor,
bias: Optional[Tensor] = None,
stride: Union[int, Tuple[int, int]] = 1,
padding: Union[int, Tuple[int, int]] = 0,
dilation: Union[int, Tuple[int, int]] = 1,
groups: int = 1,
conv_mode="CROSS_CORRELATION",
compute_mode="DEFAULT",
) -> Tensor:
"""
2D convolution operation.
Refer to :class:`~.Conv2d` for more information.
:param inp: feature map of the convolution operation.
:param weight: convolution kernel.
:param bias: bias added to the result of convolution (if given).
:param stride: stride of the 2D convolution operation. Default: 1
:param padding: size of the paddings added to the input on both sides of its
spatial dimensions. Only zero-padding is supported. Default: 0
:param dilation: dilation of the 2D convolution operation. Default: 1
:param groups: number of groups into which the input and output channels are divided, so as to perform a ``grouped convolution``. When ``groups`` is not 1,
``in_channels`` and ``out_channels`` must be divisible by ``groups``,
and the shape of weight should be `(groups, out_channel // groups,
in_channels // groups, height, width)`.
:type conv_mode: string or :class:`Convolution.Mode`
:param conv_mode: supports "CROSS_CORRELATION". Default:
"CROSS_CORRELATION"
:type compute_mode: string or
:class:`Convolution.ComputeMode`
:param compute_mode: when set to "DEFAULT", no special requirements will be
placed on the precision of intermediate results. When set to "FLOAT32",
"Float32" would be used for accumulator and intermediate result, but only
effective when input and output are of Float16 dtype.
:return: output tensor.
"""
assert conv_mode == "CROSS_CORRELATION" or conv_mode.name == "CROSS_CORRELATION"
assert compute_mode == "DEFAULT" or compute_mode.name == "DEFAULT"
stride_h, stride_w = expand_hw(stride)
pad_h, pad_w = expand_hw(padding)
dilate_h, dilate_w = expand_hw(dilation)
Sparse = builtin.Convolution.Sparse
sparse_type = "DENSE" if groups == 1 else "GROUP"
op = builtin.Convolution(
stride_h=stride_h,
stride_w=stride_w,
pad_h=pad_h,
pad_w=pad_w,
dilate_h=dilate_h,
dilate_w=dilate_w,
strategy=get_conv_execution_strategy(),
mode=conv_mode,
compute_mode=compute_mode,
sparse=sparse_type,
)
inp, weight = utils.convert_inputs(inp, weight)
(output,) = apply(op, inp, weight)
if bias is not None:
output += bias
return output
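
# A minimal usage sketch, assuming MegEngine is installed (this function relies on
# megengine internals such as builtin.Convolution and utils.convert_inputs):
import numpy as np
import megengine as mge
inp = mge.tensor(np.random.randn(1, 3, 32, 32).astype("float32"))
weight = mge.tensor(np.random.randn(8, 3, 3, 3).astype("float32"))
out = conv2d(inp, weight, stride=1, padding=1)
print(out.shape)  # (1, 8, 32, 32)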
|
fff9e2430c21757e3a5a4e1146ead63ad2fb5918
| 3,644,359
|
import encodings
def find_tex_directives(texfile, ignore_root_loops=False):
"""Build a dictionary of %!TEX directives.
The main ones we are concerned with are:
root
Specifies a root file to run tex on for this subsidiary
TS-program
Tells us which latex program to run
TS-options
Options to pass to TS-program
encoding
The text encoding of the tex file
Arguments:
texfile
The initial tex file which should be searched for tex directives.
If this file contains a “root” directive, then the file specified
in this directive will be searched next.
ignore_root_loops
Specifies if this function exits with an error status if the tex
root directives contain a loop.
Returns: ``{str: str}``
Examples:
>>> chdir('Tests/TeX')
>>> directives = find_tex_directives('input/packages_input1.tex')
>>> print(directives['root']) # doctest:+ELLIPSIS
/.../Tests/TeX/packages.tex
>>> print(directives['TS-program'])
xelatex
>>> find_tex_directives('makeindex.tex')
{}
>>> chdir('../..')
"""
if not texfile:
return {}
root_chain = [texfile]
directive_regex = compile(r'%\s*!T[E|e]X\s+([\w-]+)\s*=\s*(.+)')
directives = {}
while True:
for encoding in encodings:
try:
lines = [line for (line_number, line)
in enumerate(open(texfile, encoding=encoding))
if line_number < 20]
break
except UnicodeDecodeError:
continue
new_directives = {directive.group(1): directive.group(2).rstrip()
for directive
in [directive_regex.match(line) for line in lines]
if directive}
directives.update(new_directives)
if 'root' in new_directives:
root = directives['root']
new_tex_file = (root if root.startswith('/') else
realpath(join(dirname(texfile), root)))
directives['root'] = new_tex_file
else:
break
if new_tex_file in root_chain:
if ignore_root_loops:
break
print('''<div id="commandOutput"><div id="preText">
<p class="error">There is a loop in your %!TEX root
directives.</p>
</div></div>''')
exit(EXIT_LOOP_IN_TEX_ROOT)
else:
texfile = new_tex_file
root_chain.append(texfile)
return directives
|
df639f11f1609ee5c8a6bca0add8c154a42c481a
| 3,644,360
|
def projects():
"""
Handles the GET & POST request to '/projects'.
GET: requests to render page
POST: request to edit project with sent data
:return: render projects page / Json containing authorisation error / manage(data) function call
"""
if request.method == "GET":
return render_template('projects.html')
else:
if not current_user.is_authenticated or (current_user.role != "admin" and current_user.role != "employee"):
return jsonify(
{'success': False, "message": "You are not authorized to edit the selected projects"}), 400, {
'ContentType': 'application/json'}
data = request.json
for project in data["projects"]:
if current_user.role != "admin" and not employee_authorized_for_project(current_user.name, project):
return jsonify(
{'success': False, "message": "You are not authorized to edit the selected projects"}), 400, {
'ContentType': 'application/json'}
return manage(data)
|
7a8a1d9c4d50623ad557d9dcaf419c5a3e83f521
| 3,644,361
|
def evolve_fqe_givens_sector(wfn: Wavefunction, u: np.ndarray,
sector='alpha') -> Wavefunction:
"""Evolve a wavefunction by u generated from a 1-body Hamiltonian.
Args:
wfn: FQE Wavefunction on n-orbitals
u: (n x n) unitary matrix.
sector: Optional either 'alpha' or 'beta' indicating which sector
to rotate
Returns:
New evolved wfn object.
"""
if sector == 'alpha':
sigma = 0
elif sector == 'beta':
sigma = 1
else:
raise ValueError("Bad section variable. Either (alpha) or (beta)")
if not np.isclose(u.shape[0], wfn.norb()):
raise ValueError(
"unitary is not specified for the correct number of orbitals")
rotations, diagonal = givens_decomposition_square(u.copy())
# Iterate through each layer and time evolve by the appropriate
# fermion operators
for layer in rotations:
for givens in layer:
i, j, theta, phi = givens
if not np.isclose(phi, 0):
op = of.FermionOperator(
((2 * j + sigma, 1), (2 * j + sigma, 0)), coefficient=-phi)
wfn = wfn.time_evolve(1.0, op)
if not np.isclose(theta, 0):
op = of.FermionOperator(((2 * i + sigma, 1),
(2 * j + sigma, 0)),
coefficient=-1j * theta) + \
of.FermionOperator(((2 * j + sigma, 1),
(2 * i + sigma, 0)),
coefficient=1j * theta)
wfn = wfn.time_evolve(1.0, op)
# evolve the last diagonal phases
for idx, final_phase in enumerate(diagonal):
if not np.isclose(final_phase, 1.0):
op = of.FermionOperator(
((2 * idx + sigma, 1), (2 * idx + sigma, 0)),
-np.angle(final_phase))
wfn = wfn.time_evolve(1.0, op)
return wfn
|
7f8334d64a1965424c5a1faf166bbf8741c0e1ae
| 3,644,362
|
import os
def save_tiles(tiles, prefix='', directory=os.getcwd(), format='png'):
"""
Write image files to disk. Create specified folder(s) if they
    don't exist. Return a tuple of :class:`Tile` instances.
Args:
tiles (list): List, tuple or set of :class:`Tile` objects to save.
prefix (str): Filename prefix of saved tiles.
Kwargs:
        directory (str): Directory to save tiles. Created if non-existent.
Returns:
Tuple of :class:`Tile` instances.
"""
# Causes problems in CLI script.
# if not os.path.exists(directory):
# os.makedirs(directory)
for tile in tiles:
tile.save(filename=tile.generate_filename(prefix=prefix,
directory=directory,
format=format),
format=format)
return tuple(tiles)
|
2848eee201d16ca15eed06019199d95a59393a37
| 3,644,363
|
from typing import Iterable
def epoch_folding_search(times, frequencies, nbin=128, segment_size=5000,
expocorr=False, gti=None, weights=1, fdots=0):
"""Performs epoch folding at trial frequencies in photon data.
If no exposure correction is needed and numba is installed, it uses a fast
algorithm to perform the folding. Otherwise, it runs a *much* slower
algorithm, which however yields a more precise result.
The search can be done in segments and the results averaged. Use
segment_size to control this
Parameters
----------
times : array-like
the event arrival times
frequencies : array-like
the trial values for the frequencies
Other Parameters
----------------
nbin : int
the number of bins of the folded profiles
segment_size : float
the length of the segments to be averaged in the periodogram
fdots : array-like
trial values of the first frequency derivative (optional)
expocorr : bool
correct for the exposure (Use it if the period is comparable to the
length of the good time intervals). If True, GTIs have to be specified
via the ``gti`` keyword
gti : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
Good time intervals
weights : array-like
weight for each time. This might be, for example, the number of counts
if the times array contains the time bins of a light curve
Returns
-------
(fgrid, stats) or (fgrid, fdgrid, stats), as follows:
fgrid : array-like
frequency grid of the epoch folding periodogram
fdgrid : array-like
frequency derivative grid. Only returned if fdots is an array.
stats : array-like
the epoch folding statistics corresponding to each frequency bin.
"""
if expocorr or not HAS_NUMBA or isinstance(weights, Iterable):
if expocorr and gti is None:
raise ValueError('To calculate exposure correction, you need to'
' specify the GTIs')
def stat_fun(t, f, fd=0, **kwargs):
return profile_stat(fold_events(t, f, fd, **kwargs)[1])
return \
_folding_search(stat_fun, times, frequencies,
segment_size=segment_size,
use_times=True, expocorr=expocorr, weights=weights,
gti=gti, nbin=nbin, fdots=fdots)
return _folding_search(lambda x: profile_stat(_profile_fast(x, nbin=nbin)),
times, frequencies, segment_size=segment_size,
fdots=fdots)
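
# A minimal usage sketch on simulated arrival times, assuming the stingray helpers used
# above (_folding_search, profile_stat, fold_events, HAS_NUMBA) are available:
import numpy as np
times = np.sort(np.random.uniform(0, 1000, 2000))
freqs = np.linspace(0.4, 0.6, 51)
fgrid, stats = epoch_folding_search(times, freqs, nbin=32, segment_size=500)
print(fgrid[np.argmax(stats)])  # with pulsed data, the peak marks the pulse frequency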
|
7eaa1d038a883babcf55f239acf519f5c059b0b2
| 3,644,364
|
import re
def apply_matcher(words,
offsets,
dictionary,
max_ngrams=5,
longest_match_only=True,
case_sensitive = False,
split_on=None):
"""
TODO: cleanup!
"""
    # convert to source char offsets
text = get_text(words, offsets)
matches = []
for i in range(0, len(words)):
match = None
start = offsets[i]
for j in range(i + 1, min(i + max_ngrams + 1, len(words) + 1)):
end = offsets[j - 1] + len(words[j - 1])
# term types: normalize whitespace & tokenized + whitespace
for term in [
re.sub(r'''\s{2,}''', ' ', text[start:end]).strip(),
' '.join([w for w in words[i:j] if w.strip()])
]:
if match_term(term, dictionary, case_sensitive):
match = end
break
if match:
term = re.sub(r'''\s{2,}''', ' ', text[start:match]).strip()
matches.append(([start, match], term))
if longest_match_only:
# sort on length then end char
matches = sorted(matches, key=lambda x: x[0][-1], reverse=1)
f_matches = []
curr = None
for m in matches:
if curr is None:
curr = m
continue
(i, j), _ = m
if (i >= curr[0][0] and i <= curr[0][1]) and (j >= curr[0][0] and j <= curr[0][1]):
pass
else:
f_matches.append(curr)
curr = m
if curr:
f_matches.append(curr)
return f_matches
return matches
|
c52c0dd7b881d29952ebd09988362156707ad4bc
| 3,644,365
|
import os
def create_and_put_metrics_and_widgets() -> dict:
"""For each repository, aggregates all text and metric data and creates widgets for each
:returns: a dictionary mapping the dashboard name to the list of the text and metric widgets for each repository to
put in the dashboard
:rtype: dict
"""
widgets = {}
for repo_name in os.environ['repo_names'].split(','):
owner = os.environ['owner']
if '/' in repo_name:
[owner, repo_name] = repo_name.split('/')
sorted_widgets = github_docker.aggregate_metrics(owner, repo_name)
# Create a Cloudwatch metric/text widget out of each sorted widget
for widget_title, widget in sorted_widgets.items():
if widget['type'] == 'metric':
title = repo_name
if widget_title != os.environ['default_metric_widget_name']:
title += ' ' + widget_title
formatted_widget = cw_interactions.create_metric_widget(repo_name, widget['data'], title)
elif widget['type'] == 'text':
title = repo_name
if widget_title == os.environ['default_text_widget_name']:
title += ' Properties'
else:
title += ' ' + widget_title
formatted_widget = cw_interactions.create_text_widget(widget['data'], title=title)
else:
print("Invalid widget type specified for widget:", widget_title)
continue
dashboard_name = os.environ['dashboard_name_prefix']
if widget['dashboard_level'] != 'main':
dashboard_name += '-' + repo_name
# Add widgets to dashboard
widgets_for_specified_dashboard = widgets.get(dashboard_name, [])
widgets_for_specified_dashboard.append(formatted_widget)
widgets[dashboard_name] = widgets_for_specified_dashboard
# Add activity widget
main_widgets = widgets.get(os.environ['dashboard_name_prefix'], [])
main_widgets.append(cw_interactions.create_activity_widget(repo_name))
widgets[os.environ['dashboard_name_prefix']] = main_widgets
return widgets
|
a779f86e9bca3090336a18a8716acab9b5372dfb
| 3,644,366
|
def boxbin(x,y,xedge,yedge,c=None,figsize=(5,5),cmap='viridis',mincnt=10,vmin=None,vmax=None,edgecolor=None,powernorm=False,
ax=None,normed=False,method='mean',quantile=None,alpha=1.0,cbar=True,unconditional=False,master_count=np.array([])):
""" This function will grid data for you and provide the counts if no variable c is given, or the median if
a variable c is given. In the future I will add functionallity to do the median, and possibly quantiles.
x: 1-D array
y: 1-D array
xedge: 1-D array for xbins
yedge: 1-D array for ybins
c: 1-D array, same len as x and y
returns
axis handle
cbar handle
C matrix (counts or median values in bin)
"""
midpoints = np.empty(xedge.shape[0]-1)
for i in np.arange(1,xedge.shape[0]):
midpoints[i-1] = xedge[i-1] + (np.abs(xedge[i] - xedge[i-1]))/2.
#note on digitize. bin 0 is outside to the left of the bins, bin -1 is outside to the right
ind1 = np.digitize(x,bins = xedge) #inds of x in each bin
ind2 = np.digitize(y,bins = yedge) #inds of y in each bin
#drop points outside range
outsideleft = np.where(ind1 != 0)
ind1 = ind1[outsideleft]
ind2 = ind2[outsideleft]
if c is None:
pass
else:
c = c[outsideleft]
outsideright = np.where(ind1 != len(xedge))
ind1 = ind1[outsideright]
ind2 = ind2[outsideright]
if c is None:
pass
else:
c = c[outsideright]
outsideleft = np.where(ind2 != 0)
ind1 = ind1[outsideleft]
ind2 = ind2[outsideleft]
if c is None:
pass
else:
c = c[outsideleft]
outsideright = np.where(ind2 != len(yedge))
ind1 = ind1[outsideright]
ind2 = ind2[outsideright]
if c is None:
pass
else:
c = c[outsideright]
if c is None:
c = np.zeros(len(ind1))
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
df2 = df.groupby(["x","y"]).count()
df = df2.where(df2.values >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])*-9999
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = np.ma.masked_where(C == -9999,C)
if normed:
n_samples = np.ma.sum(C)
C = C/n_samples
C = C*100
print('n_samples= {}'.format(n_samples))
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolormesh(xedge,yedge,C.transpose(),cmap=cmap,edgecolor=edgecolor,norm=colors.PowerNorm(gamma=0.5),vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
else:
pm = ax.pcolormesh(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,edgecolor=edgecolor,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
return ax,cbar,C
elif unconditional:
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
if method=='mean':
df2 = df.groupby(["x","y"])['c'].sum()
df3 = df.groupby(["x","y"]).count()
df2 = df2.to_frame()
df2.insert(1,'Count',df3.values)
df = df2.where(df2.Count >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = C/master_count.values
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,norm=colors.PowerNorm(gamma=0.5),alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
df = pd.DataFrame({'x':ind1-1,'y':ind2-1,'c':c})
if method=='mean':
df2 = df.groupby(["x","y"])['c'].mean()
elif method=='std':
df2 = df.groupby(["x","y"])['c'].std()
elif method=='median':
df2 = df.groupby(["x","y"])['c'].median()
        elif method=='quantile':
if quantile is None:
print('No quantile given, defaulting to median')
quantile = 0.5
else:
pass
df2 = df.groupby(["x","y"])['c'].apply(percentile(quantile*100))
df3 = df.groupby(["x","y"]).count()
df2 = df2.to_frame()
df2.insert(1,'Count',df3.values)
df = df2.where(df2.Count >= mincnt).dropna()
C = np.ones([xedge.shape[0]-1,yedge.shape[0]-1])*-9999
for i,ii in enumerate(df.index.values):
C[ii[0],ii[1]] = df.c.values[i]
C = np.ma.masked_where(C == -9999,C)
if ax is None:
fig = plt.figure(figsize=(5,5))
ax = plt.gca()
else:
pass
if powernorm:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,norm=colors.PowerNorm(gamma=0.5),alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
else:
pm = ax.pcolor(xedge,yedge,C.transpose(),cmap=cmap,vmin=vmin,vmax=vmax,alpha=alpha)
if cbar:
cbar = plt.colorbar(pm,ax=ax)
else:
cbar = pm
return ax,cbar,C
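
# A minimal usage sketch in counts-only mode (c=None), assuming numpy, pandas and
# matplotlib.pyplot are imported at module level as np, pd and plt:
import numpy as np
import matplotlib.pyplot as plt
x = np.random.randn(5000)
y = 0.5 * x + np.random.randn(5000)
edges = np.linspace(-3, 3, 31)
ax, cbar, C = boxbin(x, y, edges, edges, mincnt=1)
plt.show()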
|
b80a9fdf25d16ecd5e73addae325d1a2348ef900
| 3,644,367
|
def _get_elastic_document(
tasks: list[dict],
symprec: float,
fitting_method: str,
) -> ElasticDocument:
"""
Turn a list of deformation tasks into an elastic document.
Parameters
----------
tasks : list of dict
A list of deformation tasks.
symprec : float
Symmetry precision for deriving symmetry equivalent deformations. If
``symprec=None``, then no symmetry operations will be applied.
fitting_method : str
The method used to fit the elastic tensor. See pymatgen for more details on the
methods themselves. The options are:
- "finite_difference" (note this is required if fitting a 3rd order tensor)
- "independent"
- "pseudoinverse"
Returns
-------
ElasticDocument
An elastic document.
"""
structure = get(tasks[0], "output.transformations.history.0.input_structure")
stresses = []
deformations = []
uuids = []
job_dirs = []
for doc in tasks:
deformation = get(doc, "output.transformations.history.0.deformation")
stress = get(doc, "output.output.stress")
deformations.append(Deformation(deformation))
stresses.append(Stress(stress))
uuids.append(doc["uuid"])
job_dirs.append(doc["output"]["dir_name"])
return ElasticDocument.from_stresses(
structure,
stresses,
deformations,
uuids,
job_dirs,
fitting_method=fitting_method,
symprec=symprec,
)
|
f01ea537fbd73c6a2da529a4da15e358033ed2a9
| 3,644,368
|
from typing import Union
from pathlib import Path
from typing import Counter
def first(filename: Union[str, Path]) -> int:
"""
Sort the input, prepend with 0 and append with 3 + the max.
Return:
(# of successive differences == 1) * (# of successive differences == 3)
"""
with open(filename, "rt") as infile:
jolts = sorted(int(line.strip()) for line in infile)
jolts = [0] + jolts + [jolts[-1] + 3]
diffs = Counter(right - left for left, right in zip(jolts[:-1], jolts[1:]))
return diffs[3] * diffs[1]
|
18ffe3e97d7256ea61fcf6e436d36bb360d0a285
| 3,644,369
|
def charge_is_valid(charge_profile, capacity=6, max_charge_rate=2.5, time_unit=0.5):
"""
Function determining if a charge profile is valid (and fully charges the battery)
"""
    if not np.all(np.isclose(capacity/time_unit, charge_profile.groupby(charge_profile.index.date).sum())):
        return False
    elif not np.all(charge_profile.groupby(charge_profile.index.date).max() <= max_charge_rate):
        return False
else:
return True
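
# A minimal usage sketch, assuming numpy is imported at module level as np:
# charging at 2 kW for six half-hour periods delivers the default 6 kWh capacity.
import pandas as pd
idx = pd.date_range("2020-01-01 00:00", periods=6, freq="30min")
profile = pd.Series([2.0] * 6, index=idx)
print(charge_is_valid(profile))  # True: sums to capacity/time_unit and stays under 2.5 kW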
|
489717fc834b9492ab3add1ddfaa5c55e2f4d8e9
| 3,644,370
|
def create_slice_obj(start, end, step):
"""Create slice object"""
return slice(start, end, step)
|
88a5c5a9e0d3b714b4316d8744fcdd1a34f347a7
| 3,644,371
|
def binary_cross_entropy_error(y, t):
"""バイナリー交差エントロピー誤差"""
#y.shape (N,C,H,W)
delta = 1e-7
return -np.mean(t*np.log(y + delta) + (1-t)*np.log(1-y + delta))
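
# A quick numeric check: confident, correct predictions give a small loss.
import numpy as np
y = np.array([[0.9, 0.1], [0.2, 0.8]])
t = np.array([[1.0, 0.0], [0.0, 1.0]])
print(binary_cross_entropy_error(y, t))  # ~0.164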
|
a0090d4d5e6695ab0c4d988b8f0efbdfcd44984c
| 3,644,372
|
def get_abc():
"""
:return: list all the abcs as a list
"""
# ok
return list(abcs.find({}, {'_id': False}))
|
aa2c39bdc8ec1f31f43ea02701b5022f612b286b
| 3,644,373
|
from typing import Optional
from typing import List
def matching_system_code(concept: CodeableConcept, system: str) -> Optional[str]:
"""
Returns a code from a specified *system* contained within a given *concept*.
If no code is found for the given *system*, returns None.
Raises an :class:`AssertionError` if more than one encoding for a *system*
is found within the given FHIR *concept*.
"""
system_codes: List[CodeableConcept] = []
if not concept:
return None
system_codes += list(filter(lambda c: matching_system(c, system), concept.coding))
assert len(system_codes) <= 1, "Multiple encodings found in FHIR concept " + \
f"«{concept.concept_type}» for system «{system}»."
if not system_codes:
return None
return system_codes[0].code
|
cd9005ebcfd9ab15e5d27f7f30b8b4ea4b4db7b0
| 3,644,374
|
def get_pybullet(env_name):
""" Returns pybullet dataset and envrironment.
The dataset is provided through d4rl-pybullet. See more details including
available dataset from its GitHub page.
.. code-block:: python
from d3rlpy.datasets import get_pybullet
dataset, env = get_pybullet('hopper-bullet-mixed-v0')
References:
* https://github.com/takuseno/d4rl-pybullet
Args:
env_name (str): environment id of d4rl-pybullet dataset.
Returns:
tuple: tuple of :class:`d3rlpy.dataset.MDPDataset` and gym environment.
"""
try:
env = gym.make(env_name)
dataset = MDPDataset(**env.get_dataset())
return dataset, env
except ImportError:
raise ImportError(
'd4rl-pybullet is not installed.\n' \
'pip install git+https://github.com/takuseno/d4rl-pybullet')
|
79d3e408698ea7454398490a98fd7d653625cbd4
| 3,644,375
|
from typing import Iterable
from typing import Any
def reverse(d: Iterable) -> Any:
"""Reverses the provided iterable, but also RETURNS it"""
d.reverse()
return d
|
5eff6b170afe6424f113ec4b15f985ee8d306e83
| 3,644,376
|
def scalar(typename):
"""
Returns scalar type from ROS message data type, like "uint8" from "uint8[100]".
Returns type unchanged if already a scalar.
"""
return typename[:typename.index("[")] if "[" in typename else typename
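
# Quick examples:
print(scalar("uint8[100]"))  # uint8
print(scalar("float64"))     # float64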
|
729fb68bced11e190b3d32d03bbadd921f191bee
| 3,644,377
|
def subject(mock_messenger: AsyncMock) -> initiator.FirmwareUpdateInitiator:
"""The test subject."""
return initiator.FirmwareUpdateInitiator(mock_messenger)
|
cfab4395d5ffc3de6a33d3eeb2d7ce373f719b06
| 3,644,378
|
def onetangent(ri, rf, ta_transb, k=0, use_alts=True, center='earth'):
"""Orbit transfer with one tangential burn and one nontangential
burn. Must be circular or coaxially elliptic. Currently only for
circular orbits.
:param ri: altitude (or radius) of initial circular orbit (km)
:param rf: altitude (or radius) of initial circular orbit (km)
:param ta_transb: true anomaly of transfer orbit at point b (rad)
:param k: number of revolutions through perigee
:param use_alts: Boolean for switching between ri,rf=altitude
(True) and ri,rf=radius to center
:param center: planetary center of focus; default=earth
:return vtransa: transfer velocity required at point a (km/s)
:return vtransb: transfer velocity required at point b (km/s)
:return fpa_transb: flight path angle for the nontangential
transfer (rad)
:return TOF: time of flight (s)
in work
"""
# update constants and parameters
mu = get_mu(center=center)
if use_alts and center.lower() == 'earth':
ri, rf = [r+r_earth for r in [ri, rf]]
# check location of tangent burn
Rinv = ri/rf
if Rinv > 1:
# tangent burn is at apogee
e_trans = (Rinv-1)/(np.cos(ta_transb)+Rinv)
a_trans = ri/(1+e_trans)
E0 = np.pi
else:
# tangent burn is at perigee
e_trans = (Rinv-1)/(np.cos(ta_transb)-Rinv)
a_trans = ri/(1-e_trans)
E0 = 0.
# compute initial, final, and transfer velocities at a, b
vi = sqrt(mu/ri)
vf = sqrt(mu/rf)
vtransa = sqrt(2*mu/ri - mu/a_trans)
vtransb = sqrt(2*mu/rf - mu/a_trans)
# flight path angle of nontangential transfer
fpa_transb = np.arctan(e_trans*np.sin(ta_transb)
/ (1+e_trans*np.cos(ta_transb)))
# get delta-v's at each point and its total
dva = vtransa - vi
dvb = sqrt( vtransb**2 + vf**2 - 2*vtransb*vf*np.cos(fpa_transb) )
dv_otb = np.abs(dva) + np.abs(dvb)
# computing eccentric anomaly
E = np.arccos((e_trans+np.cos(ta_transb))/(1+e_trans*np.cos(ta_transb)))
# computing time of flight
TOF = sqrt(a_trans**3/mu) * \
(2*k*np.pi+(E-e_trans*np.sin(E))-(E0 - e_trans*np.sin(E0)))
return vtransa, vtransb, fpa_transb, TOF
|
12eae51bc3833df94b063597e2444df851a7960c
| 3,644,379
|
def display_matplot(images, title = None, gray=None):
"""[Standard display fuction used throughout testing to see the output of thhe various transforms.
Displays multilpe plots at once for comparison, always in a square format.]
Arguments:
images {[Array]} -- [the array that contains all of the images you wish to display]
Keyword Arguments:
        title {[String]} -- [A title to display on the plot to keep track of which image is being shown.] (default: {None})
gray {[Opencv const]} -- [The colour space you wish to display the image in.] (default: {None})
Returns:
[matplotlib plot] -- [The created plot]
"""
    n = int(np.ceil(np.sqrt(len(images))))  # grid dimension for a square subplot layout
index = 1
plt.set_cmap('gray')
plt.title(title)
for image in images:
plt.subplot(n, n, index)
plt.imshow(image)
plt.xticks([]), plt.yticks([])
index += 1
plt.waitforbuttonpress(0)
plt.close()
return plt
|
635b6c977d1d71a9d7479e064978c3695115d757
| 3,644,380
|
def get_version():
"""
It returns the pmml version .
Returns
-------
version : String
Returns the version of the pmml.
"""
version = '4.4'
return version
|
162f6e0ffb4c4741fafe2aa16d6fceed16bae99a
| 3,644,381
|
import torch
def customsoftmax(inp, multihotmask):
"""
Custom Softmax
"""
soft = F.softmax(inp, dim=1)
# This takes the mask * softmax ( sums it up hence summing up the classes in border
# then takes of summed up version vs no summed version
return torch.log(
torch.max(soft, (multihotmask * (soft * multihotmask).sum(1, keepdim=True)))
)
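
# A minimal usage sketch with PyTorch, assuming F is torch.nn.functional at module level:
import torch
logits = torch.randn(2, 5, 4, 4)  # (N, C, H, W)
mask = torch.zeros_like(logits)
mask[:, :2] = 1                   # treat the first two classes as the border classes
out = customsoftmax(logits, mask)
print(out.shape)                  # torch.Size([2, 5, 4, 4])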
|
a0db0926aa9ed804bfab54cfaaf7c4a031809aae
| 3,644,382
|
import scipy
def mandoline(
D_src: np.ndarray,
D_tgt: np.ndarray,
edge_list: np.ndarray,
sigma: float=None,
):
"""
Mandoline solver.
Args:
D_src: (n_src x d) matrix of (example, slices) for the source distribution.
D_tgt: (n_tgt x d) matrix of (example, slices) for the source distribution.
edge_list: list of edge correlations between slices that should be modeled.
sigma: optional parameter that activates RBF kernel-based KLIEP with scale
`sigma`.
Returns: SimpleNamespace that contains
opt: result of scipy.optimize
Phi_D_src: source potential matrix used in Mandoline
Phi_D_tgt: target potential matrix used in Mandoline
n_src: number of source samples
n_tgt: number of target samples
edge_list: the `edge_list` parameter passed as input
"""
# Copy and binarize the input matrices to -1/1
D_src, D_tgt = np.copy(D_src), np.copy(D_tgt)
if np.min(D_src) == 0:
D_src[D_src == 0] = -1
D_tgt[D_tgt == 0] = -1
# Edge list encoding dependencies between gs
if edge_list is not None:
edge_list = np.array(edge_list)
# Create the potential matrices
Phi_D_tgt, Phi_D_src = Phi(D_tgt, edge_list), Phi(D_src, edge_list)
# Number of examples
n_src, n_tgt = Phi_D_src.shape[0], Phi_D_tgt.shape[0]
def f(x):
obj = Phi_D_tgt.dot(x).sum() - n_tgt * scipy.special.logsumexp(Phi_D_src.dot(x))
return -obj
# Set the kernel
kernel = partial(skmetrics.rbf_kernel, gamma=sigma)
def llkliep_f(x):
obj = kernel(
Phi_D_tgt, x[:, np.newaxis]
).sum() - n_tgt * scipy.special.logsumexp(kernel(Phi_D_src, x[:, np.newaxis]))
return -obj
# Solve
if not sigma:
opt = scipy.optimize.minimize(
f, np.random.randn(Phi_D_tgt.shape[1]), method="BFGS"
)
else:
opt = scipy.optimize.minimize(
llkliep_f, np.random.randn(Phi_D_tgt.shape[1]), method="BFGS"
)
return SimpleNamespace(
opt=opt,
Phi_D_src=Phi_D_src,
Phi_D_tgt=Phi_D_tgt,
n_src=n_src,
n_tgt=n_tgt,
edge_list=edge_list,
)
|
5b7817e1ff252724f61572ac6f103ec963c257dd
| 3,644,383
|
def service_transformer_info_get(service): # noqa: E501
"""Retrieve transformer info
Provides information about the transformer. # noqa: E501
:param service: Inxight_Drugs service
:rtype: TransformerInfo
"""
return transformer[service].info
|
a12d4d19efe7bf8a3c185213790366339aad8c9f
| 3,644,384
|
def create_app(config=None, app_name=None):
"""Create a Flask app."""
if app_name is None:
app_name = DefaultConfig.PROJECT
app = Flask(app_name, instance_path=INSTANCE_FOLDER_PATH, instance_relative_config=True)
configure_app(app, config)
configure_hook(app)
configure_blueprints(app)
configure_extensions(app)
configure_logging(app)
configure_template_filters(app)
configure_error_handlers(app)
configure_cli(app)
return app
|
95f5191fc64f5656156fc69bd9565b0a754e014c
| 3,644,385
|
def read_translocations_tumors(gene_A, gene_B,\
tumor_barcodes,\
data_location=default_location):
"""
For a given set of tumor barcode and a gene, finds with a lookup the mutation for this
particular gene on the TCGA dataset.
INPUT:
- gene_A (str): first gene of translocation
- gene_B (str): second gene of translocation
- tumor_barcodes (list): list of tumor barcodes
- data_location (str, optional): where data is located
OUTPUT:
- indicator list with 1 on tumor barcodes with a translocation
"""
translocated_genes = [gene_A, gene_B]
# Read data and filter
df = pd.read_csv(data_location, sep='\t')
df = df[np.isin(df.Gene_A, translocated_genes)]
df = df[np.isin(df.Gene_B, translocated_genes)]
# Common barcode length
barcode_length = np.unique([len(e) for e in df['sampleId'].values])
if barcode_length.shape[0] > 1:
        raise ValueError('File does not use a consistent barcode length')
barcode_length = barcode_length[0]
print(barcode_length)
# Map translocated tumors
translocated_barcodes = df['sampleId'].values.astype(str)
translocated_barcodes = [e.replace('.', '-') for e in translocated_barcodes]
print(translocated_barcodes)
translocated_tumors = np.where(np.isin([e[5:5+barcode_length] for e in tumor_barcodes], translocated_barcodes))
print(translocated_barcodes)
is_translocated = np.zeros(len(tumor_barcodes))
is_translocated[translocated_tumors] = 1
return is_translocated
|
7ae16c2898272676ab1ab2bccde4ca3958fbb4a0
| 3,644,386
|
import numbers
def simplify_if_constant(symbol, keep_domains=False):
"""
Utility function to simplify an expression tree if it evalutes to a constant
scalar, vector or matrix
"""
if keep_domains is True:
domain = symbol.domain
auxiliary_domains = symbol.auxiliary_domains
else:
domain = None
auxiliary_domains = None
if symbol.is_constant():
result = symbol.evaluate_ignoring_errors()
if result is not None:
if (
isinstance(result, numbers.Number)
or (isinstance(result, np.ndarray) and result.ndim == 0)
or isinstance(result, np.bool_)
):
return pybamm.Scalar(result)
elif isinstance(result, np.ndarray) or issparse(result):
if result.ndim == 1 or result.shape[1] == 1:
return pybamm.Vector(
result, domain=domain, auxiliary_domains=auxiliary_domains
)
else:
# Turn matrix of zeros into sparse matrix
if isinstance(result, np.ndarray) and np.all(result == 0):
result = csr_matrix(result)
return pybamm.Matrix(
result, domain=domain, auxiliary_domains=auxiliary_domains
)
return symbol
|
c696ec4a7c81251448c97afe92c32f275284f71e
| 3,644,387
|
def is_visible(window):
"""
Check whether the window is visible or not.
"""
return lib.is_visible(window)
|
23f146625dcaa3f473ddec1684b9f222496bae48
| 3,644,388
|
from typing import Optional
import copy
def fix_mol(
mol: Chem.rdchem.Mol,
n_iter: int = 1,
remove_singleton: bool = False,
largest_only: bool = False,
inplace: bool = False,
) -> Optional[Chem.rdchem.Mol]:
"""Fix error in molecule using a greedy approach.
Args:
mol: input molecule to fix
n_iter: Number of valence fix iteration to apply
remove_singleton: Whether `adjust_singleton` should be applied
largest_only: Whether only the largest fragment should be kept
inplace: Whether to return a copy of the mol or perform in place operation
Returns:
Fixed molecule.
"""
if not inplace:
mol = copy.copy(mol)
m = sanitize_mol(mol) or mol # fail back to mol when the fixer fail
if m is not None:
m = remove_dummies(m)
for _ in range(n_iter):
m = fix_valence(m)
if remove_singleton:
m = adjust_singleton(m)
if largest_only:
# m = max(Chem.rdmolops.GetMolFrags(m, asMols=True, sanitizeFrags=False), key=lambda m: m.GetNumAtoms())
m = rdMolStandardize.FragmentParent(m, skipStandardize=True)
return m
|
6d745d9ec308e577b73243850e6f46c93c5ff24f
| 3,644,389
|
from typing import Iterable
from typing import List
def permutation_circuit(swaps: Iterable[List[Swap[_V]]]) -> PermutationCircuit:
"""Produce a circuit description of a list of swaps.
With a given permutation and permuter you can compute the swaps using the permuter function
then feed it into this circuit function to obtain a circuit description.
Args:
swaps: An iterable of swaps to perform.
Returns:
A MappingCircuit with the circuit and a mapping of node to qubit in the circuit.
"""
# Construct a circuit with each unique node id becoming a quantum register of size 1.
dag = DAGCircuit()
swap_list = list(swaps)
# Set of unique nodes used in the swaps.
nodes = {
swap_node
for swap_step in swap_list
for swap_nodes in swap_step
for swap_node in swap_nodes
}
node_qargs = {node: QuantumRegister(1) for node in nodes}
for qubit in node_qargs.values():
dag.add_qreg(qubit)
inputmap = {node: q[0] for node, q in node_qargs.items()}
# Apply swaps to the circuit.
for swap_step in swap_list:
for swap0, swap1 in swap_step:
dag.apply_operation_back(SwapGate(), [inputmap[swap0], inputmap[swap1]])
return PermutationCircuit(dag, inputmap)
|
c45d5fea5974c3bfb7e695ddc366d4948203e1d1
| 3,644,390
|
def Add(a, b):
"""
Adds two numbers, throws on overflow.
"""
c = a + b
Require(c >= a)
return c
|
cf6c04ed1f5f2f6782e6b91aea739c7e54c1dfe6
| 3,644,391
|
from typing import Dict
from typing import Type
def remap_shared_output_descriptions(output_descriptions: Dict[str, str], outputs: Dict[str, Type]) -> Dict[str, str]:
"""
Deals with mixed styles of return value descriptions used in docstrings. If the docstring contains a single entry of return value description, that output description is shared by each output variable.
:param output_descriptions: Dict of output variable names mapping to output description
:param outputs: Interface outputs
:return: Dict of output variable names mapping to shared output description
"""
# no need to remap
if len(output_descriptions) != 1:
return output_descriptions
_, shared_description = next(iter(output_descriptions.items()))
return {k: shared_description for k, _ in outputs.items()}
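
# A quick example: a single return-value description is shared across all outputs.
descs = {"return": "model predictions"}
outs = {"labels": int, "scores": float}
print(remap_shared_output_descriptions(descs, outs))
# {'labels': 'model predictions', 'scores': 'model predictions'}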
|
06d589016a747230f88aa3507bd751fd30095222
| 3,644,392
|
def dist_matrix():
"""Fix dist_matrix for the next two tests."""
dist_matrix = np.array([[0, 4, 5, 6], [4, 0, 7, 8], [5, 7, 0, 9], [6, 8, 9, 0]])
return dist_matrix
|
5bc3e5da5a6c76fd91858a697e2db183c74eb03f
| 3,644,393
|
def fitarg_rename(fitarg, ren):
"""Rename variable names in ``fitarg`` with rename function.
::
#simple renaming
fitarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'y' if pname=='x' else pname)
#{'y':1, 'limit_y':1, 'fix_y':1, 'error_y':1},
#prefixing
figarg_rename({'x':1, 'limit_x':1, 'fix_x':1, 'error_x':1},
lambda pname: 'prefix_'+pname)
#{'prefix_x':1, 'limit_prefix_x':1, 'fix_prefix_x':1, 'error_prefix_x':1}
"""
tmp = ren
if isinstance(ren, str):
ren = lambda x: tmp + '_' + x
ret = {}
prefix = ['limit_', 'fix_', 'error_', ]
for k, v in fitarg.items():
vn = k
pf = ''
for p in prefix:
if k.startswith(p):
vn = k[len(p):]
pf = p
newvn = pf + ren(vn)
ret[newvn] = v
return ret
|
151233d0f18eaea564afbc6d600d576407504b35
| 3,644,394
|
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
C = tensor.size(1)
# new axis order
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
# Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
transposed = tensor.permute(axis_order)
# Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
return transposed.contiguous().view(C, -1)
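
# A quick shape check with PyTorch:
import torch
x = torch.randn(2, 3, 4, 5, 6)  # (N, C, D, H, W)
print(flatten(x).shape)         # torch.Size([3, 240])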
|
e7586da0abfdea639b3bb760fe31fca1cc849d1d
| 3,644,395
|
import soundfile
import os
def file_to_jsobj(src,
chart_type=DFLT_CHART_TYPE,
enable_playback=DFLT_ENABLE_PLAYBACK,
height=DFLT_HEIGHT,
params=DFLT_PARAMS,
title=None,
subtitle='',
**kwargs
):
"""Renders a time visualization of a WAV file from its file.
:param src: The filepath str (or posix path) or file-like object (e.g. open file, or BytesIO object)
:param chart_type: The chart type to render, either 'peaks' (default) or 'spectrogram'
:param enable_playback: Whether to enable playback on double click (default True)
:param height: The height of the chart in pixels (default 50)
:param params: Extra rendering parameters, currently unused
:param title: The title to display, defaults to the filename
:param subtitle: An optional subtitle to display under the title
:param kwargs: extra kwargs to be passed on to Javascript object constructor
"""
wfsr = soundfile.read(src, dtype='int16')
if title is None and isinstance(src, str):
title = os.path.basename(src)
return wfsr_to_jsobj(wfsr,
chart_type=chart_type,
enable_playback=enable_playback,
height=height,
params=params,
title=title,
subtitle=subtitle,
**kwargs
)
|
26e4c1461614c5c7b0010920dba646fd1d381345
| 3,644,396
|
def node_to_edge(edges, directed=True):
"""
From list of edges, record per node, incoming and outgoing edges
"""
outgoing = defaultdict(set)
incoming = defaultdict(set) if directed else outgoing
nodes = set()
for i, edge in enumerate(edges):
a, b, = edge[:2]
outgoing[a].add(i)
incoming[b].add(i)
nodes.add(a)
nodes.add(b)
nodes = sorted(nodes)
if directed:
return outgoing, incoming, nodes
return outgoing, nodes
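
# A quick example on a small directed edge list, assuming collections.defaultdict
# is imported at module level as in the source:
edges = [("a", "b"), ("b", "c"), ("a", "c")]
outgoing, incoming, nodes = node_to_edge(edges)
print(sorted(outgoing["a"]))  # [0, 2]  -> edges 0 and 2 leave "a"
print(sorted(incoming["c"]))  # [1, 2]  -> edges 1 and 2 enter "c"
print(nodes)                  # ['a', 'b', 'c']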
|
7e3f7bf93bbf19355b3329762a3531504bbc53a4
| 3,644,397
|
from typing import Counter
def grouping_cumulative(df, col_index, col_column):
""" compute histogram statistic over selected column and in addition group this histograms
:param DataFrame df: rich table
:param str col_index: column which will be used s index in resulting table
:param str col_column: column used for computing a histogram
:return DF:
>>> np.random.seed(0)
>>> df = pd.DataFrame()
>>> df['result'] = np.random.randint(0, 2, 50)
>>> df['user'] = np.array(list('abc'))[np.random.randint(0, 3, 50)]
>>> grouping_cumulative(df, 'user', 'result').astype(int) # doctest: +NORMALIZE_WHITESPACE
0 1
user
a 10 12
b 4 9
c 6 9
"""
df_counts = pd.DataFrame()
for idx, dfg in df[[col_index, col_column]].groupby(col_index):
counts = dict(Counter(dfg[col_column]))
counts[col_index] = idx
df_counts = df_counts.append(counts, ignore_index=True)
df_counts.set_index(col_index, inplace=True)
return df_counts
|
f99b1c2cc4e7bc4e3a3af02414ba82bd057607e9
| 3,644,398
|
def _get_matching_stream(smap, itag):
""" Return the url and signature for a stream matching itag in smap. """
for x in smap:
if x['itag'] == itag and x.get("s"):
return x['url'], x['s']
raise IOError("Sorry this video is not currently supported by pafy")
|
dc83fd3207d5ab4e1c85eb719f5f7d023131565e
| 3,644,399
|