| content | id |
|---|---|
async def chatidgetter(chat):
""" Untuk .chatid, kembalikan ID obrolan yang Anda masuki saat itu. """
await chat.edit("ID Pesan: `" + str(chat.chat_id) + "`")
| 23,700
|
def python_to_json_syntax(dict_to_convert):
"""
Recursively update pythonic keys to JSON syntax in a dictionary
and nested dictionaries if present.
Args:
dict_to_convert (dict): Dictionary with keys to convert from
python to JSON syntax.
"""
    # iterate over a snapshot of the items so keys can be replaced while looping
    for key, value in list(dict_to_convert.items()):
old_key = key
for python_syntax in re.finditer(r'_[a-z]', key):
key = key.replace(
python_syntax.group(), python_syntax.group()[1].upper())
dict_to_convert[key] = dict_to_convert.pop(old_key)
# Recursive call for nested dictionaries
if type(value) == dict:
python_to_json_syntax(value)
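# Usage sketch (hypothetical payload, not from the original source; the function itself
# relies on the re module being imported by the surrounding script):
# payload = {'user_name': 'ada', 'address_info': {'street_name': 'Main'}}
# python_to_json_syntax(payload)
# payload is now {'userName': 'ada', 'addressInfo': {'streetName': 'Main'}}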
| 23,701
|
def get_train_tags(force=False):
""" Download (if needed) and read the training tags.
Keyword Arguments
-----------------
force : bool
If true, overwrite existing data if it already exists.
"""
download_train_tags(force=force)
return read_tags(train_tags_file_path)
| 23,702
|
def test_atomic_string_min_length_nistxml_sv_iv_atomic_string_min_length_1_2(mode, save_output, output_format):
"""
Type atomic/string is restricted by facet minLength with value 0.
"""
assert_bindings(
schema="nistData/atomic/string/Schema+Instance/NISTSchema-SV-IV-atomic-string-minLength-1.xsd",
instance="nistData/atomic/string/Schema+Instance/NISTXML-SV-IV-atomic-string-minLength-1-2.xml",
class_name="NistschemaSvIvAtomicStringMinLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 23,703
|
def select_own(ligands, decoys, scores):
"""Select ligand ids and decoy ids from full ranked ids."""
#scores format is full OUTDOCK line
selected = set(ligands)
selected.update(decoys)
results = []
for scoreline in scores:
#id = scoreline[extract_all.zincCol] #refer to correct column always
id = scoreline[extract_all.zincCol].split('.')[0] #refer to correct column always
# maybe in this form: zinccode.prot
#print id
if id in selected:
results.append(scoreline)
#print scoreline
return results
| 23,704
|
def create_app():
"""
    Create and configure the FastAPI application object.
    :return: the FastAPI app instance
"""
app = FastAPI(
debug=settings.DEBUG,
        title=settings.PROJECT_NAME,  # project name
        description=settings.DESCRIPTION,  # project description
        docs_url=f"{settings.API_V1}/docs",  # custom path for the Swagger docs
        redoc_url=f"{settings.API_V1}/redocs",  # custom path for the ReDoc docs
openapi_url=f"{settings.API_V1}/openapi.json"
)
app.add_middleware(SessionMiddleware, secret_key="jwt")
    # Other global configuration can go here; if it grows, consider splitting it into separate modules
    register_redis(app)
    # register MySQL
    register_mysql(app)
    # register MongoDB
    register_mongodb(app)
    # register websockets
    register_ws(app)
    # CORS settings
    register_cors(app)
    # register routers
    register_router(app)
    # register scheduled tasks
    # register_task(app)
    # register global exception handlers
    register_exception(app)
    # request middleware / interceptors
    register_middleware(app)
# if settings.DEBUG:
# register_static_file(app)
return app
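# Minimal usage sketch (assumes the project's settings and register_* helpers are importable;
# serving the app with uvicorn is an assumption, not part of the original snippet):
# import uvicorn
# app = create_app()
# uvicorn.run(app, host="0.0.0.0", port=8000)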
| 23,705
|
def cmp_text_file(text, file):
"""returns True when text and file content are identical
"""
    with open(file) as fh:
        ftext = fh.read()
    return ftext == text
| 23,706
|
def get_alignment_summary(seq_info):
"""
Determine the consensus sequence of an alignment, and create position matrix
Definition of consensus: most common base represented at that position.
"""
consensus_sequence = []
position_matrix = []
for position in seq_info:
#Ignore any ambiguous basecalls - accept A, T, C, G, and 'gap'
base_counts = {
'a':position['bases'].count('a')+position['bases'].count('A'),
't':position['bases'].count('t')+position['bases'].count('T'),
'c':position['bases'].count('c')+position['bases'].count('C'),
'g':position['bases'].count('g')+position['bases'].count('G'),
'-':position['bases'].count('-'),
}
#print(base_counts)
max_basecalls = [key for key, count in base_counts.items() if count == max(base_counts.values())]
if len(max_basecalls) == 1:
consensus_sequence.append(max_basecalls[0])
else:
consensus_sequence.append('n')
#Assembling position_matrix
position_matrix.append(base_counts)
return (''.join(consensus_sequence), position_matrix)
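# Illustrative call (hypothetical two-position alignment; each entry lists the observed
# bases in that column):
# seq_info = [{'bases': 'AAaT-'}, {'bases': 'ccCCg'}]
# get_alignment_summary(seq_info)
# -> ('ac', [{'a': 3, 't': 1, 'c': 0, 'g': 0, '-': 1},
#            {'a': 0, 't': 0, 'c': 4, 'g': 1, '-': 0}])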
| 23,707
|
def update_tsr(tics, s_date, e_date):
"""this function adds a column in each ticker's stock data table
for its tsr data within a given time period.
"""
db = db_connect()
c = db.cursor()
for tic_draft in tics:
tic = tic_draft.lower()
c.execute("ALTER TABLE table_%s ADD COLUMN tsr float;" % (tic,))
db.commit()
c.execute("SELECT MIN(date_val) FROM table_%s;" % (tic,))
start_date = c.fetchall()[0][0]
# Use whichever date is more recent: the requested date or the first
# date where trade data is available (catches cases where the stock
# started trading after the date requested)
if start_date < s_date:
start_date = s_date
c.execute("SELECT %s FROM table_%s WHERE date_val = '%s';" %
(tic, tic, start_date,))
        # sometimes the requested start date will not be a day that has
        # stock data affiliated with it, probably because the markets
        # were not open that day. So we step forward day by day
        # until we reach a day that has stock data available.
start_val_test = c.fetchall()
while start_val_test == []:
start_date = start_date + datetime.timedelta(days=1)
c.execute("SELECT %s FROM table_%s WHERE date_val = '%s';" %
(tic, tic, start_date,))
start_val_test = c.fetchall()
start_val = start_val_test[0][0]
# add column with tsr data.
# tsr is calculated as each day's value's change (as a %) from the
# original date's value. Only works for adjusted stock values;
# else it's not tsr but stock price appreciation.
c.execute("UPDATE table_%s SET tsr = (%s / %s) - 1 WHERE (date_val >= '%s' AND date_val <= '%s');" %
(tic, tic, start_val, start_date, e_date,))
db.commit()
db.close()
| 23,708
|
def calculate_pcx_chord_emission(impact_factor, Ti, w0, mu, Lnu, Vouter, rmax=40.0, nr=101, nlambda=2000,
Lne=2.5, R_outer=35):
"""Calculates PCX emission with only the outer boundary spinning for a given impact factor
Args:
impact_factor (float): impact factor for chord
Ti (float): ion temperature in eV
w0 (float): central wavelength
mu (float): mass in amu
Lnu (float): momentum diffusion length
Vouter (float): velocity in m/s for outer boundary
rmax (float): end of the plasma
nr (int): number of radial points to integrate chord with
nlambda (int): number of wavelength points
Lne (float): density gradient scale length at rmax
        R_outer (float): radius of the outer (spinning) boundary
Returns:
tuple: (np.ndarray, np.ndarray) wavelength and spectrum
"""
r, theta, x = calculate_r_theta_x_from_impact_factor(impact_factor, rmax=rmax, npts=nr)
vel = pcx_velocity_profile(r, Lnu, R_outer, Vouter)
# fig, ax = plt.subplots()
# ax.plot(r, vel)
# plt.show()
vel_adjusted = vel * np.cos(theta)
# ToDo: Should really iterate over w0 to handle the He II complex
w_shifted_max = models.doppler_shift(w0, np.max(vel_adjusted))
sigma = models.doppler_broadening(w_shifted_max, mu, Ti)
wavelength = np.linspace(-1, 1, nlambda) * 10.0 * sigma + w_shifted_max
# Now to build a big spectrum matrix
w_shifts = models.doppler_shift(w0, vel_adjusted)
full_spectrum = models.gaussian(wavelength[np.newaxis, :], w_shifts[:, np.newaxis], sigma, amp=1.0, norm=False)
# fig, ax = plt.subplots()
# ax.plot(vel_adjusted, w_shifts)
# plt.show()
dens = density_profile(r, rmax, Lne)
dens = dens[:, np.newaxis]
full_spectrum *= dens ** 2
# fig, ax = plt.subplots()
# for idx, spec in enumerate(full_spectrum):
# ax.plot(wavelength, spec, 'C0')
# ax.axvline(w_shifts[idx], color='C1')
# plt.show()
# print(full_spectrum.shape)
spectrum = np.trapz(full_spectrum, x=x, axis=0)
# print(spectrum.shape)
# fig, ax = plt.subplots()
# ax.plot(wavelength, spectrum / spectrum.max(), 'C1')
# plt.show()
return wavelength, spectrum
| 23,709
|
def _get_variables(exp:Experiment, config: dict) -> dict:
"""Process the configuration's variables before rendering it"""
return {key: value.format(exp=exp) for key, value in config.get("variables", {}).items()}
| 23,710
|
def func_calc_M(S):
"""
    Use the molecule's structure/symbol to calculate its molecular weight
    Parameter:
    S : structure in a format: (atomType number) separated by '-' or blank space
        the number of '-' and spaces does not matter
        precedence: '-' > blank space
Example 1:
C2H3O4N5
Example 2:
C2 - H3 - O4 - N5
Example 3:
C2 H3 O4 N5
Example 4:
C2 H3 - O4 - N5
Return:
M : molecular weight (g/mol)
"""
##Test list
##Slist = [ 123, ' ', '- - ', '---', '1,2,','1 +','4 $', #bad
# 'C3H4O5Br1Cl2', 'CHOBrCl','Br Br BrBr', #good
# 'C3 - H -2 - 2 - O', 'C3 - H2 2 - O' #bad]
log = {'nice':True, }
# define Periodic Table
    PT = { 'H':1.008, 'B':10.81, 'C':12.01, 'N':14.01, 'O':16.00, 'F':19.00,
           'P':30.97, 'S':32.06, 'Cl':35.45, 'Br':79.90, 'I':126.90 }
if not isinstance(S,str):
log['nice'] = False
log['info'] = 'Error: Molecule structure has to be a string'
return log, 0.0
S = S.lower()
proS = []
# format: split by '-' then split by blank space
for t in S.split('-'): proS += t.split()
if len(proS) == 0:
log['nice'] = False
log['info'] = 'Error: empty inputs'
return log, 0.0
proSS = []
# 1D: split to [ character number character number ]
for t in proS:
if t.isdigit():
proSS.append(int(t))
elif t.isalpha():
proSS.append(t)
elif t.isalnum():
stmp = ''
for c in t:
if c.isdigit():
if stmp.isalpha():
proSS.append(stmp)
stmp = ''
else:
if stmp.isdigit():
proSS.append(int(stmp))
stmp = ''
stmp += c
if stmp.isdigit():
proSS.append(int(stmp))
else:
proSS.append(stmp)
else:
log['nice'] = False
log['info'] = 'Error: input < {:} > is not correctly defined'.format(t)
return log, 0.0
proSSS = []
# 1D: split to [ atomtype number atomtype number ]
for t in proSS:
if isinstance(t,int):
proSSS.append(t)
else:
# for character, it may have special cases like Br, Cl
while True:
if 'br' in t or 'cl' in t:
ndx = t.find('br') if 'br' in t else t.find('cl')
if ndx > 0: proSSS += [ c for c in t[:ndx] ]
proSSS.append(t[ndx:ndx+2])
if len(t) >= ndx + 2:
t = t[ndx+2:]
else:
proSSS += [ c for c in t ]
break
else:
proSSS += [ c for c in t ]
break
    # No adjacent numbers are allowed.
    # However, a character without a following number defaults to a count of 1.
    # Consider cases like:
    # C 1 2 H <bad>
    # C C C 3 <good>
    # C 1 H 3 <good>
if not isinstance(proSSS[0],str):
log['nice'] = False
log['info'] = 'Error: the atomtype has to be in the first input along with its numbers\n' + \
' : < {:} > is not correctly defined'.format(proSSS[0])
return log, 0.0
bo = False
for t in proSSS:
if isinstance(t,int):
if bo:
log['nice'] = False
stmp = t
break
bo = True
else:
bo = False
if not log['nice']:
        log['info'] = 'Error: no adjacent number inputs are allowed\n' + \
' : < {:} > is not correctly defined'.format(stmp)
return log, 0.0
i = 0
proSSSS = []
# 2D: [ [atomtype, number], [atomtype, number], ... ]
while i < len(proSSS):
j = i + 1
if j < len(proSSS) and isinstance(proSSS[j],int):
proSSSS.append([proSSS[i],proSSS[j]])
i = j
else:
proSSSS.append([proSSS[i],1])
i += 1
# time to check for Periodic Table
M = 0.0
for t in proSSSS:
tmp = t[0].capitalize()
if tmp in PT:
M += PT[tmp] * t[1]
else:
log['nice'] = False
log['info'] = 'Error: atomtype < {:} > is not defined in Periodic Table'.format(tmp)
break
return log, M
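# Usage sketch (hypothetical formula string; weights taken from the PT table above):
# log, M = func_calc_M('C2 H6 O')
# log -> {'nice': True}; M -> 2*12.01 + 6*1.008 + 16.00 = 46.068 g/mol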
| 23,711
|
def find_NN(ngbrof, ngbrin, distance_ULIM=NP.inf, flatten=False, parallel=False,
nproc=None):
"""
-----------------------------------------------------------------------------
Find all nearest neighbours of one set of locations in another set of
locations within a specified distance.
Inputs:
ngbrof [numpy array] Locations for nearest neighbours are to be
determined. Has dimensions MxK where M is the number of locations.
ngbrin [numpy array] Locations from which nearest neighbours are to be
chosen for the locations in ngbrof. Has dimensions NxK.
distance_ULIM
[scalar] Maximum search radius to look for neighbours.
Default=NP.inf
flatten [boolean] If set to True, flattens the output of the nearest
neighbour search algorithm to yield two separate sets of matching
indices - one for ngbrof and the other for ngbrin. Default=False
parallel [boolean] specifies if parallelization is to be invoked. False
(default) means only serial processing. Parallelization is done
over ngbrof
nproc [scalar] specifies number of independent processes to spawn.
Default=None, means automatically determines the number of
process cores in the system and use one less than that to
avoid locking the system for other processes. Applies only
if input parameter 'parallel' (see above) is set to True.
If nproc is set to a value more than the number of process
cores in the system, it will be reset to number of process
cores in the system minus one to avoid locking the system out
for other processes
Outputs:
List containing three items. The first item is a list of M lists where each
of the M inner lists corresponds to one entry in ngbrof and the elements in
the inner list contains indices to ngbrin that are the nearest neighbours of
that specific ngbrof (same as output of cKDTree.query_ball_tree()). The
second item in the output list is a numpy array of indices to ngbrof
(obtained from the first item if input keyword flatten is set to True) or
None (if input keyword flatten is set to False). The third item in the output
list is a numpy array of indices to ngbrin that is a valid neighbour of
ngbrof (obtained from the first item if input keyword flatten is set to
True) or None (if input keyword flatten is set to False).
-----------------------------------------------------------------------------
"""
try:
ngbrof, ngbrin
except NameError:
raise NameError('ngbrof and ngbrin must be specified for finding nearest neighbours.')
if (ngbrof.shape[1] != ngbrin.shape[1]):
raise ValueError('ngbrof and ngbrin must contain same number of columns')
if parallel or (nproc is not None):
if nproc is None:
nproc = max(MP.cpu_count()-1, 1)
else:
nproc = min(nproc, max(MP.cpu_count()-1, 1))
        split_ind = NP.arange(ngbrof.shape[0]//nproc, ngbrof.shape[0], ngbrof.shape[0]//nproc)
split_ngbrof_list = NP.split(ngbrof, split_ind, axis=0)
ngbrin_list = [ngbrin] * len(split_ngbrof_list)
distance_ULIM_list = [distance_ULIM] * len(split_ngbrof_list)
pool = MP.Pool(processes=nproc)
        lolol = pool.map(find_NN_arg_splitter, zip(split_ngbrof_list, ngbrin_list, distance_ULIM_list))
pool.close()
pool.join()
indNN_list = [subitem for item in lolol for subitem in item]
else:
kdtself = KDT(ngbrof)
kdtother = KDT(ngbrin)
indNN_list = kdtself.query_ball_tree(kdtother, distance_ULIM, p=2.0)
ind_ngbrof = None
ind_ngbrin = None
if flatten:
list_of_ind_tuples = [(i,ind) for i,item in enumerate(indNN_list) for ind in item]
ind_ngbrof, ind_ngbrin = zip(*list_of_ind_tuples)
return [indNN_list, NP.asarray(ind_ngbrof), NP.asarray(ind_ngbrin)]
| 23,712
|
def index(request):
"""Magicaltastic front page.
Plugins can register a hook called 'frontpage_updates_<type>' to add
updates to the front page. `<type>` is an arbitrary string indicating
the sort of update the plugin knows how to handle; for example,
spline-forum has a `frontpage_updates_forum` hook for posting news from
a specific forum.
Hook handlers should return a list of FrontPageUpdate objects.
Standard hook parameters are:
`limit`, the maximum number of items that should ever be returned.
`max_age`, the number of seconds after which items expire.
`title`, a name for the source.
`icon`, an icon to show next to its name.
`limit` and `max_age` are also global options.
Updates are configured in the .ini like so:
spline-frontpage.sources.foo = updatetype
spline-frontpage.sources.foo.opt1 = val1
spline-frontpage.sources.foo.opt2 = val2
Note that the 'foo' name is completely arbitrary and is only used for
grouping options together. This will result in a call to:
run_hooks('frontpage_updates_updatetype', opt1=val1, opt2=val2)
Plugins may also respond to the `frontpage_extras` hook with other
interesting things to put on the front page. There's no way to
customize the order of these extras or which appear and which don't, at
the moment. Such hooks should return an object with at least a
`template` attribute; the template will be called with the object
passed in as its `obj` argument.
Local plugins can override the fairly simple index.mako template to
customize the front page layout.
"""
response = request.response
config = request.registry.settings
cache = request.environ.get('beaker.cache', None)
c = request.tmpl_context
updates = []
global_limit = config['spline-frontpage.limit']
global_max_age = max_age_to_datetime(
config['spline-frontpage.max_age'])
c.sources = config['spline-frontpage.sources']
for source in c.sources:
new_updates = source.poll(global_limit, global_max_age, cache)
updates.extend(new_updates)
# Little optimization: once there are global_limit items, anything
# older than the oldest cannot possibly make it onto the list. So,
# bump global_max_age to that oldest time if this is ever the case.
updates.sort(key=lambda obj: obj.time, reverse=True)
del updates[global_limit:]
if updates and len(updates) == global_limit:
global_max_age = updates[-1].time
# Find the oldest unseen item, to draw a divider after it.
# If this stays as None, the divider goes at the top
c.last_seen_item = None
# Could have a timestamp in a cookie
last_seen_time = None
try:
last_seen_time = datetime.datetime.fromtimestamp(
int(request.cookies['frontpage-last-seen-time']))
except (KeyError, ValueError):
pass
if last_seen_time:
for update in updates:
if update.time > last_seen_time:
c.last_seen_item = update
else:
break
# Save ~now~ as the last-seen time
now = datetime.datetime.now().strftime('%s')
response.set_cookie('frontpage-last-seen-time', now)
# Done! Feed to template
c.updates = updates
# Hook for non-update interesting things to put on the front page.
# This hook should return objects with a 'template' attribute, and
# whatever else they need
c.extras = []
return {}
| 23,713
|
def fd_d1_o4_smoothend(var,grid,mat=False):
"""Centered finite difference, first derivative, 4th order using extrapolation to get boundary points
var: quantity to be differentiated.
grid: grid for var
mat: matrix for the finite-differencing operator. if mat=False then it is created"""
dx = grid[1]-grid[0]
grid0 = np.linspace(grid[0]-2*dx,grid[-1]+2*dx,len(grid)+4)
var0 = interp(grid,var,grid0)
if not mat:
mat=get_mat_fd_d1_o4(len(var0),grid0[1]-grid0[0])
dvar0=-np.dot(mat,var0)
dvar_out=dvar0[2:-2]
return -dvar_out
| 23,714
|
def find_node_pair_solutions(node_pairs, graph):
""" Return path and cost for all node pairs in the path sets. """
node_pair_solutions = {}
counter = 0
for node_pair in node_pairs:
if node_pair not in node_pair_solutions:
cost, path = dijkstra.find_cost(node_pair, graph)
node_pair_solutions[node_pair] = (cost, path)
# Also store the reverse pair
node_pair_solutions[node_pair[::-1]] = (cost, path[::-1])
return node_pair_solutions
| 23,715
|
def create_file(name, size, atime):
"""Create a file of given name, of size in bytes and access time atime."""
with open(name, "wb") as fo, open("/dev/zero", "r") as fi:
ntodo = size
while ntodo > 0:
nwrite = min(ntodo, 2000)
fo.write(fi.read(nwrite))
ntodo -= nwrite
os.utime(name, (atime, atime))
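# Usage sketch (hypothetical filename; assumes os and time are imported and /dev/zero exists,
# i.e. a Unix-like system):
# import time
# create_file("scratch.bin", 4096, time.time() - 86400)  # 4 KiB file with atime one day ago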
| 23,716
|
def run_e_live():
"""Run e_live from cli."""
parser = ArgumentParser()
parser.add_argument('action', help='action: "all", "name" or "number" of live ebuilds')
print(e_live(parser.parse_args().action))
| 23,717
|
def get_step(a, b, marks=1):
"""Return a coordinate set between ``a`` and ``b``.
This function returns a coordinate point between the two provided
coordinates. It does this by determining the angle of the path
between the two points and getting the sine and cosine from that
angle. The returned coordinate will be ``marks`` away from ``a``.
It is worth noting that if the distance between the two points,
calculated by ``get_distance``, is less than the value of ``marks``,
then a copy of ``b`` is returned.
Args:
a (list): A tuple is also acceptable. This list will have two
items, either ``int``s or ``float``s.
b (list): Exactly the same requirements as ``a``. It can (and
usually will be) a different coordinate.
marks (:obj:`int`, optional): One mark is the measurement
between two adjacent coordinates. To step over a greater
number of coordinates, increase the number of ``marks``.
Returns:
tuple: The returned tuple is a new coordinate set. The location
of the coordinates is determined by ``marks`` and angle
connecting ``a`` and ``b``.
"""
if get_distance(a, b) <= marks:
return b[:]
angle = math.atan2(
-(a[1] - b[1]),
-(a[0] - b[0]),
)
return (
(math.cos(angle) * marks) + a[0],
(math.sin(angle) * marks) + a[1],
)
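# Worked example (assumes get_distance returns the Euclidean distance between a and b):
# get_step((0, 0), (10, 0), marks=2)
# angle = atan2(0, 10) = 0, so the result is (2.0, 0.0): two marks from a toward b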
| 23,718
|
def p_expression_integer_constant(parse):
"""
expression : INT
"""
parse[0] = our_ast.IntegerNode(parse[1])
| 23,719
|
def get_model_mask_neurons(model, layers):
"""
Defines a dictionary of type {layer: tensor} containing for each layer of a model, the binary mask representing
which neurons have a value of zero (all of its parameters are zero).
:param model: PyTorch model.
:param layers: Tuple of layers on which apply the threshold procedure. e.g. (nn.modules.Conv2d, nn.modules.Linear)
:return: Mask dictionary.
"""
mask = {}
for n_m, mo in model.named_modules():
if isinstance(mo, layers):
for n_p, p in mo.named_parameters():
name = "{}.{}".format(n_m, n_p)
if "weight" in n_p:
if isinstance(mo, nn.modules.Linear):
sum = torch.abs(p).sum(dim=1)
mask[name] = torch.where(sum == 0, torch.zeros_like(sum), torch.ones_like(sum))
elif isinstance(mo, nn.modules.Conv2d):
sum = torch.abs(p).sum(dim=(1, 2, 3))
mask[name] = torch.where(sum == 0, torch.zeros_like(sum), torch.ones_like(sum))
elif isinstance(mo, nn.modules.ConvTranspose2d):
sum = torch.abs(p).sum(dim=(0, 2, 3))
mask[name] = torch.where(sum == 0, torch.zeros_like(sum), torch.ones_like(sum))
else:
mask[name] = torch.where(p == 0, torch.zeros_like(p), torch.ones_like(p))
else:
mask[name] = torch.where(p == 0, torch.zeros_like(p), torch.ones_like(p))
return mask
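# Minimal usage sketch (hypothetical model; assumes torch and torch.nn as nn are already
# imported by the surrounding module):
# model = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
# mask = get_model_mask_neurons(model, (nn.Linear, nn.Conv2d))
# mask["0.weight"] is a length-4 tensor: 0 where an output neuron's weights are all zero, else 1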
| 23,720
|
def outputStats(m, n, b, image):
"""
Output stats on the downsized images
"""
print("Downsized images are ({}, {})".format(m//b, n//b))
print("Block images are ({}, {})".format(m, n))
print("Average intensity at ({}, {}) is {:.2f}".format(m//4//b, n//4//b, image[m//4, n//4]))
print("Average intensity at ({}, {}) is {:.2f}".format(m//4//b, 3*n//4//b, image[m//4, (3*n)//4]))
print("Average intensity at ({}, {}) is {:.2f}".format(3*m//4//b, n//4//b, image[3*m//4, n//4]))
print("Average intensity at ({}, {}) is {:.2f}".format(3*m//4//b, 3*n//4//b, image[3*m//4, 3*n//4]))
print("Binary threshold: {:.2f}".format(np.median(image)))
| 23,721
|
def ValidateClusterVersion(cluster):
"""Validates the cluster version.
Args:
cluster: object, Anthos Multi-cloud cluster.
Raises:
UnsupportedClusterVersion: cluster version is not supported.
MissingClusterField: expected cluster field is missing.
"""
version = _GetSemver(cluster)
if version < semver.SemVer('1.20.0'):
raise errors.UnsupportedClusterVersion(
'The command get-credentials is supported in cluster version 1.20 '
'and newer. For older versions, use get-kubeconfig.')
| 23,722
|
def connection():
"""Open a new connection or return the cached existing one"""
try:
existing_connection = GLOBAL_CACHE[CACHE_KEY_CONNECTION]
except KeyError:
new_connection = win32com.client.Dispatch(ADO_CONNECTION)
new_connection.Provider = CONNECTION_PROVIDER
new_connection.Open(CONNECTION_TARGET)
return GLOBAL_CACHE.setdefault(CACHE_KEY_CONNECTION, new_connection)
#
if not existing_connection.state:
# Reopen the connection if necessary
existing_connection.Open(CONNECTION_TARGET)
#
return existing_connection
| 23,723
|
def getRigidAtoms():
"""Returns atoms in rigid bodies in PDB"""
atoms = []
fileName = pdbs[0]
subprocess.call(["kgs_prepare.py", fileName])
f = open(fileName[:-4] + ".kgs.pdb", "r")
lineList = f.readlines()
f.close()
if connectivity and not "CONECT" in lineList[-1]:
with open(connectivity) as fread:
with open(fileName[:-4] + ".kgs.pdb", "a") as fwrite:
for line in fread:
if "CONECT" in line:
fwrite.write(line)
subprocess.call(["kgs_rigidity", "--initial", fileName[:-4] + ".kgs.pdb", "--saveData", "2", "--workingDirectory", "./"])
with open(fileName[:-4] + ".kgs_RBs_1.txt") as f:
for line in f:
if not "NaN" in line and line != "\n":
atoms.append(line[:-1])
return atoms
| 23,724
|
def precision(theta,X,Y):
"""
accuracy function
computes the accuracy of the logistic model theta on X with true target variable Y
"""
m = np.shape(X)[0]
H = sigmoid(np.dot(X,theta))
H[H >= 0.5] = 1
H[H < 0.5] = 0
return np.sum(H == Y)/m
| 23,725
|
def test_get_voltage_extremes():
""" Tests the function "get_voltage_extremes" from heartRateMonitor.py
:return: passes if correct voltage min and max values returned, fails o.w.
"""
from heartRateMonitor import get_voltage_extremes
assert get_voltage_extremes([0.0, 0.3, 0.2, 0.6])[0] == pytest.approx(0.0)
assert get_voltage_extremes([0.0, 0.3, 0.2, 0.6])[1] == pytest.approx(0.6)
| 23,726
|
def get_converter(obj, coords=None, dims=None, chains=None):
"""Get the converter to transform a supported object to an xarray dataset.
This function sends `obj` to the right conversion function. It is idempotent,
in that it will return xarray.Datasets unchanged.
Parameters
----------
obj : A dict, or an object from PyStan or PyMC3 to convert
coords : dict[str, iterable]
A dictionary containing the values that are used as index. The key
is the name of the dimension, the values are the index values.
dims : dict[str, Tuple(str)]
A mapping from pymc3 variables to a tuple corresponding to
the shape of the variable, where the elements of the tuples are
the names of the coordinate dimensions.
chains : int or None
The number of chains sampled from the posterior, only necessary for
converting dicts.
Returns
-------
xarray.Dataset
The coordinates are those passed in and ('chain', 'draw')
"""
if isinstance(obj, dict):
return DictToXarray(obj, coords, dims, chains=chains)
elif obj.__class__.__name__ == 'StanFit4Model': # ugly, but doesn't make PyStan a requirement
return PyStanToXarray(obj, coords, dims)
elif obj.__class__.__name__ == 'MultiTrace': # ugly, but doesn't make PyMC3 a requirement
return PyMC3ToXarray(obj, coords, dims)
else:
raise TypeError('Can only convert PyStan or PyMC3 object to xarray, not {}'.format(
obj.__class__.__name__))
| 23,727
|
def timezone_by_tzvar(tzvar):
"""Convert a WWTS tzvar to a tzdata timezone"""
return pytz.timezone(city_by_tzvar(tzvar))
| 23,728
|
def save_csv(csv_path: str, duplicates: pd.DataFrame) -> None:
"""Save a Pandas dataframe as a csv file."""
csv_file = os.path.join(csv_path, 'duplicates.csv')
duplicates.to_csv(csv_file, index=False)
| 23,729
|
def fix_seed(seed: int):
"""Sets random seed everywhere."""
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
| 23,730
|
def group_recommend(request):
"""
Get or post file/directory discussions to a group.
"""
content_type = 'application/json; charset=utf-8'
result = {}
if request.method == 'POST':
form = GroupRecommendForm(request.POST)
if form.is_valid():
repo_id = form.cleaned_data['repo_id']
attach_type = form.cleaned_data['attach_type']
path = form.cleaned_data['path']
message = form.cleaned_data['message']
# groups is a group_id list, e.g. [u'1', u'7']
groups = request.POST.getlist('groups')
username = request.user.username
groups_not_in = []
groups_posted_to = []
for group_id in groups:
# Check group id format
try:
group_id = int(group_id)
except ValueError:
result['error'] = _(u'Error: wrong group id')
return HttpResponse(json.dumps(result), status=400,
content_type=content_type)
group = get_group(group_id)
if not group:
result['error'] = _(u'Error: the group does not exist.')
return HttpResponse(json.dumps(result), status=400,
content_type=content_type)
            # TODO: Check whether repo is in the group and I'm in the group
if not is_group_user(group_id, username):
groups_not_in.append(group.group_name)
continue
# save message to group
gm = GroupMessage(group_id=group_id, from_email=username,
message=message)
gm.save()
# send signal
grpmsg_added.send(sender=GroupMessage, group_id=group_id,
from_email=username, message=message)
# save attachment
ma = MessageAttachment(group_message=gm, repo_id=repo_id,
attach_type=attach_type, path=path,
src='recommend')
ma.save()
# save discussion
fd = FileDiscuss(group_message=gm, repo_id=repo_id, path=path)
fd.save()
group_url = reverse('group_discuss', args=[group_id])
groups_posted_to.append(u'<a href="%(url)s" target="_blank">%(name)s</a>' % \
{'url':group_url, 'name':group.group_name})
if len(groups_posted_to) > 0:
result['success'] = _(u'Successfully posted to %(groups)s.') % {'groups': ', '.join(groups_posted_to)}
if len(groups_not_in) > 0:
result['error'] = _(u'Error: you are not in group %s.') % (', '.join(groups_not_in))
else:
result['error'] = str(form.errors)
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
# request.method == 'GET'
else:
repo_id = request.GET.get('repo_id')
path = request.GET.get('path', None)
repo = get_repo(repo_id)
if not repo:
result['error'] = _(u'Error: the library does not exist.')
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
if path is None:
result['error'] = _(u'Error: no path.')
return HttpResponse(json.dumps(result), status=400, content_type=content_type)
# get discussions & replies
path_hash = calc_file_path_hash(path)
discussions = FileDiscuss.objects.filter(path_hash=path_hash, repo_id=repo_id)
msg_ids = [ e.group_message_id for e in discussions ]
grp_msgs = GroupMessage.objects.filter(id__in=msg_ids).order_by('-timestamp')
msg_replies = MessageReply.objects.filter(reply_to__in=grp_msgs)
for msg in grp_msgs:
msg.replies = []
for reply in msg_replies:
if msg.id == reply.reply_to_id:
msg.replies.append(reply)
msg.reply_cnt = len(msg.replies)
msg.replies = msg.replies[-3:]
ctx = {}
ctx['messages'] = grp_msgs
html = render_to_string("group/discussion_list.html", ctx)
result['html'] = html
return HttpResponse(json.dumps(result), content_type=content_type)
| 23,731
|
def test_cluster_ami():
"""
    This method returns all cluster AMIs
:return:
"""
assert len(tag_cluster_resources.cluster_ami()) >= 0
| 23,732
|
def easy_map(parser, token):
"""
The syntax:
{% easy_map <address> [<width> <height>] [<zoom>] [using <template_name>] %}
The "address" parameter can be an Address instance or a string describing it.
If an address is not found a new entry is created in the database.
"""
width, height, zoom, template_name = None, None, None, None
params = token.split_contents()
# pop the template name
if params[-2] == 'using':
template_name = params[-1]
params = params[:-2]
if len(params) < 2:
raise template.TemplateSyntaxError('easy_map tag requires address argument')
address = params[1]
if len(params) == 4:
width, height = params[2], params[3]
elif len(params) == 5:
width, height, zoom = params[2], params[3], params[4]
elif len(params) == 3 or len(params) > 5:
raise template.TemplateSyntaxError('easy_map tag has the following syntax: '
'{% easy_map <address> <width> <height> [zoom] [using <template_name>] %}')
return EasyMapNode(address, width, height, zoom, template_name)
| 23,733
|
def pack_feed_dict(name_prefixs, origin_datas, paddings, input_fields):
"""
Args:
        name_prefixs: A prefix string or a list of prefix strings.
origin_datas: Data list or a list of data lists.
paddings: A padding id or a list of padding ids.
input_fields: A list of input fields dict.
Returns: A dict for while loop.
"""
data = dict()
data["feed_dict"] = dict()
def map_fn(n, d, p):
# n: name prefix
# d: data list
# p: padding symbol
data[concat_name(n, Constants.IDS_NAME)] = d
n_samples = len(d)
n_devices = len(input_fields)
n_samples_per_gpu = n_samples // n_devices
if n_samples % n_devices > 0:
n_samples_per_gpu += 1
def _feed_batchs(_start_idx, _inpf):
if _start_idx * n_samples_per_gpu >= n_samples:
return 0
x, x_len = padding_batch_data(
d[_start_idx * n_samples_per_gpu:(_start_idx + 1) * n_samples_per_gpu], p)
data["feed_dict"][_inpf[concat_name(n, Constants.IDS_NAME)]] = x
data["feed_dict"][_inpf[concat_name(n, Constants.LENGTH_NAME)]] = x_len
return len(x_len)
parallels = repeat_n_times(
n_devices, _feed_batchs,
list(range(n_devices)), input_fields)
data["feed_dict"]["parallels"] = parallels
if isinstance(name_prefixs, six.string_types):
map_fn(name_prefixs, origin_datas, paddings)
else:
[map_fn(n, d, p) for n, d, p in zip(name_prefixs, origin_datas, paddings)]
return data
| 23,734
|
def get_session_maker():
"""
Return an sqlalchemy sessionmaker object using an engine from get_engine().
"""
return sessionmaker(bind=get_engine())
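# Usage sketch (assumes get_engine() is configured elsewhere in this module):
# Session = get_session_maker()
# session = Session()
# try:
#     ...  # query or add objects here
#     session.commit()
# finally:
#     session.close()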
| 23,735
|
def test_dimension_mismatch():
"""Test that there's an exception if covar and noise matrices are mismatched"""
with pytest.raises(DimensionMismatch):
fan = FactorAnalysis.load_data_cov(A_SQ)
fan.add_noise_cov(B_SQ)
| 23,736
|
def rescale_intensity(arr, in_range, out_range):
""" Return arr after stretching or shrinking its intensity levels.
Parameters
----------
arr: array
input array.
in_range, out_range: 2-tuple
min and max intensity values of input and output arr.
Returns
-------
out: array
array after rescaling its intensity.
"""
imin, imax = in_range
omin, omax = out_range
out = np.clip(arr, imin, imax)
out = (out - imin) / float(imax - imin)
return out * (omax - omin) + omin
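# Worked example: values clipped to [0, 10] and stretched onto [0, 1]
# rescale_intensity(np.array([-2.0, 5.0, 10.0]), (0, 10), (0, 1))
# -> array([0. , 0.5, 1. ])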
| 23,737
|
def get_error_info():
"""Return info about last error."""
msg = "{0}\n{1}".format(str(traceback.format_exc()), str(sys.exc_info()))
return msg
| 23,738
|
def call_oai_api(resumption_token):
"""
Request page of data from the Argitrop OAI API
Parameters
----------
resumption_token : object (first page) or string or xml.etree.ElementTree.Element
token returned by previous request.
Returns
-------
response_xml : string
Response text as XML string
resumption_token : xml.etree.ElementTree.Element
        token for requesting the next page
"""
oai_api_url = cfg.OAI_ENDPOINT_START % cfg.OAI_DATASET_NAME
if isinstance(resumption_token, ET.Element):
oai_api_url = cfg.OAI_ENDPOINT_CONTINUE % resumption_token.text
if isinstance(resumption_token, str):
oai_api_url = cfg.OAI_ENDPOINT_CONTINUE % resumption_token
headers = {'User-Agent': '%s' % cfg.USER_AGENT }
logger.info('Calling OAI API: %s', oai_api_url)
response = requests.get(oai_api_url, verify=True, headers=headers)
response_xml = ET.fromstring(response.text)
resumption_token = response_xml.find('oai:ListRecords', cfg.OAI_NS).find('oai:resumptionToken', cfg.OAI_NS)
return response_xml, resumption_token
| 23,739
|
def save_last_move_statistics():
"""
Reads outputs of simulation CSV files.
Saves CSV files for each experimental
problem separately.
Tasks: A, B, C, D and E
Depths = 0, 1, 2, 3, 4
Saves files for each task and depth in
separate files
"""
for letter in letters:
directory = f'/Users/agnese/Dropbox/PyCharmProjects/gym_tower_of_london/training/model_simulations/{letter}/'
start = f'{letter}_v0_dyna-h_stats_ep={episodes}'
files = [join(directory, f) for f in listdir(directory) if
isfile(join(directory, f)) and f.startswith(start)]
depth_0, depth_1, depth_2, depth_3, depth_4 = sort_files_by_depth(files)
# print('depth0 = ', len(depth_0))
# print('depth1 = ', len(depth_1))
# print('depth2 = ',len( depth_2))
# print('depth3 = ', len(depth_3))
# print('depth4 = ', len(depth_4))
dfs_0, dfs_1, dfs_2, dfs_3, dfs_4 = get_file_dictionaries(depth_0,
depth_1,
depth_2,
depth_3,
depth_4)
# print('dfs_1', dfs_1)
# print('dfs_2', dfs_2)
# print('dfs_3', dfs_3)
# print('dfs_4', dfs_4)
# print('dfs_0', dfs_0)
df0, df1, df2, df3, df4 = get_combined_results(dfs_0, dfs_1, dfs_2,
dfs_3, dfs_4)
print('df0=', df0.shape)
# print('df1=', df1)
# print('df2=', df2)
# print('df3=', df3)
# print('df4=', df4)
results = [df0, df1, df2, df3, df4]
# print('RESUKTS:', results)
# Save results
for i, f in enumerate(results):
file_name = f'experimental_results/{letter}_depth' \
f'={i}_last_100_moves.csv'
f.to_csv(file_name, index=False)
| 23,740
|
def setup_logger() -> None:
""" Set up the logger instance. INFO to stderr """
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
| 23,741
|
def save_pano_config(p):
"""
saves a panorama config file to the local disk from the session vars.
:return:
"""
filename = get_filename(p)
with open(filename, 'w') as yml_fh:
yml_fh.write(yaml.dump(session[p + '_config'], default_flow_style=False))
return redirect("/export")
| 23,742
|
def append_source_filess(index_filename, source_files, driver):
"""This appends the paths to different source files to the temporary index file
For example
SRCSRV: source files ---------------------------------------
c:\php-sdk\phpdev\vc15\x86\php-7.2.14-src\ext\pdo_sqlsrv\pdo_dbh.cpp*pdo_sqlsrv/pdo_dbh.cpp
c:\php-sdk\phpdev\vc15\x86\php-7.2.14-src\ext\pdo_sqlsrv\pdo_init.cpp*pdo_sqlsrv/pdo_init.cpp
... ...
c:\php-sdk\phpdev\vc15\x86\php-7.2.14-src\ext\pdo_sqlsrv\shared\core_stream.cpp*shared/core_stream.cpp
c:\php-sdk\phpdev\vc15\x86\php-7.2.14-src\ext\pdo_sqlsrv\shared\core_util.cpp*shared/core_util.cpp
SRCSRV: end ------------------------------------------------
"""
failed = False
with open(index_filename, 'a') as idx_file:
idx_file.write('SRCSRV: source files ---------------------------------------' + os.linesep)
with open(source_files, 'r') as src_file:
for line in src_file:
pos = line.find('shared')
if (pos > 0): # it's a nested folder, so it must be positive
relative_path = line[pos:]
src_line = line[:-1] + '*' + relative_path.replace('\\', '/')
else: # not a file in the shared folder
pos = line.find(driver)
if (pos <= 0):
print('ERROR: Expected to find', driver, 'in', line)
failed = True
break
else:
relative_path = line[pos:]
src_line = line[:-1] + '*' + relative_path.replace('\\', '/')
idx_file.write(src_line)
idx_file.write('SRCSRV: end ------------------------------------------------' + os.linesep)
return failed
| 23,743
|
def is_transport(name):
"""Test if all parts of a name are transport coefficients
For example, efe_GB, chie_GB_div_efi_GB are all composed of transport
coefficients, but gam_GB and chiee_GB_plus_gam_GB are not.
"""
transport = True
try:
for part_name in extract_part_names(split_parts(name)):
transport &= split_name(part_name)[0] in heat_vars + particle_vars + momentum_vars
except ValueError:
transport = False
return transport
| 23,744
|
def blacken(session):
"""Run black code formater."""
session.install("black==19.3b0", "isort==4.3.21")
files = ["noxfile.py", "winterbloom_ad_dacs"]
session.run("black", *files)
session.run("isort", "--recursive", *files)
| 23,745
|
def detect_moved_files(file_manifest, diff):
""" Detect files that have been moved """
previous_hashes = defaultdict(set)
for item in file_manifest['files']: previous_hashes[item['hash']].add(item['path'])
diff_dict = make_dict(diff)
# files with duplicate hashes are assumed to have the same contents
moved_files = {}
not_found = []
for val in diff:
if val['status'] == 'new' and val['hash'] in previous_hashes:
found = None; prev_filtered = []
for itm in previous_hashes[val['hash']]:
if itm.split('/')[-1] == val['path'].split('/')[-1]: found = itm; break
if found != None and found in diff_dict and diff_dict[found]['status'] == 'delete':
previous_hashes[val['hash']].remove(found)
moved_files[val['path']] = {'from' : found, 'to' : val['path']}
else: not_found.append(val)
    # At this point all duplicate items which have been moved but which retain the original name
    # have been removed from their relevant set. Remaining items are assigned on an ad-hoc basis.
    # As their hashes are the same, their contents are assumed to be the same, so mis-assignments
    # are not very important.
for val in not_found:
itm = previous_hashes[val['hash']].pop()
if itm in diff_dict and diff_dict[itm]['status'] == 'delete':
moved_files[val['path']] = {'from' : itm, 'to' : val['path']}
# Replace separate 'new' and 'delete' with a single 'moved' command.
    for key, value in moved_files.items():
moved_from = diff_dict.pop(value['from']) # remove the delete from the diff
moved_to = diff_dict[value['to']]
diff_dict[value['to']] = moved_from # start with where the file was moved from
diff_dict[value['to']]['status'] = 'moved'
diff_dict[value['to']]['moved_from'] = value['from']
diff_dict[value['to']]['path'] = moved_to['path'] # Copy the moved path
diff_dict[value['to']]['created'] = moved_to['created'] # Copy 'created' from the moved file
diff_dict[value['to']]['last_mod'] = moved_to['last_mod'] # Copy last_mod from the moved file
    return [change for p, change in diff_dict.items()]
| 23,746
|
def test_valid_version_upload(client, settings, repository, admin_user):
"""Test a valid version upload when enforcement is activated"""
settings.LOCALSHOP_VERSIONING_TYPE = 'versio.version_scheme.Simple3VersionScheme'
key = admin_user.access_keys.create(comment='For testing')
auth = {
'HTTP_AUTHORIZATION': basic_auth_header(key.access_key, key.secret_key)
}
data = {
':action': 'file_upload',
'name': 'package-name',
'version': '0.1.0',
'metadata_version': '1.0',
'md5_digest': '06ffe94789d7bd9efba1109f40e935cf',
'filetype': 'sdist',
'content': NamedStringIO(b'Hi', name='blabla'),
}
response = client.post(
'/repo/%s/' % repository.slug, data=data, **auth)
assert response.status_code == 200
| 23,747
|
def test_declaration():
"""
Test defining tables by declaration.
"""
class GeoAreaTable(TestTable):
name = tables.Column()
population = tables.Column()
assert len(GeoAreaTable.base_columns) == 2
assert 'name' in GeoAreaTable.base_columns
assert not hasattr(GeoAreaTable, 'name')
class CountryTable(GeoAreaTable):
capital = tables.Column()
assert len(CountryTable.base_columns) == 3
assert 'capital' in CountryTable.base_columns
# multiple inheritance
class AddedMixin(TestTable):
added = tables.Column()
class CityTable(GeoAreaTable, AddedMixin):
mayer = tables.Column()
assert len(CityTable.base_columns) == 4
assert 'added' in CityTable.base_columns
# modelforms: support switching from a non-model table hierarchy to a
# modeltable hierarchy (both base class orders)
class StateTable1(tables.ModelTable, GeoAreaTable):
motto = tables.Column()
class StateTable2(GeoAreaTable, tables.ModelTable):
motto = tables.Column()
assert len(StateTable1.base_columns) == len(StateTable2.base_columns) == 3
assert 'motto' in StateTable1.base_columns
assert 'motto' in StateTable2.base_columns
| 23,748
|
def decodecaps(blob):
"""decode a bundle2 caps bytes blob into a dictionary
The blob is a list of capabilities (one per line)
Capabilities may have values using a line of the form::
capability=value1,value2,value3
The values are always a list."""
caps = {}
for line in blob.splitlines():
if not line:
continue
if b'=' not in line:
key, vals = line, ()
else:
key, vals = line.split(b'=', 1)
vals = vals.split(b',')
key = urlreq.unquote(key)
vals = [urlreq.unquote(v) for v in vals]
caps[key] = vals
return caps
| 23,749
|
def create_feature_columns() -> Tuple[list, list, list, list, list]:
"""
Returns:
        dense_feature_columns (list): feature_columns for continuous features
        category_feature_columns (list): feature_columns for categorical features
        target_feedid_feature_columns (list): feature_columns for the target feed
        sequence_feature_columns (list): feature_columns for the historical behavior sequence
        label_feature_columns (list): feature_columns for the label (target variable)
"""
category_feature_columns, dense_feature_columns = [], []
target_feedid_feature_columns, sequence_feature_columns = [], []
label_feature_columns = []
    # continuous features
videoplayseconds = fc.numeric_column('videoplayseconds', default_value=0.0)
u_read_comment_7d_sum = fc.numeric_column('u_read_comment_7d_sum', default_value=0.0)
u_like_7d_sum = fc.numeric_column('u_like_7d_sum', default_value=0.0)
u_click_avatar_7d_sum = fc.numeric_column('u_click_avatar_7d_sum', default_value=0.0)
u_forward_7d_sum = fc.numeric_column('u_forward_7d_sum', default_value=0.0)
u_comment_7d_sum = fc.numeric_column('u_comment_7d_sum', default_value=0.0)
u_follow_7d_sum = fc.numeric_column('u_follow_7d_sum', default_value=0.0)
u_favorite_7d_sum = fc.numeric_column('u_favorite_7d_sum', default_value=0.0)
i_read_comment_7d_sum = fc.numeric_column('i_read_comment_7d_sum', default_value=0.0)
i_like_7d_sum = fc.numeric_column('i_like_7d_sum', default_value=0.0)
i_click_avatar_7d_sum = fc.numeric_column('i_click_avatar_7d_sum', default_value=0.0)
i_forward_7d_sum = fc.numeric_column('i_forward_7d_sum', default_value=0.0)
i_comment_7d_sum = fc.numeric_column('i_comment_7d_sum', default_value=0.0)
i_follow_7d_sum = fc.numeric_column('i_follow_7d_sum', default_value=0.0)
i_favorite_7d_sum = fc.numeric_column('i_favorite_7d_sum', default_value=0.0)
c_user_author_read_comment_7d_sum = fc.numeric_column('c_user_author_read_comment_7d_sum', default_value=0.0)
dense_feature_columns += [videoplayseconds, u_read_comment_7d_sum, u_like_7d_sum, u_click_avatar_7d_sum,
u_forward_7d_sum, u_comment_7d_sum, u_follow_7d_sum, u_favorite_7d_sum,
i_read_comment_7d_sum, i_like_7d_sum, i_click_avatar_7d_sum, i_forward_7d_sum,
i_comment_7d_sum, i_follow_7d_sum, i_favorite_7d_sum,
c_user_author_read_comment_7d_sum]
    # categorical features
userid = fc.categorical_column_with_vocabulary_file('userid', os.path.join(FLAGS.vocabulary_dir, 'userid.txt'))
feedid = fc.sequence_categorical_column_with_vocabulary_file('feedid',
os.path.join(FLAGS.vocabulary_dir, 'feedid.txt'))
device = fc.categorical_column_with_vocabulary_file('device', os.path.join(FLAGS.vocabulary_dir, 'device.txt'))
authorid = fc.categorical_column_with_vocabulary_file('authorid',
os.path.join(FLAGS.vocabulary_dir, 'authorid.txt'))
bgm_song_id = fc.categorical_column_with_vocabulary_file('bgm_song_id',
os.path.join(FLAGS.vocabulary_dir, 'bgm_song_id.txt'))
bgm_singer_id = fc.categorical_column_with_vocabulary_file('bgm_singer_id',
os.path.join(FLAGS.vocabulary_dir, 'bgm_singer_id.txt'))
manual_tag_list = fc.categorical_column_with_vocabulary_file('manual_tag_list', os.path.join(FLAGS.vocabulary_dir,
'manual_tag_id.txt'))
his_read_comment_7d_seq = fc.sequence_categorical_column_with_vocabulary_file('his_read_comment_7d_seq',
os.path.join(FLAGS.vocabulary_dir,
'feedid.txt'))
userid_emb = fc.embedding_column(userid, 16)
feedid_emb = fc.shared_embedding_columns([feedid, his_read_comment_7d_seq], 16, combiner='mean')
device_emb = fc.embedding_column(device, 2)
authorid_emb = fc.embedding_column(authorid, 4)
bgm_song_id_emb = fc.embedding_column(bgm_song_id, 4)
bgm_singer_id_emb = fc.embedding_column(bgm_singer_id, 4)
manual_tag_id_emb = fc.embedding_column(manual_tag_list, 4, combiner='mean')
category_feature_columns += [userid_emb, device_emb, authorid_emb, bgm_song_id_emb, bgm_singer_id_emb,
manual_tag_id_emb]
target_feedid_feature_columns += [feedid_emb[0]]
sequence_feature_columns += [feedid_emb[1]]
# label
read_comment = fc.numeric_column("read_comment", default_value=0.0)
label_feature_columns += [read_comment]
return dense_feature_columns, category_feature_columns, target_feedid_feature_columns, sequence_feature_columns, label_feature_columns
| 23,750
|
def an(pos=5):
"""
    Text alignment.
    @pos:
        1: Bottom left
        2: Bottom center
        3: Bottom right
        4: Middle left
        5: Middle center
        6: Middle right
        7: Top left
        8: Top center
        9: Top right
"""
apos = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
if pos not in apos:
        raise ValueError('\n\nan(pos):\n<pos> only accepts the '
                         'following values: ' + str(apos))
else:
return '\\an{:d}'.format(pos)
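# Usage sketch: build an ASS/SSA alignment override tag
# an(8) -> '\an8' (top center); any value outside the accepted list raises ValueError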
| 23,751
|
def applyLineWidth(shape, lineWidth):
"""
Applies the line width to the supplied shape node.
:type shape: om.MObject
:type lineWidth: float
:rtype: None
"""
fnDagNode = om.MFnDagNode(shape)
plug = fnDagNode.findPlug('lineWidth', False)
plug.setFloat(lineWidth)
| 23,752
|
def propagate(node, region):
"""
Because KD Tree is built from the bottom up, we must propagate the regions properly from the top
down once the tree has been constructed.
"""
if node is None:
return
node.region = region
# "close off" the above area for below nodes
if node.below:
child = node.region.copy()
if node.orient == VERTICAL:
child.x_max = node.point[X]
else:
child.y_max = node.point[Y]
propagate(node.below, child)
# "close off" the below area, since node is above.
if node.above:
child = node.region.copy()
if node.orient == VERTICAL:
child.x_min = node.point[X]
else:
child.y_min = node.point[Y]
propagate(node.above, child)
| 23,753
|
def arista_output(accesslist):
"""Helper function to generate accesslist ouput appropriate for an
Arista switch/router. This will eventually get rolled up into
a output module or class."""
# I have studied the sacred texts from the merciless Trigger Dojo
# TODO: this should be refactored, it's a bit of a mess and
# doesn't take ICMP in to account.
output = ['ip access-list {}'.format(accesslist.name)]
for entry in accesslist:
for protocol in entry.condition['protocol']:
for srcip in entry.condition['srcip']:
for srcport in _check(entry.condition['srcport']):
for dstip in entry.condition['dstip']:
for dstport in _check(entry.condition['dstport']):
output.append(_build_output(entry.index,
entry.action,
protocol,
srcip, srcport,
dstip, dstport))
return output
| 23,754
|
def main(iterator):
"""
Given a line iterator of the bash file, returns a dictionary of
keys to values
"""
values = {}
for line in iterator:
if not line.startswith('#') and len(line.strip()) > 0:
match_obj = line_regex.search(line)
if match_obj is not None:
key, value = match_obj.group(1), match_obj.group(2)
values[key] = try_parse(value)
return values
| 23,755
|
def is_any(typeref: irast.TypeRef) -> bool:
"""Return True if *typeref* describes the ``anytype`` generic type."""
return isinstance(typeref, irast.AnyTypeRef)
| 23,756
|
def repeat_1d(inputs: tf.Tensor, count: Union[tf.Tensor, int], name="repeat_1d"):
"""Repeats each element of `inputs` `count` times in a row.
'''python
repeat_1d(tf.range(4), 2) -> 0, 0, 1, 1, 2, 2, 3, 3
'''
Parameters:
inputs: A 1D tensor with shape [`size`] to be repeated.
count: An integer, used to specify the number of time elements of `inputs` are repeated.
name: An optional string to specify the `name_scope` of this operation.
Returns:
A 1D tensor with shape [`size` * `count`] and same type as `inputs`.
"""
with tf.name_scope(name):
outputs = tf.expand_dims(inputs, 1)
outputs = tf.tile(outputs, [1, count])
outputs = tf.reshape(outputs, [-1])
return outputs
| 23,757
|
def main(from_dir, to_dir, backup_start, frequency_interval, automatic_backup, upload_zip):
"""
A command line backup tool to keep your files stored at dropbox by using its API.
It runs at given time intervals specified by the user and can perform automatic
backups using cronjobs
"""
FROM_DIR = Path(from_dir)
TO_DIR = Path(to_dir)
BACKUP_START_TIME = backup_start
FREQUENCY_INTERVAL = frequency_interval
AUTOMATIC_BACKUP = automatic_backup
logging.basicConfig(
filename="./backup.log",
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.INFO
)
# Dropbox connection object
# Handling the backup folder creation skip if already exist
dbox = DBoxConnection()
if dbox.validate_token():
logging.info("The dropbox auth token is valid")
else:
logging.error("The dropbox token is not valid, get a new one")
if TO_DIR.name not in dbox.get_dirs("", only_names=True, recursive=False):
dbox.create_folder(TO_DIR)
logging.info("The backup directory %s was created in dropbox")
else:
logging.warning("The backup directory %s already exists in dropbox", TO_DIR)
# Setting up the cron jobs
# cron = CronTab(user=True)
# job = cron.new(command="")
# job.setall(datetime(year, month, day, hour, minute))
# datetime.strptime(date_string=)
click.secho(f"Backing up {str(FROM_DIR)} into {str(TO_DIR)} at {str(BACKUP_START_TIME)} every {str(FREQUENCY_INTERVAL)}", fg="green")
# Traverse the directory to backup, skip the files and dirs that must be ignored
VISITED = []
TMP_DIR = Path.cwd()/"tmp"
try:
os.mkdir(TMP_DIR)
except:
pass
if upload_zip:
BUILD_PATH = list(TMP_DIR.parts)
else:
BUILD_PATH = list(TO_DIR.parts)
print("Starting build path = ", BUILD_PATH)
for root, dirs, files in os.walk(FROM_DIR):
CURRENT_DIR = os.path.basename(root)
logging.info("Visiting %s", root)
print("Build path -> ", BUILD_PATH)
if CURRENT_DIR in VISITED:
VISITED.remove(CURRENT_DIR)
PARENT_DIR = ("/".join(BUILD_PATH))[1:]
print(PARENT_DIR)
if upload_zip:
if CURRENT_DIR not in os.listdir(PARENT_DIR):
_ = BUILD_PATH.pop()
else:
if CURRENT_DIR not in dbox.get_dirs(PARENT_DIR, only_names=True, recursive=False):
_ = BUILD_PATH.pop()
BUILD_PATH.append(CURRENT_DIR)
if files:
os.chdir(root)
logging.info("%s has %s files", CURRENT_DIR, len(files))
for FILE in files:
print("File = ", FILE)
if FILE not in IGNORE_FILES:
if upload_zip:
shutil.copy(FILE, "/".join(BUILD_PATH)[1:]+"/")
else:
with open(FILE, 'rb') as f:
try:
dbox.upload_content(file=f.read(), path="/".join(BUILD_PATH)[1:]+"/"+f.name)
print("File uploaded")
except dropbox.exceptions.ApiError as error:
if error.error.is_path():
logging.error("Path error")
else:
logging.warning("Ignoring the file %s", FILE)
if dirs:
logging.info("%s has %s files", CURRENT_DIR, len(dirs))
for DIR in dirs:
if DIR not in IGNORE_DIRS:
VISITED.append(DIR)
try:
if upload_zip:
os.mkdir("/".join(BUILD_PATH)[1:]+"/"+DIR)
else:
dbox.create_folder("/".join(BUILD_PATH)[1:]+"/"+DIR)
logging.info("Creating folder %s at %s", DIR, "/".join(BUILD_PATH)[1:])
except:
logging.warning("Folder %s already exist at %s", DIR, "/".join(BUILD_PATH)[1:])
else:
logging.warning("Ignoring %s directory", DIR)
else:
logging.info("%s has no directories", CURRENT_DIR)
if CURRENT_DIR not in IGNORE_DIRS:
BUILD_PATH.remove(CURRENT_DIR)
| 23,758
|
def _calc_weights(df, asset_dict, weight_by):
""" calculate weights for assets in asset_dict using weight_by method """
weight_by_choices = ('Equal', 'Sharpe Ratio', 'Annual Returns',
'Std Dev', 'Vola', 'DS Vola')
assert weight_by in weight_by_choices, \
"Invalid weight_by '{}'".format(weight_by)
ml = _get_metric_lists(df, asset_dict)
bb.DBG('asset_dict = {}'.format(asset_dict))
bb.DBG('asset_dict_ml = {}'.format(ml))
if weight_by == 'Equal':
n = len(asset_dict)
weights = [1/n] * n
asset_dict.update(zip(asset_dict, weights))
elif weight_by in ('Sharpe Ratio', 'Annual Returns'):
# if there are any negative returns, apply unity-based normalization
# if a return is negative, then sharpe_ratio will also be negative
numpy.seterr('raise')
xmin = min(ml.annual_returns)
if xmin < 0:
a = 1; b = 10
if len(ml.annual_returns) == 1:
ml.annual_returns[0] = ml.sharpe_ratios[0] = a
else:
                # z = a + (x - xmin)*(b - a)/(xmax - xmin)
xmax = max(ml.annual_returns)
z = [a + (x-xmin)*(b-a)/(xmax-xmin) for x in ml.annual_returns]
ml.annual_returns = z
            # recalculate sharpe_ratios based on the normalized annual_returns
ml.sharpe_ratios = [_sharpe_ratio(annual_ret, std_dev, __m.risk_free_rate)
for annual_ret, std_dev in zip(ml.annual_returns, ml.std_devs)]
if weight_by == 'Sharpe Ratio': metric = ml.sharpe_ratios
else: metric = ml.annual_returns
metric_sum = sum(metric)
if not math.isclose(metric_sum, 0):
weights = [m/metric_sum for m in metric]
else:
print('ZeroMetricWarning: All investment options within this group'
' have zero {} metric. Defaulting to Equal Weighting for {}'
.format(weight_by, asset_dict))
n = len(asset_dict)
weights = [1/n] * n
asset_dict.update(zip(asset_dict, weights))
elif weight_by in ('Std Dev', 'Vola', 'DS Vola'):
if weight_by == 'Std Dev': metric = ml.std_devs
elif weight_by == 'Vola': metric = ml.volas
else: metric = ml.ds_volas
inverse_metric = [1/0.001 if math.isclose(m, 0) else 1/m \
for m in metric]
inverse_metric_sum = sum(inverse_metric)
weights = [m/inverse_metric_sum for m in inverse_metric]
asset_dict.update(zip(asset_dict, weights))
else:
raise Exception('Error: Invalid weight_by {}'.format(weight_by))
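# Worked example of the unity-based normalization used above (a=1, b=10):
# annual returns [-0.10, 0.20] give xmin=-0.10, xmax=0.20, so
# z = 1 + (x - xmin)*(10 - 1)/(xmax - xmin) maps them to [1.0, 10.0],
# after which the Sharpe ratios are recomputed from the normalized returns.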
| 23,759
|
def create_logger(logfile=r"/tmp/tomoproc.log"):
"""Default logger for exception tracking"""
logger = logging.getLogger("tomoproc_logger")
logger.setLevel(logging.INFO)
# create the logging file handler
fh = logging.FileHandler(logfile)
fh.setFormatter(
logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
)
# add handler to logger object
logger.addHandler(fh)
return logger
| 23,760
|
async def async_setup(opp: OpenPeerPower, config: ConfigType) -> bool:
"""Set up the Twente Milieu components."""
async def update(call) -> None:
"""Service call to manually update the data."""
unique_id = call.data.get(CONF_ID)
await _update_twentemilieu(opp, unique_id)
opp.services.async_register(DOMAIN, SERVICE_UPDATE, update, schema=SERVICE_SCHEMA)
return True
| 23,761
|
def parsePDCfile(fpath='data/CPTAC2_Breast_Prospective_Collection_BI_Proteome.tmt10.tsv'):
"""
Takes a PDC file ending in .tmt10.tsv or .itraq.tsv and creates
tidied data frame with Gene, Patient, logratio and diffFromMean values
Parameters
----------
fpath : chr, optional
DESCRIPTION. The default is 'data/CPTAC2_Breast_Prospective_Collection_BI_Proteome.tmt10.tsv'.
    Returns
    -------
    tdat : pandas.DataFrame
        Tidy data frame with Gene, NCBIGeneID, Patient and logratio columns.
"""
dat = pd.read_csv(fpath, sep='\t')
newdat = dat[['Gene', 'NCBIGeneID']]
#retrieve log ratios
pat = re.compile('.*[0-9]+\ Log Ratio')
pats = list(filter(pat.match, dat.keys()))
for pat in pats:
up_pat = pat.replace(' Log Ratio', '')
newdat[up_pat] = dat[pat]
#now tidy data by log ratio by patient
tdat = pd.melt(newdat, id_vars=['Gene', 'NCBIGeneID'],\
var_name='Patient', value_name='logratio')
return tdat
| 23,762
|
def ave(x):
"""
Returns the average value of a list.
:param x: a given list
:return: the average of param x
"""
return np.mean(x)
| 23,763
|
def information_gain(f1, f2):
"""
This function calculates the information gain, where ig(f1,f2) = H(f1) - H(f1|f2)
Input
-----
f1: {numpy array}, shape (n_samples,)
f2: {numpy array}, shape (n_samples,)
Output
------
ig: {float}
"""
ig = entropyd(f1) - conditional_entropy(f1, f2)
return ig
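# A self-contained sketch of the same quantity for small discrete arrays, using
# plain entropy formulas in place of the module's entropyd/conditional_entropy
# helpers (hypothetical toy data, natural-log entropies).
import numpy as np
from collections import Counter

def _entropy(labels):
    counts = np.array(list(Counter(labels).values()), dtype=float)
    p = counts / counts.sum()
    return float(-np.sum(p * np.log(p)))

def _cond_entropy(f1, f2):
    # H(f1|f2) = sum over values v of f2 of P(f2 = v) * H(f1 | f2 = v)
    f1, f2 = np.asarray(f1), np.asarray(f2)
    return sum((f2 == v).mean() * _entropy(f1[f2 == v]) for v in np.unique(f2))

f1 = np.array([0, 0, 1, 1])
f2 = np.array([0, 0, 1, 1])
print(_entropy(f1) - _cond_entropy(f1, f2))  # ~0.693 (ln 2): f2 fully determines f1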
| 23,764
|
def main():
"""
    This is a hangman game that picks a random word for the user to guess.
    Each wrong guess costs the user one life, and the game ends when the
    lives reach zero.
"""
answer = random_word()
guess(answer)
| 23,765
|
def output_if_exists(filename):
"""Returns file name if the file exists
Parameters
----------
filename : str
File in question.
Returns
-------
str
Filename.
"""
if os.path.exists(filename):
return filename
return None
| 23,766
|
def subsequent_chunk_mask(
size: int,
chunk_size: int,
num_left_chunks: int=-1, ) -> paddle.Tensor:
"""Create mask for subsequent steps (size, size) with chunk size,
this is for streaming encoder
Args:
size (int): size of mask
chunk_size (int): size of chunk
num_left_chunks (int): number of left chunks
<0: use full chunk
>=0: use num_left_chunks
Returns:
paddle.Tensor: mask, [size, size]
Examples:
>>> subsequent_chunk_mask(4, 2)
[[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 1],
[1, 1, 1, 1]]
"""
ret = paddle.zeros([size, size], dtype=paddle.bool)
for i in range(size):
if num_left_chunks < 0:
start = 0
else:
start = max(0, (i // chunk_size - num_left_chunks) * chunk_size)
ending = min(size, (i // chunk_size + 1) * chunk_size)
ret[i, start:ending] = True
return ret
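# A NumPy re-implementation sketch of the same masking rule, handy for checking
# the docstring example without a paddle installation.
import numpy as np

def subsequent_chunk_mask_np(size, chunk_size, num_left_chunks=-1):
    ret = np.zeros((size, size), dtype=bool)
    for i in range(size):
        start = 0 if num_left_chunks < 0 else max(
            0, (i // chunk_size - num_left_chunks) * chunk_size)
        ending = min(size, (i // chunk_size + 1) * chunk_size)
        ret[i, start:ending] = True
    return ret

print(subsequent_chunk_mask_np(4, 2).astype(int))
# [[1 1 0 0]
#  [1 1 0 0]
#  [1 1 1 1]
#  [1 1 1 1]]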
| 23,767
|
def main(argv):
"""
Main entry of this script.
Parameters
------
argv: list of str
Arguments received from the command line.
"""
annotation = np.genfromtxt('annotation.csv', dtype='int',
delimiter=',', skip_header=1)
detected = np.genfromtxt('detected.csv', dtype='int',
delimiter=',', skip_header=1)
annotation = np.array(annotation, dtype='float')
detected = np.array(detected, dtype='float')
print(len([i for i in detected if i[1] == 1]))
b = annotation[:, 1].astype(bool)
ann = annotation[b]
ann[:, 1] = ann[:, 1] - 0.90
b = detected[:, 1].astype(bool)
det = detected[b]
det[:, 1] = det[:, 1] - 0.85
b = np.array([True if a[1] and not d[1] else False
for a, d in zip(annotation, detected)])
fne = annotation[b]
fne[:, 1] = fne[:, 1] - 0.80
b = np.array([True if (not a[1] and d[1]) else False
for a, d in zip(annotation, detected)])
fpo = detected[b]
fpo[:, 1] = fpo[:, 1] - 0.75
# Delete the false positives/negatives that are too close
# (because they are most probably the same blink detected with a few frames
# delay/advance)
iFpo = []
iFne = []
for i in range(len(fpo)):
for j in range(len(fne)):
if abs(fpo[i, 0] - fne[j, 0]) <= 4:
iFpo.append(i)
iFne.append(j)
fpo = np.delete(fpo, iFpo, 0)
fne = np.delete(fne, iFne, 0)
print(len(fpo))
print(len(fne))
s = [50 for i in range(5000)]
fig = plt.figure()
ann = plt.scatter(ann[:, 0], ann[:, 1], c='g', marker='o', s=s,
label='Manually annotated blinks')
det = plt.scatter(det[:, 0], det[:, 1], c='b', marker='o', s=s,
label='Automatically detected blinks')
fne = plt.scatter(fne[:, 0], fne[:, 1], c='g', marker='v', s=s,
label='False negatives')
fpo = plt.scatter(fpo[:, 0], fpo[:, 1], c='b', marker='^', s=s,
label='False positives')
plt.xlim([0, 5001])
plt.xticks([i for i in range(0, 5001, 1000)])
plt.ylim([0, 0.6])
plt.xlabel('Frame number', fontsize=15)
plt.ylabel('Blink occurrences', fontsize=15)
plt.yticks([])
plt.legend(handles=[ann, det, fne, fpo], fontsize=10)
mng = plt.get_current_fig_manager()
mng.window.state('zoomed')
plt.suptitle('Evaluation of the Blink Detector', fontsize=30)
plt.show()
| 23,768
|
def filtergt(gtfile, majorgtcount, outfile, chromorder):
"""
    CNVs with a major genotype count larger than majorgtcount will be excluded
"""
gtdf = loadgtfile(gtfile, chromorder)
newdf = filtergenotype(gtdf, majorgtcount)
newdf.to_csv(outfile, sep='\t', index=False, float_format='%.2f', na_rep='NA')
| 23,769
|
def top_tags(request):
"""
Shows a list of the most-used Tags.
Context::
object_list
The list of Tags
Template::
cab/top_tags.html
"""
return render_to_response('cab/top_tags.html',
{ 'object_list': Snippet.objects.top_items('tag', 20) },
context_instance=RequestContext(request))
| 23,770
|
def check_integer_sign(value):
"""
    :param value: integer value to check
    :return: True if value is non-negative (>= 0), False otherwise
"""
return value >= 0
| 23,771
|
def masks_empty(sample, mask_names):
""" Tests whether a sample has any non-masked values """
return any(not torch.any(sample[name] != 0) for name in mask_names)
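# Usage sketch (assumes torch): returns True because the 'pad' mask is entirely
# zero, even though the 'attn' mask has non-zero entries.
import torch
sample = {"attn": torch.tensor([0, 1, 1]), "pad": torch.zeros(3)}
print(masks_empty(sample, ["attn", "pad"]))  # True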
| 23,772
|
def quantize_factor(factor_data, quantiles=5, bins=None, by_group=False):
"""
Computes period wise factor quantiles.
Parameters
----------
factor_data : pd.DataFrame - MultiIndex
A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
containing the values for a single alpha factor, forward returns for each period,
        The factor quantile/bin that factor value belongs to, and (optionally) the group the
asset belongs to.
quantiles : int or sequence[float]
Number of equal-sized quantile buckets to use in factor bucketing.
Alternately sequence of quantiles, allowing non-equal-sized buckets
e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]
Only one of 'quantiles' or 'bins' can be not-None
bins : int or sequence[float]
Number of equal-width (valuewise) bins to use in factor bucketing.
Alternately sequence of bin edges allowing for non-uniform bin width
e.g. [-4, -2, -0.5, 0, 10]
Only one of 'quantiles' or 'bins' can be not-None
by_group : bool
If True, compute quantile buckets separately for each group.
Returns
-------
factor_quantile : pd.Series
Factor quantiles indexed by date and asset.
"""
def quantile_calc(x, _quantiles, _bins):
if _quantiles is not None:
return pd.qcut(x, _quantiles, labels=False) + 1
elif _bins is not None:
return pd.cut(x, _bins, labels=False) + 1
raise ValueError('quantiles or bins should be provided')
grouper = [factor_data.index.get_level_values('date')]
if by_group:
grouper.append('group')
factor_quantile = factor_data.groupby(grouper)['factor'].apply(quantile_calc, quantiles, bins)
factor_quantile.name = 'factor_quantile'
return factor_quantile.dropna()
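# A minimal usage sketch with a toy factor_data frame (hypothetical dates,
# assets and factor values); quantiles=2 splits each date's cross-section
# into a bottom and a top bucket.
import pandas as pd
idx = pd.MultiIndex.from_product(
    [pd.to_datetime(["2021-01-01", "2021-01-02"]), ["AAA", "BBB", "CCC", "DDD"]],
    names=["date", "asset"])
factor_data = pd.DataFrame(
    {"factor": [0.1, 0.4, 0.2, 0.9, 0.3, 0.8, 0.7, 0.5]}, index=idx)
print(quantize_factor(factor_data, quantiles=2))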
| 23,773
|
def load_all_dbs(database_dir):
"""Load and return a ShowDB and TrackerDB.
Returns:
        showdb: ShowDatabase instance
tracker: TrackerDatabase instance
"""
showdb = load_database(os.path.join(database_dir, '.showdb.json'))
tracker = load_database(os.path.join(database_dir, '.tracker.json'))
return showdb, tracker
| 23,774
|
def listSplit(aList, n):
"""将一个列表以n个元素为一个单元进行均分,返回嵌套列表"""
return [aList[i:i+n] for i in range(0,len(aList),n)]
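# Example: chunks of 2 elements; the final chunk keeps any remainder.
print(listSplit([1, 2, 3, 4, 5], 2))  # [[1, 2], [3, 4], [5]]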
| 23,775
|
def sub(evm: Evm) -> None:
"""
Subtracts the top two elements of the stack, and pushes the result back
on the stack.
Parameters
----------
evm :
The current EVM frame.
Raises
------
StackUnderflowError
If `len(stack)` is less than `2`.
OutOfGasError
If `evm.gas_left` is less than `3`.
"""
evm.gas_left = subtract_gas(evm.gas_left, GAS_VERY_LOW)
x = pop(evm.stack)
y = pop(evm.stack)
result = x.wrapping_sub(y)
push(evm.stack, result)
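# The stack values are 256-bit words that wrap on underflow; a plain-integer
# sketch of the same wrapping_sub arithmetic (not the execution-specs U256 type
# itself):
x, y = 1, 2
print((x - y) % 2**256)  # 2**256 - 1: the subtraction underflows and wraps around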
| 23,776
|
def refs(request):
""" Настройка назначения анализов вместе """
if request.method == "GET":
rows = []
fraction = directory.Fractions.objects.get(pk=int(request.GET["pk"]))
for r in directory.References.objects.filter(fraction=fraction).order_by("pk"):
rows.append(
{
'pk': r.pk,
'title': r.title,
'about': r.about,
'ref_m': json.loads(r.ref_m) if isinstance(r.ref_m, str) else r.ref_m,
'ref_f': json.loads(r.ref_f) if isinstance(r.ref_f, str) else r.ref_f,
'del': False,
'hide': False,
'isdefault': r.pk == fraction.default_ref_id,
}
)
return JsonResponse(rows, safe=False)
elif request.method == "POST":
pk = int(request.POST["pk"])
default = int(request.POST["default"])
if pk > -1:
fraction = directory.Fractions.objects.get(pk=pk)
for r in json.loads(request.POST["refs"]):
r["ref_m"].pop("", None)
r["ref_f"].pop("", None)
if r["del"] and r["pk"] != -1:
directory.References.objects.filter(pk=r["pk"]).delete()
if r["pk"] == default:
default = -1
elif not r["del"] and r["pk"] == -1:
nrf = directory.References(title=r["title"], about=r["about"], ref_m=r["ref_m"], ref_f=r["ref_f"], fraction=fraction)
nrf.save()
if r["isdefault"]:
default = nrf.pk
else:
row = directory.References.objects.get(pk=r["pk"])
row.title = r["title"]
row.about = r["about"]
row.ref_m = json.dumps(r["ref_m"])
row.ref_f = json.dumps(r["ref_f"])
row.save()
fraction.default_ref = None if default == -1 else directory.References.objects.get(pk=default)
fraction.save()
return JsonResponse({"ok": True})
| 23,777
|
def transform(data):
"""replace the data value in the sheet if it is zero
:param data: data set
:return: data set without zero
"""
data_transformed = data.applymap(zero2minimum)
return data_transformed
| 23,778
|
def findChildren(node, name):
"""Returns all the children of input node, with a matching name.
Arguments:
node (dagNode): The input node to search
name (str): The name to search
Returns:
dagNode list: The children dagNodes
"""
return __findChildren(node, name, False)
| 23,779
|
def test_deposit_invalid_account(client, acc1_usd_deposit_transaction_factory):
"""`POST /transactions/deposit/interactive` fails with an invalid `account` parameter."""
acc1_usd_deposit_transaction_factory()
response = client.post(
DEPOSIT_PATH,
{
"asset_code": "USD",
"account": "GBSH7WNSDU5FEIED2JQZIOQPZXREO3YNH2M5DIBE8L2X5OOAGZ7N2QI6",
},
follow=True,
)
content = json.loads(response.content)
assert response.status_code == 400
assert content == {"error": "invalid 'account'"}
| 23,780
|
def main():
"""
Robot demo
    Loads all robots into an empty scene and generates random actions
"""
logging.info("*" * 80 + "\nDescription:" + main.__doc__ + "*" * 80)
# Create empty scene
settings = MeshRendererSettings(enable_shadow=False, msaa=False, texture_scale=0.5)
s = Simulator(mode="gui_interactive", image_width=512, image_height=512, rendering_settings=settings)
scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
s.import_scene(scene)
# Create one instance of each robot aligned along the y axis
position = [0, 0, 0]
robots = {}
for robot_config_file in os.listdir(os.path.join(igibson.example_config_path, "robots")):
config = parse_config(os.path.join(igibson.example_config_path, "robots", robot_config_file))
robot_config = config["robot"]
robot_name = robot_config.pop("name")
robot = REGISTERED_ROBOTS[robot_name](**robot_config)
s.import_robot(robot)
robot.set_position(position)
robot.reset()
robot.keep_still()
robots[robot_name] = (robot, position[1])
logging.info("Loaded " + robot_name)
logging.info("Moving " + robot_name)
# Set viewer in front
s.viewer.initial_pos = [1.6, 0, 1.3]
s.viewer.initial_view_direction = [-0.7, 0, -0.7]
s.viewer.reset_viewer()
for _ in range(100): # keep still for 10 seconds
s.step()
for _ in range(30):
action = np.random.uniform(-1, 1, robot.action_dim)
robot.apply_action(action)
for _ in range(10):
s.step()
robot.keep_still()
s.reload()
scene = EmptyScene(render_floor_plane=True, floor_plane_rgba=[0.6, 0.6, 0.6, 1])
s.import_scene(scene)
s.disconnect()
| 23,781
|
def get_process_rss(force_update=False, pid=None):
"""
<Purpose>
Returns the Resident Set Size of a process. By default, this will
return the information cached by the last call to _get_proc_info_by_pid.
This call is used in get_process_cpu_time.
<Arguments>
force_update:
Allows the caller to force a data update, instead of using the cached data.
pid:
If force_update is True, this parameter must be specified to force the update.
<Exceptions>
See _get_proc_info_by_pid.
<Returns>
The RSS of the process in bytes.
"""
global last_proc_info_struct
# Check if an update is being forced
if force_update and pid != None:
# Update the info
_get_proc_info_by_pid(pid)
# Get RSS
rss_pages = last_proc_info_struct.ki_rssize
rss_bytes = rss_pages * PAGE_SIZE
return rss_bytes
| 23,782
|
def doc_to_dict(doc) -> Dict:
"""Takes whatever the mongo doc is and turns into json serializable dict"""
ret = {k: stringify_mongovalues(v) for k, v in doc.items() if k != "_id"}
ret["_id"] = str(doc["_id"])
return ret
| 23,783
|
def add_srv_2cluster(cluster_name, srvjson):
"""
    Add a service to the database.
    :param cluster_name: name of the target cluster
    :param srvjson: JSON payload containing host_name and service_name
    :return: (response dict, HTTP status)
"""
status = ''
message = ''
resp = {"status": status, "message": message}
host_name = srvjson.get('host_name')
service_name = srvjson.get('service_name')
sfo_clu_node = SfoClusterNodesMethod.query_host_by_host_name(host_name)
if not sfo_clu_node:
raise ValueError('Not Found Node Host %s' % host_name)
swift_op = SwiftServiceOperation(sfo_clu_node.node_inet_ip)
try:
content = swift_op.install_service(service_name)
    except Exception as error:
status = 501
message = str(error)
else:
status = 200
message = content
resp.update({"status": status, "message": message})
return resp, status
| 23,784
|
def _add_left_zeros(number, iteration_digits):
"""Add zeros to the left side of the experiment run number.
    Zeros are added on the left until the run number string is
    iteration_digits characters long.
"""
number = str(number)
return f'{"0" * (iteration_digits - len(number))}{number}'
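# Example: pad run number 7 to 3 digits (equivalent to str(7).zfill(3)).
print(_add_left_zeros(7, 3))  # '007'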
| 23,785
|
def slack_webhook_mead_eval_intent(parent_details: Dict, webhook: str, template: str, base_dir: str) -> None:
"""Substitute a template message and post to slack
:param parent_details: The context to use to replace values in the template.
:param webhook: The webhook key
:param template: The message.
:param base_dir: the base dir to read result files from
"""
message: str = Template(template).substitute(parent_details)
mismatched_jobs = []
for job_id in parent_details['executed']:
if 'mead-eval' in job_id:
df = pd.read_csv(os.path.join(base_dir, f'{job_id}.tsv'), header=None, sep='\t')
mismatched = df[df[1] != df[2]].copy()
mismatched.insert(0, 'job_id', job_id)
message = message + f'\n[[job_id]] {job_id} [[failures]]: {len(mismatched)}'
os.remove(os.path.join(base_dir, f'{job_id}.tsv'))
mismatched_jobs.append(mismatched)
if mismatched_jobs:
pd.concat(mismatched_jobs).to_csv(os.path.join(base_dir, 'results.csv'))
requests.post(webhook, json={"text": message})
| 23,786
|
def sharpdiff(y_true, y_pred):
"""
@param y_true: tensor of shape (batch_size, height, width, channels)
@param y_pred: tensor of shape (batch_size, height, width, channels)
@return: the sharpness difference as a scalar
"""
    def log10(tensor):
        numerator = tf.math.log(tensor)
        denominator = tf.math.log(tf.constant(10, dtype=numerator.dtype))
        return numerator / denominator
    shape = tf.shape(y_pred)
    num_pixels = tf.cast(shape[1] * shape[2] * shape[3], tf.float32)
    y_true_dy, y_true_dx = tf.image.image_gradients(y_true)
    y_pred_dy, y_pred_dx = tf.image.image_gradients(y_pred)
    pred_grad_sum = y_pred_dx + y_pred_dy
    true_grad_sum = y_true_dx + y_true_dy
    grad_diff = tf.abs(true_grad_sum - pred_grad_sum)
    grad_diff_red = tf.reduce_sum(grad_diff, [1, 2, 3])
    batch_errors = 10 * log10(1 / ((1 / num_pixels) * grad_diff_red))
    return tf.reduce_mean(batch_errors)
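# Usage sketch (assumes TensorFlow 2.x); inputs are NHWC image batches with
# hypothetical random values, and the result is a scalar in dB.
import tensorflow as tf
y_true = tf.random.uniform([2, 32, 32, 3])
y_pred = tf.random.uniform([2, 32, 32, 3])
print(float(sharpdiff(y_true, y_pred)))  # higher means the image gradients match better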
| 23,787
|
def admin_setfriend():
""" Set the friend state of a user """
uid = request.args.get("uid", "")
state = request.args.get("state", "1") # Default: set as friend
try:
state = bool(int(state))
except Exception:
return (
"<html><body><p>Invalid state string: '{0}'</p></body></html>"
.format(state)
)
u = User.load_if_exists(uid) if uid else None
if u is None:
return "<html><body><p>Unknown user id '{0}'</p></body></html>".format(uid)
was_friend = u.friend()
u.set_friend(state)
u.set_has_paid(state)
u.update()
logging.info("Friend state of user {0} manually set to {1}".format(uid, state))
return (
"<html><body><p>User '{0}': friend state was '{2}', set to '{1}'</p></body></html>"
.format(uid, state, was_friend)
)
| 23,788
|
def LF_CG_BICLUSTER_BINDS(c):
"""
This label function uses the bicluster data located in the
A global network of biomedical relationships
"""
sen_pos = c.get_parent().position
pubmed_id = c.get_parent().document.name
query = bicluster_dep_df.query("pubmed_id==@pubmed_id&sentence_num==@sen_pos")
if not(query.empty):
if query["B"].sum() > 0.0:
return 1
return 0
| 23,789
|
def setup_argparse(parser: argparse.ArgumentParser) -> None:
"""Setup argument parser for ``cubi-tk org-raw check``."""
return OrganizeCommand.setup_argparse(parser)
| 23,790
|
def single_point_crossover(parents: List[Chromosome], probability: float = 0.7) -> List[Chromosome]:
""" Make the crossover of two parents to generate two child.
The crossover has a probability to be made.
The crossover point is random.
:param parents: selected parents
:param probability: probability that the crossover is made
:return: offspring
"""
cut_point = random.randint(1, len(parents[1].genes) - 1)
if random.random() < probability:
first_child = Chromosome(parents[0].genes[:cut_point] + parents[1].genes[cut_point:])
second_child = Chromosome(parents[1].genes[:cut_point] + parents[0].genes[cut_point:])
else:
first_child = Chromosome(parents[0].genes.copy())
second_child = Chromosome(parents[1].genes.copy())
return [first_child, second_child]
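# Usage sketch; `Chromosome` here is a minimal stand-in exposing only the
# `.genes` list the function touches (the real class lives elsewhere in the
# GA module).
import random

class Chromosome:
    def __init__(self, genes):
        self.genes = genes

random.seed(0)
parents = [Chromosome([0, 0, 0, 0]), Chromosome([1, 1, 1, 1])]
child_a, child_b = single_point_crossover(parents, probability=1.0)
print(child_a.genes, child_b.genes)  # complementary gene mixes around one cut point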
| 23,791
|
def plot_evolution_of_density(lattice_grid_shape: Tuple[int, int] = (50, 50),
initial_p0: float = 0.5,
epsilon: float = 0.08,
omega: float = 1.0,
time_steps: int = 2500,
number_of_visualizations: int = 20):
"""
Executes the experiment for shear wave decay given a sinusoidal density and saves the results.
Args:
lattice_grid_shape: lattice size
initial_p0: shift of density
epsilon: amplitude of sine wave
omega: relaxation parameter
time_steps: number of time steps for simulation
number_of_visualizations: total number of visualization. Has to be divisible by 5.
"""
assert 0 < omega < 2
assert time_steps > 0
assert number_of_visualizations % 5 == 0
density, velocity = sinusoidal_density_x(lattice_grid_shape, initial_p0, epsilon)
f = equilibrium_distr_func(density, velocity)
fig, ax = plt.subplots(int(number_of_visualizations / 5), 5, sharex=True, sharey=True)
ax[0, 0].plot(np.arange(0, lattice_grid_shape[0]), density[:, int(lattice_grid_shape[0] / 2)])
ax[0, 0].set_title('initial')
row_index, col_index = 0, 1
for i in trange(time_steps):
f, density, velocity = lattice_boltzmann_step(f, density, velocity, omega)
if (i + 1) % int(time_steps / number_of_visualizations) == 0:
ax[row_index, col_index].plot(np.arange(0, lattice_grid_shape[-1]),
density[:, int(lattice_grid_shape[0] / 2)])
ax[row_index, col_index].set_title('step ' + str(i))
col_index += 1
if col_index == 5:
col_index = 0
row_index += 1
if row_index == 4:
break
fig.subplots_adjust(left=0.125, right=0.9, bottom=0.1, top=0.9,
wspace=0.75, hspace=0.5)
plt.savefig(r'./figures/shear_wave_decay/evolution_density_surface.pgf')
plt.savefig(r'./figures/shear_wave_decay/evolution_density_surface.svg')
| 23,792
|
def setup_logging():
"""
Sets up the logging for multiple processes. Only enable the logging for the
master process, and suppress logging for the non-master processes.
"""
# Set up logging format.
_FORMAT = "[%(levelname)s: %(filename)s: %(lineno)4d]: %(message)s"
if du.is_master_proc():
# Enable logging for the master process.
logging.root.handlers = []
logging.basicConfig(
level=logging.INFO, format=_FORMAT, stream=sys.stdout
)
else:
# Suppress logging for non-master processes.
_suppress_print()
| 23,793
|
def main():
"""Demonstrate the Anagram Generator."""
print('I can find all phrase anagrams given a word or phrase.\n'
'I\'m fun at parties.')
# Print first 500 results.
word = 'see shells'
print(f'\nAnalyzing: {word}\n')
anagram_phrases = anagram_generator(word)
for i, phrase in enumerate(anagram_phrases):
if i > 500:
break
print(phrase)
| 23,794
|
def test_get_instance_metrics_database_size_metrics(check):
"""
Test the function behaves correctly when `database_size_metrics` is passed
"""
expected = util.COMMON_METRICS
expected.update(util.NEWER_92_METRICS)
expected.update(util.DATABASE_SIZE_METRICS)
res = check._get_instance_metrics(True, False)
assert res['metrics'] == expected
| 23,795
|
def get_node_count(network=None, base_url=DEFAULT_BASE_URL):
"""Reports the number of nodes in the network.
Args:
network (SUID or str or None): Name or SUID of a network or view. Default is the
"current" network active in Cytoscape.
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://127.0.0.1:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
int: count of nodes in network.
Raises:
ValueError: if server response has no JSON
CyError: if network name or SUID doesn't exist
requests.exceptions.RequestException: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> get_node_count()
6
>>> get_node_count(52)
6
>>> get_node_count('galFiltered.sif')
6
"""
net_suid = get_network_suid(network, base_url=base_url)
res = commands.cyrest_get(f'networks/{net_suid}/nodes/count', base_url=base_url)
return res['count']
| 23,796
|
def check_dataset_to_ref(dset, ref):
""" Check that all data in Dataset is contained in the ref Dataset. """
# file format (these are not expected to match)
assert dset.file_format == ref.file_format
# global attributes
dset_attributes = dset.ncattrs()
ref_attributes = ref.ncattrs()
for attr in dset_attributes:
print "Global attribute:", attr
assert attr in ref_attributes
attribute_equal(dset, ref, attr, allow_str_case_diff=True)
# cmptypes, expected to be empty
print "Checking cmptypes..."
assert dset.cmptypes == ref.cmptypes
# groups, expected to be empty
print "Checking groups..."
assert dset.groups == ref.groups
# dimensions (these are not expected to match)
print "Checking dimensions"
for dim in dset.dimensions.keys():
print "Dimension", dim
assert dim in ref.dimensions.keys()
# variables
print "Checking variables..."
dset_vars = dset.variables
ref_vars = ref.variables
for v in dset_vars.keys():
print "Variable", v
check_variable_to_ref(dset_vars[v], ref_vars[v])
| 23,797
|
def process_sample(
sample: Dict[str, Any],
relation_vocab: Dict[str, int],
spacy_model: Any,
tokenizer: Any,
) -> Tuple[Optional[Dict[str, Any]], Dict[str, int]]:
"""Processes WebRED sample and updates relation vocabulary.
To process a raw WebRED example, we first extract subj and obj and remove the
annotations from the text. The resulting text is parsed with a spacy model to
find mention spans, and then tokenized with a BERT tokenizer. If necessary, we
override some spacy mentions with the subj and obj WebRED mentions.
Args:
sample: raw WebRED sample. Needs to contain following fields: token, list of
token strings. relation, string describing relation between subj and obj.
relation_vocab: dictionary mapping relation strings to integer labels.
spacy_model: spacy model used to detect mentions.
tokenizer: BERT tokenizer.
Returns:
Processed WebRED sample and updated relation vocabulary.
"""
processed_sample = {}
if sample['num_pos_raters'] < 2:
relation = NO_RELATION
else:
relation = sample['relation']
if relation not in relation_vocab:
relation_vocab[relation] = len(relation_vocab)
label = relation_vocab[relation]
processed_sample['target'] = [label]
text = sample['annotated_text']
# Remove subj and obj annotations from text and store position
def find_span(input_text: str, pattern: Any,
prefix_len: int) -> Tuple[int, int]:
"""Find span corresponding to actual subj or obj strings."""
match = pattern.search(input_text)
span_start = match.start() + prefix_len + 1
# We want inclusive spans, hence -2 instead of -1
span_end = match.end() - 2
return (span_start, span_end)
def replace_and_adjust(
input_text: str, match: Any, prefix_len: int,
inverted_mapping: np.ndarray) -> Tuple[str, np.ndarray]:
"""Remove subj/obj annotations and adjust token mapping accordingly."""
original_span_start = match.start() + prefix_len + 1
original_span_end = match.end() - 1
actual_string = input_text[original_span_start:original_span_end]
new_text = input_text[:match.start()] + actual_string + input_text[match
.end():]
# Inverted mapping maps from remaining tokens to positions in original text
new_inverted_mapping = np.zeros(len(new_text), dtype=np.int32)
new_inverted_mapping[:match.start()] = inverted_mapping[:match.start()]
new_span_start = match.start()
new_span_end = match.start() + len(actual_string)
new_inverted_mapping[new_span_start:new_span_end] = inverted_mapping[
original_span_start:original_span_end]
new_inverted_mapping[new_span_end:] = inverted_mapping[original_span_end +
1:]
return new_text, new_inverted_mapping
inverted_mapping = np.arange(len(text))
subj_pattern = re.compile('SUBJ{[^}]+}')
subj_span = find_span(text, subj_pattern, len('SUBJ'))
obj_pattern = re.compile('OBJ{[^}]+}')
obj_span = find_span(text, obj_pattern, len('OBJ'))
# Remove subj/obj annotations from text
while True:
subj_match = subj_pattern.search(text)
if subj_match is None:
break
text, inverted_mapping = replace_and_adjust(text, subj_match, len('SUBJ'),
inverted_mapping)
while True:
obj_match = obj_pattern.search(text)
if obj_match is None:
break
text, inverted_mapping = replace_and_adjust(text, obj_match, len('OBJ'),
inverted_mapping)
# Adjust spans for removed tokens
mapping = np.zeros(len(sample['annotated_text']), dtype=np.int32) - 1
mapping[inverted_mapping] = np.arange(len(inverted_mapping))
subj_span = (mapping[subj_span[0]], mapping[subj_span[1]])
assert subj_span[0] != -1 and subj_span[1] != -1
obj_span = (mapping[obj_span[0]], mapping[obj_span[1]])
assert obj_span[0] != -1 and obj_span[1] != -1
parsed_text = spacy_model(text)
# We use spacy to parse text, identify noun chunks
mention_char_spans = []
mention_char_spans.append(subj_span)
mention_char_spans.append(obj_span)
def overlaps(first_span: Tuple[int, int], second_span: Tuple[int,
int]) -> bool:
def point_inside_span(point: int, span: Tuple[int, int]) -> bool:
      return span[0] <= point <= span[1]
spans_overlap = (
point_inside_span(first_span[0], second_span) or
point_inside_span(first_span[1], second_span) or
point_inside_span(second_span[0], first_span) or
point_inside_span(second_span[1], first_span))
return spans_overlap
for chunk in parsed_text.noun_chunks:
span_start_char = parsed_text[chunk.start].idx
span_last_token = parsed_text[chunk.end - 1]
span_end_char = span_last_token.idx + len(span_last_token.text) - 1
char_span = (span_start_char, span_end_char)
    # Append only if it does not overlap with the subj or obj spans. In case a
    # spacy mention annotation disagrees with the WebRED annotation, we want to
    # favor WebRED.
if not overlaps(char_span, subj_span) and not overlaps(char_span, obj_span):
mention_char_spans.append(char_span)
# Sort spans by start char
start_chars = np.array([span[0] for span in mention_char_spans])
sorted_indices = np.argsort(start_chars)
sorted_positions = np.zeros_like(start_chars)
sorted_positions[sorted_indices] = np.arange(len(sorted_positions))
sorted_spans = [mention_char_spans[idx] for idx in sorted_indices]
# Tokenize and get aligned mention positions
_, text_ids, text_mask, mention_spans, span_indices = tokenization_utils.tokenize_with_mention_spans(
tokenizer=tokenizer,
sentence=text,
spans=sorted_spans,
max_length=FLAGS.max_length,
add_bert_tokens=True,
allow_truncated_spans=True,
)
processed_sample['text_ids'] = text_ids
processed_sample['text_mask'] = text_mask
# Subj and obj are the first elements of mention spans.
subj_index = sorted_positions[0]
obj_index = sorted_positions[1]
# Some spans may be dropped by the BERT tokenizer. Here we map indices in the
# original list of spans to the one returned by the tokenizer.
reverse_span_indices = {
original_idx: tokenized_idx
for tokenized_idx, original_idx in enumerate(span_indices)
}
# Skip if subj or obj dropped.
if (subj_index not in reverse_span_indices or
obj_index not in reverse_span_indices):
return None, relation_vocab
subj_index = reverse_span_indices[subj_index]
obj_index = reverse_span_indices[obj_index]
# Make sure we don't discard subj or obj
assert max(subj_index, obj_index) < FLAGS.max_mentions
processed_sample['subject_mention_indices'] = [subj_index]
processed_sample['object_mention_indices'] = [obj_index]
mention_spans = np.array(mention_spans)
mention_start_positions = mention_spans[:, 0]
mention_end_positions = mention_spans[:, 1]
mention_start_positions = mention_start_positions[:FLAGS.max_mentions]
mention_end_positions = mention_end_positions[:FLAGS.max_mentions]
mention_pad_shape = (0, FLAGS.max_mentions - len(mention_start_positions))
mention_mask = np.ones(len(mention_start_positions), dtype=np.int64)
mention_mask = np.pad(mention_mask, mention_pad_shape, mode='constant')
mention_start_positions = np.pad(
mention_start_positions, mention_pad_shape, mode='constant')
mention_end_positions = np.pad(
mention_end_positions, mention_pad_shape, mode='constant')
processed_sample['mention_start_positions'] = mention_start_positions
processed_sample['mention_end_positions'] = mention_end_positions
processed_sample['mention_mask'] = mention_mask
return processed_sample, relation_vocab
| 23,798
|
def transform_s3(key, bucket="songsbuckettest"):
"""
    Download `key` from the S3 `bucket` into a temporary file and process it
    with process_h5_file; returns an empty list if processing fails.
"""
s3 = boto3.client('s3')
# print("connection to s3 -- Test")
with tempfile.NamedTemporaryFile(mode='wb') as tmp:
s3.download_fileobj(bucket, key, tmp)
try:
return process_h5_file(tmp.name)
except Exception as e:
return []
| 23,799
|