| content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
|---|---|---|
def conjugate_term(term: tuple) -> tuple:
"""Returns the sorted hermitian conjugate of the term."""
conj_term = [conjugate_field(field) for field in term]
return tuple(sorted(conj_term))
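A minimal usage sketch for conjugate_term above. The conjugate_field helper is not included in this snippet, so the toy version below (which flips a trailing '+' dagger marker) is an assumption for illustration only.

# Toy stand-in for the conjugate_field helper assumed by conjugate_term above.
def conjugate_field(field: str) -> str:
    return field[:-1] if field.endswith('+') else field + '+'

print(conjugate_term(('a+', 'b', 'c')))  # ('a', 'b+', 'c+')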
|
d21834ff5c2abe5ff6ec85304db962d809b07637
| 3,650,200
|
def create_notify_policy_if_not_exists(project, user, level=NotifyLevel.involved):
"""
Given a project and a user, create a notification policy for them.
"""
model_cls = apps.get_model("notifications", "NotifyPolicy")
try:
result = model_cls.objects.get_or_create(project=project,
user=user,
defaults={"notify_level": level})
return result[0]
except IntegrityError as e:
raise exc.IntegrityError(_("Notify exists for specified user and project")) from e
|
3d2eec3e3a5f12a4cbba3c3c111608f38133cf94
| 3,650,201
|
def parse_table(soup, start_gen, end_gen):
"""
- Finds the PKMN names in the soup object and puts them into a list.
- Establishes a gen range.
- Gets rid of repeated entries (formes, e.g. Deoxys) using an OrderedSet.
- Joins the list with commas.
- Handles both Nidorans having 'unmappable' characters in their names (u2642 and u2640).
params: soup (BeautifulSoup object), start_gen (int), end_gen (int)
returns: pkmn_string (string)
"""
pokes = []
for cell in soup.find_all("td", attrs={'style': None}):
for name in cell.find_all("a"):
pokes.append(name.string)
start_index = pokes.index(GEN_STARTS_WITH[start_gen])
end_index = pokes.index(GEN_ENDS_WITH[end_gen]) + 1
# Doesn't have to be ordered, just personal preference.
unique_list = OrderedSet(pokes[start_index:end_index])
if start_gen != end_gen:
print(f"{len(unique_list)} Pokémon from gen {start_gen} to {end_gen} were fetched.")
else:
print(f"{len(unique_list)} Pokémon from gen {start_gen} were fetched.")
pkmn_string = ', '.join(unique_list)
for key, value in NIDORAN_CASE.items():
# Handling of Nidoran male/female symbols.
pkmn_string = pkmn_string.replace(key, value)
return pkmn_string
|
1bb1ce6135f162e532b02e2d95eabd675540878d
| 3,650,202
|
import torch
def get_expert_parallel_src_rank():
"""Calculate the global rank corresponding to a local rank zero
in the expert parallel group."""
global_rank = torch.distributed.get_rank()
local_world_size = get_expert_parallel_world_size()
return (global_rank // local_world_size) * local_world_size
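A quick worked example of the rank arithmetic above, with no torch.distributed needed: with an expert-parallel group size of 4, ranks 8 to 11 form one group, so each maps to source rank 8.

# Pure-Python illustration of (global_rank // local_world_size) * local_world_size.
local_world_size = 4
for global_rank in (0, 3, 8, 10):
    print(global_rank, "->", (global_rank // local_world_size) * local_world_size)
# 0 -> 0, 3 -> 0, 8 -> 8, 10 -> 8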
|
0022f953707f26f9a3b3b021422ebc16e1d14213
| 3,650,203
|
from typing import Dict
from typing import Any
from typing import Optional
def _add_extra_kwargs(
kwargs: Dict[str, Any], extra_kwargs: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""
Safely add additional keyword arguments to an existing dictionary
Parameters
----------
kwargs : dict
Keyword argument dictionary
extra_kwargs : dict, default None
Keyword argument dictionary to add
Returns
-------
dict
Keyword dictionary with added keyword arguments
Notes
-----
There is no checking for duplicate keys
"""
if extra_kwargs is None:
return kwargs
else:
kwargs_copy = kwargs.copy()
kwargs_copy.update(extra_kwargs)
return kwargs_copy
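A short usage sketch, assuming _add_extra_kwargs above is in scope; note the input dictionary is copied rather than mutated.

base = {"linewidth": 2}
merged = _add_extra_kwargs(base, {"color": "red"})
print(merged)  # {'linewidth': 2, 'color': 'red'}
print(base)    # {'linewidth': 2} -- the original dict is left untouched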
|
cfc4c17f608c0b7fe1ae3046dc220d385c890caa
| 3,650,204
|
import random
import math
def Hiker(n,xLst,yLst,dist):
"""
Hiker generates lists of x and y coordinates for a random walk of n steps,
along with the distance between the first and last point.
"""
x0=0
y0=0
x=x0
y=y0
xLst[0] = x0
yLst[0] = y0
for i in range (n-1):
rnum = random.random()
if rnum <= 0.19:
y=y+1
x=x
elif rnum <= 0.43:
y=y+1
x=x+1
elif rnum <= 0.60:
y=y
x=x+1
elif rnum <= 0.70:
y = y-1
x= x+1
elif rnum <= 0.72:
y = y-1
x = x
elif rnum <= 0.75:
y = y-1
x = x-1
elif rnum <= 0.85:
y = y
x = x-1
elif rnum <= 1.00:
y = y+1
x = x-1
xLst[i+1] = x
yLst[i+1] = y
dist = math.sqrt((x - x0)**2 + (y - y0)**2)
return (xLst,yLst,dist)
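A minimal driver for Hiker above (an assumption about the intended usage): the caller preallocates the coordinate lists and passes a placeholder distance.

import random

random.seed(42)                      # reproducible walk
n = 20
xs, ys = [0] * n, [0] * n            # preallocated output lists
xs, ys, dist = Hiker(n, xs, ys, 0.0)
print(xs[-1], ys[-1], round(dist, 2))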
|
abe341c8ecdc579de2b72f5af1ace3f07dd40dc3
| 3,650,205
|
def extractdata(csvName='US_SP_Restructured.csv'):
"""
Parameters
----------
:string csvName: Name of csv file. e.g. 'US_SP_Restructured.csv'
"""
df = pd.read_csv(csvName)
df['index'] = df.index
# extract alternative specific variables
cost = pd.melt(df, id_vars=['quest', 'index'],
value_vars=['Car_Cost', 'CarRental_Cost', 'Bus_Cost',
'Plane_Cost', 'Train_Cost', 'TrH_Cost'],
value_name='cost')
tt = pd.melt(df, id_vars=['quest', 'index'],
value_vars=['Car_TT', 'CarRental_TT', 'Bus_TT',
'Plane_TT', 'Train_TT', 'TrH_TT'],
value_name='tt')
relib = pd.melt(df, id_vars=['quest', 'index'],
value_vars=['CarRelib', 'CarRentalRelib', 'BusRelib',
'PlaneRelib', 'TrainRelib', 'TrHRelib'],
value_name='reliability')
# extract generic variables
data_RP = df[[
'quest', 'index',
'DrvLicens', 'PblcTrst', 'Ag1825', 'Ag2545', 'Ag4565', 'Ag65M',
'Male', 'Fulltime', 'PrtTime', 'Unemplyd',
'Edu_Highschl', 'Edu_BSc', 'Edu_MscPhD',
'HH_Veh0', 'HH_Veh1', 'HH_Veh2M',
'HH_Adult1', 'HH_Adult2', 'HH_Adult3M',
'HH_Chld0', 'HH_Chld1', 'HH_Chld2M',
'HH_Inc020K', 'HH_Inc2060K', 'HH_Inc60KM',
'HH_Sngl', 'HH_SnglParent', 'HH_AllAddults',
'HH_Nuclear', 'P_Chld',
'BusCrwd', 'CarMorning', 'CarAfternoon', 'CarEve',
'O_MTL_US_max', 'O_Odr_US_max', 'D_Bstn_max', 'D_NYC_max', 'D_Maine_max',
'Tp_Onewy_max', 'Tp_2way_max',
'Tp_h06_max', 'Tp_h69_max', 'Tp_h915_max', 'Tp_h1519_max',
'Tp_h1924_max', 'Tp_h1524_max',
'Tp_Y2016_max', 'Tp_Y2017_max',
'Tp_Wntr_max', 'Tp_Sprng_max', 'Tp_Sumr_max', 'Tp_Fall_max',
'Tp_CarDrv_max', 'Tp_CarPsngr_max', 'Tp_CarShrRnt_max',
'Tp_Train_max', 'Tp_Bus_max', 'Tp_Plane_max', 'Tp_ModOdr_max',
'Tp_WrkSkl_max', 'Tp_Leisr_max', 'Tp_Shpng_max', 'Tp_ActOdr_max',
'Tp_NHotel1_max', 'Tp_NHotel2_max', 'Tp_NHotel3M_max',
'Tp_FreqMonthlMulti_max', 'Tp_FreqYearMulti_max',
'Tp_FreqYear1_max',
'Envrn_Car', 'Envrn_Train', 'Envrn_Bus', 'Envrn_Plane',
'Safe_Car', 'Safe_Train', 'Safe_Bus', 'Safe_Plane',
'Comf_Car', 'Comf_Train', 'Comf_Bus', 'Comf_Plane',
'Import_Cost', 'Import_TT', 'Import_Relib', 'Import_StartTime',
'Import_Freq', 'Import_Onboard', 'Import_Crwding'
]]
# extract alternatives
data_choice = df[['quest', 'index', 'New_SP_Choice']]
# extract availability
data_avail = df[['quest', 'index',
'AV_Car', 'AV_CarRental', 'AV_Bus', 'AV_Plane',
'AV_Train', 'AV_TrH']]
# extract indicators
data_ind = df[['quest', 'index',
'Envrn_Car', 'Envrn_Train', 'Envrn_Bus', 'Envrn_Plane',
'Safe_Car', 'Safe_Train', 'Safe_Bus', 'Safe_Plane',
'Comf_Car', 'Comf_Train', 'Comf_Bus', 'Comf_Plane']]
data_choice = data_choice.sort_values(['quest', 'index'])
cost = cost.sort_values(['quest', 'index', 'variable'])
tt = tt.sort_values(['quest', 'index', 'variable'])
relib = relib.sort_values(['quest', 'index', 'variable'])
data_RP = data_RP.sort_values(['quest', 'index'])
data_avail = data_avail.sort_values(['quest', 'index'])
data_ind = data_ind.sort_values(['quest', 'index'])
# make a copy and merge alternative specific variables
data_SP = cost
data_SP['tt'] = tt['tt']
data_SP['relib'] = relib['reliability']
data_SP['choice'] = data_SP['variable'].str.split('_', expand=True)[0]
data_SP = data_SP.reset_index(drop=True)
# check if everything is in order
print(data_SP.head(6))
# extract data arrays
dataset_y = data_choice[['New_SP_Choice']]
dataset_x_ng = data_SP[['cost', 'tt', 'relib']]
dataset_x_g = data_RP[[
'DrvLicens', 'PblcTrst',
'Ag1825', 'Ag2545', 'Ag4565', 'Ag65M',
'Male', 'Fulltime', # 'PrtTime', 'Unemplyd',
'Edu_Highschl', 'Edu_BSc', 'Edu_MscPhD',
'HH_Veh0', 'HH_Veh1', 'HH_Veh2M',
# 'HH_Adult1', 'HH_Adult2', 'HH_Adult3M',
'HH_Chld0', 'HH_Chld1', 'HH_Chld2M',
'HH_Inc020K', 'HH_Inc2060K', 'HH_Inc60KM',
# 'HH_Sngl', 'HH_SnglParent', 'HH_AllAddults',
# 'HH_Nuclear', # 'P_Chld',
# 'O_MTL_US_max', 'O_Odr_US_max',
# 'D_Bstn_max', 'D_NYC_max', 'D_Maine_max',
# 'Tp_Onewy_max', 'Tp_2way_max',
# 'Tp_h06_max', 'Tp_h69_max', 'Tp_h915_max',
# 'Tp_h1519_max', 'Tp_h1924_max', 'Tp_h1524_max',
# 'Tp_Y2016_max', 'Tp_Y2017_max',
# 'Tp_Wntr_max', 'Tp_Sprng_max', 'Tp_Sumr_max', 'Tp_Fall_max',
# 'Tp_CarDrv_max', 'Tp_CarPsngr_max', 'Tp_CarShrRnt_max',
# 'Tp_Train_max', 'Tp_Bus_max', 'Tp_Plane_max', 'Tp_ModOdr_max',
# 'Tp_WrkSkl_max', 'Tp_Leisr_max', 'Tp_Shpng_max',
# 'Tp_ActOdr_max',
# 'Tp_NHotel1_max', 'Tp_NHotel2_max', 'Tp_NHotel3M_max',
# 'Tp_FreqMonthlMulti_max', 'Tp_FreqYearMulti_max',
# 'Tp_FreqYear1_max',
]]
dataset_avail = data_avail[['AV_Bus', 'AV_CarRental', 'AV_Car',
'AV_Plane', 'AV_TrH', 'AV_Train']]
dataset_ind = data_ind[['Envrn_Car', 'Envrn_Train', 'Envrn_Bus', 'Envrn_Plane',
'Safe_Car', 'Safe_Train', 'Safe_Bus', 'Safe_Plane',
'Comf_Car', 'Comf_Train', 'Comf_Bus', 'Comf_Plane']]
n = df.shape[0]
y = dataset_y.values.reshape(n,)
x_ng = dataset_x_ng.values.reshape(n, 6, -1)/100.
x_g = dataset_x_g.values
avail = dataset_avail.values
ind = dataset_ind.values
return x_ng, x_g, y, avail, ind
|
e0a3f405da0e31e252b6110f84f81363c692d66a
| 3,650,206
|
def has_field(entry: EntryType, field: str) -> bool:
"""Check if a given entry has non empty field"""
return has_data(get_field(entry, field))
|
e13d973fde62e36764871fd3b565552ff46b359b
| 3,650,207
|
import timeit
import json
import logging
def generate_random_fires(fire_schemas, n=100):
"""
Given a list of fire product schemas (account, loan, derivative_cash_flow,
security), generate random data and associated random relations (customer,
issuer, collateral, etc.)
TODO: add config to set number of products, min/max for dates etc.
TODO: add relations
"""
batches = []
start_time = timeit.default_timer()
for fire_schema in fire_schemas:
f = open(fire_schema, "r")
schema = json.load(f)
data_type = fire_schema.split("/")[-1].split(".json")[0]
data = generate_product_fire(schema, data_type, n)
batches.append(data)
end_time = timeit.default_timer() - start_time
logging.warn(
"Generating FIRE batches and writing to files"
" took {} seconds".format(end_time)
)
# logging.warn(batches)
return batches
|
1a9425287d0a2eeccb83b86330751f12801d73d9
| 3,650,208
|
def open_file(app_id, file_name, mode):
# type: (int, str, int) -> str
""" Call to open_file.
:param app_id: Application identifier.
:param file_name: File name reference.
:param mode: Open mode.
:return: The real file name.
"""
return _COMPSs.open_file(app_id, file_name, mode)
|
7c38d219d4a867e72d90b873412ec7d5e5aad78a
| 3,650,209
|
import logging
def is_valid_network(name, ip_network, **kwargs):
"""Valid the format of an Ip network."""
if isinstance(ip_network, list):
return all([
is_valid_network(name, item, **kwargs) for item in ip_network
])
try:
netaddr.IPNetwork(ip_network)
except Exception:
logging.debug('%s invalid network %s', name, ip_network)
return False
return True
|
ce1c36badc5ce3176a75d7787acf19addc0a5c20
| 3,650,210
|
def correct_repeat_line():
""" Matches repeat spec above """
return "2|1|2|3|4|5|6|7"
|
b9c1e48c5043a042b9f6a6253cba6ae8ce1ca32c
| 3,650,211
|
from typing import Tuple
def get_byte_range_bounds(byte_range_str: str, total_size: int) -> Tuple[int, int]:
"""Return the start and end byte of a byte range string."""
byte_range_str = byte_range_str.replace("bytes=", "")
segments = byte_range_str.split("-")
start_byte = int(segments[0])
# chrome does not send end_byte but safari does
# we need to handle this case and generate an end_byte if not provided
end_byte = min(
int(segments[-1]) if segments[-1] else start_byte + MAX_CHUNK_SIZE,
total_size,
)
return start_byte, end_byte
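A usage sketch assuming get_byte_range_bounds above is in scope; MAX_CHUNK_SIZE is a module-level constant not shown in this snippet, so the value here is an assumption.

MAX_CHUNK_SIZE = 1024 * 1024  # assumed default chunk size

print(get_byte_range_bounds("bytes=0-499", total_size=10_000))  # (0, 499)    Safari-style closed range
print(get_byte_range_bounds("bytes=500-", total_size=10_000))   # (500, 10000) Chrome-style open range, capped at total_size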
|
f376f5af0771901d9850e06f08ebd32b13243176
| 3,650,212
|
def privmsg(recipient, s, prefix='', msg=None):
"""Returns a PRIVMSG to recipient with the message msg."""
if conf.supybot.protocols.irc.strictRfc():
assert (areReceivers(recipient)), repr(recipient)
assert s, 's must not be empty.'
if minisix.PY2 and isinstance(s, unicode):
s = s.encode('utf8')
assert isinstance(s, str)
if msg and not prefix:
prefix = msg.prefix
return IrcMsg(prefix=prefix, command='PRIVMSG',
args=(recipient, s), msg=msg)
|
1ff0087794732a06c7e2151ad6c255ca863ffb37
| 3,650,213
|
def char_decoding(value):
""" Decode from 'UTF-8' string to unicode.
:param value:
:return:
"""
if isinstance(value, bytes):
return value.decode('utf-8')
# return directly if unicode or exc happens.
return value
|
b8054b4a5012a6e23e2c08b6ff063cf3f71d6863
| 3,650,214
|
def inv2(x: np.ndarray) -> np.ndarray:
"""矩阵求逆"""
# np.matrix()废弃
return np.matrix(x).I
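Since np.matrix is deprecated (as the comment above notes), here is an equivalent sketch using np.linalg.inv instead.

import numpy as np

def inv2_alt(x: np.ndarray) -> np.ndarray:
    """Same result as inv2, without the deprecated np.matrix wrapper."""
    return np.linalg.inv(np.asarray(x))

print(inv2_alt(np.array([[1.0, 2.0], [3.0, 4.0]])))
# [[-2.   1. ]
#  [ 1.5 -0.5]]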
|
9b1b18dc0cbd248c977fd0ae0c35a65b4cd5b797
| 3,650,215
|
def clean_remaining_artifacts(image):
"""
Method still under development. Use at your own risk!
Remove remaining artifacts from image
:param image: Path to Image or 3D Matrix representing RGB image
:return: Image
"""
img, *_ = __image__(image)
blur = cv2.GaussianBlur(img, (3, 3), 0)
# convert to hsv and get saturation channel
sat = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)[:, :, 1]
# threshold saturation channel
thresh = cv2.threshold(sat, 50, 255, cv2.THRESH_BINARY)[1]
# apply morphology close and open to make mask
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9, 9))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=1)
mask = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel, iterations=1)
# do OTSU threshold to get melanoma image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
otsu = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
pre_otsu = otsu.copy()
otsu = cv2.dilate(otsu, kernel)
otsu = cv2.erode(otsu, kernel)
inv_otsu = otsu.copy()
inv_otsu[otsu == 255] = 0
inv_otsu[otsu == 0] = 255
inpaint = mask - inv_otsu
img_result = cv2.inpaint(img, inpaint, 100, cv2.INPAINT_TELEA)
return cv2.cvtColor(img_result, cv2.COLOR_BGR2RGB), otsu
|
54473dcce5eb5764e80304836f0ca16ce4d82a77
| 3,650,216
|
def max_simple_dividers(a):
"""
:param a: an integer from 1 to 1000
:return: the largest prime divisor of the number
"""
return max(simple_dividers(a))
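The simple_dividers helper is not included in this snippet; below is a minimal sketch of what it presumably returns (all prime divisors, by trial division), labeled as an assumption.

def simple_dividers(a: int):
    """Prime divisors of a by trial division (assumed behaviour)."""
    divisors, d = [], 2
    while d * d <= a:
        while a % d == 0:
            divisors.append(d)
            a //= d
        d += 1
    if a > 1:
        divisors.append(a)
    return divisors

print(max(simple_dividers(1000)))  # 5, since 1000 = 2**3 * 5**3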
|
3fc2fb51e2940ed07db97285886e7cb30e99d5a0
| 3,650,217
|
def to_light_low_sat(img, new_dims, new_scale, interp_order=1 ):
"""
Turn an image into lightness
Args:
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
new_scale : (min, max) tuple of new scale.
interp_order : interpolation order, default is linear.
Returns:
a lightness version of the original image
"""
img = skimage.img_as_float( img )
img = np.clip(img, 0.2, 0.8)
img = resize_image( img, new_dims, interp_order )
img = skimage.color.rgb2lab(img)[:,:,0]
img = rescale_image( img, new_scale, current_scale=[0,100])
return np.expand_dims(img,2)
|
2aca96378551a2a083f1762f30582efe1560f2fb
| 3,650,218
|
def get_slug_blacklist(lang=None, variant=None):
"""
Returns a list of KA slugs to skip when creating the channel.
Combines the "global" slug blacklist that applies for all channels, and
additional customization for specific languages or curriculum variants.
"""
SLUG_BLACKLIST = list(GLOBAL_SLUG_BLACKLIST)  # copy so the global list is not mutated
if variant and (lang, variant) in SLUG_BLACKLIST_PER_LANG:
SLUG_BLACKLIST.extend(SLUG_BLACKLIST_PER_LANG[(lang, variant)])
elif lang in SLUG_BLACKLIST_PER_LANG:
SLUG_BLACKLIST.extend(SLUG_BLACKLIST_PER_LANG[lang])
else:
LOGGER.warning('No slugs for lang=' + lang + ' variant=' + str(variant))
return SLUG_BLACKLIST
|
e06dd582812ddd8d2d2e929c733dc957abb748d6
| 3,650,219
|
def get_level_rise(station):
"""For a MonitoringStation object (station), returns a the rate of water level rise, specifically
the average value over the last 2 days"""
#Fetch data (if no data available, return None)
times, values = fetch_measure_levels(station.measure_id, timedelta(days=2))
#Only continue if data available, otherwise return None
if times and values and (None in times or None in values) == False:
#Get polynomial approximation of the water level data
poly, d0 = polyfit(times, values, p=4)
#Find derivative polynomial
level_der = np.polyder(poly)
#Obtain list of gradients over last 2 days using the derivative polynomial
grads = []
for t in times:
grads.append(level_der(date.date2num(t) - d0))
#Return average of gradient values
return np.average(grads)
else:
return None
|
59de80a5bdb5b24711f45395ec9cbc282ad6ad44
| 3,650,220
|
def get_mask_indices(path):
"""Helper function to get raster mask for NYC
Returns:
list: returns list of tuples (row, column) that represent area of interest
"""
raster = tiff_to_array(path)
indices = []
it = np.nditer(raster, flags=['multi_index'])
while not it.finished:
if it[0] == 1:
r, c = it.multi_index
indices.append((r, c))
it.iternext()
return indices
|
ff957ffac0f79635e729f8880457d8aa33379185
| 3,650,221
|
def about(isin:str):
"""
Get company description.
Parameters
----------
isin : str
Desired company ISIN. ISIN must be of type EQUITY or BOND, see instrument_information() -> instrumentTypeKey
Returns
-------
dict
Dict with description.
"""
params = {'isin': isin}
return _data_request('about_the_company', params)
|
86554c393087670637859d991bd2ae740ea4ff86
| 3,650,222
|
def aesEncrypt(message):
"""
Encrypts a message with a fresh key using AES-GCM.
Returns: (key, ciphertext)
"""
key = get_random_bytes(symmetricKeySizeBytes)
cipher = AES.new(key, AES.MODE_GCM)
ctext, tag = cipher.encrypt_and_digest(message)
# Concatenate (nonce, tag, ctext) and return with key
return key, (cipher.nonce + tag + ctext)
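A sketch of the matching decryption step, assuming PyCryptodome and the 16-byte default nonce and tag lengths produced by AES.new(key, AES.MODE_GCM) above.

from Crypto.Cipher import AES

def aesDecrypt(key, blob):
    # blob layout from aesEncrypt: nonce (16 bytes) + tag (16 bytes) + ciphertext
    nonce, tag, ctext = blob[:16], blob[16:32], blob[32:]
    cipher = AES.new(key, AES.MODE_GCM, nonce=nonce)
    return cipher.decrypt_and_verify(ctext, tag)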
|
06a13bb605f9038d8096f73681932a09022107b1
| 3,650,223
|
from contextlib import suppress
def pyro_nameserver(host=None,
port=None,
auto_clean=0,
auto_start=False):
"""Runs a Pyro name server.
The name server must be running in order to use distributed cameras with POCS. The name server
should be started before starting camera servers or POCS.
Args:
host (str, optional): hostname/IP address to bind the name server to. If `None` is given
(default), will look for the `pyro.nameserver.ip` config entry, otherwise will default
to localhost.
port (int, optional): port number to bind the nameserver to. If `None` is given (default),
will look for the `pyro.nameserver.port` config entry, otherwise will default to `0`,
which auto-selects a port.
auto_clean (int, optional): interval, in seconds, for automatic deregistration of objects
from the name server if they cannot be connected. If not given no autocleaning will
be done.
auto_start (bool, optional): If the nameserver should be started, which will cause the function
to block. Default is False, which will return the nameserver daemon and it is the user's
responsibility to start the `requestLoop`.
Returns:
multiprocess.Process: The process responsible for running the nameserver. Note that if the
nameserver was started via `auto_start=True`, the function will block until terminated,
but still return the completed process.
"""
logger.info(f"Pyro nameserver start request: host={host}, port={port}, auto_clean={auto_clean}"
f", auto_start={auto_start}.")
host = host or get_config('pyro.nameserver.host')
port = int(port or get_config('pyro.nameserver.port', default=0))
with suppress(error.PyroNameServerNotFound, Pyro5.errors.NamingError):
logger.info(f'Checking for existing nameserver on {host}:{port}')
nameserver = get_running_nameserver(host=host, port=port)
logger.info(f"Pyro nameserver={nameserver} already running.")
return nameserver
Pyro5.config.NS_AUTOCLEAN = float(auto_clean)
# Function to be called inside a separate process to run our nameserver.
def start_server():
try:
start_ns_loop(host=host, port=port, enableBroadcast=True)
except KeyboardInterrupt: # noqa
logger.info(f'Pyro nameserver requested shutdown by user.')
except Exception as e: # noqa
logger.warning(
f'Problem starting Pyro nameserver, is another nameserver already running?')
logger.error(f'Error: {e!r}')
finally:
logger.info(f'Pyro nameserver shutting down.')
# Set up nameserver process.
logger.debug(f'Setting up Pyro nameserver process.')
server_process = Process(target=start_server)
if auto_start:
logger.info("Auto-starting new pyro nameserver")
server_process.start()
logger.success(
"Pyro nameserver started, will block until finished...(Ctrl-c/Cmd-c to exit)")
server_process.join()
return server_process
|
1cf986688a9b5184e2e147fe3a9f5a8aa0379d60
| 3,650,224
|
import os
import sys
def _get_library_path() -> str:
"""Find library path for compiled IK fast libraries.
Look for sub-package 'linux_so', or '.reach/third_party/ikfast/linux-so'
Only supports Linux "so" file.
Returns:
Library path.
"""
if _is_running_on_google3:
return "./"
current_folder = os.path.dirname(os.path.abspath(__file__))
ur5e_so = os.path.join(current_folder, "linux_so", "libur5e_ikfast61.so")
if os.path.exists(ur5e_so):
return os.path.join(current_folder, "linux_so")
reach_path = _find_reach_path(current_folder)
sys.path.append(reach_path)
return os.path.join(reach_path, "third_party/ikfast/linux-so")
|
d227349fb3cfd4caa7938a7bd9da78c846c40fc0
| 3,650,225
|
from pathlib import Path
import os
def view_img(
stat_map_img,
bg_img="MNI152",
cut_coords=None,
colorbar=True,
title=None,
threshold=1e-6,
annotate=True,
draw_cross=True,
black_bg="auto",
cmap=cm.cold_hot,
symmetric_cmap=True,
dim="auto",
vmax=None,
vmin=None,
resampling_interpolation="continuous",
opacity=1,
**kwargs
):
"""
Interactive html viewer of a statistical map, with optional background
Parameters
----------
stat_map_img : Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html
The statistical map image. Can be either a 3D volume or a 4D volume
with exactly one time point.
bg_img : Niimg-like object (default='MNI152')
See http://nilearn.github.io/manipulating_images/input_output.html
The background image that the stat map will be plotted on top of.
If nothing is specified, the MNI152 template will be used.
To turn off background image, just pass "bg_img=False".
cut_coords : None, or a tuple of floats (default None)
The MNI coordinates of the point where the cut is performed
as a 3-tuple: (x, y, z). If None is given, the cuts are calculated
automatically.
colorbar : boolean, optional (default True)
If True, display a colorbar on top of the plots.
title : string or None (default=None)
The title displayed on the figure (or None: no title).
threshold : string, number or None (default=1e-6)
If None is given, the image is not thresholded.
If a string of the form "90%" is given, use the 90-th percentile of
the absolute value in the image.
If a number is given, it is used to threshold the image:
values below the threshold (in absolute value) are plotted
as transparent. If auto is given, the threshold is determined
automatically.
annotate : boolean (default=True)
If annotate is True, current cuts are added to the viewer.
draw_cross : boolean (default=True)
If draw_cross is True, a cross is drawn on the plot to
indicate the cuts.
black_bg : boolean (default='auto')
If True, the background of the image is set to be black.
Otherwise, a white background is used.
If set to auto, an educated guess is made to find if the background
is white or black.
cmap : matplotlib colormap, optional
The colormap for specified image.
symmetric_cmap : bool, optional (default=True)
True: make colormap symmetric (ranging from -vmax to vmax).
False: the colormap will go from the minimum of the volume to vmax.
Set it to False if you are plotting a positive volume, e.g. an atlas
or an anatomical image.
dim : float, 'auto' (default='auto')
Dimming factor applied to background image. By default, automatic
heuristics are applied based upon the background image intensity.
Accepted float values, where a typical scan is between -2 and 2
(-2 = increase contrast; 2 = decrease contrast), but larger values
can be used for a more pronounced effect. 0 means no dimming.
vmax : float, or None (default=None)
max value for mapping colors.
If vmax is None and symmetric_cmap is True, vmax is the max
absolute value of the volume.
If vmax is None and symmetric_cmap is False, vmax is the max
value of the volume.
vmin : float, or None (default=None)
min value for mapping colors.
If `symmetric_cmap` is `True`, `vmin` is always equal to `-vmax` and
cannot be chosen.
If `symmetric_cmap` is `False`, `vmin` defaults to the min of the
image, or 0 when a threshold is used.
resampling_interpolation : string, optional (default continuous)
The interpolation method for resampling.
Can be 'continuous', 'linear', or 'nearest'.
See nilearn.image.resample_img
opacity : float in [0,1] (default 1)
The level of opacity of the overlay (0: transparent, 1: opaque)
Returns
-------
html_view : the html viewer object.
It can be saved as an html page `html_view.save_as_html('test.html')`,
or opened in a browser `html_view.open_in_browser()`.
If the output is not requested and the current environment is a Jupyter
notebook, the viewer will be inserted in the notebook.
See Also
--------
nilearn.plotting.plot_stat_map:
static plot of brain volume, on a single or multiple planes.
nilearn.plotting.view_connectome:
interactive 3d view of a connectome.
nilearn.plotting.view_markers:
interactive plot of colored markers.
nilearn.plotting.view_surf, nilearn.plotting.view_img_on_surf:
interactive view of statistical maps or surface atlases on the cortical
surface.
"""
# Load template
resource_path = Path(__file__).resolve().parent.joinpath("data", "html")
file_template = resource_path.joinpath("stat_map_template.html")
tpl = tempita.Template.from_filename(str(file_template), encoding="utf-8")
# Initialize namespace for substitution
namespace = {}
namespace["title"] = title or "Slice viewer"
js_dir = os.path.join(os.path.dirname(__file__), "data", "js")
with open(os.path.join(js_dir, "jquery.min.js")) as f:
namespace["jquery_js"] = f.read()
# Initialize the template substitution tool
bsprite = viewer_substitute(
cut_coords=cut_coords,
colorbar=colorbar,
title=title,
threshold=threshold,
annotate=annotate,
draw_cross=draw_cross,
black_bg=black_bg,
cmap=cmap,
symmetric_cmap=symmetric_cmap,
dim=dim,
vmax=vmax,
vmin=vmin,
resampling_interpolation=resampling_interpolation,
opacity=opacity,
base64=True,
value=False,
)
# build sprites and meta-data
bsprite.fit(stat_map_img, bg_img=bg_img)
# Populate template
return bsprite.transform(
tpl,
javascript="javascript",
html="html",
library="library",
namespace=namespace,
)
|
5891758ec6baac825345435afece645fb8b24e42
| 3,650,226
|
def getType(o):
"""There could be only return o.__class__.__name__"""
if isinstance(o, LispObj):
return o.type
return o.__class__.__name__
|
a3602469d8a7d5f6372c6e74d868a903651f85f7
| 3,650,227
|
import shutil
import os
import subprocess
def cr2_to_pgm(
cr2_fname,
pgm_fname=None,
overwrite=True, *args,
**kwargs): # pragma: no cover
""" Convert CR2 file to PGM
Converts a raw Canon CR2 file to a netpbm PGM file via `dcraw`. Assumes
`dcraw` is installed on the system
Note:
This is a blocking call
Arguments:
cr2_fname {str} -- Name of CR2 file to convert
**kwargs {dict} -- Additional keywords to pass to script
Keyword Arguments:
pgm_fname {str} -- Name of PGM file to output, if None (default) then
use same name as CR2 (default: {None})
dcraw {str} -- Path to installed `dcraw` (default: {'dcraw'})
overwrite {bool} -- A bool indicating if existing PGM should be overwritten
(default: {True})
Returns:
str -- Filename of PGM that was created
"""
dcraw = shutil.which('dcraw')
if dcraw is None:
raise error.InvalidCommand('dcraw not found')
if pgm_fname is None:
pgm_fname = cr2_fname.replace('.cr2', '.pgm')
if os.path.exists(pgm_fname) and not overwrite:
logger.warning(f"PGM file exists, returning existing file: {pgm_fname}")
else:
try:
# Build the command for this file
command = '{} -t 0 -D -4 {}'.format(dcraw, cr2_fname)
cmd_list = command.split()
logger.debug("PGM Conversion command: \n {}".format(cmd_list))
# Run the command
if subprocess.check_call(cmd_list) == 0:
logger.debug("PGM Conversion command successful")
except subprocess.CalledProcessError as err:
raise error.InvalidSystemCommand(msg="File: {} \n err: {}".format(cr2_fname, err))
return pgm_fname
|
62ebda213ff919904aa5d84e32c8b6f204de8c24
| 3,650,228
|
def failed_revisions_for_case_study(
case_study: CaseStudy, result_file_type: MetaReport
) -> tp.List[str]:
"""
Computes all revisions of this case study that have failed.
Args:
case_study: to work on
result_file_type: report type of the result files
Returns:
a list of failed revisions
"""
total_failed_revisions = set(
get_failed_revisions(case_study.project_name, result_file_type)
)
return [
rev for rev in case_study.revisions
if rev[:10] in total_failed_revisions
]
|
2e9d5ab3818343e7a07fc3ff7242206f4b231e89
| 3,650,229
|
def _removeTwoSentenceCommonNode(syncSrc1, syncSrc2, matchListT1, matchListT2, prefix):
"""
Identify and remove a node that appears to be in both of the target sentences,
and is also a currently active link
"""
raise DeprecationWarning, "Now finding split points before individual pairs"
# a useful rule splits the node into tgt1 and tgt2,
# with information on how to structure both
src1LinkPos = set([ tuple(syncSrc1.findLink(l)) for l in syncSrc1.links() ])
src2LinkPos = set([ tuple(syncSrc2.findLink(l)) for l in syncSrc2.links() ])
if PRINT_DEBUG_SPLIT: print "Set1 links:",src1LinkPos,"\t\tSet2 links:",src2LinkPos
commonAncestorsLinks = set((commonAncestorPositions(p1,p2) for p1 in src1LinkPos for p2 in src2LinkPos))
if PRINT_DEBUG_SPLIT:
print "commonAncestorsLinks:", commonAncestorsLinks
# not sure if I need this test. It might be enough to get a clean split
# if len(src1LinkPos-src2LinkPos)==0: raise ValueError, "No separate information in tgt1"
# if len(src2LinkPos-src1LinkPos)==0: raise ValueError, "No separate information in tgt2"
if len(src1LinkPos-src2LinkPos)==0 or len(src2LinkPos-src1LinkPos)==0:
print "Rule contains nothing to split the sentences"
linksToRemove = [ prefix+l for l in (commonAncestorsLinks) ]
print "linksToRemove:",linksToRemove
refMatchListT1 = [ (ms,mt) for (ms,mt) in matchListT1 if ms.treeposition() not in linksToRemove ]
refMatchListT2 = [ (ms,mt) for (ms,mt) in matchListT2 if ms.treeposition() not in linksToRemove ]
raise SystemExit,"common nodes"
return refMatchListT1, refMatchListT2, False
ancestorsIn1 = commonAncestorsLinks&src1LinkPos
ancestorsIn2 = commonAncestorsLinks&src2LinkPos
crossAncestors1 = [ a for a in ancestorsIn1 for d in src2LinkPos if _isDirectDescendent(d, a) ]
crossAncestors2 = [ a for a in ancestorsIn2 for d in src1LinkPos if _isDirectDescendent(d, a) ]
if PRINT_DEBUG_SPLIT:
print "crossAncestors1:", crossAncestors1
print "crossAncestors2:", crossAncestors2
if len(crossAncestors1)==0 and len(crossAncestors2)==0:
# print "Nothing to change"
return matchListT1, matchListT2, True
# remove the common ancestor nodes
linksToRemove = [ prefix+l for l in (crossAncestors1+crossAncestors2) ]
if PRINT_DEBUG_SPLIT: print "linksToRemove:",linksToRemove
refMatchListT1 = [ (ms,mt) for (ms,mt) in matchListT1 if ms.treeposition() not in linksToRemove ]
refMatchListT2 = [ (ms,mt) for (ms,mt) in matchListT2 if ms.treeposition() not in linksToRemove ]
if PRINT_DEBUG_SPLIT:
print "\nAfter removing common ancestor nodes,\nrefMatchListT1:"
printAllMatchListInfo(refMatchListT1)
print "refMatchListT2:"
printAllMatchListInfo(refMatchListT2)
raise SystemExit,"common nodes"
return refMatchListT1, refMatchListT2, False
|
4496f2ab8bf00833d3b5a11818994332df48f893
| 3,650,230
|
def bytes_load(path):
"""Load bytest from a file."""
with open(path, 'rb') as f:
return f.read()
|
ebbeb4bfcecfb94a1fa1ef8640f4e749bfa0dfcb
| 3,650,231
|
def get_relationship_length_fam_mean(data):
"""Calculate mean length of relationship for families DataDef 43
Arguments:
data - data frames to fulfill definition id
Modifies:
Nothing
Returns:
mean_relationship_length - mean relationship length of families
"""
families = data[1]
return families['max_days_since_first_service'].mean()
|
4d9b76c4dca3e1f09e7dd2684bd96e25792177fd
| 3,650,232
|
def convert_hapmap(input_dataframe, recode=False, index_col=0):
""" Specifically deals with hapmap and 23anMe Output
"""
complement = {'G/T': 'C/A', 'C/T': 'G/A', "G/A" : "G/A", "C/A": "C/A", "A/G" : "A/G",
"A/C": "A/C"}
dataframe = input_dataframe.copy()
if recode:
recode = dataframe.ix[:, index_col].apply(lambda x: complement[x])
dataframe.ix[:,0] = recode
new_dataframe = dataframe.apply(_single_column_allele, axis=1)
return new_dataframe
|
c328cfe41e25f7fd3ff2274861fb6fc89effa181
| 3,650,233
|
def to_base64(message):
"""
Returns the base64 representation of a string or bytes.
"""
return b64encode(to_bytes(message)).decode('ascii')
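For reference, an equivalent stdlib-only round trip, assuming the to_bytes helper above UTF-8-encodes str input.

from base64 import b64encode, b64decode

encoded = b64encode("hello".encode("utf-8")).decode("ascii")
print(encoded)                              # aGVsbG8=
print(b64decode(encoded).decode("utf-8"))   # hello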
|
d3f091f7dbf04850e8c40bca8e7fb0ec06f2848f
| 3,650,234
|
import traceback
def create_alerts():
"""
Function to create alerts.
"""
try:
# validate post json data
content = request.json
print(content)
if not content: raise ValueError("Empty value")
if not 'timestamp' in content or not 'camera_id' in content or not 'class_id' in content: raise KeyError("Invalid dictionary keys")
if not isinstance(content.get('timestamp'), int): raise TypeError("Timestamp must be in int64 type")
if not isinstance(content.get('camera_id'), int): raise TypeError("Camera_id must be in int32 type")
class_id = content.get('class_id')
if not isinstance(class_id, list): raise TypeError("Class_id must be an array")
for val in class_id:
if not isinstance(val, int): raise TypeError("Array class_id values must be in int32 type")
except (ValueError, KeyError, TypeError) as e:
traceback.print_exc()
resp = Response("Json format error", status=400, mimetype='application/json')
return resp
try:
record_created = db.alerts.insert_one(content)
return jsonify(id=str(record_created.inserted_id)), 201
except:
#traceback.print_exc()
return jsonify(error="Internal server error"), 500
|
ca824f0f356cf2b42e7a598810dc89f7121664ba
| 3,650,235
|
import os
def read_envs():
"""Function will read in all environment variables into a dictionary
:returns: Dictionary containing all environment variables or defaults
:rtype: dict
"""
envs = {}
envs['QUEUE_INIT_TIMEOUT'] = os.environ.get('QUEUE_INIT_TIMEOUT', '3600')
envs['VALIDATION_TIMEOUT'] = os.environ.get('VALIDATION_TIMEOUT', '28800')
envs['VALIDATION_HOME'] = os.environ.get('VALIDATION_HOME', '/opt/aif-validator')
envs['VALIDATION_FLAGS'] = os.environ.get('VALIDATION_FLAGS')
envs['S3_VALIDATION_BUCKET'] = os.environ.get('S3_VALIDATION_BUCKET')
envs['S3_VALIDATION_PREFIX'] = os.environ.get('S3_VALIDATION_PREFIX')
envs['AWS_BATCH_JOB_ID'] = os.environ.get('AWS_BATCH_JOB_ID')
envs['AWS_BATCH_JOB_NODE_INDEX'] = os.environ.get('AWS_BATCH_JOB_NODE_INDEX')
envs['AWS_DEFAULT_REGION'] = os.environ.get('AWS_DEFAULT_REGION', 'us-east-1')
return envs
|
2d357796321d561a735461d425fa7c703082434c
| 3,650,236
|
import platform
import os
def get_platform():
""" Get system platform metadata.
"""
detected_os = platform.system()
detected_distro = platform.platform()
if detected_os == "Darwin":
return PlatformDescription(detected_os=detected_os,
detected_distro=detected_distro,
is_supported=True,
help_message="",
label="")
elif detected_os == "Linux":
if os.path.isfile('/proc/device-tree/hat/uuid'):
return PlatformDescription(detected_os=detected_os,
detected_distro=detected_distro,
is_supported=False,
help_message="%s "
"Please visit %s to get started hosting "
"machine-payable servers on your Bitcoin "
"Computer." % (SUPPORTED_SYSTEMS, PING21_LEARN_URL),
label="21bc")
elif 'boot2docker' in detected_distro.lower():
return PlatformDescription(detected_os=detected_os,
detected_distro=detected_distro,
is_supported=False,
help_message="The `21 sell` service manager is not "
"yet supported within another boot2docker VM.",
label="boot2docker")
elif (os.path.isfile('/sys/hypervisor/uuid') or os.path.isdir('/var/lib/digitalocean')) and (
'debian-8.' in detected_distro.lower() or
'ubuntu-14.04' in detected_distro.lower() or
'ubuntu-16.04' in detected_distro.lower()):
return PlatformDescription(detected_os=detected_os,
detected_distro=detected_distro,
is_supported=True,
help_message="",
label="debian")
elif os.path.isfile('/sys/hypervisor/uuid') and (
'centos-7' in detected_distro.lower()):
return PlatformDescription(detected_os=detected_os,
detected_distro=detected_distro,
is_supported=True,
help_message="",
label="centos")
elif os.path.isfile('/sys/hypervisor/uuid') and (
'fedora-24' in detected_distro.lower()):
return PlatformDescription(detected_os=detected_os,
detected_distro=detected_distro,
is_supported=True,
help_message="",
label="fedora")
return PlatformDescription(detected_os=detected_os,
detected_distro=detected_distro,
is_supported=False,
help_message=SUPPORTED_SYSTEMS,
label="")
|
d54d93bf95e9d746ba33d38e1dfe2e74c7ab54d8
| 3,650,237
|
import numpy
def load_catalog_npy(catalog_path):
"""
Load a numpy catalog (extension ".npy")
@param catalog_path: str
@return record array
"""
return numpy.load(catalog_path)
|
912281ad17b043c6912075144e6a2ff3d849a391
| 3,650,238
|
def pgd(fname, n_gg=20, n_mm=20, n_kk=20, n_scale=1001):
"""
:param fname: data file name
:param n_gg: outer iterations
:param n_mm: intermediate iterations
:param n_kk: inner iterations
:param n_scale: number of discretized points, arbitrary
:return:
"""
n_buses, Qmax, Qmin, Y, V_mod, P_pq, Q_pq, P_pv, I0_pq, n_pv, n_pq = read_grid_data(fname)
SSk, SSp, SSq = init_apparent_powers_decomposition(n_buses, n_scale, P_pq, Q_pq, Qmin, Qmax)
VVk, VVp, VVq = init_voltages_decomposition(n_mm, n_buses, n_scale)
IIk, IIp, IIq = init_currents_decomposition(n_gg, n_mm, n_buses, n_scale)
n_max = n_gg * n_mm * n_kk
iter_count = 1
idx_i = 0
idx_v = 1
for gg in range(n_gg): # outer loop: iterate on γ to solve the power flow as such
for mm in range(n_mm): # intermediate loop: iterate on i to find the superposition of terms of the I tensor.
# define the new C
CCk, CCp, CCq, Nc, Nv, n = fun_C(SSk, SSp, SSq,
VVk, VVp, VVq,
IIk, IIp, IIq,
idx_i, idx_v,
n_buses, n_scale)
# initialize the residues we have to find
IIk1 = (np.random.rand(n_buses) - np.random.rand(n_buses)) * 1 # could also try to set IIk1 = VVk1
IIp1 = (np.random.rand(n_buses) - np.random.rand(n_buses)) * 1
IIq1 = (np.random.rand(n_scale) - np.random.rand(n_scale)) * 1
for kk in range(n_kk): # inner loop: iterate on Γ to find the residues.
# compute IIk1 (residues on Ik)
RHSk = np.zeros(n_buses, dtype=complex)
for ii in range(Nc):
prodRK = np.dot(IIp1, CCp[ii]) * np.dot(IIq1, CCq[ii])
RHSk += prodRK * CCk[ii]
LHSk = np.zeros(n_buses, dtype=complex)
for ii in range(Nv):
prodLK = np.dot(IIp1, VVp[ii] * IIp1) * np.dot(IIq1, VVq[ii] * IIq1)
LHSk += prodLK * VVk[ii]
IIk1 = RHSk / LHSk
# compute IIp1 (residues on Ip)
RHSp = np.zeros(n_buses, dtype=complex)
for ii in range(Nc):
prodRP = np.dot(IIk1, CCk[ii]) * np.dot(IIq1, CCq[ii])
RHSp += prodRP * CCp[ii]
LHSp = np.zeros(n_buses, dtype=complex)
for ii in range(Nv):
prodLP = np.dot(IIk1, VVk[ii] * IIk1) * np.dot(IIq1, VVq[ii] * IIq1)
LHSp += prodLP * VVp[ii]
IIp1 = RHSp / LHSp
# compute IIq1 (residues on Iq)
RHSq = np.zeros(n_scale, dtype=complex)
for ii in range(Nc):
prodRQ = np.dot(IIk1, CCk[ii]) * np.dot(IIp1, CCp[ii])
RHSq += prodRQ * CCq[ii]
LHSq = np.zeros(n_scale, dtype=complex)
for ii in range(Nv):
prodLQ = np.dot(IIk1, VVk[ii] * IIk1) * np.dot(IIp1, VVp[ii] * IIp1)
LHSq += prodLQ * VVq[ii]
IIq1 = RHSq / LHSq
progress_bar(iter_count, n_max, 50) # display the inner operations
iter_count += 1
IIk[idx_i, :] = IIk1
IIp[idx_i, :] = IIp1
IIq[idx_i, :] = IIq1
idx_i += 1
for ii in range(n_mm):
VVk[ii, :] = np.conj(sp_linalg.spsolve(Y, IIk[ii]))
VVp[ii, :] = IIp[ii]
VVq[ii, :] = IIq[ii]
# try to add I0 this way:
VVk[n_mm, :] = np.conj(sp_linalg.spsolve(Y, I0_pq))
VVp[n_mm, :] = np.ones(n_buses)
VVq[n_mm, :] = np.ones(n_scale)
idx_v = n_mm + 1
# VVk: size (n_mm + 1, nbus)
# VVp: size (n_mm + 1, nbus)
# VVq: size (n_mm + 1, n_scale)
v_map = build_map(VVk, VVp, VVq)
# SSk: size (2, nbus)
# SSp: size (2, nbus)
# SSq: size (2, n_scale)
s_map = build_map(SSk, SSp, SSq)
# IIk: size (n_gg * n_mm, nbus)
# IIp: size (n_gg * n_mm, nbus)
# IIq: size (n_gg * n_mm, n_scale)
i_map = build_map(IIk, IIp, IIq)
# the size of the maps is nbus, nbus, n_scale
return v_map, s_map, i_map
|
5a00b6992ecf5c4f3b89fcf5151cae71c4a36298
| 3,650,239
|
def find_first_empty(rect):
"""
Scan a rectangle and find first open square
@param {Array} rect Board layout (rectangle)
@return {tuple} x & y coordinates of the leftmost top blank square
"""
return _find_first_empty_wrapped(len(rect[0]))(rect)
|
d266805761cda903733cef7704baff6d38576b04
| 3,650,240
|
import re
def parseArticle(text: str) -> str:
"""
Parses and filters an article. It uses the `wikitextparser` and custom logic.
"""
# clear the image attachments and links
text = re.sub("\[\[Податотека:.+\]\][ \n]", '', text)
text = wikipedia.filtering.clearCurlyBrackets(text)
# replace everything after "Надворешни врски"
links_location = re.search("[\=]+[ ]+(Поврзано|Наводи|Надворешни врски)[ ]+[\=]+", text)
if links_location != None:
text = text[:links_location.span()[0]]
# remove headings and break lines
text = re.sub("([\=]+.+[\=]+.+\n)|(<br />)", '\n', text)
# parse the file using the wikitextparser
parsed = wtp.parse(text)
return parsed.plain_text()
|
7a17e31ec960b568debb9c6e7ccb8018bba19218
| 3,650,241
|
import torch
def exp2(input, *args, **kwargs):
"""
Computes the base two exponential function of ``input``.
Examples::
>>> import torch
>>> import treetensor.torch as ttorch
>>> ttorch.exp2(ttorch.tensor([-4.0, -1.0, 0, 2.0, 4.8, 8.0]))
tensor([6.2500e-02, 5.0000e-01, 1.0000e+00, 4.0000e+00, 2.7858e+01, 2.5600e+02])
>>> ttorch.exp2(ttorch.tensor({
... 'a': [-4.0, -1.0, 0, 2.0, 4.8, 8.0],
... 'b': {'x': [[-2.0, 1.2, 0.25],
... [16.0, 3.75, -2.34]]},
... }))
<Tensor 0x7ff90a4c3af0>
├── a --> tensor([6.2500e-02, 5.0000e-01, 1.0000e+00, 4.0000e+00, 2.7858e+01, 2.5600e+02])
└── b --> <Tensor 0x7ff90a4c3be0>
└── x --> tensor([[2.5000e-01, 2.2974e+00, 1.1892e+00],
[6.5536e+04, 1.3454e+01, 1.9751e-01]])
"""
return torch.exp2(input, *args, **kwargs)
|
17cbc0917acf19932ec4d3a89de8d78545d02e31
| 3,650,242
|
def list_default_storage_policy_of_datastore(
datastore,
host=None,
vcenter=None,
username=None,
password=None,
protocol=None,
port=None,
verify_ssl=True,
):
"""
Returns the default storage policy assigned to the given datastore.
datastore
Name of the datastore to assign.
The datastore needs to be visible to the VMware entity the proxy
points to.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
salt '*' vsphere.list_default_storage_policy_of_datastore datastore=ds1
"""
log.trace("Listing the default storage policy of datastore '{}'" "".format(datastore))
if salt.utils.platform.is_proxy():
details = __salt__["vmware_info.get_proxy_connection_details"]()
else:
details = __salt__["vmware_info.get_connection_details"](
host=host,
vcenter=vcenter,
username=username,
password=password,
protocol=protocol,
port=port,
verify_ssl=verify_ssl,
)
service_instance = saltext.vmware.utils.vmware.get_service_instance(**details)
# Find datastore
target_ref = __salt__["vmware_info.get_proxy_target"](service_instance)
ds_refs = saltext.vmware.utils.vmware.get_datastores(
service_instance, target_ref, datastore_names=[datastore]
)
if not ds_refs:
raise VMwareObjectRetrievalError("Datastore '{}' was not " "found".format(datastore))
profile_manager = salt.utils.pbm.get_profile_manager(service_instance)
policy = salt.utils.pbm.get_default_storage_policy_of_datastore(profile_manager, ds_refs[0])
return saltext.vmware.utils.get_policy_dict(policy)
|
caf729c258a4ece895d14fdd93ba1f23879453e8
| 3,650,243
|
import numpy
def action_stats(env, md_action, cont_action):
"""
Get information on `env`'s action space.
Parameters
----------
md_action : bool
Whether the `env`'s action space is multidimensional.
cont_action : bool
Whether the `env`'s action space is continuous.
Returns
-------
n_actions_per_dim : list of length (action_dim,)
The number of possible actions for each dimension of the action space.
action_ids : list or None
A list of all valid actions within the space. If `cont_action` is
True, this value will be None.
action_dim : int or None
The number of dimensions in a single action.
"""
if cont_action:
action_dim = 1
action_ids = None
n_actions_per_dim = [numpy.inf]
if md_action:
action_dim = env.action_space.shape[0]
n_actions_per_dim = [numpy.inf for _ in range(action_dim)]
else:
if md_action:
n_actions_per_dim = [
space.n if hasattr(space, "n") else numpy.inf
for space in env.action_space.spaces
]
action_ids = (
None
if numpy.inf in n_actions_per_dim
else list(product(*[range(i) for i in n_actions_per_dim]))
)
action_dim = len(n_actions_per_dim)
else:
action_dim = 1
n_actions_per_dim = [env.action_space.n]
action_ids = list(range(n_actions_per_dim[0]))
return n_actions_per_dim, action_ids, action_dim
|
8a08b3fe5be20f274680fe33df9ca02456bb511f
| 3,650,244
|
def count(pred: Pred, seq: Seq) -> int:
"""
Count the number of occurrences in which predicate is true.
"""
pred = to_callable(pred)
return sum(1 for x in seq if pred(x))
|
09726935174d7590030331da62322da870e216aa
| 3,650,245
|
def lambda_k(W, Z, k):
"""Coulomb function $\lambda_k$ as per Behrens et al.
:param W: Total electron energy in units of its rest mass
:param Z: Proton number of daughter
:param k: absolute value of kappa
"""
#return 1.
gammak = np.sqrt(k**2.0-(ALPHA*Z)**2.0)
gamma1 = np.sqrt(1.-(ALPHA*Z)**2.0)
R = 1.2e-15*(2.5*Z)**(1./3.)/NATURALLENGTH
return generalizedFermiFunction(W, Z, R, k)/generalizedFermiFunction(W, Z, R, 1)*(k+gammak)/(k*(1+gamma1))
|
9f6a59bd730460c09a3c7f855550254ffb5cdc66
| 3,650,246
|
import pprint
import json
def tryJsonOrPlain(text):
"""Return json formatted, if possible. Otherwise just return."""
try:
return pprint.pformat( json.loads( text ), indent=1 )
except:
return text
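A quick usage sketch, assuming tryJsonOrPlain above is in scope.

print(tryJsonOrPlain('{"b": [1, 2], "a": 3}'))  # pretty-printed via pprint
print(tryJsonOrPlain('not valid json'))         # returned unchanged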
|
2431479abf6ab3c17ea63356ec740840d2d18a74
| 3,650,247
|
import aiohttp
import websockets
import random
def create_signaling(args):
"""
Create a signaling method based on command-line arguments.
"""
if args.signaling == "apprtc":
if aiohttp is None or websockets is None: # pragma: no cover
raise Exception("Please install aiohttp and websockets to use appr.tc")
if not args.signaling_room:
args.signaling_room = "".join(
[random.choice("0123456789") for x in range(10)]
)
return ApprtcSignaling(args.signaling_room)
elif args.signaling == "tcp-socket":
return TcpSocketSignaling(args.signaling_host, args.signaling_port)
elif args.signaling == "unix-socket":
return UnixSocketSignaling(args.signaling_path)
else:
return CopyAndPasteSignaling()
|
f9fe4ed35555381468dbf15379ab70a4af289ac9
| 3,650,248
|
def get_corpus_gene_adjacency(corpus_id):
"""Generate a nugget table."""
corpus = get_corpus(corpus_id)
data = get_gene_adjacency(corpus)
return jsonify(data), 200
|
9dd86e11eba5e5bc5094d89bd15dd9315df40480
| 3,650,249
|
def get_pool_health(pool):
""" Get ZFS list info. """
pool_name = pool.split()[0]
pool_capacity = pool.split()[6]
pool_health = pool.split()[9]
return pool_name, pool_capacity, pool_health
|
1a9dbb8477d8735b225afc2bdd683f550602b36e
| 3,650,250
|
def resize_short(img, target_size):
""" resize_short """
percent = float(target_size) / min(img.shape[0], img.shape[1])
resized_width = int(round(img.shape[1] * percent))
resized_height = int(round(img.shape[0] * percent))
resized_width = normwidth(resized_width)
resized_height = normwidth(resized_height)
resized = cv2.resize(img, (resized_width, resized_height))
return resized
|
ca9a71dc97a57c5739419c284514466e86dc3fa1
| 3,650,251
|
def _scale(aesthetic, name=None, breaks=None, labels=None, limits=None, expand=None, na_value=None, guide=None,
trans=None, **other):
"""
Create a scale (discrete or continuous)
:param aesthetic
The name of the aesthetic that this scale works with
:param name
The name of the scale - used as the axis label or the legend title
:param breaks
A numeric vector of positions (of ticks)
:param labels
A vector of labels (on ticks)
:param limits
A numeric vector of length two providing limits of the scale.
:param expand
A numeric vector of length two giving multiplicative and additive expansion constants.
:param na_value
Value to use for missing values
:param guide
Type of legend. Use 'colorbar' for continuous color bar, or 'legend' for discrete values.
:param trans
Name of built-in transformation. ('identity', 'log10', 'sqrt', 'reverse')
:return:
"""
# flatten the 'other' sub-dictionary
args = locals().copy()
args.pop('other')
return FeatureSpec('scale', **args, **other)
|
c8d98a52f2b87340e0a1df46b9f36ca811c18d5d
| 3,650,252
|
def logsubexp(x, y):
"""
Helper function to compute the logarithm of a difference
of exponentials, log(exp(x) - exp(y)).
Computes: ``x + np.log1p(-np.exp(y-x))``
Parameters
----------
x, y : float or array_like
Inputs
"""
if np.any(x < y):
raise RuntimeError('cannot take log of negative number '
f'{str(x)!s} - {str(y)!s}')
return x + np.log1p(-np.exp(y - x))
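A small numerical check of the identity used above, assuming logsubexp is in scope; the direct form overflows for large x, which is why the log1p form is preferred.

import numpy as np

x, y = 5.0, 4.0
print(np.log(np.exp(x) - np.exp(y)))  # direct form: 4.5413...
print(logsubexp(x, y))                # stable form: same value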
|
a0b0434fb2714f3d1dec24b88ce0fa9ff0110bc0
| 3,650,253
|
def is_sequence_of_list(items):
"""Verify that the sequence contains only items of type list.
Parameters
----------
items : sequence
The items.
Returns
-------
bool
True if all items in the sequence are of type list.
False otherwise.
Examples
--------
>>> is_sequence_of_list([[1], [1], [1]])
True
"""
return all(isinstance(item, list) for item in items)
|
e53e5d31e1c4f5649b2f03edf792f810bc398446
| 3,650,254
|
def sum_fib_dp(m, n):
"""
A dynamic programming version.
"""
if m > n: m, n = n, m
large, small = 1, 0
# running sum of the Fibonacci numbers F[m] through F[n]
running = 0
# dynamically update the two variables
for i in range(n):
large, small = large + small, large
# note that (i + 1) -> small is basically mapping m -> F[m]
if m <= i + 1 <= n:
running += small
return running
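A brute-force cross-check of sum_fib_dp above (using its convention F[1] = F[2] = 1), shown as a sketch.

def fib(k):
    a, b = 1, 1
    for _ in range(k - 1):
        a, b = b, a + b
    return a

print(sum_fib_dp(3, 6))                  # 18
print(sum(fib(k) for k in range(3, 7)))  # 2 + 3 + 5 + 8 = 18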
|
5be6e57ddf54d185ca6d17adebd847d0bc2f56fc
| 3,650,255
|
def fibo_dyn2(n):
"""
return
the n-th fibonacci number
"""
if n < 2:
return 1
else:
a, b = 1, 1
for _ in range(1,n):
a, b = b, a+b
return b
|
e8483e672914e20c6e7b892f3dab8fb299bac6fc
| 3,650,256
|
import time
import math
def build_all(box, request_list):
"""
box is [handle, left, top, bottom] \n
request_list is a list of request dicts \n
****** Attention
Before running this function, the window should already be at the index screen.
After build_all, the function will close the train-troop window.
"""
# get the box of windows
left = box[1]
top = box[2]
positions = init_pos_army()
# get the information about request
request = request_deal(request_list[0]['str'])
num_army = int(request_list[0]['army']['max'])
num_spells = int(request_list[0]['spells']['max'])
num_devices = int(request_list[0]['device']['max'])
num_army_fill_in = int(request_list[0]['army']['fill_in'])
num_spells_fill_in = int(request_list[0]['spells']['fill_in'])
num_device_fill_in = int(request_list[0]['device']['fill_in'])
# open army
time.sleep(0.2)
Click(left + positions['army'][0], top + positions['army'][1])
# select dragon
if request[0] != None:
# open train troops
time.sleep(0.2)
Click(left + positions['train_troops'][0], top + positions['train_troops'][1])
if ( num_army - num_army_fill_in ) >= num_housing_space[request[0]]:
for index in range( math.floor( ( num_army - num_army_fill_in ) / num_housing_space[request[0]] ) ):
time.sleep(0.2)
Click(left + positions[request[0]][0], top + positions[request[0]][1])
# select speed increase
if request[1] != None:
# open brew spells
time.sleep(0.2)
Click(left + positions['Brew_spells'][0], top + positions['Brew_spells'][1])
if ( num_spells - num_spells_fill_in ) >= num_housing_space[request[1]]:
for index in range( math.floor( ( num_spells - num_spells_fill_in ) / num_housing_space[request[1]] ) ):
time.sleep(0.2)
Click(left + positions[request[1]][0], top + positions[request[1]][1])
# select device
# if request[2] != None:
# open brew spells
##
# close the army
time.sleep(0.2)
Click(left + positions['close_army'][0], top + positions['close_army'][1])
print('close the army')
return True
|
59db695f802867e85a5e920f2efa8f652ac12823
| 3,650,257
|
def select_results(results):
"""Select relevant images from results
Selects most recent image for location, and results with positive fit index.
"""
# Select results with positive bestFitIndex
results = [x for x in results['items'] if x['bestFitIndex'] > 0]
# counter_dict schema:
# counter_dict = {
# bounds: {
# 'dateCreated': date,
# 'downloadURL'
# }
# }
counter_dict = {}
for result in results:
bounds = result_to_bounds(result)
# does something already exist with these bounds?
existing = counter_dict.get(bounds)
# If exists, check if newer
if existing is not None:
existing_date = existing['dateCreated']
this_date = date_parse(result['dateCreated'])
if this_date < existing_date:
continue
# Doesn't exist yet or is newer, so add to dict
counter_dict[bounds] = {
'dateCreated': date_parse(result['dateCreated']),
'downloadURL': result['downloadURL']}
return [x['downloadURL'] for x in counter_dict.values()]
|
85940fe93b33d79ca0a799ca52a9e68439e7e822
| 3,650,258
|
def dc_session(virtual_smoothie_env, monkeypatch):
"""
Mock session manager for deck calibration
"""
ses = endpoints.SessionManager()
monkeypatch.setattr(endpoints, 'session', ses)
return ses
|
06876e5ce2d599086efcb37688f5181f32392068
| 3,650,259
|
def is_available(_cache={}):
"""Return version tuple and None if OmnisciDB server is accessible or
recent enough. Otherwise return None and the reason about
unavailability.
"""
if not _cache:
omnisci = next(global_omnisci_singleton)
try:
version = omnisci.version
except Exception as msg:
_cache['reason'] = 'failed to get OmniSci version: %s' % (msg)
else:
print(' OmnisciDB version', version)
if version[:2] >= (4, 6):
_cache['version'] = version
else:
_cache['reason'] = (
'expected OmniSci version 4.6 or greater, got %s'
% (version,))
return _cache.get('version', ()), _cache.get('reason', '')
|
0edcbfcd1ecb6a56b4b4a6f55907271c9094b8d8
| 3,650,260
|
import copy
def pitch_info_from_pitch_string(pitch_str: str) -> PitchInfo:
"""
Parse a pitch string representation. E.g. C#4, A#5, Gb8
"""
parts = tuple((c for c in pitch_str))
size = len(parts)
pitch_class = register = accidental = None
if size == 1:
(pitch_class,) = parts
elif size == 2:
(pitch_class, register) = parts
elif size >= 3:
(pitch_class, accidental, register) = parts[:3]
accidental = Accidental.SHARP if accidental == '#' \
else Accidental.FLAT if accidental == 'b' \
else Accidental.NATURAL
register = int(register)
pitch_info = PitchInfo(pitch_class=pitch_class, accidental=accidental)
matching_chromatic_pitch_info, _ = next(
matching_pitch_info_generator(pitch_info, CHROMATIC_PITCHES_INFO)
)
final_pitch_info = copy.deepcopy(matching_chromatic_pitch_info)
final_pitch_info.register = register
if is_enharmonic_match(pitch_info, matching_chromatic_pitch_info):
final_pitch_info.swap_enharmonic()
return final_pitch_info
|
cc1b6c0fe64834fe3fdc5249078e10e8f3af1434
| 3,650,261
|
def determine_word_type(tag):
"""
Determines the word type by checking the tag returned by the nltk.pos_tag(arr[str]) function.
Each word in the array is marked with a special tag which can be used to find the correct type of a word.
A selection is given in the dictionaries.
Args:
tag : String tag from the nltk.pos_tag(str) function that classified the particular word with a tag
Returns:
str: Word type as a string
"""
types = {
"noun" : {"NN", "NNS", "NNPS", "FW"},
"adjective" : {"JJ", "JJR", "JJS"},
"verb" : {"VB", "VBD", "VBG", "VBN", "VBP", "VBZ"},
"adverb" : {"RB", "RBR"}
}
for type_, set_ in types.items():
if tag in set_:
return type_
return "noun"
|
4505d2cf69f961ecdec4e3c693ea85b916acce96
| 3,650,262
|
def get_normalized_map_from_google(normalization_type, connection=None, n_header_lines=0):
"""
get normalized voci or titoli mapping from gdoc spreadsheets
:param: normalization_type (t|v)
:param: connection - (optional) a connection to the google account (singleton)
:param: n_header_lines - (optional) n. of lines to ignore
:ret: a dict, containing the consuntivo and preventivo sheets
"""
# get all gdocs keys
gdoc_keys = settings.GDOC_KEYS
if normalization_type == 't':
gdoc_key = gdoc_keys['titoli_map']
elif normalization_type == 'v':
gdoc_key = gdoc_keys['voci_map']
else:
raise Exception("normalization_type arg accepts 't' or 'v' as possible values")
if connection is None:
connection = get_connection()
# open the list worksheet
list_sheet = None
try:
list_sheet = connection.open_by_key(gdoc_key)
except exceptions.SpreadsheetNotFound:
raise Exception("Error: gdoc url not found: {0}".format(
gdoc_key
))
logger.info("normalized mapping gdoc read. key: {0}".format(
gdoc_key
))
# put the mapping into the voci_map dict
# preventivo and consuntivo sheets are appended in a single list
# the first two rows are removed (labels)
try:
logger.info("reading preventivo ...")
voci_map_preventivo = list_sheet.worksheet("preventivo").get_all_values()[n_header_lines:]
logger.info("reading consuntivo ...")
voci_map_consuntivo = list_sheet.worksheet("consuntivo").get_all_values()[n_header_lines:]
except URLError:
raise Exception("Connection error to Gdrive")
logger.info("done with reading the mapping list.")
return {
'preventivo': voci_map_preventivo,
'consuntivo': voci_map_consuntivo,
}
|
61056536cae2da9053f21d2739488c5512546a68
| 3,650,263
|
import io
def parse_file(fname, is_true=True):
"""Parse file to get labels."""
labels = []
with io.open(fname, "r", encoding="utf-8", errors="ignore") as fin:
for line in fin:
label = line.strip().split()[0]
if is_true:
assert label[:9] == "__label__"
label = label[9:]
labels.append(label)
return labels
|
ea6cbd4b1a272f472f8a75e1cc87a2209e439205
| 3,650,264
|
from datetime import datetime
import os
import json
import boto3
def main(event, context):
"""
Args:
package: Python Package to build and deploy
return:
execution_arn: ARN of the state machine execution that is building the package
"""
packages = get_config.get_packages()
execution_arns =[]
for package in packages:
client = boto3.client('stepfunctions')
execution_time = datetime.now().isoformat().replace('-', '').replace(':', '')[:14]
response = client.start_execution(
stateMachineArn=os.environ['PIPELINE_ARN'],
name=f"{package}_{execution_time}",
input=json.dumps({"package": package})
)
execution_arns.append(response['executionArn'])
return {"arns": execution_arns}
|
a73abef29b0fd98e878ee71b9fbc30fa8f204bb9
| 3,650,265
|
def make_mesh(object_name, object_colour=(0.25, 0.25, 0.25, 1.0), collection="Collection"):
"""
Create a mesh then return the object reference and the mesh object
:param object_name: Name of the object
:type object_name: str
:param object_colour: RGBA colour of the object, defaults to a shade of grey
:type object_colour: (float, float, float, float)
:param collection: Where you want the objected to be added, defaults to Collection
:type collection: str
:return: Object reference and mesh reference
"""
# Make the block
mesh = bpy.data.meshes.new(object_name) # add the new mesh
obj = bpy.data.objects.new(mesh.name, mesh)
create_emission_node(obj, object_colour)
col = bpy.data.collections.get(collection)
col.objects.link(obj)
bpy.context.view_layer.objects.active = obj
return obj, mesh
|
73fd8d13d471c55258a06feb71eec51ca51f23f9
| 3,650,266
|
import optparse
def _OptionParser():
"""Returns the options parser for run-bisect-perf-regression.py."""
usage = ('%prog [options] [-- chromium-options]\n'
'Used by a try bot to run the bisection script using the parameters'
' provided in the auto_bisect/bisect.cfg file.')
parser = optparse.OptionParser(usage=usage)
parser.add_option('-w', '--working_directory',
type='str',
help='A working directory to supply to the bisection '
'script, which will use it as the location to checkout '
'a copy of the chromium depot.')
parser.add_option('-p', '--path_to_goma',
type='str',
help='Path to goma directory. If this is supplied, goma '
'builds will be enabled.')
parser.add_option('--path_to_config',
type='str',
help='Path to the config file to use. If this is supplied, '
'the bisect script will use this to override the default '
'config file path. The script will attempt to load it '
'as a bisect config first, then a perf config.')
parser.add_option('--extra_src',
type='str',
help='Path to extra source file. If this is supplied, '
'bisect script will use this to override default behavior.')
parser.add_option('--dry_run',
action="store_true",
help='The script will perform the full bisect, but '
'without syncing, building, or running the performance '
'tests.')
return parser
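
# Hypothetical usage sketch (not part of the original source): parse an example
# command line with the parser built above.
if __name__ == '__main__':
    parser = _OptionParser()
    opts, extra_args = parser.parse_args(['-w', '/tmp/bisect_work', '--dry_run'])
    print(opts.working_directory, opts.dry_run)  # -> /tmp/bisect_work True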
|
7485db294d89732c2c5223a3e3fe0b7773444b49
| 3,650,267
|
def calc_radiance(wavel, Temp):
"""
Calculate the blackbody radiance
Parameters
----------
wavel: float or array
wavelength (meters)
Temp: float
temperature (K)
Returns
-------
Llambda: float or arr
monochromatic radiance (W/m^2/m/sr)
"""
Llambda_val = c1 / (wavel**5. * (np.exp(c2 / (wavel * Temp)) - 1))
return Llambda_val
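
# Worked example (not part of the original source). The function relies on module-level
# radiation constants c1 and c2 that are not shown in this snippet; the values below assume
# c1 = 2*h*c**2 and c2 = h*c/k_B, which give radiance in W/m^2/m/sr as documented.
import numpy as np
c1 = 1.191042972e-16   # W m^2 sr^-1, first radiation constant (radiance form)
c2 = 1.438776877e-2    # m K, second radiation constant
print(calc_radiance(10e-6, 300.0))  # ~1e7 W/m^2/m/sr for a 300 K blackbody at 10 microns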
|
9a957b42e0e92614709f7157765f0185e1dd532a
| 3,650,268
|
import json
import os
def load_config(filename):
"""
Returns:
dict
"""
config = json.load(open(filename, 'r'))
# back-compat
if 'csvFile' in config:
config['modelCategoryFile'] = config['csvFile']
del config['csvFile']
required_files = ["prefix", "modelCategoryFile", "colorFile"]
for f in required_files:
assert f in config, 'Invalid config! key <{}> is missing!'.format(f)
assert os.path.exists(config[f]), 'Invalid config! path <{}> not exists!'.format(config[f])
if ('File' in f):
assert os.path.isfile(config[f]), 'Invalid config! <{}> is not a valid file!'.format(config[f])
return config
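
# Hypothetical usage sketch (not part of the original source): build a minimal config on
# disk so the existence asserts pass, then load it. Uses the legacy 'csvFile' key to show
# the back-compat renaming to 'modelCategoryFile'.
if __name__ == "__main__":
    open("categories.csv", "w").close()   # placeholder files
    open("colors.csv", "w").close()
    with open("config.json", "w") as f:
        json.dump({"prefix": ".", "csvFile": "categories.csv", "colorFile": "colors.csv"}, f)
    cfg = load_config("config.json")
    print(cfg["modelCategoryFile"])  # -> categories.csv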
|
af9f6cb02925d38077652703813b9fec201f12f7
| 3,650,269
|
def _JMS_to_Fierz_III_IV_V(C, qqqq):
"""From JMS to 4-quark Fierz basis for Classes III, IV and V.
`qqqq` should be of the form 'sbuc', 'sdcc', 'ucuu' etc."""
#case dduu
classIII = ['sbuc', 'sbcu', 'dbuc', 'dbcu', 'dsuc', 'dscu']
classVdduu = ['sbuu' , 'dbuu', 'dsuu', 'sbcc' , 'dbcc', 'dscc']
if qqqq in classIII + classVdduu:
f1 = dflav[qqqq[0]]
f2 = dflav[qqqq[1]]
f3 = uflav[qqqq[2]]
f4 = uflav[qqqq[3]]
return {
'F' + qqqq + '1' : C["V1udLL"][f3, f4, f1, f2]
- C["V8udLL"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '2' : C["V8udLL"][f3, f4, f1, f2] / 2,
'F' + qqqq + '3' : C["V1duLR"][f1, f2, f3, f4]
- C["V8duLR"][f1, f2, f3, f4] / (2 * Nc),
'F' + qqqq + '4' : C["V8duLR"][f1, f2, f3, f4] / 2,
'F' + qqqq + '5' : C["S1udRR"][f3, f4, f1, f2]
- C["S8udduRR"][f3, f2, f1, f4] / 4
- C["S8udRR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '6' : -C["S1udduRR"][f3, f2, f1, f4] / 2
+ C["S8udduRR"][f3, f2, f1, f4] /(4 * Nc)
+ C["S8udRR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '7' : -C["V8udduLR"][f4, f1, f2, f3].conj(),
'F' + qqqq + '8' : -2 * C["V1udduLR"][f4, f1, f2, f3].conj()
+ C["V8udduLR"][f4, f1, f2, f3].conj() / Nc,
'F' + qqqq + '9' : -C["S8udduRR"][f3, f2, f1, f4] / 16,
'F' + qqqq + '10' : -C["S1udduRR"][f3, f2, f1, f4] / 8
+ C["S8udduRR"][f3, f2, f1, f4] / (16 * Nc),
'F' + qqqq + '1p' : C["V1udRR"][f3, f4, f1, f2]
- C["V8udRR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '2p' : C["V8udRR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '3p' : C["V1udLR"][f3, f4, f1, f2]
- C["V8udLR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '4p' : C["V8udLR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '5p' : C["S1udRR"][f4, f3, f2, f1].conj() -
C["S8udduRR"][f4, f1, f2, f3].conj() / 4
- C["S8udRR"][f4, f3, f2, f1].conj() / (2 * Nc),
'F' + qqqq + '6p' : -C["S1udduRR"][f4, f1, f2, f3].conj() / 2 +
C["S8udduRR"][f4, f1, f2, f3].conj()/(4 * Nc)
+ C["S8udRR"][f4, f3, f2, f1].conj() / 2,
'F' + qqqq + '7p' : -C["V8udduLR"][f3, f2, f1, f4],
'F' + qqqq + '8p' : - 2 * C["V1udduLR"][f3, f2, f1, f4]
+ C["V8udduLR"][f3, f2, f1, f4] / Nc,
'F' + qqqq + '9p' : -C["S8udduRR"][f4, f1, f2, f3].conj() / 16,
'F' + qqqq + '10p' : -C["S1udduRR"][f4, f1, f2, f3].conj() / 8
+ C["S8udduRR"][f4, f1, f2, f3].conj() / 16 / Nc
}
classVuudd = ['ucdd', 'ucss', 'ucbb']
if qqqq in classVuudd:
f3 = uflav[qqqq[0]]
f4 = uflav[qqqq[1]]
f1 = dflav[qqqq[2]]
f2 = dflav[qqqq[3]]
return {
'F' + qqqq + '1' : C["V1udLL"][f3, f4, f1, f2]
- C["V8udLL"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '2' : C["V8udLL"][f3, f4, f1, f2] / 2,
'F' + qqqq + '3p' : C["V1duLR"][f1, f2, f3, f4]
- C["V8duLR"][f1, f2, f3, f4] / (2 * Nc),
'F' + qqqq + '4p' : C["V8duLR"][f1, f2, f3, f4] / 2,
'F' + qqqq + '5' : C["S1udRR"][f3, f4, f1, f2]
- C["S8udduRR"][f3, f2, f1, f4] / 4
- C["S8udRR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '6' : -C["S1udduRR"][f3, f2, f1, f4] / 2
+ C["S8udduRR"][f3, f2, f1, f4] /(4 * Nc)
+ C["S8udRR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '7p' : -C["V8udduLR"][f4, f1, f2, f3].conj(),
'F' + qqqq + '8p' : -2 * C["V1udduLR"][f4, f1, f2, f3].conj()
+ C["V8udduLR"][f4, f1, f2, f3].conj() / Nc,
'F' + qqqq + '9' : -C["S8udduRR"][f3, f2, f1, f4] / 16,
'F' + qqqq + '10' : -C["S1udduRR"][f3, f2, f1, f4] / 8
+ C["S8udduRR"][f3, f2, f1, f4] / (16 * Nc),
'F' + qqqq + '1p' : C["V1udRR"][f3, f4, f1, f2]
- C["V8udRR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '2p' : C["V8udRR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '3' : C["V1udLR"][f3, f4, f1, f2]
- C["V8udLR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '4' : C["V8udLR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '5p' : C["S1udRR"][f4, f3, f2, f1].conj() -
C["S8udduRR"][f4, f1, f2, f3].conj() / 4
- C["S8udRR"][f4, f3, f2, f1].conj() / (2 * Nc),
'F' + qqqq + '6p' : -C["S1udduRR"][f4, f1, f2, f3].conj() / 2 +
C["S8udduRR"][f4, f1, f2, f3].conj()/(4 * Nc)
+ C["S8udRR"][f4, f3, f2, f1].conj() / 2,
'F' + qqqq + '7' : -C["V8udduLR"][f3, f2, f1, f4],
'F' + qqqq + '8' : - 2 * C["V1udduLR"][f3, f2, f1, f4]
+ C["V8udduLR"][f3, f2, f1, f4] / Nc,
'F' + qqqq + '9p' : -C["S8udduRR"][f4, f1, f2, f3].conj() / 16,
'F' + qqqq + '10p' : -C["S1udduRR"][f4, f1, f2, f3].conj() / 8
+ C["S8udduRR"][f4, f1, f2, f3].conj() / 16 / Nc
}
#case dddd
classIV = ['sbsd', 'dbds', 'bsbd']
classVdddd = ['sbss', 'dbdd', 'dsdd', 'sbbb', 'dbbb', 'dsss']
classVddddind = ['sbdd', 'dsbb', 'dbss']
if qqqq in classIV + classVdddd + classVddddind:
f1 = dflav[qqqq[0]]
f2 = dflav[qqqq[1]]
f3 = dflav[qqqq[2]]
f4 = dflav[qqqq[3]]
return {
'F'+ qqqq +'1' : C["VddLL"][f3, f4, f1, f2],
'F'+ qqqq +'2' : C["VddLL"][f1, f4, f3, f2],
'F'+ qqqq +'3' : C["V1ddLR"][f1, f2, f3, f4]
- C["V8ddLR"][f1, f2, f3, f4]/(2 * Nc),
'F'+ qqqq +'4' : C["V8ddLR"][f1, f2, f3, f4] / 2,
'F'+ qqqq +'5' : C["S1ddRR"][f3, f4, f1, f2]
- C["S8ddRR"][f3, f2, f1,f4] / 4
- C["S8ddRR"][f3, f4, f1, f2] / (2 * Nc),
'F'+ qqqq +'6' : -C["S1ddRR"][f1, f4, f3, f2] / 2
+ C["S8ddRR"][f3, f2, f1, f4] / (4 * Nc)
+ C["S8ddRR"][f3, f4, f1, f2] / 2,
'F'+ qqqq +'7' : -C["V8ddLR"][f1, f4, f3, f2],
'F'+ qqqq +'8' : -2 * C["V1ddLR"][f1, f4, f3, f2]
+ C["V8ddLR"][f1, f4, f3, f2] / Nc,
'F'+ qqqq +'9' : -C["S8ddRR"][f3, f2, f1, f4] / 16,
'F'+ qqqq +'10' : -C["S1ddRR"][f1, f4, f3, f2] / 8
+ C["S8ddRR"][f3, f2, f1, f4] / (16 * Nc),
'F'+ qqqq +'1p' : C["VddRR"][f3, f4, f1, f2],
'F'+ qqqq +'2p' : C["VddRR"][f1, f4, f3, f2],
'F'+ qqqq +'3p' : C["V1ddLR"][f3, f4, f1, f2]
- C["V8ddLR"][f3, f4, f1,f2] / (2 * Nc),
'F'+ qqqq +'4p' : C["V8ddLR"][f3, f4, f1, f2] / 2,
'F'+ qqqq +'5p' : C["S1ddRR"][f4, f3, f2, f1].conj() -
C["S8ddRR"][f4, f1, f2, f3].conj() / 4
-C["S8ddRR"][f4, f3, f2, f1].conj() / 2 / Nc,
'F'+ qqqq +'6p' : -C["S1ddRR"][f4, f1, f2, f3].conj() / 2 +
C["S8ddRR"][f4, f1, f2, f3].conj() / 4 / Nc
+ C["S8ddRR"][f4, f3, f2, f1].conj() / 2,
'F'+ qqqq +'7p' : -C["V8ddLR"][f3, f2, f1, f4],
'F'+ qqqq +'8p' : -2 * C["V1ddLR"][f3, f2, f1, f4]
+ C["V8ddLR"][f3, f2, f1, f4] / Nc,
'F'+ qqqq +'9p' : -C["S8ddRR"][f4, f1, f2, f3].conj() / 16,
'F'+ qqqq +'10p' : -C["S1ddRR"][f4, f1, f2, f3].conj() / 8 +
C["S8ddRR"][f4, f1, f2, f3].conj() / 16 / Nc
}
#case uuuu
classVuuuu = ['ucuu', 'cucc', 'cuuu', 'uccc']
if qqqq in classVuuuu:
f1 = uflav[qqqq[0]]
f2 = uflav[qqqq[1]]
f3 = uflav[qqqq[2]]
f4 = uflav[qqqq[3]]
return {
'F' + qqqq + '1' : C["VuuLL"][f3, f4, f1, f2],
'F' + qqqq + '2' : C["VuuLL"][f1, f4, f3, f2],
'F' + qqqq + '3' : C["V1uuLR"][f1, f2, f3, f4]
- C["V8uuLR"][f1, f2, f3, f4] / (2 * Nc),
'F' + qqqq + '4' : C["V8uuLR"][f1, f2, f3, f4] / 2,
'F' + qqqq + '5' : C["S1uuRR"][f3, f4, f1, f2]
- C["S8uuRR"][f3, f2, f1, f4] / 4
- C["S8uuRR"][f3, f4, f1, f2] / (2 * Nc),
'F' + qqqq + '6' : -C["S1uuRR"][f1, f4, f3, f2] / 2
+ C["S8uuRR"][f3, f2, f1, f4] / (4 * Nc)
+ C["S8uuRR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '7' : -C["V8uuLR"][f1, f4, f3, f2],
'F' + qqqq + '8' : -2 * C["V1uuLR"][f1, f4, f3, f2]
+ C["V8uuLR"][f1, f4, f3, f2] / Nc,
'F' + qqqq + '9' : -C["S8uuRR"][f3, f2, f1, f4] / 16,
'F' + qqqq + '10' : -C["S1uuRR"][f1, f4, f3, f2] / 8
+ C["S8uuRR"][f3, f2, f1, f4] / (16 * Nc),
'F'+ qqqq + '1p': C["VuuRR"][f3, f4, f1, f2],
'F' + qqqq + '2p': C["VuuRR"][f1, f3, f4, f2],
'F' + qqqq + '3p' : C["V1uuLR"][f3, f4, f1, f2]
- C["V8uuLR"][f3, f4, f1,f2] / (2 * Nc),
'F' + qqqq + '4p' : C["V8uuLR"][f3, f4, f1, f2] / 2,
'F' + qqqq + '5p' : C["S1uuRR"][f4, f3, f2, f1].conj() -
C["S8uuRR"][f4, f1, f2, f3].conj() / 4 -
C["S8uuRR"][f4, f3, f2, f1].conj() / 2 / Nc,
'F' + qqqq + '6p' : -C["S1uuRR"][f4, f1, f2, f3].conj() / 2 +
C["S8uuRR"][f4, f1, f2, f3].conj() / 4 / Nc
+ C["S8uuRR"][f4, f3, f2, f1].conj() / 2,
'F' + qqqq + '7p' : -C["V8uuLR"][f3, f2, f1, f4],
'F' + qqqq + '8p' : -2 * C["V1uuLR"][f3, f2, f1, f4]
+ C["V8uuLR"][f3, f2, f1, f4] / Nc,
'F' + qqqq + '9p' : -C["S8uuRR"][f4, f1, f2, f3].conj() / 16,
'F' + qqqq + '10p' : -C["S1uuRR"][f4, f1, f2, f3].conj() / 8 +
C["S8uuRR"][f4, f1, f2, f3].conj() / 16 / Nc
}
else:
raise ValueError(f"Case not implemented: {qqqq}")
|
5629a4e88996bd3ba30bd68cc8758b3d55abd093
| 3,650,270
|
from typing import Any
def get_parsed_args() -> Any:
"""Return Porcupine's arguments as returned by :func:`argparse.parse_args`."""
assert _parsed_args is not None
return _parsed_args
|
9e4dc1eadc3c68d8a8e5a5a885fecf7c0ec89856
| 3,650,271
|
def get_license_description(license_code):
"""
Gets the description of the given license code. For example, license code '1002' results in 'Accessory Garage'
:param license_code: The license code
:return: The license description
"""
global _cached_license_desc
return _cached_license_desc[license_code]
|
3de38be73d303036872b285c2dd7c0048ba5660f
| 3,650,272
|
import pickle
from getpass import getpass
from hashlib import blake2b
from cryptography.fernet import Fernet  # third-party 'cryptography' package
def db_keys_unlock(passphrase) -> bool:
"""Unlock secret key with pass phrase"""
global _secretkeyfile
try:
with open(_secretkeyfile, "rb") as f:
secretkey = pickle.load(f)
if not secretkey["locked"]:
print("Secret key file is already unlocked")
return True
if passphrase:
usepass = passphrase
else:
usepass = getpass("Enter pass phrase: ")
print("")
if usepass:
if secretkey["hash"] == blake2b(str.encode(usepass)).hexdigest():
k = Fernet(password_to_key(usepass))
secretkey["secret"] = k.decrypt(str.encode(secretkey["secret"])).decode()
secretkey["locked"] = False
db_keys_set(secretkey, False)
else:
print("Pass phrase did not match, secret key remains locked")
return False
except Exception:
print("Error locking secret key content")
return False
print("Secret key successfully unlocked")
return True
|
4d1af86143384ff6228ad086d92f797b7529c73e
| 3,650,273
|
def list_domains():
"""
Return a list of the salt_id names of all available Vagrant VMs on
this host without regard to the path where they are defined.
CLI Example:
.. code-block:: bash
salt '*' vagrant.list_domains --log-level=info
The log shows information about all known Vagrant environments
on this machine. This data is cached and may not be completely
up-to-date.
"""
vms = []
cmd = 'vagrant global-status'
reply = __salt__['cmd.shell'](cmd)
log.info('--->\n%s', reply)
for line in reply.split('\n'): # build a list of the text reply
tokens = line.strip().split()
try:
_ = int(tokens[0], 16) # valid id numbers are hexadecimal
except (ValueError, IndexError):
continue # skip lines without valid id numbers
machine = tokens[1]
cwd = tokens[-1]
name = get_machine_id(machine, cwd)
if name:
vms.append(name)
return vms
|
af04859c5d6e0cd2edb3d3cec88ebebd777c93d6
| 3,650,274
|
def get_old_options(cli, image):
""" Returns Dockerfile values for CMD and Entrypoint
"""
return {
'cmd': dockerapi.inspect_config(cli, image, 'Cmd'),
'entrypoint': dockerapi.inspect_config(cli, image, 'Entrypoint'),
}
|
eed75800ae3afdc99fdcd5c0f5dc36504d5db96c
| 3,650,275
|
def line_crops_and_labels(iam: IAM, split: str):
"""Load IAM line labels and regions, and load line image crops."""
crops = []
labels = []
for filename in iam.form_filenames:
if not iam.split_by_id[filename.stem] == split:
continue
image = util.read_image_pil(filename)
image = ImageOps.grayscale(image)
image = ImageOps.invert(image)
labels += iam.line_strings_by_id[filename.stem]
crops += [
image.crop([region[_] for _ in ["x1", "y1", "x2", "y2"]])
for region in iam.line_regions_by_id[filename.stem]
]
assert len(crops) == len(labels)
return crops, labels
|
f223ded3c2dc9254985ad450995f8dc598dc5411
| 3,650,276
|
def convert(chinese):
"""converts Chinese numbers to int
in: string
out: string
"""
numbers = {'零':0, '一':1, '二':2, '三':3, '四':4, '五':5, '六':6, '七':7, '八':8, '九':9, '壹':1, '贰':2, '叁':3, '肆':4, '伍':5, '陆':6, '柒':7, '捌':8, '玖':9, '两':2, '廿':20, '卅':30, '卌':40, '虚':50, '圆':60, '近':70, '枯':80, '无':90}
units = {'个':1, '十':10, '百':100, '千':1000, '万':10000, '亿':100000000,'万亿':1000000000000, '拾':10, '佰':100, '仟':1000}
number, pureNumber = 0, True
for i in range(len(chinese)):
if chinese[i] in units or chinese[i] in ['廿', '卅', '卌', '虚', '圆', '近', '枯', '无']:
pureNumber = False
break
if chinese[i] in numbers:
number = number * 10 + numbers[chinese[i]]
if pureNumber:
return number
number = 0
for i in range(len(chinese)):
if chinese[i] in numbers or chinese[i] == '十' and (i == 0 or chinese[i - 1] not in numbers or chinese[i - 1] == '零'):
base, currentUnit = 10 if chinese[i] == '十' and (i == 0 or chinese[i] == '十' and chinese[i - 1] not in numbers or chinese[i - 1] == '零') else numbers[chinese[i]], '个'
for j in range(i + 1, len(chinese)):
if chinese[j] in units:
if units[chinese[j]] >= units[currentUnit]:
base, currentUnit = base * units[chinese[j]], chinese[j]
number = number + base
return number
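
# Hypothetical usage sketch (not part of the original source).
if __name__ == "__main__":
    print(convert('三百二十一'))  # -> 321 (unit-based form)
    print(convert('一二三'))      # -> 123 (pure digit-style form)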
|
cf2ece895698e2d99fde815efa0339687eadda97
| 3,650,277
|
def computeZvector(idata, hue, control, features_to_eval):
"""
:param all_data: dataframe
:return:
"""
all_data = idata.copy()
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
m_indexes = list(all_data[hue].unique().astype('str'))
query_one = ""
for el in control:
if el in m_indexes:
query_one = query_one + hue + "==\'" + str(el) + "\'|"
else:
break
query_one = query_one[:-1] # remove last character
df_q = all_data.query(query_one).copy()
eps = 1e-15
# Compute average for each feature, per each treatment
avg_vec = pd.DataFrame()
for el in m_indexes:
data_calc = all_data.query(hue + "==\'" + str(el) + "\'").copy()
for col in data_calc.select_dtypes(include=numerics):
if col in features_to_eval:
avg_vec.loc[el, col] = data_calc[col].mean()
# Compute length of vector
all_data.loc[:, 'length'] = 0
for feature in features_to_eval:
all_data['length'] = all_data['length'] + all_data[feature] ** 2
all_data['length'] = np.sqrt(all_data['length'])
# Compute cosine
# Dot product of each vector per each mean v*w
all_data.loc[:, 'cosine'] = 0
for el in m_indexes:
for feature in features_to_eval:
all_data.loc[all_data['Gene'] == el, 'cosine'] = all_data.loc[all_data['Gene'] == el, 'cosine'] + \
all_data[all_data['Gene'] == el][feature] * avg_vec.loc[
el, feature]
# Norm of avg_vec
v_avg_norm = np.sqrt(np.sum(avg_vec ** 2, axis=1))
for el in m_indexes:
all_data.loc[all_data['Gene'] == el, 'cosine'] = all_data.loc[all_data['Gene'] == el, 'cosine'] / (
all_data.loc[all_data['Gene'] == el, 'length'] * v_avg_norm[el])
all_data['projection'] = all_data['length'] * all_data['cosine']
return all_data
|
d80e6a9fe754cb8558598c72ab2f076fab329750
| 3,650,278
|
def getjflag(job):
"""Returns flag if job in finished state"""
return 1 if job['jobstatus'] in ('finished', 'failed', 'cancelled', 'closed') else 0
|
bf0c0a85cb1af954d25f4350e55b9e3604cf7c79
| 3,650,279
|
import copy
def json_parse(ddict):
"""
https://github.com/arita37/mlmodels/blob/dev/mlmodels/dataset/test_json/test_functions.json
https://github.com/arita37/mlmodels/blob/dev/mlmodels/dataset/json/benchmark_timeseries/gluonts_m5.json
"deepar": {
"model_pars": {
"model_uri" : "model_gluon.gluonts_model",
"model_name" : "deepar",
"model_pars" : {
"prediction_length": 12,
"freq": "D",
"distr_output" : {"uri" : "gluonts.distribution.neg_binomial:NegativeBinomialOutput"},
"distr_output" : "uri::gluonts.distribution.neg_binomial:NegativeBinomialOutput",
"""
js = ddict
js2 = copy.deepcopy(js)
def parse2(d2):
if "uri" in d2:
# Be careful not to include heavy compute
return json_to_object(d2)
else:
return json_norm(d2)
for k, val in js.items():
if isinstance(val, dict):
js2[k] = parse2(val)
elif "uri::" in val: # Shortcut when nor argument
js2[k] = json_to_object({"uri": val.split("uri::")[-1]})
else:
js2[k] = json_norm_val(val)
return js2
|
30b037531ac129a2a597b146c935fe344566a547
| 3,650,280
|
def read_viz_icons(style='icomoon', fname='infinity.png'):
""" Read specific icon from specific style
Parameters
----------
style : str
Current icon style. Default is icomoon.
fname : str
Filename of icon. This should be found in folder HOME/.dipy/style/.
Default is infinity.png.
Returns
--------
path : str
Complete path of icon.
"""
folder = pjoin(dipy_home, 'icons', style)
return pjoin(folder, fname)
|
8d97ebb450b94dce5c4feb3f631cd9deebcdb1c1
| 3,650,281
|
def mock_config_entry() -> MockConfigEntry:
"""Return the default mocked config entry."""
return MockConfigEntry(
title="12345",
domain=DOMAIN,
data={CONF_API_KEY: "tskey-MOCK", CONF_SYSTEM_ID: 12345},
unique_id="12345",
)
|
cb2a5b8d7e84d1b825e79a9b4e3aebc8f8c60783
| 3,650,282
|
# assumed: the MNIST/transforms/DataLoader usage below matches the torchvision and torch APIs
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
def get_mnist_loader(batch_size, train, perm=0., Nparts=1, part=0, seed=0, taskid=0, pre_processed=True, **loader_kwargs):
"""Builds and returns Dataloader for MNIST and SVHN dataset."""
transform = transforms.Compose([
transforms.Grayscale(),
transforms.ToTensor(),
transforms.Normalize((0.0,), (1.0,)),
transforms.Lambda(lambda x: x.view([28,28]))])
dataset = datasets.MNIST(root='./data', download=True, transform=transform, train = train)
if perm>0:
permute_dataset(dataset, perm, seed=seed)
if Nparts>1:
partition_dataset(dataset, Nparts,part)
if pre_processed:
dataset = preprocess_dataset(dataset)
DL = DataLoaderPreProcessed
else:
DL = DataLoader
loader = DL(dataset=dataset,
batch_size=batch_size,
shuffle=train,
**loader_kwargs)
loader.taskid = taskid
loader.name = 'MNIST_{}'.format(taskid,part)
loader.short_name = 'MNIST'
return loader
|
2181ffa0abd4f1357ec7cc8cdf52b0eb86a2d13c
| 3,650,283
|
def read_data(filename):
"""
Reads orbital map file into a list
"""
data = []
    with open(filename, 'r') as f:
        for line in f:
            data.append(line.strip())
return data
|
7c2f5669735e39352b0b655425a70993baae32ef
| 3,650,284
|
from typing import Union
def _form_factor_pipi(
self, s: Union[float, npt.NDArray[np.float64]], imode: int = 1
) -> Union[complex, npt.NDArray[np.complex128]]:
"""
Compute the pi-pi-V form factor.
Parameters
----------
    s: Union[float, npt.NDArray[np.float64]]
Square of the center-of-mass energy in MeV.
imode: Optional[int]
Iso-spin channel. Default is 1.
Returns
-------
ff: Union[complex,npt.NDArray[np.complex128]]
Form factor from pi-pi-V.
"""
return __ff_pipi(
s * 1e-6, # Convert to GeV
self._ff_pipi_params,
self._gvuu,
self._gvdd,
)
|
92438400debb52bff6480791631e2b60043c758f
| 3,650,285
|
import os
import pandas as pd
def lecture():
"""
lecture()
    Reads the "tmdb_5000_credits.csv" and "tmdb_5000_movies.csv" files
    and converts them into pandas DataFrames.
Parameters
----------
None.
Returns
-------
credits, movies : [pandas.DataFrame, pandas.DataFrame]
        A list containing the credits DataFrame and the movies DataFrame.
"""
try:
credits = pd.read_csv(os.path.join("data","tmdb_5000_credits.csv"))
movies = pd.read_csv(os.path.join("data","tmdb_5000_movies.csv"))
print("Datos correctamente leídos")
return credits, movies
    except Exception:
        print("Failed to read the data files")
return None, None
|
710c66c98d9af344afa883c9efb551e9e68c991c
| 3,650,286
|
import re
import click
import dateutil.parser as dp  # assumed: 'dp' is dateutil's parser, matching the .parse() call below
def string_to_epoch(s):
"""
Convert argument string to epoch if possible
    If argument looks like int + "s,m,h,d" (i.e., 30d), we'll pass it as-is
since pushshift can accept this. Per docs, pushshift supports:
Epoch value or Integer + "s,m,h,d" (i.e. 30d for 30 days)
:param s: str
:return: int | str
"""
if s is not None:
s = s.strip()
if re.search('^[0-9]+[smhd]$', s):
return s
try:
s = dp.parse(s).timestamp()
s = int(s)
except ValueError:
raise click.BadParameter("could not convert argument to "
"a datetime: {}".format(s))
return s
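
# Hypothetical usage sketch (not part of the original source).
if __name__ == "__main__":
    print(string_to_epoch("30d"))         # -> "30d", passed through for pushshift
    print(string_to_epoch("2020-01-01"))  # -> epoch seconds (value depends on local timezone)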
|
b45db1d589cbe71eec5f11d53d2851dee258da8f
| 3,650,287
|
from numpy import array
from scipy.signal import cspline2d, sepfir2d
def spline_filter(Iin, lmbda=5.0):
"""Smoothing spline (cubic) filtering of a rank-2 array.
Filter an input data set, `Iin`, using a (cubic) smoothing spline of
fall-off `lmbda`.
"""
intype = Iin.dtype.char
hcol = array([1.0,4.0,1.0],'f')/6.0
if intype in ['F','D']:
Iin = Iin.astype('F')
ckr = cspline2d(Iin.real,lmbda)
cki = cspline2d(Iin.imag,lmbda)
outr = sepfir2d(ckr,hcol,hcol)
outi = sepfir2d(cki,hcol,hcol)
out = (outr + 1j*outi).astype(intype)
elif intype in ['f','d']:
ckr = cspline2d(Iin,lmbda)
out = sepfir2d(ckr, hcol, hcol)
out = out.astype(intype)
else:
raise TypeError("Invalid data type for Iin")
return out
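
# Hypothetical usage sketch (not part of the original source): smooth a small
# single-precision 2-D array (single precision matches the filter kernel above).
if __name__ == "__main__":
    import numpy as np
    noisy = np.random.rand(64, 64).astype('f')
    smoothed = spline_filter(noisy, lmbda=5.0)
    print(smoothed.shape, smoothed.dtype)  # -> (64, 64) float32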
|
ee606bdceaf974671ccf36b9d498204613c12f51
| 3,650,288
|
def _construct_aline_collections(alines, dtix=None):
"""construct arbitrary line collections
Parameters
----------
alines : sequence
sequences of segments, which are sequences of lines,
which are sequences of two or more points ( date[time], price ) or (x,y)
date[time] may be (a) pandas.to_datetime parseable string,
(b) pandas Timestamp, or
(c) python datetime.datetime or datetime.date
alines may also be a dict, containing
the following keys:
'alines' : the same as defined above: sequence of price, or dates, or segments
'colors' : colors for the above alines
'linestyle' : line types for the above alines
         'linewidths' : line widths for the above alines
dtix: date index for the x-axis, used for converting the dates when
x-values are 'evenly spaced integers' (as when skipping non-trading days)
Returns
-------
ret : list
lines collections
"""
if alines is None:
return None
if isinstance(alines,dict):
aconfig = _process_kwargs(alines, _valid_lines_kwargs())
alines = aconfig['alines']
else:
aconfig = _process_kwargs({}, _valid_lines_kwargs())
#print('aconfig=',aconfig)
#print('alines=',alines)
alines = _alines_validator(alines, returnStandardizedValue=True)
if alines is None:
raise ValueError('Unable to standardize alines value: '+str(alines))
alines = _convert_segment_dates(alines,dtix)
lw = aconfig['linewidths']
co = aconfig['colors']
ls = aconfig['linestyle']
al = aconfig['alpha']
lcollection = LineCollection(alines,colors=co,linewidths=lw,linestyles=ls,antialiaseds=(0,),alpha=al)
return lcollection
|
863fd8c6e8d0b1a39c5a79bca7a0eaa5b2204aea
| 3,650,289
|
def is_mergeable(*ts_or_tsn):
"""Check if all objects(FermionTensor or FermionTensorNetwork)
are part of the same FermionSpace
"""
    if len(ts_or_tsn) == 1 and isinstance(ts_or_tsn[0], (FermionTensor, FermionTensorNetwork)):
        return True
fs_lst = []
site_lst = []
for obj in ts_or_tsn:
if isinstance(obj, FermionTensor):
if obj.fermion_owner is None:
return False
hashval, fsobj, tid = obj.fermion_owner
fs_lst.append(hashval)
site_lst.append(fsobj()[tid][1])
elif isinstance(obj, FermionTensorNetwork):
fs_lst.append(hash(obj.fermion_space))
site_lst.extend(obj.filled_sites)
else:
raise TypeError("unable to find fermionspace")
return all([fs==fs_lst[0] for fs in fs_lst]) and len(set(site_lst)) == len(site_lst)
|
de5f4fc47874e328bcdd078e2bdf8d6f53d6d4e6
| 3,650,290
|
import urllib.parse
import http
def fetch_file(url, config):
"""
Fetch a file from a provider.
"""
# pylint: disable=fixme
# FIXME: the handled checking should be in each handler module (possibly handle_file(parsed_url,
# config) => bool)
parsed_url = urllib.parse.urlparse(url)
if parsed_url.scheme == 'github':
file_contents = github.fetch_file(parsed_url, config)
elif parsed_url.scheme == 'file' or (parsed_url.scheme == '' and parsed_url.netloc == ''):
purl = list(parsed_url)
purl[0] = 'file'
parsed_url = urllib.parse.ParseResult(*purl)
file_contents = file.fetch_file(parsed_url, config)
elif parsed_url.scheme in ('http', 'https'):
file_contents = http.fetch_file(parsed_url, config)
else:
raise NotImplementedError(f'Unknown fetch backend: {parsed_url.scheme}')
return file_contents
|
0be30bb953f3d0dedc36eaf89e4b7d0a6ac48ad9
| 3,650,291
|
def query_for_account(account_rec, region):
""" Performs the public ip query for the given account
    :param account_rec: Account record dict with 'accountNum' and 'alias' keys
    :param region: Region to query
    :return: list of dicts describing the public IPs found in the account
"""
ip_data = []
session = boto3.session.Session(region_name=region)
assume = rolesession.assume_crossact_audit_role(
session, account_rec['accountNum'], region)
if assume:
for ip_addr in assume.client('ec2').describe_addresses()['Addresses']:
ip_data.append(
dict(PublicIP=(ip_addr.get('PublicIp')),
InstanceId=(ip_addr.get('InstanceId')), # Prevents a crash
PrivateIP=(ip_addr.get('PrivateIpAddress')),
NetworkInterface=(ip_addr.get('NetworkInterfaceId')),
AccountNum=account_rec['accountNum'],
AccountAlias=(account_rec['alias'])))
for instance in assume.resource('ec2').instances.filter():
if instance.public_ip_address:
ip_data.append(
dict(InstanceId=(instance.instance_id),
PublicIP=(instance.public_ip_address),
PrivateIP=(instance.private_ip_address),
AccountNum=account_rec['accountNum'],
AccountAlias=(account_rec['alias'])))
else:
pass
return ip_data
|
61055939990175c6e2cb850ede7d448a261ccdff
| 3,650,292
|
from typing import List
def filter_list_of_dicts(list_of_dicts: list, **filters) -> List[dict]:
"""Filter a list of dicts by any given key-value pair.
Support simple logical operators like: '<,>,<=,>=,!'. Supports
filtering by providing a list value i.e. openJobsCount=[0, 1, 2].
"""
for key, value in filters.items():
filter_function = make_dict_filter(key, value)
list_of_dicts = list(filter(filter_function, list_of_dicts))
return list_of_dicts
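
# Hypothetical usage sketch (not part of the original source). filter_list_of_dicts relies on
# a make_dict_filter helper that is not shown here; a minimal stand-in supporting exact and
# list-membership matches is sketched below so the example runs.
def make_dict_filter(key, value):
    if isinstance(value, list):
        return lambda d: d.get(key) in value
    return lambda d: d.get(key) == value

jobs = [{"name": "a", "openJobsCount": 0}, {"name": "b", "openJobsCount": 3}]
print(filter_list_of_dicts(jobs, openJobsCount=[0, 1, 2]))
# -> [{'name': 'a', 'openJobsCount': 0}]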
|
f926b7c478400d3804d048ced823003e48fd5ef1
| 3,650,293
|
def construct_pos_line(elem, coor, tags):
"""
Do the opposite of the parse_pos_line
"""
line = "{elem} {x:.10f} {y:.10f} {z:.10f} {tags}"
return line.format(elem=elem, x=coor[0], y=coor[1], z=coor[2], tags=tags)
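
# Hypothetical usage sketch (not part of the original source).
print(construct_pos_line("Si", (0.0, 0.5, 0.25), "T T T"))
# -> "Si 0.0000000000 0.5000000000 0.2500000000 T T T"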
|
21ca509131c85a2c7bc24d00a28e7d4ea580a49a
| 3,650,294
|
def compute_pcs(predicts, labels, label_mapper, dataset):
"""
compute correctly predicted full spans. If cues and scopes are predicted jointly, convert cue labels to I/O labels depending on the
annotation scheme for the considered dataset
    :param predicts: list of predicted label sequences
    :param labels: list of gold label sequences
    :param label_mapper: mapping from label ids to label strings
    :param dataset: dataset name, used to decide how cue labels are converted
    :return: fraction of sequences whose full span is predicted correctly
    """
def trim_and_convert(predict, label, label_mapper, dataset):
temp_1 = []
temp_2 = []
for j, m in enumerate(predict):
if label_mapper[label[j]] != 'X' and label_mapper[label[j]] != 'CLS' and label_mapper[label[j]] != 'SEP':
temp_1.append(label_mapper[label[j]])
temp_2.append(label_mapper[m])
if 'joint' in dataset:
            if cue_in_scope[dataset] is True:
                replacement = 'I'
            else:
                replacement = 'O'
for j, m in enumerate(temp_1):
if m == 'C':
temp_1[j] = replacement
for j, m in enumerate(temp_2):
if m == 'C':
temp_2[j] = replacement
return temp_2, temp_1
tp = 0.
for predict, label in zip(predicts, labels):
predict, label = trim_and_convert(predict, label, label_mapper,dataset)
if predict == label:
tp += 1
return tp/len(predicts)
|
5f046c1599617ad7620ea9a618f85f02dd93e28c
| 3,650,295
|
def pentomino():
"""
Main pentomino routine
@return {string} solution as rectangles separated by a blank line
"""
return _stringify(
_pent_wrapper1(tree_main_builder())(rect_gen_boards()))
|
07e448efdbfe5cb43ce943f33f24a7887878001f
| 3,650,296
|
def do_login(request, username, password):
""" Check credentials and log in """
if request.access.verify_user(username, password):
request.response.headers.extend(remember(request, username))
return {"next": request.app_url()}
else:
return HTTPForbidden()
|
f7c076c6f4a6ac51bf5a3ea39116166002ce1833
| 3,650,297
|
from tokenize import Token
import re
def _interpolate(format1):
"""
Takes a format1 string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
def matchorfail(text, pos):
tokenprog = re.compile(Token)
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = format1.find("$", pos)
if dollar < 0:
break
nextchar = format1[dollar + 1]
if nextchar == "{":
chunks.append((0, format1[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(format1, pos)
tstart, tend = match.regs[3]
token = format1[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, format1[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, format1[pos:dollar]))
match, pos = matchorfail(format1, dollar + 1)
while pos < len(format1):
if format1[pos] == "." and \
pos + 1 < len(format1) and format1[pos + 1] in namechars:
match, pos = matchorfail(format1, pos + 1)
elif format1[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(format1, pos)
tstart, tend = match.regs[3]
token = format1[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, format1[dollar + 1:pos]))
else:
chunks.append((0, format1[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(format1):
chunks.append((0, format1[pos:]))
return chunks
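
# Hypothetical usage sketch (not part of the original source): split a template into
# (literal, to-be-evaluated) chunks.
print(_interpolate("Hello $name, you have ${count + 1} messages"))
# -> [(0, 'Hello '), (1, 'name'), (0, ', you have '), (1, 'count + 1'), (0, ' messages')]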
|
9af06b91f0ad2e15fd7479ac2e1dedc5443b6e34
| 3,650,298
|
def approxIndex(iterable, item, threshold):
"""Same as the python index() function but with a threshold from wich values are considerated equal."""
for i, iterableItem in rev_enumerate(iterable):
if abs(iterableItem - item) < threshold:
return i
return None
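
# Minimal sketch of the helper the function above assumes (not part of the original
# source): rev_enumerate is taken to yield (index, item) pairs from last to first.
def rev_enumerate(seq):
    """Yield (index, item) pairs of seq in reverse order."""
    for i in range(len(seq) - 1, -1, -1):
        yield i, seq[i]

print(approxIndex([1.0, 2.0, 3.01], 3.0, 0.05))  # -> 2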
|
45ec7b816674231a5efa8a559e9f9416a81987f5
| 3,650,299
|