content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def xirrcal(cftable, trades, date, startdate=None, guess=0.01):
    """
    calculate the xirr rate

    :param cftable: cftable (pd.Dataframe) with date and cash column
    :param trades: list [trade1, ...], every item is an trade object,
        whose shares would be sold out virtually
    :param date: string of date or datetime object,
        the date when virtually all holding positions being sold
    :param startdate: string of date or datetime object, optional;
        when given, only cash flows strictly after this date are considered,
        and the holdings' value at ``startdate`` is treated as the initial
        (negative) investment
    :param guess: floating number, a guess at the xirr rate solution to be used
        as a starting point for the numerical solution
    :returns: the IRR as a single floating number
    """
    date = convert_date(date)
    # restrict to cash flows on or before the virtual sell-out date
    partcftb = cftable[cftable["date"] <= date]
    if len(partcftb) == 0:
        return 0
    if not startdate:
        cashflow = [(row["date"], row["cash"]) for i, row in partcftb.iterrows()]
    else:
        if not isinstance(startdate, dt.datetime):
            # accept "YYYY-MM-DD" / "YYYY/MM/DD" / "YYYYMMDD" strings
            startdate = dt.datetime.strptime(
                startdate.replace("-", "").replace("/", ""), "%Y%m%d"
            )
        # the value of all holdings at startdate counts as the initial outlay
        start_cash = 0
        for fund in trades:
            start_cash += fund.briefdailyreport(startdate).get("currentvalue", 0)
        cashflow = [(startdate, -start_cash)]
        partcftb = partcftb[partcftb["date"] > startdate]
        cashflow.extend([(row["date"], row["cash"]) for i, row in partcftb.iterrows()])
    # proceeds from virtually redeeming every position at `date`
    rede = 0
    for fund in trades:
        if not isinstance(fund, itrade):
            # off-exchange fund: apply the redemption (shuhui) rule to the
            # current share count, using the latest rem-table snapshot
            partremtb = fund.remtable[fund.remtable["date"] <= date]
            if len(partremtb) > 0:
                rem = partremtb.iloc[-1]["rem"]
            else:
                rem = []
            rede += fund.aim.shuhui(
                fund.briefdailyreport(date).get("currentshare", 0), date, rem
            )[1]
        else:  # 场内交易 (exchange-traded): use current market value directly
            rede += fund.briefdailyreport(date).get("currentvalue", 0)
    cashflow.append((date, rede))
    return xirr(cashflow, guess)
def calc_synch_kappa(b, ne, delta, sinth, nu, E0=1.):
    """Calculate the relativistic synchrotron absorption coefficient κ_ν.

    This is Dulk (1985) equation 41, a fitting function assuming a
    power-law electron population.

    Parameters
    ----------
    b : Magnetic field strength in Gauss.
    ne : Density of electrons per cubic centimeter with energies above E0.
    delta : Power-law index of the electron energy distribution,
        ``n(E) ~ E^(-delta)``; valid for ``2 <~ delta <~ 5``.
    sinth : Sine of the angle between the line of sight and the magnetic
        field direction.
    nu : Frequency at which to evaluate the coefficient, in Hz.
    E0 : Minimum electron energy to consider, in MeV (default 1, so these
        functions can be called identically to the gyrosynchrotron ones).

    Returns
    -------
    The absorption coefficient in ``cm^-1``. No complaints are raised if
    the equation is used outside its range of validity.
    """
    harmonic = nu / calc_nu_b(b)
    # same factor ordering as Dulk's fit to preserve float behaviour
    prefactor = ne / b * 8.7e-12 * (delta - 1) / sinth
    energy_term = E0**(delta - 1)
    spectral_term = (8.7e-2 * harmonic / sinth)**(-0.5 * (delta + 4))
    return prefactor * energy_term * spectral_term
def random_rotation():
    """Generate a uniformly distributed 3D random rotation matrix.

    Combines a random rotation about the z-axis with a Householder
    reflection of a random point on the sphere, negated so that the
    result is a proper rotation (Arvo's method).

    Returns:
        np.matrix: A 3D rotation matrix.
    """
    x1, x2, x3 = np.random.rand(3)
    theta = 2 * np.pi * x1
    # rotation about the z-axis by angle theta
    z_rotation = np.matrix([[np.cos(theta), np.sin(theta), 0],
                            [-np.sin(theta), np.cos(theta), 0],
                            [0, 0, 1]])
    phi = 2 * np.pi * x2
    radial = np.sqrt(x3)
    # random vector used to build a Householder reflection
    v = np.matrix([[np.cos(phi) * radial],
                   [np.sin(phi) * radial],
                   [np.sqrt(1 - x3)]])
    householder = np.eye(3) - 2 * v * v.T
    return -householder * z_rotation
def outdir_project(outdir, project_mode, pd_samples, mode):
    """Map each sample name to the output directory it should use.

    :param outdir: base output directory
    :param project_mode: when truthy, build a data/<sample>/<mode> hierarchy;
        otherwise every sample folder sits directly under outdir
    :param pd_samples: dataframe with a 'new_name' column identifying samples
    :param mode: analysis mode name used for the per-sample subfolder
    :return: dict mapping sample name -> created directory
    """
    outdirs = {}
    # one entry per distinct sample name
    for sample_name, _group in pd_samples.groupby(["new_name"]):
        if project_mode:
            # nested layout: data/<sample>/<mode>
            data_dir = create_subfolder('data', outdir)
            per_sample = create_subfolder(sample_name, data_dir)
            outdirs[sample_name] = create_subfolder(mode, per_sample)
        else:
            # flat layout: all samples share the same parent folder
            outdirs[sample_name] = create_subfolder(sample_name, outdir)
    return outdirs
def _get_percentage_bid_offer(df_with_positions, day, daily_spread_percent_override):
    """Return the daily bid/offer spread percentage used in computation.

    The explicit override wins when provided; otherwise the value is looked
    up in the positions dataframe, defaulting to 0.0 when it is absent.
    """
    if daily_spread_percent_override is not None:
        return daily_spread_percent_override
    try:
        return df_with_positions[PandasEnum.BID_OFFER_SPREAD.value][day]
    except (KeyError, IndexError):
        # no spread recorded for this day
        return 0.0
def getChoice(options: list):
    """Print a numbered menu of *options* and return the raw user input.

    Only for Windows and MacOS.
    """
    print("操作選項")
    # menu entries are numbered starting from 1
    for index, option in enumerate(options, start=1):
        print(f"{index}. {option}")
    selection = input(': ')
    clearScene()
    return selection
def label_accuracy_score(label_trues, label_preds, n_class):
    """Returns accuracy score evaluation result.

    - overall accuracy
    - mean accuracy
    - mean IU
    - fwavacc
    """
    # accumulate the confusion matrix over all (truth, prediction) pairs
    confusion = np.zeros((n_class, n_class))
    for truth, prediction in zip(label_trues, label_preds):
        confusion += fast_hist(truth.flatten(), prediction.flatten(), n_class)
    diag = np.diag(confusion)
    acc = diag.sum() / confusion.sum()
    # per-class accuracy, ignoring classes with no ground-truth pixels (NaN)
    acc_cls = np.nanmean(diag / confusion.sum(axis=1))
    # intersection over union per class
    iu = diag / (confusion.sum(axis=1) + confusion.sum(axis=0) - diag)
    mean_iu = np.nanmean(iu)
    # frequency-weighted IU over classes that actually occur
    freq = confusion.sum(axis=1) / confusion.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc
import os
import json
def get_storcli_dall_show(c):
    """Load controller data from the bundled storcli JSON resource.

    :param c: unused (kept for interface compatibility)
    :return: list of per-controller response payloads
    """
    del c
    resource = os.path.join(os.path.dirname(__file__),
                            '../resources/storcli_dall_show.json')
    with open(resource) as fp:
        payload = json.load(fp)
    # Account for some crazy JSON schema: the payload is nested twice
    # under identical 'Response Data' keys.
    return [controller['Response Data']['Response Data']
            for controller in payload['Controllers']]
def Update(name, notification_emails, enrolled_services, update_mask):
    """Update the access approval settings for a resource.

    Args:
      name: the settings resource name (e.g. projects/123/accessApprovalSettings)
      notification_emails: list of email addresses
      enrolled_services: list of services
      update_mask: which fields to update

    Returns: updated settings
    """
    client = apis.GetClientInstance('accessapproval', 'v1')
    msgs = apis.GetMessagesModule('accessapproval', 'v1')
    enrolled = [msgs.EnrolledService(cloudProduct=s) for s in enrolled_services]
    # only attach enrolledServices when at least one service was given
    settings_kwargs = dict(name=name, notificationEmails=notification_emails)
    if enrolled:
        settings_kwargs['enrolledServices'] = enrolled
    settings = msgs.AccessApprovalSettings(**settings_kwargs)
    # dispatch on the resource hierarchy encoded in the name
    if 'organizations/' in name:
        req = msgs.AccessapprovalOrganizationsUpdateAccessApprovalSettingsRequest(
            name=name,
            accessApprovalSettings=settings,
            updateMask=update_mask)
        return client.organizations.UpdateAccessApprovalSettings(req)
    if 'folders/' in name:
        req = msgs.AccessapprovalFoldersUpdateAccessApprovalSettingsRequest(
            name=name,
            accessApprovalSettings=settings,
            updateMask=update_mask)
        return client.folders.UpdateAccessApprovalSettings(req)
    req = msgs.AccessapprovalProjectsUpdateAccessApprovalSettingsRequest(
        name=name,
        accessApprovalSettings=settings,
        updateMask=update_mask)
    return client.projects.UpdateAccessApprovalSettings(req)
from datetime import datetime
def training(request):
    """Renders the training page."""
    assert isinstance(request, HttpRequest)
    context = {
        'title': 'Training',
        'year': datetime.now().year,
    }
    return render(request, 'magic/training.html', context)
def MOP(
    directed=False, preprocess="auto", load_nodes=True, load_node_types=True,
    load_edge_weights=True, auto_enable_tradeoffs=True,
    sort_tmp_dir=None, verbose=2, cache=True, cache_path=None,
    cache_sys_var="GRAPH_CACHE_DIR", version="2022-02-01", **kwargs
) -> Graph:
    """Return MOP graph (retrieved from the kgobo repository).

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges
    cache_path = None
        Path to store graphs
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2022-02-01"
        Version to retrieve
        The available versions are:
        - 2022-05-11
        - 2014-09-03
        - 2022-02-01
    """
    # Note: the retriever object is constructed and then immediately
    # called; kwargs is forwarded as a single dict positional argument.
    return AutomaticallyRetrievedGraph(
        "MOP", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
        cache_path, cache_sys_var, kwargs
    )()
import warnings
def stetson_sharpness(temp, middle, mask, d):
    """Compute the Stetson sharpness statistic.

    The central pixel is excluded from the masked background mean, and the
    central excess over that mean is normalised by *d*.
    """
    neighbourhood = np.array(mask)  # copy so the caller's mask is untouched
    neighbourhood[middle, middle] = 0
    background = np.sum(neighbourhood * temp) / np.sum(neighbourhood)
    sharp = temp[middle, middle] - background
    with warnings.catch_warnings():
        # d can be zero/NaN; silence the divide warning like the original
        warnings.simplefilter('ignore', RuntimeWarning)
        sharp /= d
    return sharp
def add_variant_to_variant_set(prim, variant_set_name, variant_name):
    """
    Add *variant_name* to the named variant set on *prim*.

    :param prim: Usd.Prim
    :param variant_set_name: str
    :param variant_name: str
    :return: Usd.Variant, or None when the variant set does not exist
    """
    variant_set = get_variant_set(prim, variant_set_name)
    if variant_set:
        return variant_set.AddVariant(variant_name)
    return None
def load_data(messages_filepath, categories_filepath):
    """
    Load and join the disaster messages and categories CSV files.

    Args:
        messages_filepath: csv file with the disaster messages
        categories_filepath: csv file with the categories for disaster messages
    Returns: a dataframe with the two inputs inner-joined on ``id``
    """
    messages = pd.read_csv(messages_filepath)
    categories = pd.read_csv(categories_filepath)
    return messages.merge(categories, how="inner", on=["id"])
def _cmd(cmd):
    """Run *cmd* through the salt execution module and return its stdout.

    Raises CommandExecutionError with the combined output when the command
    exits with a non-zero return code.
    """
    outcome = __salt__["cmd.run_all"](cmd)
    if not outcome["retcode"]:
        return outcome["stdout"]
    raise CommandExecutionError(outcome["stdout"] + outcome["stderr"])
from typing import Union
from typing import Tuple
from typing import Optional
def min(
    x: Union[ivy.Array, ivy.NativeArray],
    axis: Union[int, Tuple[int]] = None,
    keepdims: bool = False,
    out: Optional[Union[ivy.Array, ivy.NativeArray]] = None,
) -> ivy.Array:
    """Calculates the minimum value of the input array x.

    .. note::
        When the number of elements over which to compute the minimum value is zero, the
        minimum value is implementation-defined. Specification-compliant libraries may
        choose to raise an error, return a sentinel value (e.g., if x is a floating-point
        input array, return NaN), or return the maximum possible value for the input array x
        data type (e.g., if x is a floating-point array, return +infinity).

    **Special Cases**

    For floating-point operands,
    If x_i is NaN, the minimum value is NaN (i.e., NaN values propagate).

    Parameters
    ----------
    x
        Input array containing elements whose minimum is to be computed.
    axis
        axis or axes along which minimum values must be computed. By default, the
        minimum value must be computed over the entire array. If a tuple of integers,
        minimum values must be computed over multiple axes. Default: None.
    keepdims
        optional boolean, if True, the reduced axes (dimensions) must be included in the
        result as singleton dimensions, and, accordingly, the result must be compatible
        with the input array (see Broadcasting). Otherwise, if False, the reduced axes
        (dimensions) must not be included in the result. Default: False.
    out
        optional output array, for writing the result to.

    Returns
    -------
    ret
        if the minimum value was computed over the entire array, a zero-dimensional
        array containing the minimum value; otherwise, a non-zero-dimensional array
        containing the minimum values. The returned array must have the same data type
        as x.
    """
    # Delegate to whichever backend framework is currently active.
    return _cur_framework.min(x, axis, keepdims, out)
def bkg_3d():
    """Example with simple values to test evaluate"""
    energy_axis = MapAxis.from_energy_edges([0.1, 10, 1000] * u.TeV)
    lon_axis = MapAxis.from_edges([0, 1, 2, 3] * u.deg, name="fov_lon")
    lat_axis = MapAxis.from_edges([0, 1, 2, 3] * u.deg, name="fov_lat")
    # Axis order is (energy, fov_lon, fov_lat); one cell gets a large value
    # so interpolation effects are easy to spot.
    values = np.ones((2, 3, 3))
    values[1, 1, 1] = 100
    return Background3D(
        axes=[energy_axis, lon_axis, lat_axis],
        data=values,
        unit="s-1 GeV-1 sr-1",
    )
import requests
import pickle
def pd_read_pickle(fname):
    """
    Read the :class:`.PupilData`-object `pdobj` from file using :mod:`pickle`.

    Parameters
    ----------
    fname: str
        filename or URL to load data from

    Returns
    -------
    pdobj: :class:`.PupilData`
        loaded dataset

    Raises
    ------
    IOError
        if `fname` is a URL and the download fails. (Previously a failed
        download fell through and raised a confusing UnboundLocalError.)
    """
    if fname.startswith("http"):
        # try loading from URL
        res = requests.get(fname)
        if res.status_code != 200:
            raise IOError(
                "could not download %s (HTTP status %d)" % (fname, res.status_code))
        pdobj = pickle.loads(res.content)
    else:
        with open(fname, 'rb') as f:
            pdobj = pickle.load(f)
    return pdobj
def checkLevel(n):
    """
    Check if a level, identified by its number, exists.

    return:
        - Boolean
    call example:
        checkLevel(1)
    """
    target = "natas" + str(n)
    # short-circuits as soon as a matching level name is found
    return any(level["name"] == target for level in getLevelsList())
import os
import re
def generatePSSMProfile(fastas, outDir, blastpgp, db):
    """
    Generate PSSM files by running the blastpgp program (NCBI blast-2.2.18).

    Parameters
    ----------
    fastas : list
        (name, sequence) pairs of protein sequences in fasta order.
    outDir : string
        directory where the generated ``<name>.pssm`` files are written.
    blastpgp : string
        the path of the blastpgp program.
    db : string
        the uniref50 database, formatted by blast's 'formatdb' program.

    Returns
    -------
    string
        outDir, the directory holding the predicted PSSM profiles.
    """
    # exist_ok avoids the race between the existence check and mkdir,
    # and also creates missing parent directories.
    os.makedirs(outDir, exist_ok=True)
    for fasta in fastas:
        # raw string: '\|' in a plain literal is an invalid escape sequence
        name, sequence = re.sub(r'\|', '', fasta[0]), fasta[1]
        query_file = name + '.txt'
        with open(query_file, 'w') as f:
            f.write('>' + name + '\n' + sequence + '\n')
        # NOTE: the command runs through the shell; `name` comes from the
        # fasta headers, so only run this on trusted input.
        myCmd = blastpgp + ' -i ' + query_file + ' -d ' + db + ' -b 0 -v 5000 -j 3 -h 0.001 -Q ' + outDir + '/' + name + '.pssm'
        print('Doing psiblast for protein: ' + name)
        os.system(myCmd)
        os.remove(query_file)
    return outDir
def get_payload(dataset, site_code, year):
    """Return the payload needed for a POST request to get EANET csv files.

    Parameters
    ----------
    dataset : str
        one of "wet_deposition", "dry_deposition_auto",
        "dry_deposition_filter_pack" or "dry_deposition_passive_sampler".
    site_code : str
        EANET station code; the first two letters are the ISO country code
        (KH Cambodia, CN China, ID Indonesia, JP Japan, KR Korea, LA Laos,
        MY Malaysia, MN Mongolia, MM Myanmar, PH Philippines, RU Russia,
        TH Thailand, VN Viet Nam).
    year : int or str
        year of the data to request.

    Returns
    -------
    dict
        form payload for the download request.

    Raises
    ------
    ValueError
        for an unknown dataset name. (Previously an unknown name left
        ``item_code`` unassigned and raised a confusing NameError.)
    """
    item_codes = {
        "wet_deposition": 1,
        "dry_deposition_auto": 2,
        "dry_deposition_filter_pack": 3,
        "dry_deposition_passive_sampler": 4,
    }
    try:
        item_code = item_codes[dataset]
    except KeyError:
        raise ValueError("unknown dataset: %r" % (dataset,))
    # the two first letters of the station code are the country code
    country_code = site_code[0:2]
    return {
        "mode": "download",
        "siteType": "1",
        "countryCd": country_code,
        "year": year,
        "siteCd": site_code,
        "itemCd": item_code,
    }
def safe_encode(s, coding='utf-8', errors='surrogateescape'):
    """Encode *s* (str) to bytes, round-tripping "invalid" bytes.

    The default ``surrogateescape`` handler lets bytes that were decoded
    with the same handler survive an encode/decode round trip intact.
    """
    return s.encode(encoding=coding, errors=errors)
def group_required(*group_names):
    """
    Requires user membership in at least one of the groups passed in.
    Superusers always pass the check.
    """
    def _in_groups(user):
        if not user.is_authenticated:
            return False
        return user.is_superuser or bool(user.groups.filter(name__in=group_names))
    return user_passes_test(_in_groups)
def initialize_service(storage_socket, logger, service_input, tag=None, priority=None):
    """Instantiate a service from an API call.

    Parameters
    ----------
    storage_socket : StorageSocket
        A StorageSocket to the currently active database
    logger
        A logger for use by the service
    service_input
        The service to be initialized.
    tag : Optional
        Optional tag to use with the service. Defaults to None
    priority :
        The priority of the service.

    Returns
    -------
    Service
        The instantiated service
    """
    # the procedure name selects the concrete service class
    service_class = _service_chooser(service_input.procedure)
    return service_class.initialize_from_api(
        storage_socket, logger, service_input, tag=tag, priority=priority)
import collections
def extract_stats(output):
    """Extract status-code counts from `git status` output.

    Each non-blank line's first whitespace-separated token (e.g. ``M``,
    ``??``) is counted.

    Returns
    -------
    collections.Counter
        mapping of status token -> number of occurrences
    """
    lines = output.splitlines()
    # skip blank lines, which would make line.split()[0] raise IndexError
    return collections.Counter(
        line.split()[0] for line in lines if line.strip()
    )
import argparse
import sys
def initParser():
    """Build the command-line parser for the SDF-to-CSV converter."""
    arg_parser = argparse.ArgumentParser(
        description='Convert SDF file to CSV',
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    # optional behaviour switches
    arg_parser.add_argument('--key', '-k', metavar='keyCol', default=None, dest='keyCol')
    arg_parser.add_argument('--chiral', default=False, action='store_true', dest='useChirality')
    arg_parser.add_argument('--smilesCol', metavar='smilesCol', default='')
    # positional input file and optional output stream (defaults to stdout)
    arg_parser.add_argument('inFilename', metavar='inFile.sdf', type=existingFile)
    arg_parser.add_argument('outF', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
    return arg_parser
def save_csv(filename, shape_features, centroid_features, label_features=None,
             mode='w'):
    """
    Create and save a .csv file containing all the features (shapes,
    centroids and labels).

    Parameters
    ----------
    filename : string
        full name (path and name) of the .csv file
    shape_features : 2d nd array of scalars
        one column per shape (linked to a kernel filter), one row per ROI
    centroid_features : 2d nd array of scalars
        centroid of the image (one row per ROI when labels are provided);
        columns are 'cyear', 'cmonth', 'chour', 'cminute', 'csecond', 'cfreq'
    label_features : 2d nd array of integers and strings, optional (None)
        column 0 is 'labelID', column 1 is 'labelName'
    mode : string, optional, default is 'w'
        Python write mode: 'w' truncates/creates the file and writes from
        the beginning; 'a' creates the file if needed and appends at the end.

    Returns
    -------
    table : dataframe (Pandas)
        all the features extracted from the spectrogram. Keys are
        {'labelID', 'labelName', 'cyear', 'cmonth', 'cday', 'chour', 'cmin',
        'csecond', 'cfreq', 'shp1', 'shp2', ..., 'shpn'}
    """
    table = create_csv(shape_features, centroid_features, label_features)
    table.to_csv(path_or_buf=filename, sep=',', mode=mode, header=True,
                 index=False)
    return table
from typing import Optional
from typing import Union
from typing import Pattern
def turn_connect(start: Port, end: Port, radius: float, radius_end: Optional[float] = None, euler: float = 0,
                 resolution: int = DEFAULT_RESOLUTION, include_width: bool = True) -> Union[Pattern, Curve]:
    """Turn connect.

    Args:
        start: Start port
        end: End port
        radius: Start turn effective radius
        radius_end: End turn effective radius (use :code:`radius` if :code:`None`)
        euler: Euler path contribution (see :code:`turn` method).
        resolution: Number of evaluations for the turns.
        include_width: Whether to include the width (cubic taper) of the turn connect

    Returns:
        A path connecting the start and end port using a turn-straight-turn approach
    """
    start_r = radius
    end_r = start_r if radius_end is None else radius_end
    # solve for the two turn angles and the straight length joining the
    # flipped copies of the ports (flipping orients them along the path)
    angles, length = _turn_connect_angle_solve(start.copy.flip(), end.copy.flip(), start_r, end_r)
    # a turn whose angle is a multiple of 360 degrees is omitted entirely
    curve = link(turn(start_r, angles[0], euler, resolution) if angles[0] % 360 != 0 else None,
                 length, turn(end_r, angles[1], euler, resolution) if angles[1] % 360 != 0 else None).to(start)
    # either a width-carrying path (cubic taper from the start width) or the bare curve
    return curve.path(start.w) if include_width else curve
import base64
import struct
import hmac
import hashlib
import time
def GoogleAuth(key):
    """
    Google authenticator (TOTP) verification code.

    TOTP (RFC 6238) is HOTP (RFC 4226) seeded with the current 30-second
    time step — the same scheme the Google Authenticator app uses.

    :param key: base32-encoded shared secret (spaces allowed)
    :return: 6-digit code as a zero-padded string
    """
    # normalize the secret: strip spaces, pad with '=' to a multiple of 8
    secret = key.strip().replace(' ', '')
    if len(secret) % 8 != 0:
        secret += '=' * (8 - len(secret) % 8)
    # current 30-second interval counter (the TOTP time step)
    counter = int(time.time()) // 30
    digest = bytearray(
        hmac.new(base64.b32decode(secret, True),
                 struct.pack(">Q", counter), hashlib.sha1).digest())
    # dynamic truncation (RFC 4226 section 5.3)
    offset = digest[19] & 15
    code = str((struct.unpack(">I", digest[offset:offset + 4])[0] & 0x7fffffff) % 1000000)
    # left-pad with zeros to six digits
    return code.zfill(6)
import tempfile
import os
def compress_image(path, thresh=0.2):
    """Compress an image file when it is larger than ~200 kB.

    The resized copy is written to the system temp directory and its path
    returned; otherwise the original path is returned unchanged. GIF files
    are never compressed.

    :param path: path of the image file
    :param thresh: scale factor for the resize target edge
        (target edge length is ``thresh * 4096`` pixels)
    :return: path of the (possibly compressed) image
    """
    file_stat = os.stat(path)
    basename = os.path.basename(path)
    file_type = basename.split('.')[-1]
    # dont compress gif file
    if file_type == 'gif':
        return path
    file_size = file_stat.st_size / 1024.**2  # file size in MiB
    # if file is larger than 200k
    # NOTE(review): the 0.2 size threshold is hard-coded here and does NOT
    # use the `thresh` parameter, which only controls the resize target —
    # confirm whether that is intended.
    if file_size > 0.2:
        print('Image `{}` is larger than 200k({:.2}m), need to compress.'.format(path, file_size))
        temp_dir = tempfile.gettempdir()
        img = Image.open(path)
        H, W = img.size
        # NOTE(review): PIL's Image.size is (width, height), so `H` actually
        # holds the width here; combined with the (h, w) tuple passed to
        # resize() below this looks like a swapped-axes bug — confirm.
        if H > W:
            w = int(thresh * 2048 * 2)
            h = int(H * w / W)
        else:
            h = int(thresh * 2048 * 2)
            w = int(W * h / H)
        new_path = os.path.join(temp_dir, 'compress_' + basename)
        img.resize((h, w)).save(new_path)
        return new_path
    else:
        return path
def adjust_opts(in_opts, config):
    """Establish JVM opts, adjusting memory for the context if needed.

    This allows using less or more memory for highly parallel or multicore
    supporting processes, respectively.
    """
    memory_adjust = config["algorithm"].get("memory_adjust", {})
    decrease = memory_adjust.get("direction") == "decrease"
    adjusted = []
    for opt in in_opts:
        # -Xmx is always rescaled; -Xms only when shrinking memory
        if opt.startswith("-Xmx") or (opt.startswith("-Xms") and decrease):
            flag, value = opt[:4], opt[4:]
            new_value = adjust_memory(value,
                                      memory_adjust.get("magnitude", 1),
                                      memory_adjust.get("direction"),
                                      maximum=memory_adjust.get("maximum"))
            opt = "{arg}{val}".format(arg=flag, val=new_value)
        adjusted.append(opt)
    return adjusted
import os
def load_images(image_name_to_label):
    """
    Returns (images, labels), where each image is from a file name
    present in the image_name_to_label dictionary.

    Images come from the three class sub-directories (COVID-19, Normal,
    ViralPneumonia) and, when LOAD_ALL_IMAGES is set, from the top-level
    image directory as well.
    """
    images = []
    labels = []
    image_names = os.listdir(DEFAULT_IMG_PATH_EDITED)
    # Remove directories
    image_names.remove("COVID-19")
    image_names.remove("Normal")
    image_names.remove("ViralPneumonia")
    # Load images from specific image directories (COVID-19, normal, viral pneumonia)
    def load_directory(directory):
        notifier.send(" Loading from directory: " + directory + "...")
        directory_path = DEFAULT_IMG_PATH_EDITED + os.sep + directory
        directory_image_names = os.listdir(directory_path)
        for i, image_name in enumerate(directory_image_names):
            # lookup keys are "<directory>/<base name>", lowercased, no spaces
            base_image_name = get_base_image_name(image_name)
            query_name = directory + "/" + base_image_name
            query_name = query_name.lower().replace(" ", "")
            if query_name in image_name_to_label:
                print(f" {i / len(directory_image_names) * 100}% - [{image_name}]")
                image_path = directory_path + os.sep + image_name
                image = get_processed_image(image_path)
                images.append(image)
                labels.append(image_name_to_label[query_name])
    load_directory("COVID-19")
    load_directory("Normal")
    load_directory("ViralPneumonia")
    # Load images from default directory
    # NOTE(review): unlike load_directory above, this lookup uses the raw
    # base name (not lowercased / directory-prefixed) — confirm both key
    # formats are present in image_name_to_label.
    if LOAD_ALL_IMAGES:
        notifier.send(" Loading from directory: default...")
        for i, image_name in enumerate(image_names):
            base_image_name = get_base_image_name(image_name)
            if base_image_name in image_name_to_label:
                print(f" {i / len(image_names) * 100}% - [{image_name}]")
                image_path = DEFAULT_IMG_PATH_EDITED + os.sep + image_name
                image = get_processed_image(image_path)
                images.append(image)
                labels.append(image_name_to_label[base_image_name])
    return images, labels
import binascii
def from_address(text, v4_origin=ipv4_reverse_domain,
                 v6_origin=ipv6_reverse_domain):
    """Convert an IPv4 or IPv6 address in textual form into a Name object whose
    value is the reverse-map domain name of the address.

    *text*, a ``str``, is an IPv4 or IPv6 address in textual form
    (e.g. '127.0.0.1', '::1')

    *v4_origin*, a ``dns.name.Name`` to append to the labels corresponding to
    the address if the address is an IPv4 address, instead of the default
    (in-addr.arpa.)

    *v6_origin*, a ``dns.name.Name`` to append to the labels corresponding to
    the address if the address is an IPv6 address, instead of the default
    (ip6.arpa.)

    Raises ``dns.exception.SyntaxError`` if the address is badly formed.

    Returns a ``dns.name.Name``.
    """
    try:
        # try to parse as IPv6 first
        v6 = dns.ipv6.inet_aton(text)
        if dns.ipv6.is_mapped(v6):
            # IPv4-mapped IPv6 (::ffff:a.b.c.d): reverse-map the embedded
            # IPv4 address, one decimal byte per label
            parts = ['%d' % byte for byte in v6[12:]]
            origin = v4_origin
        else:
            # plain IPv6: one hex nibble per label
            parts = [x for x in str(binascii.hexlify(v6).decode())]
            origin = v6_origin
    except Exception:
        # not parseable as IPv6 — fall back to IPv4
        parts = ['%d' %
                 byte for byte in dns.ipv4.inet_aton(text)]
        origin = v4_origin
    # labels are emitted least-significant first under the reverse origin
    return dns.name.from_text('.'.join(reversed(parts)), origin=origin)
import time
def blog_claim():
    """Blog info endpoint.

    If the user (identified by token) has already bought the item or is its
    author, the full content of the article is returned; otherwise `body`
    and `udfs` are blanked out.

    :param: blog_id, token, address (form fields)
    :return:
        {
            "msg": {
                "author": "0x035EB55d4260455075A8418C4B94Ba618C445537",
                "body": null,
                "claim_id": "0x909b2af19d778cd804f3ad8bf1161397",
                "deposit": 0,
                "description": "Sphinx is a tool that makes it easy to create intelligent and beautiful documentation",
                "initDate": 1535600331,
                "price": 0,
                "title": "sphinx",
                "type": 1,
                "udfs": null or "...",
                "views": 3,
                "waive": false
            },
            "result": 1
        }
    """
    current_user = auth_login_required()  # check token
    # auth_login_required returns a dict payload on failure
    if type(current_user) is dict:
        return jsonify(current_user)
    claim_id = request.form.get("blog_id")
    address = request.form.get('address')
    if claim_id is None or address is None:
        return jsonify({
            "result": 0,
            "msg": "need claim_id and address"
        })
    # refresh order state for this claim in the background
    receipt_watcher.background_update(receipt_watcher.update_order, claim_id)
    current_blog = Claim.query.filter_by(claim_id=claim_id).first()
    if current_blog is not None:
        # Forbidden claim
        if current_blog.waive:
            return jsonify({
                "result": 0,
                "msg": "Forbidden claim"
            })
        current_blog.views = current_blog.views + 1
        # NOTE(review): the views increment above is only committed in the
        # type == 2 branch below — other paths never call
        # db.session.commit(), so the counter may not persist; confirm.
        current_blog = current_blog.__dict__
        current_blog.pop("_sa_instance_state")
        if current_blog['type'] == 2:
            # advertising claim: record a 24h advertising entry
            ad = Advertising(address=address, claim_id=claim_id, price=current_blog['price'])
            ad.initDate = int(time.time()) + 24 * 60 * 60
            db.session.merge(ad)
            db.session.commit()
            return jsonify({
                "result": 1,
                "msg": current_blog
            })
        if current_blog.get("author") == address or Order.query.filter_by(claim_id=claim_id,
                                                                          address=address).first() is not None:
            # author or buyer: full content
            return jsonify({
                "result": 1,
                "msg": current_blog
            })
        else:  # not buy
            current_blog["body"] = None
            current_blog["udfs"] = None
            return jsonify({
                "result": 1,
                "msg": current_blog
            })
    return jsonify({
        "result": 0,
        "msg": "error claim_id"
    })
import typing
import os
import pickle
import logging
def get_task_flow_results_as_dataframe(task_id: int, flow_id: int,
                                       num_runs: int, raise_few_runs: bool,
                                       configuration_space: ConfigSpace.ConfigurationSpace,
                                       evaluation_measures: typing.List[str],
                                       per_fold: bool,
                                       cache_directory: typing.Union[str, None]) -> pd.DataFrame:
    """
    Obtains a number of runs from a given flow on a given task, and returns a
    (relevant) set of parameters and performance measures. Makes solely use of
    listing functions.

    Parameters
    ----------
    task_id: int
        The task id
    flow_id:
        The flow id
    num_runs: int
        Maximum on the number of runs per task
    raise_few_runs: bool
        Raises an error if not enough runs are found according to the
        `num_runs` argument
    configuration_space: ConfigurationSpace
        Determines valid parameters and ranges. These will be returned as
        column names
    evaluation_measures: List[str]
        A list with the evaluation measure to obtain
    per_fold: bool
        Whether to obtain all results per repeat and per fold (slower, but for
        example run time is not available globally for all runs). Will average
        over these. TODO: add option to get all unaveraged
    cache_directory: optional, str
        Directory where cache files can be stored to or obtained from

    Returns
    -------
    df : pd.DataFrame
        a dataframe with as columns the union of the config_space
        hyperparameters and the evaluation measures, and num_runs rows.
    """
    # a measure name colliding with a hyperparameter would make the
    # resulting dataframe columns ambiguous
    for measure in evaluation_measures:
        if measure in configuration_space.get_hyperparameters():
            raise ValueError('measure shadows name in hyperparameter list: %s' % measure)
    # both cache paths will be set to a value if cache_directory is not None
    evaluations_cache_path = dict()
    setups_cache_path = None
    # decides the files where the cache will be stored
    if cache_directory is not None:
        cache_flow_task = os.path.join(cache_directory, str(flow_id), str(task_id))
        os.makedirs(cache_flow_task, exist_ok=True)
        for measure in evaluation_measures:
            evaluations_cache_path[measure] = os.path.join(cache_flow_task,
                                                           'evaluations_%s_%d.pkl' % (measure, num_runs))
        setups_cache_path = os.path.join(cache_flow_task, 'setups_%d.pkl' % num_runs)
    # downloads (and caches, if allowed) the evaluations for all measures.
    evaluations = dict()
    setup_ids = set()  # list maintaining all used setup ids
    for measure in evaluation_measures:
        if cache_directory is None or not os.path.isfile(evaluations_cache_path[measure]):
            # downloads (and caches, if allowed) num_runs random evaluations
            evals_current_measure = openml.evaluations.list_evaluations(measure,
                                                                        size=num_runs,
                                                                        task=[task_id],
                                                                        flow=[flow_id],
                                                                        per_fold=per_fold)
            if len(evals_current_measure) < num_runs and raise_few_runs:
                raise ValueError('Not enough evaluations for measure: %s. '
                                 'Required: %d, Got: %d' % (measure, num_runs,
                                                            len(evals_current_measure)))
            if cache_directory is not None and len(evals_current_measure) == num_runs:
                # important to only store cache if enough runs were obtained
                with open(evaluations_cache_path[measure], 'wb') as fp:
                    pickle.dump(evals_current_measure, fp)
            evaluations[measure] = evals_current_measure
        else:
            # obtains the evaluations from cache
            with open(evaluations_cache_path[measure], 'rb') as fp:
                evaluations[measure] = pickle.load(fp)
        if len(evaluations[measure]) == 0:
            raise ValueError('No results on Task %d measure %s according to these criteria' % (task_id, measure))
        # note: `eval` shadows the builtin here; kept for byte-compatibility
        for eval in evaluations[measure].values():
            setup_ids.add(eval.setup_id)
    # downloads (and caches, if allowed) the setups that belong to the evaluations
    if cache_directory is None or not os.path.isfile(setups_cache_path):
        setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids=list(setup_ids))
        if cache_directory is not None and len(setups) == num_runs:
            # important to only store cache if enough runs were obtained
            with open(setups_cache_path, 'wb') as fp:
                pickle.dump(setups, fp)
    else:
        # obtains the setups from cache
        with open(setups_cache_path, 'rb') as fp:
            setups = pickle.load(fp)
    # download flows. Note that only one flow is allowed, per definition
    flows = dict()
    for setup in setups.values():
        if flow_id not in flows:
            flows[setup.flow_id] = openml.flows.get_flow(setup.flow_id)
    if len(flows) != 1:
        # This should never happen.
        raise ValueError('Expected exactly one flow. Got %d' % len(flows))
    # initiates the dataframe object
    relevant_parameters = configuration_space.get_hyperparameter_names()
    all_columns = list(relevant_parameters) + evaluation_measures
    df = pd.DataFrame(columns=all_columns)
    # initiates all records. Note that we need to check them one by one before
    # we can add them to the dataframe
    setups_merged = _merge_setup_dict_and_evaluation_dicts(setups,
                                                           flows[flow_id],
                                                           configuration_space,
                                                           evaluations,
                                                           per_fold)
    # adds the applicable setups to the dataframe
    for setup_id, setup_merged in setups_merged.items():
        # the setups dict still contains the setup objects
        current_setup = setups[setup_id]
        if openmlcontrib.setups.setup_in_config_space(current_setup,
                                                      flows[current_setup.flow_id],
                                                      configuration_space):
            # NOTE(review): DataFrame.append was removed in pandas 2.x;
            # use pd.concat when upgrading pandas.
            df = df.append(setup_merged, ignore_index=True)
        else:
            logging.warning('Setup does not comply to configuration space: %s ' % setup_id)
    # coerce measures and numeric hyperparameters to numeric dtypes
    all_numeric_columns = list(evaluation_measures)
    for param in configuration_space.get_hyperparameters():
        if isinstance(param, ConfigSpace.hyperparameters.NumericalHyperparameter):
            all_numeric_columns.append(param.name)
    df[all_numeric_columns] = df[all_numeric_columns].apply(pd.to_numeric)
    if df.shape[0] > num_runs:
        # this should never happen
        raise ValueError('Too many runs. Expected %d got %d' % (num_runs, df.shape[0]))
    exp_params = len(relevant_parameters) + len(evaluation_measures)
    if df.shape[1] != exp_params:
        # this should never happen
        raise ValueError('Wrong number of attributes. Expected %d got %d' % (exp_params, df.shape[1]))
    if df.shape[0] == 0:
        raise ValueError('Did not obtain any results for task %d' % task_id)
    df = df.reindex(sorted(df.columns), axis=1)
    return df
def _expr_rshift_as_multiplication_of_reverse_order(lhs, rhs):
"""The multiply express will reverse order.
"""
return rhs * lhs | 4f245d8a8071cf4bcfc6543e0abae24cb1cdde9d | 3,635,335 |
def pathsuboption(option, attr):
    """Decorator factory that declares a path sub-option.

    ``option`` is the sub-option name and ``attr`` the attribute it sets on
    ``path`` instances. The decorated function receives a ``ui`` instance, a
    ``path`` instance, and the raw string value of the option from the
    config, and must return the value to assign to the ``path`` instance.

    Use this to validate sub-option values or coerce them to another type.
    """
    def decorator(fn):
        # Record the handler in the module-level registry keyed by option name.
        _pathsuboptions[option] = (attr, fn)
        return fn
    return decorator
import numpy
def CalculateHarmonicTopoIndex(mol):
    """
    #################################################################
    Calculation of the harmonic topological index proposed by Narumi.

    ---->Hato

    Hato = N / sum(1 / delta_i), where N is the number of atoms and the
    delta_i are the (non-zero) vertex degrees.

    Usage:

        result = CalculateHarmonicTopoIndex(mol)

        Input: mol is a molecule object

        Output: result is a numeric value
    #################################################################
    """
    # Degree-0 atoms are excluded from the harmonic mean (they would divide
    # by zero) but still count towards the atom total in the numerator.
    nonzero_degrees = [atom.GetDegree() for atom in mol.GetAtoms()
                       if atom.GetDegree() != 0]
    deltas = numpy.array(nonzero_degrees, "d")
    return mol.GetNumAtoms() / sum(1.0 / deltas)
import functools
def record_metrics(func):
    """
    Decorator that records route-hit metrics in Redis each time the wrapped
    handler is invoked.

    The number of times a route is hit, and how often an ``app_name`` query
    param is used, are each incremented in a Redis hash map. Metric-recording
    failures are logged and swallowed so they can never break the request.

    NOTE: This must be placed before the cache decorator in order for the
    redis incr to occur.
    """
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        try:
            application_key, application_name = extract_app_name_key()
            route_key, route = extract_route_key()
            REDIS.hincrby(route_key, route, 1)
            if application_name:
                REDIS.hincrby(application_key, application_name, 1)
        except Exception as e:
            # BUGFIX: exceptions have no `.message` attribute in Python 3, so
            # the old `e.message` raised AttributeError inside the handler.
            # Log the exception object itself instead.
            logger.error('Error while recording metrics: %s', e)
        return func(*args, **kwargs)
    return wrap
def is_long_path(path: PathOrString) -> bool:
    """
    Return True if *path*, rendered as a string, exceeds the maximum path
    length (a long path has more than 260 characters).
    """
    as_text = str(path)
    return len(as_text) > MAX_PATH_LENGTH
def build_model():
    """
    Builds up the SoundNet model and loads the weights from a given model file
    (8-layer model is kept at models/sound8.npy).

    Each block is ZeroPadding1D -> Conv1D, followed (except for the final
    'conv8_2' layer) by BatchNormalization -> ReLU, and optionally MaxPooling1D
    when 'pool_size' is present in the block's parameters. Weights, biases and
    batch-norm statistics are copied layer by layer from the .npy weight dict.

    :return: a Keras ``Sequential`` model with pretrained weights loaded
    """
    # NOTE(review): on NumPy >= 1.16 loading a pickled dict requires
    # allow_pickle=True — confirm the runtime NumPy version.
    model_weights = np.load('models/sound8.npy').item()
    model = Sequential()
    # Variable-length mono waveform input: (batch=1, samples=None, channels=1).
    model.add(InputLayer(batch_input_shape=(1, None, 1)))
    filter_parameters = [{'name': 'conv1', 'num_filters': 16, 'padding': 32,
                          'kernel_size': 64, 'conv_strides': 2,
                          'pool_size': 8, 'pool_strides': 8},
                         {'name': 'conv2', 'num_filters': 32, 'padding': 16,
                          'kernel_size': 32, 'conv_strides': 2,
                          'pool_size': 8, 'pool_strides': 8},
                         {'name': 'conv3', 'num_filters': 64, 'padding': 8,
                          'kernel_size': 16, 'conv_strides': 2},
                         {'name': 'conv4', 'num_filters': 128, 'padding': 4,
                          'kernel_size': 8, 'conv_strides': 2},
                         {'name': 'conv5', 'num_filters': 256, 'padding': 2,
                          'kernel_size': 4, 'conv_strides': 2,
                          'pool_size': 4, 'pool_strides': 4},
                         {'name': 'conv6', 'num_filters': 512, 'padding': 2,
                          'kernel_size': 4, 'conv_strides': 2},
                         {'name': 'conv7', 'num_filters': 1024, 'padding': 2,
                          'kernel_size': 4, 'conv_strides': 2},
                         {'name': 'conv8_2', 'num_filters': 401, 'padding': 0,
                          'kernel_size': 8, 'conv_strides': 2},
                         ]
    for x in filter_parameters:
        model.add(ZeroPadding1D(padding=x['padding']))
        model.add(Conv1D(x['num_filters'],
                         kernel_size=x['kernel_size'],
                         strides=x['conv_strides'],
                         padding='valid'))
        # Reshape the stored kernel to whatever layout this Keras layer expects.
        weights = model_weights[x['name']]['weights'].reshape(model.layers[-1].get_weights()[0].shape)
        biases = model_weights[x['name']]['biases']
        model.layers[-1].set_weights([weights, biases])
        # The output layer ('conv8_2') has no batch norm / activation.
        if 'conv8' not in x['name']:
            gamma = model_weights[x['name']]['gamma']
            beta = model_weights[x['name']]['beta']
            mean = model_weights[x['name']]['mean']
            var = model_weights[x['name']]['var']

            model.add(BatchNormalization())
            model.layers[-1].set_weights([gamma, beta, mean, var])
            model.add(Activation('relu'))
            if 'pool_size' in x:
                model.add(MaxPooling1D(pool_size=x['pool_size'],
                                       strides=x['pool_strides'],
                                       padding='valid'))
    return model
def open(
    urlpath,
    mode="rb",
    compression=None,
    encoding="utf8",
    errors=None,
    protocol=None,
    newline=None,
    **kwargs
):
    """ Given a path or paths, return one ``OpenFile`` object.

    Note: deliberately shadows the ``open`` builtin to mirror its API for
    remote filesystems.

    Parameters
    ----------
    urlpath: string or list
        Absolute or relative filepath. Prefix with a protocol like ``s3://``
        to read from alternative filesystems. Should not include glob
        character(s).
    mode: 'rb', 'wt', etc.
    compression: string
        Compression to use. See ``dask.bytes.compression.files`` for options.
    encoding: str
        For text mode only
    errors: None or str
        Passed to TextIOWrapper in text mode
    protocol: str or None
        If given, overrides the protocol found in the URL.
    newline: bytes or None
        Used for line terminator in text mode. If None, uses system default;
        if blank, uses no translation.
    **kwargs: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.

    Examples
    --------
    >>> openfile = open('2015-01-01.csv')  # doctest: +SKIP
    >>> openfile = open('s3://bucket/2015-01-01.csv.gz', compression='gzip')  # doctest: +SKIP
    ... with openfile as f:
    ...     df = pd.read_csv(f)

    Returns
    -------
    ``OpenFile`` object.
    """
    # Delegate to the plural helper with a one-element list and unwrap the
    # single resulting OpenFile.
    open_file_list = open_files(
        [urlpath],
        mode,
        compression,
        encoding,
        errors,
        protocol,
        newline=newline,
        **kwargs
    )
    return open_file_list[0]
import time
from datetime import datetime
def jobsWorker(
    jobQueue,
    output,
    model,
    funcProgress,
    controlQueue,
    trayIconMessageSignal,
    log=False,
):
    """
    jobsWorker execute jobs on queue

    Pops jobs off the queue one by one, verifies each command's file
    structure, runs it (or simulates it), updates job status/history in the
    jobs database and emits progress, log text and tray notifications.

    Args:
        jobQueue (JobQueue): Job queue has all related information for the job
        output: signal hub used to emit job/error text to the output windows
        model: table model holding the job rows (status column is updated)
        funcProgress (func): function to call to report job progress. Defaults to None.
        controlQueue: queue polled for abort requests (Abort/AbortJob/AbortJobError)
        trayIconMessageSignal: Qt signal used to show system tray messages
        log (bool): enable debug logging

    Returns:
        str: Dummy return value
    """

    #
    # Always open to start saving in mid of worker operating
    #
    jobsDB = SqlJobsTable(config.data.get(config.ConfigKey.SystemDB))

    # Re-entrancy guard: only one worker may run at a time.
    if jobsWorker.running:
        # Block direct calls while working there should be none
        return "Working..."

    jobsWorker.running = True
    totalJobs = len(jobQueue)
    remainingJobs = totalJobs - 1
    currentJob = 0
    indexTotal = [0, 0]
    verify = mkv.VerifyStructure(log=log)
    totalErrors = funcProgress.lbl[4]
    abortAll = False
    bSimulateRun = config.data.get(config.ConfigKey.SimulateRun)

    while job := jobQueue.popLeft():

        # job = copy.deepcopy(qJob)
        job.jobRow = model.dataset[
            job.jobRowNumber,
        ]
        statusIndex = model.index(job.jobRowNumber, JobKey.Status)

        # Once a global abort is requested, drain the queue marking jobs Aborted.
        if abortAll:
            jobQueue.statusUpdateSignal.emit(job, JobStatus.Aborted)
            continue

        # Keep totals consistent if jobs were added while the worker ran.
        actualRemaining = len(jobQueue)
        if actualRemaining == remainingJobs:
            remainingJobs -= 1
        else:
            totalJobs = totalJobs + actualRemaining - remainingJobs
            remainingJobs = actualRemaining - 1

        currentJob += 1
        indexTotal[0] = 0  # file index
        indexTotal[1] = 0  # current max progress bar

        totalFiles = 0
        if job.oCommand:
            totalFiles = len(job.oCommand)

        maxBar = totalFiles * 100
        funcProgress.pbSetValues.emit(0, 0)
        funcProgress.pbSetMaximum.emit(100, maxBar)
        funcProgress.lblSetValue.emit(0, totalJobs)
        funcProgress.lblSetValue.emit(1, currentJob)
        funcProgress.lblSetValue.emit(3, totalFiles)
        indexTotal[0] = 0

        # Check Job Status for Skip
        #
        # sourceIndex = job.statusIndex
        status = model.dataset[statusIndex.row(), statusIndex.column()]
        if status == JobStatus.Skip:
            jobQueue.statusUpdateSignal.emit(job, JobStatus.Skipped)
            continue
        #
        # Check Job Status for Skip

        jobQueue.statusUpdateSignal.emit(job, JobStatus.Running)

        cli = RunCommand(
            processLine=displayRunJobs,
            processArgs=[job, output, indexTotal],
            processKWArgs={"funcProgress": funcProgress},
            controlQueue=controlQueue,
            commandShlex=True,
            universalNewLines=True,
            log=log,
        )

        if job.oCommand:
            job.startTime = time()
            if config.data.get(config.ConfigKey.JobHistory):
                job.jobRow[JobKey.Status] = JobStatus.Running
                addToDb(jobsDB, job)
            dt = datetime.fromtimestamp(job.startTime)
            msg = "*******************\n"
            msg += "Job ID: {} started at {}.\n\n".format(
                job.jobRow[JobKey.ID], dt.isoformat()
            )
            trayIconMessageSignal.emit(
                "Information - MKVBatchMultiplex",
                f"Job ID: {job.jobRow[JobKey.ID]} started.",
                QSystemTrayIcon.Information,
            )
            output.job.emit(msg, {"color": SvgColor.cyan})
            exitStatus = "ended"

            if log:
                MODULELOG.debug("RJB0005: Job ID: %s started.", job.jobRow[JobKey.ID])

            updateStatus = True

            if not job.oCommand.commandsGenerated:
                output.job.emit("Generating commands...\n", {"appendEnd": True})
                job.oCommand.generateCommands()

            # One iteration per output file of the job's multiplex command set.
            for (
                index,
                (cmd, baseFiles, sourceFiles, destinationFile, _, _, _),
            ) in enumerate(job.oCommand):
                funcProgress.lblSetValue.emit(2, indexTotal[0] + 1)

                #
                # Check Job Status for Abort
                #
                status = model.dataset[statusIndex.row(), statusIndex.column()]

                ###
                # Check controlQueue
                ###
                if controlQueue:
                    queueStatus = controlQueue.popleft()
                    if queueStatus in [
                        JobStatus.Abort,
                        JobStatus.AbortJob,
                        JobStatus.AbortJobError,
                    ]:
                        jobQueue.statusUpdateSignal.emit(job, JobStatus.Abort)
                        status = JobStatus.Abort
                        exitStatus = queueStatus
                        if queueStatus == JobStatus.Abort:
                            abortAll = True
                        # Remove the partially written destination file, if any.
                        if f := job.oCommand[index - 1][3]:
                            if f.is_file():
                                f.unlink()

                if status == JobStatus.Abort:
                    jobQueue.statusUpdateSignal.emit(job, JobStatus.Aborted)
                    job.jobRow[JobKey.Status] = JobStatus.Aborted
                    updateStatus = False
                    if exitStatus == "ended":
                        exitStatus = "aborted"
                    job.endTime = time()
                    if config.data.get(config.ConfigKey.JobHistory):
                        job.jobRow[JobKey.Status] = JobStatus.Aborted
                        addToDb(jobsDB, job, update=True)
                    break

                # Verify base/source/destination file structure before running.
                verify.verifyStructure(baseFiles, sourceFiles, destinationFile)

                if log:
                    msg = (
                        "Command: {} Base Files: {} "
                        "Source Files: {} Destination File: {}"
                    )
                    msg = msg.format(cmd, baseFiles, sourceFiles, destinationFile)
                    MODULELOG.debug("RJB0006: %s", msg)

                if verify:
                    ###
                    # Execute cmd
                    ###
                    msg = (
                        "Command: {}\nBase Files: {}\n"
                        "Source Files: {}\nDestination Files: {}\n"
                    )
                    msg = msg.format(cmd, baseFiles, sourceFiles, destinationFile)
                    output.job.emit(msg, {"appendEnd": True})
                    if log:
                        MODULELOG.debug("RJB0007: Structure checks ok")
                    if bSimulateRun:
                        # Dry run: fake progress without invoking mkvmerge.
                        dummyRunCommand(funcProgress, indexTotal, controlQueue)
                    else:
                        # TODO: queue to control execution of running job inside
                        # the RunCommand
                        cli.command = cmd
                        cli.run()
                else:
                    # Structure verification failed: record analysis and report.
                    job.errors.append(verify.analysis)
                    totalErrors += 1
                    funcProgress.lblSetValue.emit(4, totalErrors)
                    msg = "Error Job ID: {} ---------------------\n\n".format(
                        job.jobRow[JobKey.ID]
                    )
                    output.error.emit(msg, {"color": SvgColor.red, "appendEnd": True})
                    msg = "Destination File: {}\n\n".format(destinationFile)
                    job.output.append(msg)
                    output.job.emit(msg, {"color": SvgColor.red, "appendEnd": True})
                    # output.error.emit(msg, {"color": SvgColor.red, "appendEnd": True})
                    for i, m in enumerate(verify.analysis):
                        if i == 0:
                            lines = m.split("\n")
                            findSource = True
                            for line in lines:
                                color = SvgColor.orange
                                # Highlight the first offending "File Name" line.
                                if findSource and (
                                    (searchIndex := line.find("File Name")) >= 0
                                ):
                                    if searchIndex >= 0:
                                        color = SvgColor.tomato
                                        findSource = False
                                output.job.emit(line + "\n", {"color": color})
                                output.error.emit(line + "\n", {"color": color})
                                job.output.append(line + "\n")
                        else:
                            output.job.emit(m, {"color": SvgColor.red})
                            job.output.append(m + "\n")
                            output.error.emit(m, {"color": SvgColor.red})
                    job.output.append("\n")
                    # output.job.emit("", {"appendEnd": True})
                    msg = "Error Job ID: {} ---------------------\n\n".format(
                        job.jobRow[JobKey.ID]
                    )
                    output.error.emit(msg, {"color": SvgColor.red, "appendEnd": True})
                    if log:
                        MODULELOG.error("RJB0008: Structure check failed")
                indexTotal[1] += 100
                indexTotal[0] += 1

            # End for loop for jobs in job.oCommand
            job.endTime = time()
            dtStart = datetime.fromtimestamp(job.startTime)
            dtEnd = datetime.fromtimestamp(job.endTime)
            dtDuration = dtEnd - dtStart
            msg = "Job ID: {} {} - date {} - running time {}.\n".format(
                job.jobRow[JobKey.ID],
                exitStatus,
                dtEnd.isoformat(),
                strFormatTimeDelta(dtDuration),
            )
            job.output.append(msg)
            msg += "*******************\n\n\n"
            output.job.emit(msg, {"color": SvgColor.cyan, "appendEnd": True})
            msg = "Job ID: {} {}\nruntime {}"
            msg = msg.format(
                job.jobRow[JobKey.ID], exitStatus, strFormatTimeDelta(dtDuration),
            )
            trayIconMessageSignal.emit(
                "Information - MKVBatchMultiplex", msg, QSystemTrayIcon.Information,
            )
            if config.data.get(config.ConfigKey.JobHistory):
                if updateStatus:
                    job.jobRow[JobKey.Status] = JobStatus.Done
                addToDb(jobsDB, job, update=True)
            if updateStatus:
                jobQueue.statusUpdateSignal.emit(job, JobStatus.Done)
            if log:
                MODULELOG.debug("RJB0009: Job ID: %s finished.", job.jobRow[JobKey.ID])
        else:
            # No command object: the job cannot be run at all.
            totalErrors += 1
            funcProgress.lblSetValue.emit(4, totalErrors)
            msg = "Job ID: {} cannot execute command.\n\nCommand: {}\n"
            msg = msg.format(job.jobRow[JobKey.ID], job.oCommand.command)
            output.error.emit(msg, {"color": SvgColor.red})
            jobQueue.statusUpdateSignal.emit(job, JobStatus.Error)
            if log:
                MODULELOG.debug(
                    "RJB0010: Job ID: %s cannot execute command: %s.",
                    job.jobRow[JobKey.ID],
                    job.oCommand.command,
                )

    jobsDB.close()

    # Reset progress widgets for the next run.
    for index in range(4):
        funcProgress.lblSetValue.emit(index, 0)
    funcProgress.pbSetMaximum.emit(100, 100)
    funcProgress.pbSetValues.emit(0, 100)
    funcProgress.pbReset.emit()

    jobsWorker.running = False

    return "Job queue empty."
def evaluate_surface(name, oname=None, mesh=False, topology=False, intersections=False, collisions=0, opts=None):
    """Evaluate properties of surface mesh.

    Builds and runs an ``evaluate-surface-mesh`` command line.

    Args:
        name: Input surface mesh file.
        oname: Optional output file; when given, ``-v`` is also passed.
        mesh: Report mesh attributes (``-attr``).
        topology: Report topology checks (``-topology``).
        intersections: Check for self-intersections (enables ``-collisions``).
        collisions: Collision test argument; passed when > 0 or when
            ``intersections`` is set.
        opts: Extra options, either a dict of ``{option: arg}`` or a list of
            option names / ``(option, args...)`` tuples. Option names get a
            leading ``-`` added if missing.

    Returns:
        Output of the external command (via ``check_output``).
    """
    # Avoid the shared-mutable-default pitfall of the former `opts={}`.
    if opts is None:
        opts = {}
    argv = ['evaluate-surface-mesh', name]
    if oname:
        argv.extend([oname, '-v'])
    argv.extend(['-threads', str(threads)])
    if mesh: argv.append('-attr')
    if topology: argv.append('-topology')
    if collisions > 0 or intersections:
        # Command arguments must be strings, consistent with str(threads) above.
        argv.extend(['-collisions', str(collisions)])
    if len(opts) > 0:
        if isinstance(opts, list):
            for item in opts:
                if isinstance(item, (tuple, list)):
                    opt = item[0]
                    arg = flatten(item[1:])
                else:
                    opt = item
                    arg = None
                if not opt.startswith('-'):
                    opt = '-' + opt
                argv.append(opt)
                if arg is not None:
                    argv.extend(flatten(arg))
        else:
            for opt, arg in opts.items():
                if not opt.startswith('-'):
                    opt = '-' + opt
                argv.append(opt)
                if arg is not None:
                    argv.extend(flatten(arg))
    return check_output(argv, verbose=showcmd)
def uu_query_STK_STATUS_CHANGE():
    """
    Listing-status changes of listed companies.

    Queries the ``finance.STK_STATUS_CHANGE`` table, which records listing
    status transitions of listed companies: issued-but-not-listed, normal
    listing, ST / *ST special treatment, suspension of listing, delisting,
    and so on.

    Query pattern::

        query(finance.STK_STATUS_CHANGE)

    selects from the STK_STATUS_CHANGE table; specific fields can be given as
    ``query(db.table.field1, db.table.field2)`` (comma-separated). See the
    ``sqlalchemy.orm.query.Query`` object for more usage.

    Table columns include: ``company_id`` (institution ID), ``code`` (stock
    code), ``name`` (stock name), ``pub_date`` (announcement date),
    ``change_date`` (change date), ``public_status_id`` / ``public_status``
    (listing-status code / label, e.g. 301001 normal listing, 301002 ST,
    301003 *ST, 301004 suspended, 301005 delisting transition period,
    301006 delisted, 301007 issued not listed, ... 301099 other),
    ``change_reason``, ``change_type_id`` / ``change_type`` (change-type
    code / label, e.g. 303001 listing resumed, 303002/303003/303004 star or
    cap removed, 303005/303006/303007 star or cap applied, 303009 new
    listing, 303011 listing suspended, 303012 delisted, ... 303099 other)
    and ``comments``.

    Filtering::

        filter(finance.STK_STATUS_CHANGE.code == code)

    restricts the query to one stock code; other fields can be filtered the
    same way (e.g. ``finance.STK_STATUS_CHANGE.pub_date >= '2015-01-01'``),
    multiple conditions comma-separated. ``limit(n)`` caps the number of
    returned rows.

    :rtype: pandas.DataFrame
    :return: status-change records for the queried company
    """
    # Query the listing-status changes of Hengrui Medicine (600276.XSHG),
    # limited to 10 rows.
    q = query(finance.STK_STATUS_CHANGE).filter(finance.STK_STATUS_CHANGE.code == '600276.XSHG').limit(10)
    df = finance.run_query(q)
    return df
def add_custom_encoder_arguments(group):
    """Define arguments for Custom encoder.

    Adds the block-architecture options explicitly and the remaining
    string-choice options from a declarative table.

    Args:
        group: argparse group (or parser) to register the options on.

    Returns:
        The same group, with all custom-encoder options added.
    """
    group.add_argument(
        "--enc-block-arch",
        type=eval,
        action="append",
        default=None,
        help="Encoder architecture definition by blocks",
    )
    group.add_argument(
        "--enc-block-repeat",
        default=0,
        type=int,
        help="Repeat N times the provided encoder blocks if N > 1",
    )
    # (flag, default, choices, help) for the str-typed choice options.
    choice_options = (
        (
            "--custom-enc-input-layer",
            "conv2d",
            ["conv2d", "vgg2l", "linear", "embed"],
            "Custom encoder input layer type",
        ),
        (
            "--custom-enc-positional-encoding-type",
            "abs_pos",
            ["abs_pos", "scaled_abs_pos", "rel_pos"],
            "Custom encoder positional encoding layer type",
        ),
        (
            "--custom-enc-self-attn-type",
            "self_attn",
            ["self_attn", "rel_self_attn"],
            "Custom encoder self-attention type",
        ),
        (
            "--custom-enc-pw-activation-type",
            "relu",
            ["relu", "hardtanh", "selu", "swish"],
            "Custom encoder pointwise activation type",
        ),
        (
            "--custom-enc-conv-mod-activation-type",
            "swish",
            ["relu", "hardtanh", "selu", "swish"],
            "Custom encoder convolutional module activation type",
        ),
    )
    for flag, default, choices, help_text in choice_options:
        group.add_argument(
            flag,
            type=str,
            default=default,
            choices=choices,
            help=help_text,
        )
    return group
def expost_te(returns, bmk_returns, periods):
    """
    Calculate the EX-POST Tracking Error.

    Aligns the portfolio and benchmark series, drops rows where either value
    is missing, and computes the rolling annualized standard deviation
    (ddof=1, scaled by sqrt(12), i.e. monthly observations) of the active
    return (portfolio minus benchmark).

    Args:
        returns: portfolio return series (or single-column DataFrame).
        bmk_returns: benchmark return series (or single-column DataFrame).
        periods: rolling window length in observations.

    Returns:
        pd.Series of rolling annualized Ex-Post Tracking Error with the
        initial NaN window dropped.
    """
    # BUGFIX: concatenate both frames side by side (the original concatenated
    # their *difference*, a single column, so assigning the two column names
    # below raised ValueError).
    temp = pd.concat([pd.DataFrame(returns), pd.DataFrame(bmk_returns)], axis=1)
    temp = temp.dropna()
    temp.columns = ["returns", "bmk_returns"]
    temp = temp.returns - temp.bmk_returns
    rolling_te = temp.rolling(periods).apply(
        lambda x: (np.std(x, ddof=1) * np.sqrt(12)), raw=True
    )
    rolling_te = rolling_te.dropna()
    return rolling_te
def cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H'):
    """
    Cruise control with PI controller and hill disturbance.

    This function returns various system function configurations
    for a the cruise control Case Study example found in
    the supplementary article. The plant model is obtained by the
    linearizing the equations of motion and the controller contains a
    proportional and integral gain term set via the closed-loop parameters
    natural frequency wn (rad/s) and damping zeta.

    Parameters
    ----------
    wn : closed-loop natural frequency in rad/s, nominally 0.1
    zeta : closed-loop damping factor, nominally 1.0
    T : vehicle time constant, nominally 10 s
    vcruise : cruise velocity set point, nominally 75 mph
    vmax : maximum vehicle velocity, nominally 120 mph
    tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function
              'H'   : closed-loop system function V(s)/R(s)
              'HE'  : closed-loop system function E(s)/R(s)
              'HVW' : closed-loop system function V(s)/W(s)
              'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input

    Returns
    -------
    b : numerator coefficient ndarray
    a : denominator coefficient ndarray

    Raises
    ------
    ValueError
        If tf_mode is not one of 'H', 'HE', 'HVW', 'HED'.

    Examples
    --------
    >>> # return the closed-loop system function output/input velocity
    >>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H')
    >>> # return the closed-loop system function loop error/hill disturbance
    >>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='HED')
    """
    # Effective plant time constant scaled by the operating point.
    tau = T/2.*vmax/vcruise
    g = 9.8
    g *= 3*60**2/5280.  # m/s to mph conversion
    # PI gains placing the closed-loop poles at the requested wn, zeta.
    Kp = T*(2*zeta*wn-1/tau)/vmax
    Ki = T*wn**2./vmax
    K = Kp*vmax/T
    # NOTE: wn and zeta are recomputed from the realized gains below
    # (overwriting the input parameters) and reported for verification.
    wn = np.sqrt(K/(Kp/Ki))
    zeta = (K + 1/tau)/(2*wn)
    log.info('wn = %s' % (wn))
    log.info('zeta = %s' % (zeta))
    # Common closed-loop denominator: s^2 + 2*zeta*wn*s + wn^2.
    a = np.array([1, 2*zeta*wn, wn**2])
    if tf_mode == 'H':
        b = np.array([K, wn**2])
    elif tf_mode == 'HE':
        b = np.array([1, 2*zeta*wn-K, 0.])
    elif tf_mode == 'HVW':
        b = np.array([ 1, wn**2/K+1/tau, wn**2/(K*tau)])
        b *= Kp
    elif tf_mode == 'HED':
        b = np.array([g, 0])
    else:
        raise ValueError('tf_mode must be: H, HE, HVU, or HED')
    return b, a
def load_feature_toggles(request, context):
    """Load feature toggles.

    Fetches every feature toggle value from the database and wraps the
    result in a response dict under the ``feature_toggles`` key.
    """
    LOGGER.info('Load feature toggles request')
    toggles = core.load()
    payload = {'feature_toggles': toggles}
    LOGGER.debug('Load feature toggles response: %s', payload)
    return payload
import itertools
def svd(a, compute_uv=True, sort=True, copy=True, eps=1e-9):
    """ Performs singular value decomposition of a ds-array via the one-sided
    block Jacobi algorithm described in Arbenz and Slapnicar [1]_ and
    Dongarra et al. [2]_.

    Singular value decomposition is a factorization of the form A = USV',
    where U and V are unitary matrices and S is a rectangular diagonal matrix.

    Parameters
    ----------
    a : ds-array, shape=(m, n)
        Input matrix (m >= n). Needs to be partitioned in two column blocks at
        least due to the design of the block Jacobi algorithm.
    compute_uv : boolean, optional (default=True)
        Whether or not to compute u and v in addition to s.
    sort : boolean, optional (default=True)
        Whether to return sorted u, s and v. Sorting requires a significant
        amount of additional computation.
    copy : boolean, optional (default=True)
        Whether to create a copy of a or to apply transformations on a
        directly. Only valid if a is regular (i.e., top left block is of
        regular shape).
    eps : float, optional (default=1e-9)
        Tolerance for the convergence criterion.

    Returns
    -------
    u : ds-array, shape=(m, n)
        U matrix. Only returned if compute_uv is True.
    s : ds-array, shape=(1, n)
        Diagonal entries of S.
    v : ds-array, shape=(n, n)
        V matrix. Only returned if compute_uv is True.

    Raises
    ------
    ValueError
        If a has less than 2 column blocks or m < n.

    References
    ----------

    .. [1] Arbenz, P. and Slapnicar, A. (1995). An Analysis of Parallel
        Implementations of the Block-Jacobi Algorithm for Computing the SVD. In
        Proceedings of the 17th International Conference on Information
        Technology Interfaces ITI (pp. 13-16).

    .. [2] Dongarra, J., Gates, M., Haidar, A. et al. (2018). The singular
        value decomposition: Anatomy of optimizing an algorithm for extreme
        scale. In SIAM review, 60(4) (pp. 808-865).

    Examples
    --------
    >>> import numpy as np
    >>> import dislib as ds
    >>> x = ds.random_array((10, 6), (2, 2), random_state=7)
    >>> u, s, v = ds.svd(x)
    >>> u = u.collect()
    >>> s = np.diag(s.collect())
    >>> v = v.collect()
    >>> print(np.allclose(x.collect(), u @ s @ v.T))
    """
    if a._n_blocks[1] < 2:
        raise ValueError("Not enough column blocks to compute SVD.")

    if a.shape[0] < a.shape[1]:
        raise ValueError("The number of rows of the input matrix is lower "
                         "than the number of columns")

    # The algorithm needs a regularly-chunked matrix; rechunk or copy as asked.
    if not a._is_regular():
        x = a.rechunk(a._reg_shape)
    elif copy:
        x = a.copy()
    else:
        x = a

    if compute_uv:
        # V starts as the identity and accumulates the applied Jacobi rotations.
        v = identity(x.shape[1], (x._reg_shape[1], x._reg_shape[1]))

    checks = [True]

    # Sweep over all column-block pairs until every pair reports convergence.
    while not _check_convergence_svd(checks):
        checks = []

        pairings = itertools.combinations_with_replacement(
            range(x._n_blocks[1]), 2
        )

        for i, j in pairings:
            if i >= j:
                continue

            coli_x = x._get_col_block(i)
            colj_x = x._get_col_block(j)

            # Rotate the two column blocks of X to orthogonalize them; the
            # same rotation is applied to V to accumulate the decomposition.
            rot, check = _compute_rotation_and_rotate(
                coli_x._blocks, colj_x._blocks, eps
            )
            checks.append(check)

            if compute_uv:
                coli_v = v._get_col_block(i)
                colj_v = v._get_col_block(j)

                _rotate(coli_v._blocks, colj_v._blocks, rot)

    # At convergence, the columns of X are orthogonal; their norms are the
    # singular values.
    s = x.norm(axis=0)

    if sort:
        sorting = _sort_s(s._blocks)

    if compute_uv:
        if sort:
            u = _compute_u_sorted(x, sorting)
            v = _sort_v(v, sorting)
        else:
            u = _compute_u(x)

        return u, s, v
    else:
        return s
def renameID(idFrom, idTo, identifiedElements, referringNodes):
    """
    Changes the ID name from idFrom to idTo, on the declaring element
    as well as all nodes in referringNodes.

    Updates identifiedElements.

    Returns the number of bytes saved by this replacement.
    """
    num = 0

    definingNode = identifiedElements[idFrom]
    definingNode.setAttribute("id", idTo)
    num += len(idFrom) - len(idTo)

    # Update references to renamed node
    if referringNodes is not None:

        # Look for the idFrom ID name in each of the referencing elements,
        # exactly like findReferencedElements would.
        # Cyn: Duplicated processing!

        for node in referringNodes:
            # if this node is a style element, parse its text into CSS
            if node.nodeName == 'style' and node.namespaceURI == NS['SVG']:
                # node.firstChild will be either a CDATA or a Text node now
                if node.firstChild is not None:
                    # concatenate the value of all children, in case
                    # there's a CDATASection node surrounded by whitespace
                    # nodes
                    # (node.normalize() will NOT work here, it only acts on Text nodes)
                    oldValue = "".join(child.nodeValue for child in node.childNodes)
                    # not going to reparse the whole thing
                    # BUGFIX: quoted CSS url() wraps the whole value including
                    # the '#', i.e. url('#id') — the old patterns url(#'id')
                    # and url(#"id") are invalid CSS and could never match
                    # (the attribute branches below already used url('#id')).
                    newValue = oldValue.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
                    newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
                    newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
                    # and now replace all the children with this new stylesheet.
                    # again, this is in case the stylesheet was a CDATASection
                    node.childNodes[:] = [node.ownerDocument.createTextNode(newValue)]
                    num += len(oldValue) - len(newValue)

            # if xlink:href is set to #idFrom, then change the id
            href = node.getAttributeNS(NS['XLINK'], 'href')
            if href == '#' + idFrom:
                node.setAttributeNS(NS['XLINK'], 'href', '#' + idTo)
                num += len(idFrom) - len(idTo)

            # if the style has url(#idFrom), then change the id
            styles = node.getAttribute('style')
            if styles != '':
                newValue = styles.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
                newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
                newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
                node.setAttribute('style', newValue)
                num += len(styles) - len(newValue)

            # now try the fill, stroke, filter attributes
            for attr in referencingProps:
                oldValue = node.getAttribute(attr)
                if oldValue != '':
                    newValue = oldValue.replace('url(#' + idFrom + ')', 'url(#' + idTo + ')')
                    newValue = newValue.replace("url('#" + idFrom + "')", 'url(#' + idTo + ')')
                    newValue = newValue.replace('url("#' + idFrom + '")', 'url(#' + idTo + ')')
                    node.setAttribute(attr, newValue)
                    num += len(oldValue) - len(newValue)

    return num
def rotate_hue(im, x):
    """
    Adjust the hue of *im* by *x*, where *x* is a value between 0.0 to 1.0.
    Full red to full green is 1/3, red to blue is 2/3.
    """
    assert im.mode == 'RGB'
    # Convert to float RGB in [0, 1], shift the hue channel modulo 1 in HSV
    # space, and convert back to an 8-bit RGB image.
    rgb = np.asarray(im) / 255
    hsv = colors.rgb_to_hsv(rgb)
    hsv[..., 0] = (hsv[..., 0] + x) % 1
    rgb = colors.hsv_to_rgb(hsv) * 255
    return Image.fromarray(rgb.astype(np.uint8))
import argparse
def get_parser() -> argparse.ArgumentParser:
    """
    Build the command line parser for the highway tool.

    Registers the ``--list``, ``--roadworks``, ``--parking`` and
    ``--version`` options.

    Returns:
        argparse.ArgumentParser: Created parser
    """
    parser = argparse.ArgumentParser()
    # (short, long, kwargs) for every supported option.
    option_table = (
        ("-l", "--list", dict(required=False, action="store_true",
                              help="list of all highways")),
        ("-r", "--roadworks", dict(type=str, required=False,
                                   help="list of roadworks")),
        ("-p", "--parking", dict(type=str, required=False,
                                 help="list of service areas")),
        ("-v", "--version", dict(required=False, action="store_true",
                                 help="show the version")),
    )
    for short_flag, long_flag, kwargs in option_table:
        parser.add_argument(short_flag, long_flag, **kwargs)
    return parser
def Pa_2_psig(value):
    """
    Convert absolute pressure in Pa (Pascal) to gauge pressure in psig
    (pound-force per square inch, gauge) by subtracting one standard
    atmosphere (14.7 psi).

    :param value: pressure value in Pa
    :return: pressure value in psi
    """
    psi_absolute = value / const.psi
    return psi_absolute - 14.7
def get_vedges_details(customer, api_response_data, query_condition):
    """
    Parses JSON response and builds CSV file with vEdge details
    All other devices, such as vBond, vSmart are excluded

    :param query_condition: filter — list of dicts with "cond_field" and
        "cond_value" keys; host-name / site-id conditions are consumed
        (removed from the list) by this function
    :param customer: string to build correct directory to store CSV files
    :param api_response_data: JSON response with all devices from vManage
    :return: list of deviceId of vEdge devices
    """
    # List of devices - for return
    device_ids = []
    # Build device list to query - can be a list from CLI, or all devices if no DeviceId is specified
    device_list = []
    found_device_id_in_filter = False
    # handle site-id or hostname in query sting
    devices_to_query = {}
    values_to_query = []
    # if deviceId and hostname already specified in filter - 'where' condition, only query this device
    # exception is when multiple device are there in 'when' condition - checking item["cond_value"] isn't a list
    # NOTE(review): iterating query_condition while remove()-ing from it can
    # skip elements — confirm filters never contain adjacent host-name/site-id
    # entries.
    for item in query_condition:
        if "deviceId" in item["cond_field"] and not (isinstance(item["cond_value"], list)):
            # Querying particular devices
            device_list.append(item["cond_value"])
            # already found deviceId - no need to get device list later
            found_device_id_in_filter = True
        # check if host-name or site-id is in query, get vEdge details only for these sites
        elif "host-name" in item["cond_field"]:
            devices_to_query["type"] = "host-name"
            # if a list of host name already given using 'or' condition, simply use it, otherwise add to list
            if (isinstance(item["cond_value"], list)):
                values_to_query = item["cond_value"]
            else:
                values_to_query.append(item["cond_value"])
            query_condition.remove(item)
        elif "site-id" in item["cond_field"]:
            devices_to_query["type"] = "site-id"
            if (isinstance(item["cond_value"], list)):
                values_to_query = item["cond_value"]
            else:
                values_to_query.append(item["cond_value"])
            query_condition.remove(item)
    devices_to_query["device_list"] = values_to_query
    # found deviceId in condition filter
    if found_device_id_in_filter:
        return device_ids
    # Get CSV Headers for vEdge devices
    # (the keys of the first vEdge record become the header row)
    csv_headers = []
    found = False
    for element in api_response_data:
        for key, value in element.items():
            if element["device-type"] == "vedge":
                csv_headers.append(key)
                found = True
        if found:
            # found vEdge device, got headers, no need to process other records
            break
    # Get CSV Data for vEdge devices
    csv_data = []
    found = False
    for element in api_response_data:
        csv_row = []
        for key, value in element.items():
            # only getting vEdge devices from vManage
            if element["device-type"] == "vedge":
                # Populate list - CSV row for a vEdge device
                csv_row.append(value)
                # Save device ID
                device_id = element["deviceId"]
                found = True
        if found:
            # if specific devices requested - look up for host-name or site-id
            if "type" in devices_to_query:
                # Look up in any hostnames in query matches a vEdge hostname received from vManage
                if devices_to_query["type"] == "host-name" and \
                        next((s for s in devices_to_query["device_list"] if s in element["host-name"]), None):
                    device_ids.append(device_id)
                elif devices_to_query["type"] == "site-id" and element["site-id"] in devices_to_query["device_list"]:
                    device_ids.append(device_id)
            else:
                device_ids.append(device_id)
            # Add next row to a CSV data
            csv_data.append(csv_row)
    # Dump data
    print_to_csv_file(
        csv_headers, csv_data, get_file_path(customer, "", "vedges", "raw_output") + ".csv")
    return device_ids
import os
import re
def print_unique_issues_summary(issues_directory, platform, arch, build_config):
    """Merge issues-summary-*-PartitionN.txt files from each partition
    and write a markdown summary, uploading it as a pipeline summary.

    Args:
        issues_directory (string): Directory tree containing per-partition summaries
        platform (string): Platform name for the summary title (may be falsy)
        arch (string): Architecture for the summary title (may be falsy)
        build_config (string): Build config for the summary title (may be falsy)

    Returns:
        Number of unique issues found
    """
    issues_by_assert = defaultdict(list)
    remaining_issues = defaultdict(int)
    for file_path, dirs, files in walk(issues_directory, topdown=True):
        for file_name in files:
            if not file_name.startswith("issues-summary-") or "Partition" not in file_name:
                continue
            issues_summary_file = os.path.join(file_path, file_name)
            # The partition name is the innermost directory holding the file.
            partition_name = file_path.split(os.sep)[-1]
            unique_issues = []
            with open(issues_summary_file, 'r') as sf:
                contents = sf.read()
                unique_issues = list(filter(None, re.split(unique_issue_dir_pattern, contents)))

            # Bucket each issue by its assertion message; anything without a
            # recognizable assertion goes to the uncategorized counter.
            for unique_issue in unique_issues:
                assertion_error = extract_assertion_error(unique_issue)
                if assertion_error:
                    issues_by_assert[assertion_error].append((partition_name, unique_issue))
                else:
                    remaining_issues[unique_issue] += 1

    md_name = "Summary of Antigen run"
    if platform or arch or build_config:
        md_name += " on"
    if platform:
        md_name += " " + platform
    if arch:
        md_name += " " + arch
    if build_config:
        md_name += " " + build_config
    md_name += ".md"
    md_path = os.path.join(issues_directory, md_name)
    with open(md_path, "w") as f:
        f.write("# General info about run\n")
        if platform:
            f.write("* Platform: {}\n".format(platform))
        if arch:
            f.write("* Architecture: {}\n".format(arch))
        if build_config:
            f.write("* Build config: {}\n".format(build_config))
        f.write("* Number of unique examples found: {}\n".format(len(issues_by_assert) + len(remaining_issues)))
        f.write("\n")

        if len(issues_by_assert) > 0:
            f.write("# {} distinct assertion errors seen\n".format(len(issues_by_assert)))
            # Most frequent assertion first; show one example occurrence each.
            for message, issues in sorted(issues_by_assert.items(), key=lambda p: len(p[1]), reverse=True):
                f.write("## ({} occurences) {}\n".format(len(issues), message))
                (partition, issue) = issues[0]
                f.write("Example occurence from {}:\n".format(partition))
                f.write("```scala\n")
                f.write(issue.strip() + "\n")
                f.write("```\n\n")

        if len(remaining_issues) > 0:
            # BUG FIX: the original passed the count as a second argument to
            # f.write() (TypeError); it must go through str.format().
            f.write("# {} uncategorized issues found\n".format(len(remaining_issues)))
            # Turned off since the output does not seem particularly useful
            # for issue, occurences in sorted(remaining_issues.items(), key=lambda p: p[1], reverse=True):
            #     f.write("## {} occurences\n".format(occurences))
            #     f.write("```scala\n")
            #     f.write(issue.strip() + "\n")
            #     f.write("```\n\n")

    print("##vso[task.uploadsummary]{}".format(md_path))

    with open(md_path, "r") as f:
        print(f.read())

    return len(issues_by_assert) + len(remaining_issues)
def correct_errors(page, labels, bboxes, model):
    """Error correction.

    parameters:

    page - 2d array, each row is a feature vector to be classified
    labels - the output classification label for each feature vector
    bboxes - 2d array, each row gives the 4 bounding box coords of the character
    model - dictionary, stores the output of the training stage
    """
    # A 1-D label array means k=1: no alternative labels to correct with.
    if len(labels.shape) == 1:
        print('Error correction skipped (k=1)')
        return labels

    if output:
        print('Processing error correction...')

    # Set lookup for validating candidate words.
    wordset = set(model['word_list'])

    total_length = labels.shape[0]
    word_start = 0
    corrected = []
    # Characters are grouped into words wherever the horizontal gap between
    # consecutive bounding boxes exceeds 6 px, or at the end of the page.
    # Short-circuiting keeps bboxes[idx+1] from being read on the last index.
    for idx in range(bboxes.shape[0]):
        if idx == total_length - 1 or abs(bboxes[idx + 1][0] - bboxes[idx][2]) > 6:
            correct(labels, word_start, idx, wordset, corrected)
            word_start = idx + 1
        if output:
            print('\r', word_start, '/', total_length, end='')
    if output:
        print()

    return np.array(corrected)
def sign(x):
    """Return the sign of ``x``: 0 for zero, otherwise ``x/abs(x)`` (i.e. ±1)."""
    return 0 if x == 0 else x / abs(x)
import urllib
import json
def get_sources(category):
    """
    Fetch news sources of the given category from the API.

    :param category: source category interpolated into ``base_url``
    :return: processed source results, or None when the response has no sources
    """
    # BUG FIX: ``import urllib`` alone does not guarantee the ``request``
    # submodule is loaded; import it explicitly so urlopen cannot fail
    # with AttributeError.
    import urllib.request

    sources_url = base_url.format(category, api_key)

    with urllib.request.urlopen(sources_url) as url:
        sources_data = url.read()
        sources_response = json.loads(sources_data)

        sources_results = None

        if sources_response['sources']:
            sources_results_list = sources_response['sources']
            sources_results = process_results(sources_results_list)

    return sources_results
def split(geometry, (dx_max, dy_max), (rx, ry), (bx0, by0, bx1, by1)):
    """ Cut geometry to smaller blocks.

    NOTE: uses Python 2 tuple-parameter syntax and xrange; will not parse
    under Python 3.  Yields (x0, y0, x1, y1) envelopes of the clipped tiles.
    """
    #pylint: disable=invalid-name
    def _get_sizes(v0, v1, dv_max, rv, bv0, bv1):
        # Expand [v0, v1] so its length is a multiple of the rounding step rv
        # (clipped to the outer bounds [bv0, bv1]), then split it into nv
        # equal tiles of size dv <= dv_max.
        if rv > 0:
            dv_max *= rv
        if rv > 0:
            vr0 = vr1 = 0.5*(rv*ceil((v1-v0)/rv) - (v1-v0))
            if bv1 < (v1 + vr1):
                v1 = bv1
                vr0 = rv*ceil((v1-v0)/rv) - (v1-v0)
                vr1 = 0
            if bv0 > (v0 - vr0):
                v0 = bv0
                vr0 = 0
                vr1 = rv*ceil((v1-v0)/rv) - (v1-v0)
            v0 -= vr0
            v1 += vr1
        nv = int(ceil((v1 - v0) / dv_max))
        dv = (v1 - v0) / nv
        return v0, v1, dv, nv
    def _round(v0, v1, v_ref, rv):
        # Snap [v0, v1] outward onto the grid anchored at v_ref with step rv.
        if rv > 0:
            v0 = v_ref + rv*floor((v0 - v_ref)/rv)
            v1 = v_ref + rv*ceil((v1 - v_ref)/rv)
        return v0, v1
    x0, x1, y0, y1 = geometry.GetEnvelope()
    x0, x1, dx, nx = _get_sizes(x0, x1, dx_max, rx, bx0, bx1)
    y0, y1, dy, ny = _get_sizes(y0, y1, dy_max, ry, by0, by1)
    for ix in xrange(nx):
        xx0 = x0 + ix*dx
        xx1 = xx0 + dx
        for iy in xrange(ny):
            yy0 = y0 + iy*dy
            yy1 = yy0 + dy
            # Clip the geometry to this tile; skip tiles with no overlap.
            gg = geometry.Intersection(ig.getRectangle((xx0, yy0, xx1, yy1)))
            if gg.IsEmpty():
                continue
            xxx0, xxx1, yyy0, yyy1 = gg.GetEnvelope()
            xxx0, xxx1 = _round(xxx0, xxx1, xx0, rx)
            yyy0, yyy1 = _round(yyy0, yyy1, yy0, ry)
            yield xxx0, yyy0, xxx1, yyy1
import asyncio
async def handleJoin(cls:"Client", payload:str) -> bool:
    """
    handles all JOIN events

    may calls the following events for custom code:
    - onMemberJoin(Channel, User)
    """
    # User is constructed from the raw IRC payload; it also records which
    # channel the JOIN came from (_generated_via_channel).
    JoinUser = User(payload)
    # ignore self, but use it to update the clients channels
    if JoinUser.name.lower() == cls.nickname.lower():
        FreshChannel:Channel = Channel(None)
        FreshChannel._name = JoinUser._generated_via_channel
        # add new channel to clients known channels
        Log.debug(f"Client joined a channel, adding {JoinUser._generated_via_channel}")
        cls.channels[FreshChannel.name] = FreshChannel
        return True
    # let's see if we got this user already
    KnownUser:User = cls.users.get(JoinUser.name, None)
    if not KnownUser:
        # we never saw this user, add it
        cls.users[JoinUser.name] = JoinUser
        KnownUser = cls.users[JoinUser.name]
    Chan:Channel = cls.channels.get(JoinUser._generated_via_channel, None)
    if not Chan:
        # that should never happen... but if it does... well fuck
        Log.error(f"Could not find channel for {JoinUser._generated_via_channel}")
        return True
    # add User to chatters dict of channel
    Chan.chatters[KnownUser.name] = KnownUser
    # add add channel id to Users known channels
    KnownUser.found_in.add(Chan.name)
    # fire the user hook as a task so this handler is not blocked by it
    Log.debug(f"Client launching: Client.onMemberJoin: {str(vars(Chan))} {str(vars(KnownUser))}")
    asyncio.ensure_future(cls.onMemberJoin(Chan, KnownUser))
    return True
from typing import Optional
import requests
import json
def execute_web_hook(hook_url: Optional[str], status_id: int) -> bool:
    """Post a notification for a tweet to the Discord webhook.

    Args:
        hook_url (Optional[str]): webhook URL
        status_id (int): tweet id

    Returns:
        bool: True when the webhook accepted the message, False otherwise.
    """
    url = 'https://twitter.com/granbluefantasy/status/{id}'.format(
        id=status_id
    )
    payload = json.dumps(
        {"content": "お知らせですよ~。\n {url}".format(url=url)}
    )
    response = requests.post(
        hook_url,
        headers={'Content-Type': 'application/json'},
        data=payload
    )
    # Discord answers 204 No Content on success.
    if response.status_code != 204:
        print('Failed webhook [status:{status}, response:{response}].'.format(
            status=response.status_code,
            response=response.text))
        return False
    return True
from datetime import datetime
def handler_callback(callback, user):
    """
    A method for handling calendar callbacks.

    :param user: user object carrying ``curr_date``, ``min_date``, ``max_date``
    :param callback: callback data from telebot.types.CallbackQuery
    :return: datetime object if a date was picked, -1 if the picked date is
        out of range, else None (navigation or no-op callbacks)
    """
    import calendar  # local: only needed for month-length clamping below

    def _shift_month(date, delta):
        # Move ``date`` by ``delta`` months, rolling the year over and
        # clamping the day to the target month's length (e.g. Jan 31 -> Feb 28).
        month_index = date.month - 1 + delta
        year = date.year + month_index // 12
        month = month_index % 12 + 1
        day = min(date.day, calendar.monthrange(year, month)[1])
        return date.replace(year=year, month=month, day=day)

    if callback == "prev" and user.curr_date.replace(day=1) >= user.min_date.replace(
        day=1
    ):
        # BUG FIX: the original ``replace(month=month - 1)`` raised ValueError
        # in January (month 0); _shift_month handles the year rollover.
        user.curr_date = _shift_month(user.curr_date, -1)
        return None
    if callback == "next" and user.curr_date.replace(day=1) <= user.max_date.replace(
        day=1
    ):
        # BUG FIX: same rollover problem for December (month 13).
        user.curr_date = _shift_month(user.curr_date, 1)
        return None
    if callback != "none":
        # NOTE(review): a "prev"/"next" callback whose range check failed falls
        # through to strptime and raises ValueError — preserved from the
        # original; confirm callers never hit this path.
        entered_date = datetime.strptime(callback, "%Y,%m,%d")
        if user.min_date <= entered_date <= user.max_date:
            return entered_date
        return -1
import torch
def get_weight(df):
    """Compute per-class weights to counter the imbalanced-class problem.

    The weight of class c is the size of the largest class divided by the
    size of class c, so under-represented classes receive larger weights.
    """
    counts = df["sentiment"].value_counts(sort=False).values
    weights = (counts.max() / counts).astype(np.float32)
    return torch.tensor(weights)
from django.contrib.contenttypes.models import ContentType
def bestellungen(request):
    """ Overview of completed orders

    The user's purchases are looked up and returned in a dict grouped
    by category, with the following categories:
    - upcoming events ('teilnahmen')
    - electronic media ('digital')
    - uncategorized ('rest')
    (later also ordered books)
    """
    nutzer = request.user.my_profile
    liste_menue = erstelle_liste_menue(request.user)
    liste_kaeufe = list(Kauf.objects.filter(nutzer=nutzer))
    # hack: determine which pks still exist, otherwise there are errors when
    # the objects were deleted; those get filtered out in the while loop below
    liste_models = set([k.model_ausgeben() for k in liste_kaeufe])
    pks_zu_model = dict([
        (name, ContentType.objects.get(model=name).model_class(
        ).objects.all().values_list('pk', flat=True))
        for name in liste_models
    ])
    # distribute purchases by category:
    kaeufe = {'teilnahmen': [], 'digital': [], 'rest': []}
    while liste_kaeufe:
        kauf = liste_kaeufe.pop()
        if int(kauf.obj_pk_ausgeben()) not in pks_zu_model[kauf.model_ausgeben()]:
            continue
        if (kauf.model_ausgeben() == 'veranstaltung' and
                kauf.art_ausgeben() in ['teilnahme', 'livestream'] and
                kauf.objekt_ausgeben().ist_zukunft()):
            kaeufe['teilnahmen'].append(kauf)
        elif kauf.art_ausgeben() in ['pdf', 'epub', 'mobi', 'aufzeichnung']:
            kaeufe['digital'].append(kauf)
        # would be nice to include this, but the download button would have to
        # be displayed more flexibly for it:
        # elif kauf.art_ausgeben() == 'livestream':  # only past ones remain
        #     kaeufe['digital'].append(kauf)
        else:
            kaeufe['rest'].append(kauf)
    return render(request,
                  'Produkte/bestellungen.html',
                  {'kaeufe': kaeufe, 'liste_menue': liste_menue})
def comp_indexes_fcn(site, comp_name, n_inds):
    """
    Draw random component indexes to associate with new emissions.

    :param site: a GeneralClassesFunctions.simulation_classes.Site object
    :param comp_name: name of a component contained in Site.comp_dict
    :param n_inds: Integer number of indexes to generate
    :return: array of ``n_inds`` indexes within the component's index range
    """
    bounds = site.comp_dict[comp_name]['comp_indexes']
    return np.random.randint(bounds[0], bounds[1], n_inds)
def update_cache_and_get_specs():
    """
    Get all concrete specs for build caches available on configured mirrors.

    Initialization of internal cache data structures is done as lazily as
    possible, so this method will also attempt to initialize and update the
    local index cache (essentially a no-op if it has been done already and
    nothing has changed on the configured mirrors.)

    Throws:
        FetchCacheError
    """
    # Refresh the module-level index before querying it.
    binary_index.update()
    return binary_index.get_all_built_specs()
def sodium_unpad(s, blocksize):
    """
    Remove ISO/IEC 7816-4 padding from the input byte array ``s``

    :param s: input bytes string
    :type s: bytes
    :param blocksize: block size the data was padded to
    :type blocksize: int
    :return: unpadded string
    :rtype: bytes
    :raises exc.TypeError: if ``s`` is not bytes or ``blocksize`` is not int
    :raises exc.CryptoError: if ``s`` is not correctly padded
    """
    ensure(isinstance(s, bytes), raising=exc.TypeError)
    ensure(isinstance(blocksize, int), raising=exc.TypeError)
    s_len = len(s)
    # out-parameter: libsodium writes the unpadded length into u_len[0]
    u_len = ffi.new("size_t []", 1)
    rc = lib.sodium_unpad(u_len, s, s_len, blocksize)
    if rc != 0:
        raise exc.CryptoError("Unpadding failure")
    return s[: u_len[0]]
from datetime import datetime
def find_lag(t,x,t_ref,x_ref):
    """
    Report lag in time between x(t) and a reference x_ref(t_ref).

    If times are already numeric, they are left as is and the lag is
    reported in the same units.
    If times are not numeric, they are converted to date nums via
    utils.to_dnum.  If they are np.datetime64, lag is reported as timedelta64.
    If the inputs are datetimes, lag is reported as timedelta.
    Otherwise, lag is kept as a floating point number
    """
    # Import the module locally so datetime.datetime / datetime.timedelta
    # below resolve even when the file only has `from datetime import datetime`.
    import datetime

    t_orig=t
    # convert times if they aren't numeric:
    t=to_dnum(t)
    t_ref=to_dnum(t_ref)

    dt=np.median(np.diff(t))
    dt_ref=np.median(np.diff(t_ref))

    # goal is to interpolate the finer time scale onto the coarser.
    # simplify code below by making sure that the reference is the
    # finer time scale
    if dt<dt_ref:
        # BUG FIX: return directly — the recursive call already converts the
        # lag to timedelta/timedelta64; falling through would convert twice.
        return -find_lag(t_ref,x_ref,t,x)

    def f(lag):
        interp_x_ref=interp_near(t-lag,t_ref,x_ref,dt)
        valid=np.isfinite(interp_x_ref * x)
        R=np.corrcoef( interp_x_ref[valid],x[valid])[0,1]
        # negated because fmin minimizes and we want maximum correlation
        return -R
    # dt_ref here gives fmin a good guess on initial stepsize
    lag_opt = optimize.fmin(f,dt_ref,disp=0)[0]

    if isinstance(t_orig[0],datetime.datetime):
        # BUG FIX: original called datetime.timedelta(days, lag_opt) with an
        # undefined name ``days``; the lag is in fractional days.
        lag_opt = datetime.timedelta(days=lag_opt)
    elif isinstance(t_orig[0],np.datetime64):
        # goofy, but assume that lags are adequately represented
        # by 64 bits of microseconds. That means the range of
        # lags is +-1us to 2.9e5 years. Good enough, unless you're a
        # a physicist or geologist.
        lag_opt = np.timedelta64( int(lag_opt*86400*1e6), 'us')
    return lag_opt
def classname(obj: _rinterface_capi.SupportsSEXP) -> str:
    """Name of the R class."""
    # Look up the 'classname' slot with R's `$` operator.
    res = dollar(obj, 'classname')
    if res is not rpy2.robjects.NULL:
        # A non-NULL result is expected to be a length-1 R vector; unwrap it.
        assert len(res) == 1
        res = res[0]
    return res
import inspect
def get_ofp_cls(ofp_version, name):
    """Return the parser class called ``name`` for a given OF version, or None."""
    (_consts_mod, parser_mod) = get_ofp_module(ofp_version)
    classes = dict(inspect.getmembers(parser_mod, inspect.isclass))
    return classes.get(name, None)
import tempfile
def split_file(rows_per_file: int, inpath: str, dir):
    """Split the file at ``inpath`` into multiple named tempfiles (delete=False),
    each holding at most ``rows_per_file`` rows.

    All split files are placed in ``dir``; the intent is to use this with
    ``dir`` as a TemporaryDirectory.

    Return the paths of the created subfiles.
    """
    paths = []
    with open(inpath, 'r') as src:
        while True:
            chunk = list(islice(src, rows_per_file))
            if not chunk:
                break
            with tempfile.NamedTemporaryFile(
                    mode='w', dir=dir, delete=False) as out:
                out.writelines(chunk)
            paths.append(out.name)
    return paths
def HHCF(r, sigma, xi, alpha):
    """
    Height-height correlation function model: 2*sigma^2*(1 - exp(-(r/xi)^(2*alpha))).

    Suitable for fitting when data is present both above and below the
    correlation length.

    Inputs:
        r: Numpy array of distances (x coordinate of an HHCF plot).
        sigma: float, related to the RMS roughness of the sample.
        xi: float, correlation length of the sample.
        alpha: float, Hurst parameter.
    Outputs:
        Height-height correlation values for the given distances/parameters.
    """
    decay = np.exp(-((r / xi) ** (2 * alpha)))
    return 2 * sigma ** 2 * (1 - decay)
def process_reaction_with_product_maps_atoms(
    rxn, skip_if_not_in_precursors=False
):
    """
    Remove atom-mapping, move reagents to reactants and canonicalize reaction.

    If fragment group information is given, keep the groups together using
    the character defined with fragment_bond.

    Args:
        rxn: Reaction SMILES
        skip_if_not_in_precursors: accept unmapped atoms in the product (default: False)
    Returns: (joined_precursors>>joined_products reaction SMILES,
        per-product-atom indexes into the precursor atom list; -1 for
        unmapped/skipped atoms)
        NOTE(review): on a canonicalization failure only "" (a single string)
        is returned instead of the usual 2-tuple — callers must handle both.
    """
    reactants, reagents, products = rxn.split(">")
    try:
        precursors = [
            canonicalize_and_atom_map(r)
            for r in reactants.split(".")
        ]
        # reagents are folded into the precursor list
        if len(reagents) > 0:
            precursors += [
                canonicalize_and_atom_map(
                    r
                ) for r in reagents.split(".")
            ]
        products = [
            canonicalize_and_atom_map(p)
            for p in products.split(".")
        ]
    except NotCanonicalizableSmilesException:
        return ""
    # sort by canonical SMILES so the joined strings are deterministic
    sorted_precursors = sorted(precursors, key=lambda x: x[0])
    sorted_products = sorted(products, key=lambda x: x[0])
    joined_precursors = ".".join([p[0] for p in sorted_precursors])
    joined_products = ".".join([p[0] for p in sorted_products])
    precursors_atom_maps = [
        i for p in sorted_precursors for i in p[1]
    ]  # could contain duplicate entries
    product_atom_maps = [
        i for p in sorted_products for i in p[1]
    ]  # could contain duplicate entries
    joined_rxn = f"{joined_precursors}>>{joined_products}"
    products_maps = []
    warnings = []
    # Map every product atom back to the index of a precursor atom carrying
    # the same atom-map number; -1 marks unmapped (or skipped) atoms.
    for p_map in product_atom_maps:
        if skip_if_not_in_precursors and p_map not in precursors_atom_maps:
            products_maps.append(-1)
        elif int(p_map) == 0:
            products_maps.append(-1)
        else:
            corresponding_precursors_atom = precursors_atom_maps.index(p_map)
            if (
                corresponding_precursors_atom in products_maps
            ):  # handle equivalent atoms
                found_alternative = False
                for atom_idx, precursor_map in enumerate(precursors_atom_maps):
                    if (
                        precursor_map == p_map
                    ) and atom_idx not in products_maps:
                        products_maps.append(atom_idx)
                        found_alternative = True
                        break
                if not found_alternative:
                    warnings.append(
                        f"Two product atoms mapped to the same precursor atom: {rxn}"
                    )
                    products_maps.append(corresponding_precursors_atom)
            else:
                products_maps.append(corresponding_precursors_atom)
    # de-duplicate warnings before logging
    for w in list(set(warnings)):
        LOGGER.warning(w)
    return joined_rxn, products_maps
def echo_handler(completed_proc):
    """Identity handler: immediately return ``completed_proc`` unchanged."""
    return completed_proc
import time
import tqdm
from pathlib import Path
def render_path(
        render_poses, hwf, K, chunk, render_kwargs, gt_imgs=None, savedir=None, render_factor=0,
        fixed_viewdir=None):
    """
    Render a batch of full images.

    fixed_viewdir:
        If a 4 x 4 matrix, use this view direction for all frames.
        If `None`, use actual view direction (camera position).

    Returns (rgbs, disps): stacked numpy arrays with one entry per pose.
    If ``savedir`` is given, each frame is also written as an RGBA PNG.
    """
    H, W, focal = hwf

    if render_factor!=0:
        # Render downsampled for speed
        H = H//render_factor
        W = W//render_factor
        focal = focal/render_factor

    rgbs = []
    disps = []

    t = time.time()
    for i, c2w in enumerate(tqdm(render_poses)):
        print(i, time.time() - t)
        t = time.time()
        if fixed_viewdir is not None:
            # decouple view direction (fixed) from camera pose (per frame)
            rgb, disp, acc, _ = render(
                H, W, K, chunk=chunk, c2w=fixed_viewdir, c2w_staticcam=c2w[:3,:4], **render_kwargs)
        else:
            rgb, disp, acc, _ = render(
                H, W, K, chunk=chunk, c2w=c2w[:3,:4], **render_kwargs)
        rgb.clamp_(0.0, 1.0)
        rgbs.append(rgb.cpu().numpy())
        disps.append(disp.cpu().numpy())
        if i==0:
            print(rgb.shape, disp.shape)

        # the triple-quoted string below is intentionally unexecuted debug code
        """
        if gt_imgs is not None and render_factor==0:
            p = -10. * np.log10(np.mean(np.square(rgb.cpu().numpy() - gt_imgs[i])))
            print(p)
        """

        if savedir is not None:
            acc.clamp_(0.0, 1.0)
            # append the accumulated alpha as a 4th channel before saving
            image = np.concatenate((rgbs[-1], acc.cpu().numpy()[..., None]), axis=-1)
            imageio.imwrite(Path(savedir) / f"{i:03}.png", image)

    rgbs = np.stack(rgbs, 0)
    disps = np.stack(disps, 0)

    return rgbs, disps
def _check_pr(pr, cfg):
"""make sure a PR is ok to automerge"""
if any(label.name == "automerge" for label in pr.get_labels()):
return True, None
# only allowed users
if pr.user.login not in ALLOWED_USERS:
return False, "user %s cannot automerge" % pr.user.login
# only if [bot-automerge] is in the pr title
if '[bot-automerge]' not in pr.title:
return False, "PR does not have the '[bot-automerge]' slug in the title"
# can we automerge in this feedstock?
if not _automerge_me(cfg):
return False, "automated bot merges are turned off for this feedstock"
return True, None | 2ed229c393dffbaeec6822bd4e2492b6339c8fca | 3,635,376 |
from typing import Any
import joblib
def read_joblib(
    bucket: str,
    key: str,
) -> Any:
    """Read a joblib model from a given s3 bucket and key.

    Parameters
    ----------
    bucket : str
        The S3 bucket to load from.
    key : str
        The object key within the s3 bucket.

    Returns
    -------
    np.array
        A joblib model.
    """
    payload = _read_object(bucket=bucket, key=key)
    model = joblib.load(payload)
    return model
import logging
def cleandataseth5(*args, **kwargs) -> None:
    """
    Read the single file (with whole dataset), remove NaN and save 1 file per class.

    Reads ``datasets.h5``, drops rows with NaN in the key sensor columns,
    and writes the cleaned per-class datasets to ``datasets_clean.h5``.
    """
    well_vars = [
        "P-PDG",
        "P-TPT",
        "T-TPT",
        "P-MON-CKP",
        "T-JUS-CKP",
        "P-JUS-CKGL",
        "T-JUS-CKGL",
        "QGL",
    ]
    columns = ["timestamp"] + well_vars + ["class"]
    logger = logging.getLogger(f"clean")
    formatter = logging.Formatter(
        "%(asctime)s %(name)-12s %(levelname)-8s %(lineno)-5d %(funcName)-10s %(module)-10s %(message)s"
    )
    # file handler captures DEBUG, console handler only INFO and above
    fh = logging.FileHandler(f"experiments_clean.log")
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch.setFormatter(formatter)
    logger.addHandler(fh)
    logger.addHandler(ch)
    logger.setLevel(logging.INFO)
    # columns 1-5 (P-PDG .. T-JUS-CKP) are the variables that must be NaN-free
    usecols = [1, 2, 3, 4, 5]
    good = [columns[i] for i, _ in enumerate(columns) if i in usecols]
    with h5py.File("datasets.h5", "r") as f:
        logger.debug("reading input file")
        with h5py.File("datasets_clean.h5", "w") as fc:
            logger.debug("created output file")
            for c in range(0, 9):
                grp = fc.create_group(f"/{c}")
                logger.debug(f"Processing class {c}")
                k = f"/{c}"
                for s in f[k]:
                    # only well datasets ("W...") are cleaned; others skipped
                    if s[0] != "W":
                        continue
                    logger.debug(f"{c} {s}")
                    data = f[k][s][()]
                    frame = pd.DataFrame(data=data, columns=columns)
                    frame.dropna(inplace=True, how="any", subset=good, axis=0)
                    array = frame.to_numpy()
                    # sanity check: the selected columns should now be NaN-free
                    n = check_nan(array[:, [1, 2, 3, 4, 5]], logger)
                    if n > 0:
                        logger.info(f"{c} {s} dataset contains NaN")
                    grp.create_dataset(f"{s}", data=array, dtype=np.float64)
    return None
import random
def random_choice(choices):
    """Pick one entry at random from a list of (choice, probability) pairs.

    Entries are walked in order of increasing probability and the first one
    whose cumulative probability covers a uniform draw is returned; None is
    returned when no entry covers the draw.
    """
    ordered = sorted(choices, key=lambda pair: pair[1])
    draw = random.random()
    cumulative = 0
    for option, probability in ordered:
        cumulative += probability
        if draw <= cumulative:
            return option
def set_rdmol_positions_(mol, pos):
    """
    Attach ``pos`` to ``mol`` as an additional conformer (in-place) and return it.

    Args:
        mol: An `rdkit.Chem.rdchem.Mol` object; mutated by AddConformer.
        pos: (N_atoms, 3) array of coordinates, one row per atom.
    Returns:
        The same ``mol`` with the new conformer appended.
    """
    assert mol.GetNumAtoms() == pos.shape[0]
    conf = Chem.Conformer(mol.GetNumAtoms())
    for i in range(pos.shape[0]):
        conf.SetAtomPosition(i, pos[i].tolist())
    # assignId=True gives the new conformer a fresh id instead of replacing
    mol.AddConformer(conf, assignId=True)
    # for i in range(pos.shape[0]):
    #     mol.GetConformer(0).SetAtomPosition(i, pos[i].tolist())
    return mol
import torch
def convert_label_to_color(label, color_map):
    """Convert an integer label map of shape (n, h, w) to an RGB image
    of shape (n, 3, h, w) by indexing into ``color_map`` (num_classes, 3).
    """
    batch, height, width = label.shape
    flat = label.reshape(-1)
    colored = color_map.index_select(0, flat).view(batch, height, width, 3)
    return colored.permute(0, 3, 1, 2)
def is_collection(name):
    """Return True if *name* is a collection-valued user property.

    compare with https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/user
    """
    collection_properties = {
        'assignedLicenses', 'assignedPlans', 'businessPhones', 'imAddresses',
        'interests', 'provisionedPlans', 'proxyAddresses', 'responsibilities',
        'schools', 'skills',
    }
    return name in collection_properties
def unpack_list(string_: str):
    """
    Recursively strip, split, translate and unpack provided string to the list.

    :param string_: str -- string to be unpacked into the list
    :return: list
    """
    list_ = []
    # drop the single leading '[' and all trailing ']' before splitting
    string2 = string_.replace("[", "", 1).strip("]")
    # "], " separates nested-list elements; ", " separates flat elements
    elements = string2.split("], ") if "[" in string2 else string2.split(", ")
    for i in elements:
        if i.startswith("["):
            # nested list -> recurse
            list_.append(unpack_list(i))
        elif i.startswith("{"):
            # nested dict -> delegate to unpack_dict
            list_.append(unpack_dict(i))
        else:
            # scalar -> convert via the type callable returned by get_type
            list_.append(get_type(i)(i))
    return list_
from typing import Any
def login_access_token(
    response: Response,
    db: Session = Depends(deps.get_db),
    form_data: OAuth2PasswordRequestForm = Depends(),
) -> Any:
    """
    OAuth2 compatible token login, get an access token for future requests.

    Also stores the token in an httponly ``session`` cookie on the response.
    Raises 400 for bad credentials or an inactive user.
    """
    user = crud.user.authenticate(db, email=form_data.username, password=form_data.password)
    if not user:
        raise HTTPException(status_code=400, detail="Incorrect email or password")
    elif not crud.user.is_active(user):
        raise HTTPException(status_code=400, detail="Inactive user")
    access_token_expires = timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)
    token = create_access_token(user.id, expires_delta=access_token_expires)
    # httponly keeps the cookie out of reach of client-side scripts
    response.set_cookie("session", token["access_token"], httponly=True)
    logger.debug(f"access_token {token}")
    return token
def in_nested_list(my_list, item):
    """
    Determines if an item is in my_list, even if nested in a lower-level list.
    """
    if item in my_list:
        return True
    # recurse into list elements only
    return any(
        isinstance(element, list) and in_nested_list(element, item)
        for element in my_list
    )
def from_numpy_array(nparr, framerate):
    """
    Returns an AudioSegment created from the given numpy array.

    The numpy array must have shape = (num_samples, num_channels).

    :param nparr: The numpy array to create an AudioSegment from.
    :param framerate: The sample rate (Hz) of the segment to generate.
    :returns: An AudioSegment created from the given array.
    :raises ValueError: if the dtype is not 8/16/32 bit or the array is
        more than two-dimensional.
    """
    # Check args
    if nparr.dtype.itemsize not in (1, 2, 4):
        raise ValueError("Numpy Array must contain 8, 16, or 32 bit values.")

    # Determine nchannels
    if len(nparr.shape) == 1:
        nchannels = 1
    elif len(nparr.shape) == 2:
        nchannels = nparr.shape[1]
    else:
        raise ValueError("Numpy Array must be one or two dimensional. Shape must be: (num_samples, num_channels), but is {}.".format(nparr.shape))

    # Fix shape if single dimensional
    nparr = np.reshape(nparr, (-1, nchannels))

    # Create an array of mono audio segments, one per channel, then let
    # pydub interleave them back into a single multi-channel segment.
    monos = []
    for i in range(nchannels):
        m = nparr[:, i]
        dubseg = pydub.AudioSegment(m.tobytes(), frame_rate=framerate, sample_width=nparr.dtype.itemsize, channels=1)
        monos.append(dubseg)

    return AudioSegment(pydub.AudioSegment.from_mono_audiosegments(*monos), "")
import sys
def get_exc_info(exception):
    """Build an exc_info triple (type, value, traceback) from an exception instance.

    The exception is raised and immediately caught so a live traceback is
    attached before ``sys.exc_info()`` captures it.
    """
    try:
        raise exception
    except BaseException:
        return sys.exc_info()
import functools
def visits_stmt(node_cls):
    """Decorator that registers a function as a visitor for ``node_cls``.

    The decorated function is stored in the module-level ``stmt_visitors``
    registry keyed by ``node_cls``, and wrapped to assert the node type.

    :param node_cls: subclass of :class:`jinja2.nodes.Stmt`
    """
    def decorator(func):
        # side effect: register the raw visitor in the dispatch table
        stmt_visitors[node_cls] = func
        @functools.wraps(func)
        def wrapped_func(node, macroses=None, config=default_config, child_blocks=None):
            assert isinstance(node, node_cls)
            return func(node, macroses, config, child_blocks)
        return wrapped_func
    return decorator
def is_prebuffer() -> bool:
    """
    Return whether audio is in pre-buffer (threadsafe).

    Returns
    -------
    is_prebuffer : bool
        Whether audio is in pre-buffer.
    """
    # REAPER API returns an int; coerce to a plain bool
    is_prebuffer = bool(RPR.Audio_IsPreBuffer())  # type:ignore
    return is_prebuffer
def ad_reset_user_pwd_by_mail(user_mail_addr, new_password):
    """
    Reset a user's AD password, resolving the user by mail address.

    :param user_mail_addr: mail address used to look up the user's DN
    :param new_password: new password to set
    :return: result of the modify_password call
    """
    conn = __ad_connect()
    user_dn = ad_get_user_dn_by_mail(user_mail_addr)
    result = conn.extend.microsoft.modify_password(user="%s" % user_dn, new_password="%s" % new_password)
    # release the LDAP connection before returning
    conn.unbind()
    return result
def get_data(name: str, override: bool=True) -> str:
    """
    Return the contents of the SVG file matching ``name`` (".svg" is appended
    when missing).

    When ``override`` is False, a missing file raises the underlying OSError;
    otherwise an empty string is returned on failure.
    """
    if ".svg" not in name:
        name += ".svg"
    path = ICON_DIR / name
    if not override:
        # propagate the OSError to the caller, as documented
        with open(path, "r") as fh:
            return fh.read()
    try:
        # BUG FIX: use context managers so the file handles are always closed
        # (the original leaked handles via ``open(...).read()``).
        with open(path, "r") as fh:
            return fh.read()
    except OSError:
        return ""
def array_read(array, i):
    """
    This OP is used to read data at the specified position from the input array
    :ref:`api_fluid_LoDTensorArray` . ``array`` is the input array and ``i``
    is the specified read position. This OP is often used together with
    :ref:`api_fluid_layers_array_write` OP.

    Case 1:
    ::
        Input:
            The shape of first three tensors are [1], and that of the last one is [1,2]:
                array = ([0.6], [0.1], [0.3], [0.4, 0.2])
            And:
                i = [3]

        Output:
            output = [0.4, 0.2]

    Args:
        array (LoDTensorArray): The input LoDTensorArray.
        i (Variable): 1-D Tensor, whose shape is [1] and dtype is int64. It represents the
            specified read position of ``array``.

    Returns:
        Variable: The LoDTensor or Tensor that is read at the specified position of ``array``.

    Examples:
        .. code-block:: python

            # First we're going to create a LoDTensorArray, then we're going to write the Tensor into
            # the specified position, and finally we're going to read the Tensor at that position.
            import paddle.fluid as fluid
            arr = fluid.layers.create_array(dtype='float32')
            tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5)
            i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
            # tmp is the Tensor with shape [3,2], and if we write it into the position with subscript 10
            # of the empty-array: arr, then the length of arr becomes 11.
            arr = fluid.layers.array_write(tmp, i, array=arr)
            # Read the data of the position with subscript 10.
            item = fluid.layers.array_read(arr, i)

            # You can print out the data via executor.
            input = fluid.layers.Print(item, message="The LoDTensor of the i-th position:")
            main_program = fluid.default_main_program()
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(main_program)

            # The printed result is:

            # 1569588169	The LoDTensor of the i-th position:	The place is:CPUPlace
            # Tensor[array_read_0.tmp_0]
            #    shape: [3,2,]
            #    dtype: l
            #    data: 5,5,5,5,5,5,

            # the output is 2-D Tensor with shape [3,2].
            # dtype is the corresponding C++ data type, which may vary in different environments.
            # Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
            #       so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
            #       and '__int64' on Windows. They both represent 64-bit integer variables.
    """
    # Dygraph (imperative) mode: ``array`` is a plain Python list, so the
    # read is a direct index after unwrapping the 1-element index tensor.
    if _non_static_mode():
        assert isinstance(
            array,
            list), "The 'array' in array_read must be list in dygraph mode"
        assert isinstance(
            i, Variable
        ), "The index 'i' in array_read must be Variable in dygraph mode"
        assert i.shape == [
            1
        ], "The shape of index 'i' should be [1] in dygraph mode"
        i = i.numpy().item(0)
        return array[i]

    # Static-graph mode: validate inputs and append a read_from_array op.
    check_variable_and_dtype(i, 'i', ['int64'], 'array_read')
    helper = LayerHelper('array_read', **locals())
    if not isinstance(
            array,
            Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
        raise TypeError("array should be tensor array vairable")
    out = helper.create_variable_for_type_inference(dtype=array.dtype)
    helper.append_op(
        type='read_from_array',
        inputs={'X': [array],
                'I': [i]},
        outputs={'Out': [out]})
    return out
import requests
def get_user_posts(user_id, count, tags=None, expiry=None):
    """Get a user's Instagram posts.

    Fetches up to ``count`` recent posts for ``user_id`` from the
    Instagram API, following pagination as needed, optionally filtering
    by tags and serving/refreshing a file cache entry.

    Args:
        user_id: Instagram user id to fetch posts for.
        count: maximum number of posts to return (coerced with int()).
        tags: optional comma-separated string of tags; a post is kept
            when it carries at least one of them.
        expiry: optional cache expiry; when given, a still-fresh cached
            result is returned without hitting the API.

    Returns:
        list: the JSON items representing instagram posts.
    """
    # create the cache key
    cache_key = filecache.create_key('{user_id}{count}{tags}'.format(
        user_id=user_id,
        count=count,
        tags=tags))
    # cache requested?
    if expiry is not None:
        data = filecache.fetch_cache(cache_key, expiry)
        # fetch_cache returns False on a miss -- presumably never a
        # valid payload value; NOTE(review): confirm against filecache.
        if data is not False:
            return data
    # type casting
    count = int(count)
    tag_list = tags.split(',') if tags is not None else None
    url = None
    data = []
    while len(data) < count:
        if url is None:
            url = (
                'https://api.instagram.com/v1/users/{user_id}'
                '/media/recent/?client_id={client_id}&count={count}').format(
                    user_id=user_id,
                    client_id=config.instagram.get('client_id'),
                    count=count)
        response = requests.get(url).json()
        posts = response.get('data') or []
        for post in posts:
            if len(data) >= count:
                break
            # keep the post when no tag filter was requested or when it
            # carries at least one of the requested tags
            if tag_list is None or any(
                    tag in (post.get('tags') or []) for tag in tag_list):
                data.append(post)
        # stop when the API has no further pages; the original code would
        # request None (or raise on a missing 'pagination' key) here
        url = (response.get('pagination') or {}).get('next_url')
        if not url:
            break
    # write to cache
    filecache.write_cache(cache_key, data)
    return data
import os
def session_folder():
    """Build the file path of the current session's upload folder.

    The folder lives under the configured upload root and is named
    after the current session id.

    Returns:
        The file path for the session folder.
    """
    session_id = session['id']
    return os.path.join(constants.UPLOAD_FOLDER, session_id)
def list_active_containers():
    """Return all containers recorded in the state table.

    Returns:
        list: one ``[c_id, name]`` pair (a two-element list, not a
        tuple) per row of ``ContainerState``.
    """
    # One [id, name] pair per persisted container-state row.
    return [[state.c_id, state.name] for state in ContainerState.query.all()]
import typing
def filter_imports(language: Language,
                   t: pydsdl.CompositeType,
                   sort: bool = True) -> typing.List[str]:
    """
    Returns a list of all modules that must be imported to use a given type.

    :param pydsdl.CompositeType t: The type to scan for dependencies.
    :param bool sort: If true the returned list will be sorted.
    :return: a list of python module names the provided type depends on.
    """
    # Collect every attribute declared by the type; a service contributes
    # the attributes of both its request and response halves.
    if isinstance(t, pydsdl.ServiceType):
        attributes = t.request_type.attributes + t.response_type.attributes
    else:
        attributes = t.attributes

    def _is_composite_array(data_type: pydsdl.Any) -> bool:
        return isinstance(data_type, pydsdl.ArrayType) and isinstance(data_type.element_type, pydsdl.CompositeType)

    # Referenced composite types: direct fields plus the element types of
    # arrays whose elements are composites.
    dependencies = [a.data_type for a in attributes if isinstance(a.data_type, pydsdl.CompositeType)]
    dependencies += [a.data_type.element_type for a in attributes if _is_composite_array(a.data_type)]

    # De-duplicate the full namespaces while keeping first-seen order
    # (dict preserves insertion order).
    namespace_list = list(dict.fromkeys(dt.full_namespace for dt in dependencies))

    if language.enable_stropping:
        namespace_list = ['.'.join(filter_id(language, part) for part in ns.split('.'))
                          for ns in namespace_list]

    return sorted(namespace_list) if sort else namespace_list
def compute_ramlak_filter(dwidth_padded, dtype=np.float32):
    """
    Compute the Ramachandran-Lakshminarayanan (Ram-Lak) filter, used in
    filtered backprojection.

    The spatial-domain kernel is h[0] = 1/4, h[k] = -1/(pi*k)**2 for odd
    k and 0 for even k; the second half of the array mirrors the first so
    the kernel suits FFT-based (circular) convolution.

    :param dwidth_padded: width of the 2D sinogram after padding
    :param dtype: data type of the returned array
    :returns: 1D numpy array of length ``dwidth_padded``
    """
    L = dwidth_padded
    h = np.zeros(L, dtype=dtype)
    L2 = L//2+1
    h[0] = 1/4.
    # Odd sample indices carry -1/(pi*k)^2; even ones stay zero.
    # np.arange yields the exact odd integers 1, 3, 5, ... for any width,
    # unlike the previous np.linspace(1, L2, L2//2, False) construction,
    # which produced fractional "indices" whenever L2 was even (i.e. when
    # the padded width is not a multiple of 4), corrupting the taps.
    j = np.arange(1, L2, 2, dtype=dtype)
    h[1:L2:2] = -1./(np.pi**2 * j**2)
    # Mirror the first half to build the symmetric second half.
    h[L2:] = np.copy(h[1:L2-1][::-1])
    return h
def t_seg(p1, p2, t, align=0):
    """ trim segment

    Args:
        p1, p2: point(x, y)
        t: scaling factor (1 - trimed segment / original segment)
        align: 1: trim p2, 2: trim p1, 0: both side
    Return:
        trimmed segment(p1, p2)
    """
    direction = vector(p1, p2)
    if align == 1:
        # shorten only the p2 end
        return (p1, translate(p2, scale(direction, -t)))
    if align == 2:
        # shorten only the p1 end
        return (translate(p1, scale(direction, t)), p2)
    if align == 0:
        # split the trim evenly between both ends
        half = t / 2
        return (translate(p1, scale(direction, half)),
                translate(p2, scale(direction, -half)))
    # mirror the original dict-dispatch behaviour for unknown align values
    raise KeyError(align)
def isomorphic(tt, ttt):
    """True if isomorphic.

    Delegates to isomorphism(); a non-empty second element of its result
    indicates that an isomorphism exists.
    """
    _, mapping = isomorphism(tt, ttt)
    return len(mapping) > 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.