content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import fnmatch
def get_user_id(gi, email):
    """Look up the Galaxy user ID matching an email address.

    Arguments:
      gi (bioblend.galaxy.GalaxyInstance): Galaxy instance
      email (str): email address to match (fnmatch-style patterns allowed)

    Returns:
      String: the matching user ID, or None when no user matches or the
      user list could not be retrieved.
    """
    try:
        matches = (u.id for u in get_users(gi)
                   if fnmatch.fnmatch(u.email, email))
        return next(matches, None)
    except ConnectionError as ex:
        logger.warning("Failed to get user list: %s (%s)" % (ex.body,
                                                             ex.status_code))
        return None
import os
def get_tests_directory() -> str:
    """Return the absolute path of the directory containing this module.

    Useful for constructing paths to test data files that live next to
    the test code.
    """
    return os.path.dirname(os.path.abspath(__file__))
def ldns_pkt_size(*args):
    """LDNS buffer.

    Thin wrapper that forwards all arguments unchanged to the native
    ``_ldns.ldns_pkt_size`` extension binding; presumably queries a
    packet's size (per the ldns C library API -- TODO confirm).
    """
    return _ldns.ldns_pkt_size(*args)
def getUser(userID):
    """Fetch the Patrons record whose primary key equals *userID*.

    Args:
        userID -- The ID of a user stored in the Patrons table

    Note: ``Query.one()`` raises when no row (or more than one row)
    matches the given ID.
    """
    return session.query(Patrons).filter_by(id=userID).one()
import warnings
def guard_transform(transform):
    """Coerce *transform* into a validated ``Affine`` instance.

    Accepts an existing ``Affine``, a GDAL-style 6-tuple (deprecated),
    or an Affine-ordered 6-tuple. Raises ValueError when either scale
    coefficient (a or e) is zero.
    """
    if not isinstance(transform, Affine):
        if tastes_like_gdal(transform):
            # Deprecated GDAL coefficient ordering.
            warnings.warn(
                "GDAL-style transforms are deprecated and will not "
                "be supported in Rasterio 1.0.",
                FutureWarning,
                stacklevel=2)
            transform = Affine.from_gdal(*transform)
        else:
            transform = Affine(*transform)
    if transform.a == 0.0 or transform.e == 0.0:
        raise ValueError(
            "Transform has invalid coefficients a, e: (%f, %f)" % (
                transform.a, transform.e))
    return transform
import pandas
import math
def create_heatmap(piek_json,
                   antske_json,
                   output_path=None,
                   verbose=0):
    """Build a 7x7 agreement heatmap of two annotators' Likert scores.

    :param piek_json: dict mapping item key -> Likert value (1-7) from annotator 1
    :param antske_json: dict with the same keys -> Likert value from annotator 2
    :param output_path: optional path; when given, the figure is saved there
    :param verbose: when >= 2, print each (value1, value2, count) cell
    :return: (df, ax) - the integer count matrix and the heatmap Axes
    """
    from collections import defaultdict  # local: not imported at module level

    # Initialize an empty DataFrame with one column per Likert value.
    likert_values = [1, 2, 3, 4, 5, 6, 7]
    df = pandas.DataFrame()
    default_values = [None for _ in range(len(likert_values))]
    for likert_value in likert_values:
        df[likert_value] = default_values
    # Group item keys by the pair of scores the two annotators assigned.
    pair_to_items = defaultdict(list)
    keys = list(piek_json.keys())
    assert piek_json.keys() == antske_json.keys()
    for key in keys:
        pair_to_items[(piek_json[key], antske_json[key])].append(key)
    for (piek, antske), items in pair_to_items.items():
        # DataFrame.at replaces set_value, which was removed in pandas 1.0.
        df.at[piek, antske] = len(items)
        if verbose >= 2:
            print(piek, antske, len(items))
    # Replace missing cells (None or NaN) with zero counts.
    for index, row in df.iterrows():
        for column_name, value in row.items():
            if value is None or (isinstance(value, float) and math.isnan(value)):
                df.at[index, column_name] = 0
    df = df[df.columns].astype(int)
    df = df.drop(df.index[0])
    f, ax = plt.subplots(figsize=(9, 6))
    plot = sns.heatmap(df, annot=True, fmt="d", linewidths=.5, ax=ax)
    ax.invert_yaxis()
    if output_path is not None:
        plot.figure.savefig(output_path)
    return df, ax
def get_classification_report(true_labels, pred_labels, labels=None, target_names=None, output_dict=False):
    """Build a classification report for predicted vs. true labels.

    Example:
        true_labels = [0, 1, 2, 3, 4, 1]  # Y
        pred_labels = [0, 1, 1, 2, 2, 1]  # X
        target_names = ["A", "B", "C", "D", "E"]
        out = get_classification_report(true_labels, pred_labels,
                                        target_names=target_names,
                                        output_dict=False)

    On macro vs. micro averaging: when class sizes are similar the two
    are close. With imbalanced classes, use the micro average to weight
    populous classes (inspect them when micro < macro) and the macro
    average to weight rare classes (inspect them when macro < micro).

    :param true_labels: ground-truth labels
    :param pred_labels: predicted labels
    :param labels: explicit label ids (overridden when target_names is given)
    :param target_names: display names; when set, labels becomes range(len(target_names))
    :param output_dict: when True, return a dict with only the summary rows
    :return: report string, or summary dict when output_dict is True
    """
    y_true = np.array(true_labels, dtype=np.int32)
    y_pred = np.array(pred_labels, dtype=np.int32)
    if target_names:
        labels = list(range(len(target_names)))
    report = metrics.classification_report(y_true,
                                           y_pred,
                                           labels=labels,
                                           digits=4,
                                           target_names=target_names,
                                           output_dict=output_dict)
    if not output_dict:
        return report
    # Keep only the aggregate rows of the dict form.
    return {
        "macro_avg": report["macro avg"],
        "accuracy": report["accuracy"],
        "weighted_avg": report["weighted avg"],
    }
import random
from sys import prefix
from datetime import datetime
import time
def generate_message(minutes_to_next=None):
    """Generate a random exercise announcement message.

    Picks a random exercise and repetition count from the module-level
    ``exercises`` mapping. When *minutes_to_next* is given, a second
    line announcing the time of the next exercise (in
    ``message_timezone``) is appended.

    :param minutes_to_next: minutes until the next exercise, or None
    :return: the formatted message string
    """
    from datetime import timedelta  # local: module imports only `datetime`

    # random.choice needs a sequence; dict views are not indexable (Py3).
    exercise, data = random.choice(list(exercises.items()))
    repetitions = random.randint(*data['range'])
    # Optional unit (e.g. " seconds") between the count and exercise name;
    # the original interpolated sys.prefix here by mistake.
    unit_string = ' ' + data['unit'] if data['unit'] else ''
    text = '{}{} {} RIGHT NOW!'.format(repetitions, unit_string, exercise)
    # Add the next exercise indication
    if minutes_to_next is not None:
        current_time = datetime.now(timezone('UTC'))
        next_time = (current_time.astimezone(timezone(message_timezone)) +
                     timedelta(minutes=minutes_to_next))
        # Format the *next* exercise time, not the current wall clock.
        next_text = 'NEXT EXERCISE AT {}'.format(next_time.strftime('%H:%M'))
        text += '\n' + next_text
    return text
def yx():
    """
    Smoke-test the Yinxiang (Evernote China) service: fetch and print
    the current user for the configured developer token.

    :return: the literal string "yx"
    """
    client = EvernoteClient(token=dev_token, sandbox=False)
    client.service_host = 'app.yinxiang.com'
    userStore = client.get_user_store()
    user = userStore.getUser()
    # print() works under both Python 2 and 3; the original used the
    # Python-2-only print statement, a SyntaxError under Python 3.
    print(user)
    return "yx"
def generate_frequency_result_for_time_precedence_query_workload(config_map, time_interval, spatial_interval):
    """Compute per-region query frequencies for a time-precedence workload.

    :param config_map: region key -> parameter list; indices 4-7 hold the
        lon/lat bounding box and indices 8-9 the raw time range
    :param time_interval: temporal bucket size, forwarded per region
    :param spatial_interval: spatial bucket size, forwarded per region
    :return: dict mapping each region key to its generated query frequency
    """
    frequency_result = {}
    for key, region_params in config_map.items():
        lon_min, lon_max, lat_min, lat_max = region_params[4:8]
        time_min = normalize_to_utc_date(region_params[8])
        time_max = normalize_to_utc_date(region_params[9])
        frequency_result[key] = generate_query_frequency_per_region(
            key, time_interval, spatial_interval,
            lon_min, lon_max, lat_min, lat_max, time_min, time_max)
    print("finish frequency result for query")
    return frequency_result
def delete(request):
    """Log the user out by clearing the session, then redirect to login."""
    session = request.session
    session.flush()
    return redirect('/login/')
import warnings
def acovf(x, unbiased=False, demean=True, fft=None, missing='none', nlag=None):
    """
    Autocovariance for 1D

    Parameters
    ----------
    x : array
        Time series data. Must be 1d.
    unbiased : bool
        If True, then denominators is n-k, otherwise n
    demean : bool
        If True, then subtract the mean x from each element of x
    fft : bool
        If True, use FFT convolution.  This method should be preferred
        for long time series.
    missing : str
        A string in ['none', 'raise', 'conservative', 'drop'] specifying how
        any NaNs are to be treated.
    nlag : {int, None}
        Limit the number of autocovariances returned.  Size of returned
        array is nlag + 1.  Setting nlag when fft is False uses a simple,
        direct estimator of the autocovariances that only computes the first
        nlag + 1 values. This can be much faster when the time series is long
        and only a small number of autocovariances are needed.

    Returns
    -------
    acovf : array
        autocovariance function

    References
    -----------
    .. [*] Parzen, E., 1963. On spectral analysis with missing observations
       and amplitude modulation. Sankhya: The Indian Journal of
       Statistics, Series A, pp.383-392.
    """
    if fft is None:
        # GH#4937
        warnings.warn('fft=True will become the default in a future version '
                      'of statsmodels/sm2. To suppress this warning, '
                      'explicitly set fft=False.', FutureWarning)
        fft = False
    x = np.squeeze(np.asarray(x))
    if x.ndim > 1:
        raise ValueError("x must be 1d. Got %d dims." % x.ndim)

    missing = missing.lower()
    if missing not in ['none', 'raise', 'conservative', 'drop']:
        raise ValueError("`missing` option %s not understood"
                         % missing)  # pragma: no cover

    # Only pay the cost of NaN handling when NaNs are actually present.
    if missing == 'none':
        deal_with_masked = False
    else:
        deal_with_masked = np.isnan(x).any()
    if deal_with_masked:
        if missing == 'raise':
            raise MissingDataError("NaNs were encountered in the data")
        notmask_bool = ~np.isnan(x)
        if missing == 'conservative':
            # Must copy for thread safety (GH#4937).  Zeroed NaNs contribute
            # nothing to the dot products / correlations below.
            x = x.copy()
            x[~notmask_bool] = 0
        else:
            # 'drop'
            x = x[notmask_bool]  # copies non-missing
        # 0/1 mask used to compute per-lag divisors in the masked cases.
        notmask_int = notmask_bool.astype(int)

    if demean and deal_with_masked:
        # whether 'drop' or 'conservative':
        # mean over non-missing observations only
        xo = x - x.sum() / notmask_int.sum()
        if missing == 'conservative':
            xo[~notmask_bool] = 0
    elif demean:
        xo = x - x.mean()
    else:
        xo = x

    n = len(x)
    lag_len = nlag
    if nlag is None:
        lag_len = n - 1
    elif nlag > n - 1:
        raise ValueError('nlag must be smaller than nobs - 1')

    if not fft and nlag is not None:
        # GH#4937: simple direct estimator computing only the first
        # lag_len + 1 autocovariances.
        acov = np.empty(lag_len + 1)
        acov[0] = xo.dot(xo)
        for i in range(lag_len):
            acov[i + 1] = xo[i + 1:].dot(xo[:-(i + 1)])
        if not deal_with_masked or missing == 'drop':
            if unbiased:
                acov /= (n - np.arange(lag_len + 1))
            else:
                acov /= n
        else:
            if unbiased:
                # Per-lag counts of overlapping non-missing pairs.
                divisor = np.empty(lag_len + 1, dtype=np.int64)
                divisor[0] = notmask_int.sum()
                for i in range(lag_len):
                    divisor[i + 1] = np.dot(notmask_int[i + 1:],
                                            notmask_int[:-(i + 1)])
                divisor[divisor == 0] = 1
                acov /= divisor
            else:
                # biased, missing data but npt 'drop'
                acov /= notmask_int.sum()
        return acov

    # Full-length path: build the per-lag divisor d for the correlation.
    if unbiased and deal_with_masked and missing == 'conservative':
        d = np.correlate(notmask_int, notmask_int, 'full')
        d[d == 0] = 1
    elif unbiased:
        xi = np.arange(1, n + 1)
        d = np.hstack((xi, xi[:-1][::-1]))
    elif deal_with_masked:
        # biased and NaNs given and ('drop' or 'conservative')
        d = notmask_int.sum() * np.ones(2 * n - 1)
    else:
        # biased and no NaNs or missing=='none'
        d = n * np.ones(2 * n - 1)

    if fft:
        nobs = len(xo)
        # Pad to a regular (5-smooth) length for a fast FFT.
        n = _next_regular(2 * nobs + 1)
        Frf = np.fft.fft(xo, n=n)
        acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
        acov = acov.real
    else:
        acov = np.correlate(xo, xo, 'full')[n - 1:] / d[n - 1:]

    if nlag is not None:
        # GH#4937 Copy to allow gc of full array rather than view
        return acov[:lag_len + 1].copy()
    return acov
import re
def ParseTimeCommandResult(command_result):
    """Parse the output of the shell `time` command into elapsed seconds.

    Args:
      command_result: text containing a line such as "real\t1m30.50s".

    Returns:
      Wall-clock time taken for the command, in seconds (float).
    """
    # Escape the '.' so only a literal decimal point matches; the original
    # pattern's bare '.' matched any character between the digit groups.
    time_data = re.findall(r'real\s+(\d+)m(\d+\.\d+)', command_result)
    time_in_seconds = 60 * float(time_data[0][0]) + float(time_data[0][1])
    return time_in_seconds
import os
import tempfile
def _GetTmpDir():
    """Determine the temporary directory to use, creating it when absent.

    Honors FLAGS.emulator_tmp_dir when set; otherwise creates a fresh
    mkdtemp directory.

    Returns:
      path to temporary directory.
    """
    tmp_dir = FLAGS.emulator_tmp_dir
    if not tmp_dir:
        tmp_dir = os.path.abspath(tempfile.mkdtemp('android-emulator-launch'))
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)
    return tmp_dir
import requests
import json
def _push_aol_to_dativetop_server(aol):
    """PUT ``aol`` to the DativeTop server, appending it to the server's AOL.

    Returns a 2-tuple: (decoded JSON response, None) on success, or
    (None, error message) on request or JSON-decoding failure.
    """
    try:
        response = requests.put(c.DATIVETOP_SERVER_URL, json=aol)
        response.raise_for_status()
    except requests.exceptions.RequestException:
        msg = 'Failed to push our AOL to the DativeTop Server.'
        logger.exception(msg)
        return None, msg
    try:
        return response.json(), None
    except json.decoder.JSONDecodeError:
        msg = ('Failed to parse JSON from DativeTop Server response to our PUT'
               ' request.')
        logger.exception(msg)
        return None, msg
def dual_single(lag_mul: float, val: np.ndarray, count: np.ndarray) -> float:
    """Evaluate phi_n(lambda_n) - 1, the weighted average minus 1 for F_0.

    Args:
        lag_mul: The normalized Lagrangian multiplier. Must be
            strictly between 0 and 1.
        val: Likelihood values (excluding zero and infinity).
        count: Counts for likelihood values (including zero and
            infinity). Only the first and the last counts can be
            zero. Sum must be positive.

    Returns:
        Weighted average minus 1.
    """
    interior = count[1:-1] / (lag_mul + (1 - lag_mul) * val)
    zero_term = count[0] / lag_mul
    total = sum(count)
    return (sum(interior) + zero_term) / total - 1
def _execute(
        repository_ctx,
        cmdline,
        error_msg = None,
        error_details = None,
        empty_stdout_fine = False,
        environment = {}):
    """Executes an arbitrary shell command.

    Args:
      repository_ctx: the repository_ctx object
      cmdline: list of strings, the command to execute
      error_msg: string, a summary of the error if the command fails
      error_details: string, details about the error or steps to fix it
      empty_stdout_fine: bool, if True, an empty stdout result is fine,
        otherwise it's an error
      environment: environment variables passed to repository_ctx.execute

    Return:
      the result of repository_ctx.execute(cmdline)
    """
    result = repository_ctx.execute(cmdline, environment = environment)

    # Any stderr output is treated as failure, as is an empty stdout unless
    # empty_stdout_fine is set; _fail aborts the repository rule with the
    # assembled multi-line message.
    if result.stderr or not (empty_stdout_fine or result.stdout):
        _fail("\n".join([
            error_msg.strip() if error_msg else "Repository command failed",
            result.stderr.strip(),
            error_details if error_details else "",
        ]))
    return result
import warnings
def parse_tagged_block(block):
    """
    Return a Block whose data payload has been decoded, when a decoder
    for the block's key is registered; otherwise the raw data is kept.
    Unknown keys trigger a warning.
    """
    key = block.key.decode('ascii')
    if not TaggedBlock.is_known(key):
        warnings.warn("Unknown tagged block (%s)" % block.key)
    decode = _tagged_block_decoders.get(key, lambda data: data)
    return Block(key, decode(block.data))
import numpy
def floatX(arr):
    """Converts data to a numpy array of dtype ``theano.config.floatX``.

    Parameters
    ----------
    arr : array_like
        The data to be converted.

    Returns
    -------
    numpy ndarray
        The input array in the ``floatX`` dtype configured for Theano.
        If `arr` is an ndarray of correct dtype, it is returned as is.
    """
    # asarray avoids a copy when `arr` already is an ndarray of the
    # configured dtype, matching the "returned as is" promise above.
    return numpy.asarray(arr, dtype=theano.config.floatX)
def getNumberOfPublicIp():
    """Get the total number of public IP addresses.

    The value is a precomputed constant: recomputing it from
    getValidPublicIpRange() on every call (summing range[1] - range[0] + 1
    over all valid public ranges) would be wasted work.

    return: (long) Number of public IP
    """
    PUBLIC_IP_COUNT = 3689020672
    return PUBLIC_IP_COUNT
import os
def framework_find(fn, executable_path=None, env=None):
    """
    Locate a framework, loosely following dyld search semantics.

    Accepts input such as:
        Python
        Python.framework
        Python.framework/Versions/Current
    """
    try:
        return dyld_find(fn, executable_path=executable_path, env=env)
    except ValueError as e:
        error = e
    try:
        # Retry with the name expanded to <fn>.framework/<basename>.
        fmwk_index = fn.rfind('.framework')
        if fmwk_index == -1:
            fmwk_index = len(fn)
            fn += '.framework'
        fn = os.path.join(fn, os.path.basename(fn[:fmwk_index]))
        return dyld_find(fn, executable_path=executable_path, env=env)
    except ValueError:
        raise error
    finally:
        # Drop the saved exception to break the reference cycle it holds.
        error = None
import sys
def find_modules(path):
    """
    List all the modules (and packages) found in a repository path.
    """
    modules = set()
    legacy_iter = sys.version_info.major == 2 or (
        sys.version_info.major == 3 and sys.version_info.minor < 6
    )
    for pkg in find_packages(path):
        modules.add(pkg)
        pkgpath = path + "/" + pkg.replace(".", "/")
        if legacy_iter:
            # Pre-3.6 iter_modules yields plain (finder, name, ispkg) tuples.
            for _, name, ispkg in iter_modules([pkgpath]):
                if not ispkg:
                    modules.add(pkg + "." + name)
        else:
            for info in iter_modules([pkgpath]):
                if not info.ispkg:
                    modules.add(pkg + "." + info.name)
    return modules
def cart2sph(x, y, z):
    """
    Convert Cartesian coordinates to conventional spherical coordinates.

    :param x: Cartesian coordinate or vector x
    :type x: float or np.ndarray
    :param y: Cartesian coordinate or vector y
    :type y: float or np.ndarray
    :param z: Cartesian coordinate or vector z
    :type z: float or np.ndarray
    :return: Spherical coordinates: radius, polar angle, and azimuth angle
    :rtype: np.ndarray
    """
    radius = (x ** 2 + y ** 2 + z ** 2) ** 0.5
    polar = np.arccos(z / radius)
    azimuth = np.arctan2(y, x)
    return np.array([radius, polar, azimuth])
def hdfs_open(server, username, path, **args):
    """Read a file over WebHDFS.

    Returns a filelike object (specifically, an httplib response object).
    """
    # Bind the helper's result to a differently-named local: assigning to
    # `datanode_url` made that name local to this function, so the original
    # first line raised UnboundLocalError before the helper could be called.
    url = datanode_url(server, username, path, **args)
    response = _datanode_request(server, username, 'GET', url)
    if response.status == httplib.OK:
        return response
    else:
        content = response.read()
        _raise_error(response.status, content)
def get_serv_loader(desc_file=SERVICES_FILE):
    """Get a ServiceLoader for the service descriptions in *desc_file*.

    The loader for the default `SERVICES_FILE` is built once and cached in
    the module-level `_serv_loader` ("singleton"); any other file gets a
    fresh loader on every call.
    """
    global _serv_loader
    if desc_file != SERVICES_FILE:
        with open(desc_file, "r") as fp:
            return ServiceLoader(fp)
    if _serv_loader is None:
        with open(desc_file, "r") as fp:
            _serv_loader = ServiceLoader(fp)
    return _serv_loader
import socket
def check_reverse_lookup():
    """
    Report whether the host FQDN resolves to the same IP as the hostname.

    Returns False on any resolution failure.
    """
    try:
        hostname_ip = socket.gethostbyname(socket.gethostname().lower())
        fqdn_ip = socket.gethostbyname(socket.getfqdn().lower())
    except socket.error:
        return False
    return hostname_ip == fqdn_ip
from pathlib import Path
def render_pages(site_data: SiteData) -> dict[Path, str]:
    """Render every site page and return a path -> content mapping."""
    pages = [(site_data.homepage, rendering.render_homepage(site_data))]
    for uri, page in site_data.record_pages.items():
        pages.append((page, rendering.render_record_page(site_data, uri)))
    for record_type, page in site_data.toc_pages.items():
        pages.append((page, "TODO"))
    # Post-process every page body to resolve cross-page links.
    return {
        path: rendering.apply_links(content, site_data)
        for path, content in pages
    }
import os
def download_file(clean_url, download_folder, downloaded_file_name, depth=0, error_output=True):
    """ Downloads a specific file unless it is already cached locally.

    :param clean_url: Decoded URL to the file.
    :param download_folder: Folder to place the downloaded file in.
    :param downloaded_file_name: File name to save the download to.
    :param depth: (optional) Hierarchy depth of the handled Confluence page
        (controls log indentation).
    :param error_output: (optional) Set to False to report failures as
        warnings instead of errors.
    :returns: Path to the (possibly pre-existing) downloaded file.
    """
    downloaded_file_path = '%s/%s' % (download_folder, downloaded_file_name)
    if os.path.exists(downloaded_file_path):
        # Already fetched on a previous run - reuse it.
        return downloaded_file_path
    absolute_download_url = '%s%s' % (settings.CONFLUENCE_BASE_URL, clean_url)
    print('%sDOWNLOAD: %s' % ('\t'*(depth+1), downloaded_file_name))
    try:
        utils.http_download_binary_file(
            absolute_download_url, downloaded_file_path,
            auth=settings.HTTP_AUTHENTICATION,
            headers=settings.HTTP_CUSTOM_HEADERS,
            verify_peer_certificate=settings.VERIFY_PEER_CERTIFICATE,
            proxies=settings.HTTP_PROXIES)
    except utils.ConfluenceException as e:
        if error_output:
            error_print('%sERROR: %s' % ('\t'*(depth+2), e))
        else:
            print('%sWARNING: %s' % ('\t'*(depth+2), e))
    return downloaded_file_path
from typing import Sequence
def _clean_bar_plot_data(df_in: pd.DataFrame,
                         sweep_vars: Sequence[Text] = None) -> pd.DataFrame:
    """Clean the summary data for bar plot comparison of agents.

    Orders the env/type columns categorically and derives an 'agent'
    label from the sweep variables (or a constant when none are given).
    """
    df = df_in.copy()
    df['env'] = pd.Categorical(
        df.bsuite_env, categories=_ORDERED_EXPERIMENTS, ordered=True)
    df['type'] = pd.Categorical(
        df['type'], categories=_ORDERED_TYPES, ordered=True)
    if sweep_vars is None:
        df['agent'] = 'agent'
    elif len(sweep_vars) == 1:
        df['agent'] = df[sweep_vars[0]].astype(str)
    else:
        # Join "name=value" pairs across the sweep columns, one per line.
        labelled = df[sweep_vars].astype(str).apply(
            lambda x: x.name + '=' + x, axis=0)
        df['agent'] = labelled.apply(lambda x: '\n'.join(x), axis=1)
    return df
def file_resources():
    """Build the mapping of file resource names to resource instances.

    Each resource gets its own FileService instance.
    """
    resources = {}
    resources['mock_file'] = CustomFileResource(service=FileService())
    resources['mock_file_action'] = CustomFileActionResource(service=FileService())
    return resources
import pickle
def prepare_data(features=None):
    """Prepare train/test data for analysis and pickle it to rf_data.dat.

    Args:
        features (list of str): feature column names; defaults to every
            column of sheet 1 that is not listed in `not_features`.

    Returns:
        X_train (np.matrix): train X
        X_test (np.matrix): test X
        y_train (np.matrix): train y
        y_test (np.matrix): test y
    """
    # Read data: sheet 1 holds training rows, sheet 2 rows to predict.
    xls = pd.ExcelFile('Database.xlsx')
    db1 = xls.parse(1)
    db2 = xls.parse(2)
    # Missing sales are treated as zero sales.
    db1.loc[np.isnan(db1['Sales']), 'Sales'] = 0
    # .to_numpy() replaces .as_matrix(), which was removed in pandas 1.0.
    y = db1['Sales'].to_numpy()
    # Fill the premium column in db2
    db2['Premium Offered'] = db1['Premium Offered'].mean()
    # To get all columns in X, we need to mix it with the training data
    if features is None:
        features = [x for x in db1.columns if x not in not_features]
    db3 = pd.concat([db1[features], db2[features]], axis=0)
    # Generate one X matrix for both datasets, then split it back apart.
    Xall = proccess_X(db3, features)
    X = Xall[:db1.shape[0], :]
    X2 = Xall[db1.shape[0]:, :]
    # Train and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=42)
    # Pickle the data; `with` guarantees the file handle is closed.
    data = {'X1': X, 'X_train': X_train, 'X_test': X_test, 'y_train': y_train, 'y_test': y_test, 'X2': X2}
    with open('rf_data.dat', 'wb') as fp:
        pickle.dump(data, fp)
    return X_train, X_test, y_train, y_test
def choose_my_art_date(my_location, google_maps_key, mapping = False, search_range = 500, min_rating = 4.3):
    """
    Function to select an artsy date and dinner; randomly selects local arts event from NY ArtBeat API
    found at https://www.nyartbeat.com/resources/doc/api, and uses the arts event data to determine a nearby restaurant.

    Args
    ----
    Required:
        my_location(str): Starting point address - must be within NYC Metro Location
        google_maps_key (str): Optional google maps API key needed to geocode your location
            To obtain a google maps API key, please refer to https://developers.google.com/maps
    Optional:
        search_range(float): Distance from starting point (radius for search, meters)
            Default: 500
        min_rating(float): should be 1-5
            Default: 4.3
        mapping(bool): Boolean param specifying whether user wants a simple interactive map returned of matching locations
            Default: False

    Returns
    ---
    DataFrame with [max_results] art events in the New York Metro area in the [radius] of the [specified location]
    Fields:
        Event_Name(str): Name of Event
        Event_Description(str): Details about event
        Event_Price_Adult(float): Price for tickets
        DateEnd(date): Last date for exhibit or installation
        Event_Lat(float): Latitude of event
        Event_Lon(float): Longitude of event
        Event_Address(str): Address for event - requires geocoding.
        Restaurant_Name(str): Name of restaurant
        Price_Level(str): $ - $$$$
        Restaurant_Rating(float): 1-5
        Restaurant_Address(str): Distance from starting point (my location)
        Restaurant_Lat(float): Latitude of restaurant
        Restaurant_Lon(float): Longitude of restaurant
    Map (Optional): Interactive Google Maps Output with Markers for selected restaurant and selected event.

    Usage Example
    ---
    [in]:
        choose_my_art_date("Met Museum", google_maps_key)
    [out]:
        df
        | Event_Name        | Eugène Leroy “About Marina”
        | Event_Description | Michael Werner Gallery, New York presents an e...
        | Price             | Free
        | DateEnd           | 2021-12-23
        | Distance          | 438.962726
        | Event_Lat         | 40.775625
        ...
    [out]:
        Interactive Map
    """
    # NOTE(review): min_rating is accepted but never used in this body --
    # presumably meant to be forwarded to find_my_dinner; confirm.
    # Geocode the starting address, then pick one nearby art event at random.
    lat, lon = geocoding(my_location = my_location, google_maps_key = google_maps_key)
    df_events = find_my_art_events(my_location = my_location, google_maps_key = google_maps_key, lat = lat, lon = lon, mapping = False, search_range = search_range)
    selected_event_row = df_events.sample(n = 1)
    event_lat = selected_event_row['Event_Lat'].values
    event_lon = selected_event_row['Event_Lon'].values
    # Search for dinner around the chosen event, not the starting point.
    df_dinner = find_my_dinner(lat = event_lat, lon = event_lon, google_maps_key = google_maps_key, mapping = False, search_range = search_range)
    selected_restaurant_row = df_dinner.sample(n = 1)
    # Stack the event and restaurant columns into a two-column Field/Value frame.
    date_night_df = pd.concat([selected_event_row, selected_restaurant_row], axis=1).unstack().reset_index().dropna().drop(columns = ['level_1']).rename(columns = {'level_0': 'Field', 0: 'Value'})
    if mapping == True:
        # Build a small (Name, Lat, Lon) frame holding the two map markers.
        lat_lon_df = pd.concat([selected_event_row[['Event_Name', 'Event_Lat', 'Event_Lon']].rename(columns = {'Event_Name': 'Name', 'Event_Lat': 'Lat', 'Event_Lon': 'Lon'}),
                                selected_restaurant_row[['Restaurant_Name', 'Restaurant_Lat', 'Restaurant_Lon']].rename(columns = {'Restaurant_Name': 'Name', 'Restaurant_Lat': 'Lat', 'Restaurant_Lon': 'Lon'})], axis=0).reset_index()
        nymap = map_events(lat_lon_df, google_maps_key, name_column = 'Name', start_lat = lat, start_lon = lon, lat_column = 'Lat', long_column = 'Lon')
        return date_night_df, nymap
    else:
        return date_night_df
import os
from typing import MutableMapping
def flatten_repo_tree(d, parent_key=''):
    """Flatten a nested repo-config dict into path-keyed entries.

    Keys are joined into paths with os.path.join; a subtree is recursed
    into only when its value is a mapping that does not validate as a
    Repo.
    """
    flat = {}
    for key, value in d.items():
        node_key = os.path.join(parent_key, key)
        try:
            repo = Repo(**value)
        except (ValidationError, TypeError):
            repo = None
        if repo:
            flat[node_key] = value
        elif isinstance(value, MutableMapping):
            flat.update(flatten_repo_tree(value, node_key))
        else:
            flat[node_key] = value
    return flat
import hashlib
def hash_text(message: str, hash_alg: str = 'keccak256') -> str:
    """Digest a text message with the selected hash algorithm.

    :param message: str, the text to hash
    :param hash_alg: str, `keccak256` or `sha256` (default `keccak256`)
    :return: hex str, the digest of the message
    :raises ValueError: for any other algorithm name
    """
    if hash_alg == 'sha256':
        return hashlib.sha256(message.encode()).hexdigest()
    if hash_alg == 'keccak256':
        return keccak256(text=message)
    raise ValueError(f'unsupport hash_alg param, hash_alg: {hash_alg}')
def get_grains_connected_to_face(mesh, face_set, node_id_grain_lut):
    """
    Find the grains connected to the given face set.

    A face-set name of the form "faceXX_YY" directly encodes the two
    grain ids. Otherwise, any triangle in the set determines the answer:
    each of its three vertices carries the set of grains touching that
    node, and the grains intersecting the face are the intersection of
    those three sets.

    :param mesh: The mesh
    :type mesh: :class:`Mesh`
    :param face_set: The face set to find grains connected to
    :type face_set: :class:`ElementSet`
    :return: The grain identifiers that intersect the face.
    :rtype: list of ints
    """
    encoded = face_set.name[4:].split("_")
    if len(encoded) == 2:
        return [int(g) for g in encoded]
    first_triangle = mesh.elements[face_set.ids[0]]
    per_node_grains = [
        set(node_id_grain_lut[node_id])
        for node_id in first_triangle.vertices
    ]
    return list(set.intersection(*per_node_grains))
def get_git_projects(git_worktree, args,
                     default_all=False,
                     use_build_deps=False,
                     groups=None):
    """ Get a list of git projects to use.

    NOTE: the ``groups`` parameter is immediately overridden by
    ``args.groups`` (behavior preserved from the original).
    """
    git_parser = GitProjectParser(git_worktree)
    groups = vars(args).get("groups")
    if groups:
        # Explicit groups win over build-dependency resolution.
        return git_worktree.get_git_projects(groups=groups)
    if use_build_deps:
        # Avoid getting all the projects when no project is given and
        # running from the subdir of a build project.
        if not at_top_worktree(git_worktree):
            default_all = False
        build_worktree = qibuild.worktree.BuildWorkTree(git_worktree.worktree)
        build_parser = GitBuildProjectParser(git_worktree, build_worktree)
        return build_parser.parse_args(args, default_all=default_all)
    return git_parser.parse_args(args, default_all=default_all)
from typing import Literal
def RmZ(
    ps: Table,
    r_band: Literal["9601", "9602"] = "9602",
    z_band: Literal["9801", "9901"] = "9901",
    **kw
) -> units.mag:
    """Compute the R-Z color.

    Parameters
    ----------
    ps : astropy.table.Table
        need arguments for r(z)_band functions
    r_band : {'9601', '9602'}
        R band to use (default '9602')
    z_band : {'9801', '9901'}
        Z band to use (default '9901')
    kwargs
        passed through to the band-difference helper

    Returns
    -------
    R-Z color
    """
    r_column = "R_MP" + r_band
    z_column = "Z_MP" + z_band
    return _b1mb2(ps, r_column, z_column, **kw)
from typing import List
def get_table_names(connection: psycop.extensions.connection) -> List[str]:
    """
    Return the names of all public base tables in the database.

    E.g., tables=['entities', 'events', 'stories', 'taxonomy']
    """
    query = """
    SELECT table_name
    FROM information_schema.tables
    WHERE table_type = 'BASE TABLE'
    AND table_schema = 'public'
    """
    cursor = connection.cursor()
    cursor.execute(query)
    return [row[0] for row in cursor.fetchall()]
def mrpxmrp(sigmaset1, sigmaset2):
    """in work; returns transformation [FN] = [FB(s2)][BN(s1)]

    Composes two Modified Rodrigues Parameter (MRP) sets using the
    vector helpers in ``vec``.

    NOTE(review): ``sigma = vec.vxs(denom, numer)`` multiplies the
    numerator by ``denom``, while the commented-out reference formula
    below divides by it -- confirm whether ``vec.vxs`` scales by the
    reciprocal or this is a bug.
    """
    q1 = np.array(sigmaset1)
    q2 = np.array(sigmaset2)
    sig1_norm = norm(sigmaset1)
    sig2_norm = norm(sigmaset2)
    scalar1 = 1 - sig1_norm**2
    scalar2 = 1 - sig2_norm**2
    # scalar3 is unused (kept as-is).
    scalar3 = 2.
    # Denominator of the MRP composition formula.
    denom = 1 + sig1_norm**2*sig2_norm**2-2*vec.vdotv(sigmaset1, sigmaset2)
    term1 = vec.vxs(scalar1, sigmaset2)
    term2 = vec.vxs(scalar2, sigmaset1)
    term3 = vec.vxs(2, vec.vcrossv(sigmaset2, sigmaset1))
    numer = vec.vxadd(term1, vec.vxadd(term2, -term3))
    sigma = vec.vxs(denom, numer)
    # sigma = (1-(q1.T*q1))*q2+(1-(q2*q2.T))*q1+2*np.cross(q1.T,q2.T).T;
    # sigma = sigma/(1+q1.T*q1 * q2.T*q2-2*q1.T*q2);
    return np.array(sigma)
def continuous_partition_data(data, bins='auto', n_bins=10):
    """Build a partition object (bin edges + densities) for continuous data.

    Args:
        data (list-like): The data from which to construct the estimate.
        bins (string): One of 'uniform' (for uniformly spaced bins),
            'ntile' (for percentile-spaced bins), or 'auto' (for
            automatically spaced bins)
        n_bins (int): Ignored if bins is auto.

    Returns:
        A new partition_object::

            {
                "bins": (list) The endpoints of the partial partition of reals,
                "weights": (list) The densities of the bins implied by the partition.
            }
    """
    if bins == 'uniform':
        edges = np.linspace(start=np.min(data), stop=np.max(data), num=n_bins + 1)
    elif bins == 'ntile':
        edges = np.percentile(data, np.linspace(start=0, stop=100, num=n_bins + 1))
    elif bins == 'auto':
        # Delegate bin selection entirely to numpy.
        edges = 'auto'
    else:
        raise ValueError("Invalid parameter for bins argument")
    hist, bin_edges = np.histogram(data, edges, density=False)
    return {
        "bins": bin_edges,
        "weights": hist / len(data),
    }
import csv
def readInput(infile, genefile, segfile):
    """
    Read the three analysis input files.

    Parameters:
        infile (str): File containing the list of genes to be analyzed,
            one per line.
        genefile (str): Tab-separated file of gene range definitions.
        segfile (str): Tab-separated file of cell line intervals and
            copy number data.

    Returns:
        genes (list): List of genes.
        genedef (dict): gene -> (chromosome, Interval(cds_from, cds_to)),
            restricted to genes with defined CDS bounds.
        interval_dict (dict): cell line -> chromosome -> IntervalTree of
            segment means keyed by [Start, End) ranges.
    """
    with open(infile) as inf:
        genes = [line.strip() for line in inf.readlines()]

    genedef = {}
    with open(genefile) as genef:
        for row in csv.DictReader(genef, delimiter="\t"):
            # Skip genes whose CDS bounds are undefined ("-").
            if row["cds_from"] != "-" and row["cds_to"] != "-":
                genedef[row["gene"]] = (
                    row["#chromosome"],
                    Interval(int(row["cds_from"]), int(row["cds_to"])),
                )

    interval_dict = {}
    with open(segfile) as seg:
        for row in csv.DictReader(seg, delimiter="\t"):
            row = dict(row)
            if "e" in row["End"]:
                # Replace one incorrect exponential value
                row["End"] = 115000000
            chrom_trees = interval_dict.setdefault(row["CCLE_name"], {})
            tree = chrom_trees.setdefault(row["Chromosome"], IntervalTree())
            tree[int(row["Start"]):int(row["End"])] = float(row["Segment_Mean"])
    return genes, genedef, interval_dict
def library_name(name, suffix=SHLIB_SUFFIX, is_windows=is_windows):
    """
    Convert a file basename `name` to a library name (no "lib" and ".so" etc.)

    >>> library_name("libpython3.7m.so") # doctest: +SKIP
    'python3.7m'
    >>> library_name("libpython3.7m.so", suffix=".so", is_windows=False)
    'python3.7m'
    >>> library_name("libpython3.7m.dylib", suffix=".dylib", is_windows=False)
    'python3.7m'
    >>> library_name("python37.dll", suffix=".dll", is_windows=True)
    'python37'
    """
    # Non-Windows shared libraries carry a "lib" prefix; strip it first.
    if not is_windows and name.startswith("lib"):
        name = name[3:]
    # Drop the platform suffix (".so", ".dylib", ".dll") if present.
    if suffix and name.endswith(suffix):
        name = name[: len(name) - len(suffix)]
    return name
def add_light(light_type: str = 'POINT') -> str:
    """
    Add a light of the given type to the scene, return
    the name key of the newly added light
    :param light_type: Blender light type, e.g. 'POINT', 'SUN', 'SPOT', 'AREA'
    :return: The named key used to index the object
    """
    # Blender 2.80 renamed lamp_add -> light_add; utils.is_new_api()
    # selects the operator matching the running Blender version.
    if utils.is_new_api():
        bpy.ops.object.light_add(type=light_type)
    else:
        bpy.ops.object.lamp_add(type=light_type)
    # The add operator leaves the new object as the sole selection.
    light_obj = bpy.context.selected_objects[0]
    # Enable contact shadows (only available in the new API)
    if utils.is_new_api():
        light_obj.data.use_contact_shadow = True
    # Return the name
    return light_obj.name
def set_constants(ze=40, p=0.4,
                  kc_min=0.01, kc_max=1.0,
                  snow_alpha=0.2, snow_beta=11.0,
                  ke_max=1.0,
                  a_min=0.45, a_max=0.90):
    """
    Bundle the ET/snow model constants into a single dict.

    :param ze: depth of the surface evaporation layer -- TODO confirm units
    :param p: the fraction of TAW that a crop can extract from the root zone without suffering water stress; ASCE pg 226
    :param kc_min: minimum crop coefficient
    :param kc_max: maximum crop coefficient
    :param snow_alpha: snow model parameter -- TODO confirm meaning
    :param snow_beta: snow model parameter -- TODO confirm meaning
    :param ke_max: maximum evaporation coefficient
    :param a_min: minimum albedo -- presumably dimensionless; verify
    :param a_max: maximum albedo
    :return: dict of all constants plus fixed season start/end dates
    """
    # s_mon/e_mon use a dummy year (1900); only month/day are meaningful.
    d = dict(s_mon=datetime(1900, 7, 1),
             e_mon=datetime(1900, 10, 1),
             ze=ze, p=p,
             kc_min=kc_min, kc_max=kc_max,
             snow_alpha=snow_alpha, snow_beta=snow_beta,
             ke_max=ke_max,
             a_min=a_min, a_max=a_max)
    # NOTE: Python-2 print statement; `pformat` presumably from pprint.
    print 'constants dict: {}\n'.format(pformat(d, indent=2))
    return d
def project_poses(poses, P):
    """Apply the 3x4 projection matrix P to each 3-D pose row: x = P p."""
    assert poses.ndim == 2 and poses.shape[-1] == 3, \
        'Invalid pose dim at ext_proj {}'.format(poses.shape)
    assert P.shape == (3, 4), 'Invalid projection shape {}'.format(P.shape)
    # Append a homogeneous coordinate of 1 to every pose row.
    ones = np.ones((len(poses), 1))
    homogeneous = np.concatenate([poses, ones], axis=-1)
    # Project all poses at once; result comes back row-wise.
    projected = np.matmul(P, homogeneous.T)
    return projected.T
def argmin(x):
    """
    Returns the index of the smallest element of the iterable `x`.
    If two or more elements equal the minimum value, the index of the first
    such element is returned.

    >>> argmin([1, 3, 2, 0])
    3
    >>> argmin(abs(x) for x in range(-3, 4))
    3
    """
    best_index = None
    best_value = None
    for index, value in enumerate(x):
        # Strict '<' keeps the first occurrence when values tie.
        if best_index is None or value < best_value:
            best_index = index
            best_value = value
    return best_index
def find_spot(entry, list):
    """
    return index of entry in list
    """
    # Linear scan; return the position of the first match.
    for index, candidate in enumerate(list):
        if entry == candidate:
            return index
    raise ValueError("could not find entry: "+ str(entry)+ " in list: "+ str(list))
def parse_file(path, game=None, path_relative_to_game=True, verbose=False):
    """
    Parse a single file and return a Tree.

    path, game:
        If game is None, path is a full path and the game is determined from that.
        Or game can be supplied, in which case path is a path relative to the game directory.
    path_relative_to_game:
        When True (default), path/game are resolved through
        pyradox.config.combine_path_and_game; when False, both are used as given.
    verbose:
        When True, print a progress line before lexing.
    """
    if not path_relative_to_game:
        pass
    else:
        # Resolve the concrete file path and owning game via pyradox config.
        path, game = pyradox.config.combine_path_and_game(path, game)
    # Each game declares its own candidate text encodings.
    encodings = game_encodings[game]
    lines = readlines(path, encodings)
    if verbose: print('Parsing file %s.' % path)
    token_data = lex(lines, path)
    return parse_tree(token_data, path)
def svn_repos_fs_commit_txn(*args):
    """svn_repos_fs_commit_txn(svn_repos_t * repos, svn_fs_txn_t * txn, apr_pool_t pool) -> svn_error_t"""
    # Auto-generated SWIG wrapper: forwards directly to the C binding in _repos.
    return _repos.svn_repos_fs_commit_txn(*args)
import os
import asyncio
import logging
from datetime import datetime, timezone
from subprocess import CalledProcessError

logger = logging.getLogger(__name__)


async def run_command(
        cmd: list, cwd: str = None, log_path=None, environ=None) -> str:
    """
    Run a command.

    If 'log_path' is provided, stdout and stderr will be written to this
    location regardless of the end result.

    :param cmd: Command and arguments as a list, e.g. ``["git", "status"]``.
    :param cwd: Working directory for the subprocess (default: inherit).
    :param log_path: Optional file to append a command transcript to.
        NOTE: appending the log requires the third-party ``aiofiles`` package.
    :param environ: Environment mapping for the subprocess; defaults to a
        copy of the current process environment.
    :raises subprocess.CalledProcessError: If the command returned an error
    :returns: Command stdout decoded as UTF-8
    """
    if not environ:
        environ = os.environ.copy()
    process = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        cwd=cwd,
        env=environ
    )
    stdout, stderr = await process.communicate()
    if log_path:
        async with aiofiles.open(log_path, "ab") as file_:
            # BUG FIX: the original called datetime.datetime.now(...) on the
            # class imported via `from datetime import datetime`, which raises
            # AttributeError at runtime. Use the class directly.
            now = datetime.now(timezone.utc)
            await file_.write(
                f"\n===COMMAND===\n{now.isoformat()}\n{cmd}".encode("utf-8")
            )
            await file_.write(b"\n===STDOUT===\n")
            await file_.write(stdout)
            await file_.write(b"\n===STDERR===\n")
            await file_.write(stderr)
    if process.returncode != 0:
        # BUG FIX: CalledProcessError was previously an undefined name here;
        # it is now imported from subprocess above.
        raise CalledProcessError(
            returncode=process.returncode,
            cmd=cmd,
            output=stdout,
            stderr=stderr
        )
    logger.debug(
        "Command %s completed.\nOUTPUT: %s\n",
        " ".join(cmd), stdout
    )
    return stdout.decode("utf-8")
def iter_dir(temp_dir, blast_db, query_name, iteration):
    """
    Get the work directory for the current iteration.

    We need to call this function in child processes so it cannot be in an
    object.
    """
    # Directory name pattern: <db-basename>_<query-basename>_<2-digit iter>
    dir_name = '{}_{}_{:02d}'.format(
        basename(blast_db), basename(query_name), iteration)
    return join(temp_dir, dir_name)
def zoomSurface(src, zoomx, zoomy, smooth):
    """Zooms a surface with different x & y scaling factors.

    This function renders to a new surface, with optional anti-aliasing. If a
    zoom factor is negative, the image will be flipped along that axis. If the
    surface is not 8-bit or 32-bit RGBA/ABGR, it will be converted into a 32-bit
    RGBA format on the fly.

    Args:
        src (:obj:`SDL_Surface`): The surface to zoom.
        zoomx (float): The x-axis (horizontal) zoom factor.
        zoomy (float): The y-axis (vertical) zoom factor.
        smooth (int): If set to 1, the output image will be anti-aliased. If set
            to 0, no anti-aliasing will be performed. Must be either 0 or 1.

    Returns:
        :obj:`SDL_Surface`: A new output surface with zoom applied.

    """
    # Look up the dynamically-bound SDL_gfx routine and delegate to it.
    zoom_fn = _funcs["zoomSurface"]
    return zoom_fn(src, zoomx, zoomy, smooth)
import struct
def array(*cols: Column) -> Column:
    """
    Return column of arrays

    Builds a struct over the given columns, converts each struct value to a
    list, and names the result "[col1, col2, ...]".
    """
    # NOTE(review): `struct` here must be the project's column constructor
    # (a pyspark-like struct()), not the stdlib `struct` module imported
    # above -- the stdlib module is not callable. Verify the intended import.
    return (struct(*cols).apply(list)).alias(f"[{', '.join([Column.getName(c) for c in cols])}]")
def addtodo(request):
    """
    Create a new Todo, or update an existing one, from POSTed form data.

    :param request: HttpRequest object
    :return: None. Redirects to Webpage.
    if pk is not available defaults to id=0.
    """
    # "id_todo" carries the primary key of an existing Todo; 0/absent => create.
    pk = request.POST.get("id_todo", 0)
    if pk:
        # Bind the form to the existing row so save() performs an UPDATE.
        todoobj = Todo.objects.get(pk=pk)
        newtodoform = NewTodoForm(request.POST, instance=todoobj)
    else:
        newtodoform = NewTodoForm(request.POST)
    # Invalid forms are silently discarded; we redirect either way.
    if newtodoform.is_valid():
        newtodoform.save()
    return redirect("todoindex")
def prepare_inputs(boxes, digits_occurrence):
    """
    :param boxes:
        2D list of 81 gray OpenCV images (2D numpy arrays)
    :param digits_occurrence:
        2D numpy array that contains True or False values that represent occurrence of digits
    :return:
        if no digit was found returns None;
        otherwise returns 4D numpy array with shape = (digits count, 28, 28, 1) that
        contains cropped, scaled and centered digits that are perfectly prepared for a cnn model
        (at least for this model I created)
    """
    # Count how many cells actually contain a digit.
    digits_count = 0
    for y in digits_occurrence:
        for x in y:
            digits_count += int(x)
    if digits_count == 0:
        return None
    cropped_boxes_with_digits = get_cropped_boxes_with_digits(boxes, digits_occurrence)
    digits = get_cropped_digits(cropped_boxes_with_digits)
    if digits is None:
        return None
    # resize() mutates in place; add_margins pads to the MNIST-style 28x28 frame.
    resize(digits)
    digits = add_margins(digits, 28, 28)
    center_using_mass_centers(digits)
    digits = digits.reshape((digits.shape[0], 28, 28, 1))
    # Scale pixel values from [0, 255] to [0, 1] for the CNN.
    digits = digits / 255
    return digits
def evaluate_constants(const_arrays, expr):  # pragma: no cover
    """Convert constant arguments to cupy arrays, and perform any possible
    constant contractions.
    """
    converted = [to_cupy(arr) for arr in const_arrays]
    return expr(*converted, backend='cupy', evaluate_constants=True)
def setup_family(dompc, family, create_liege=True, create_vassals=True,
                 character=None, srank=None, region=None, liege=None,
                 num_vassals=2):
    """
    Creates a ruler object and either retrieves a house
    organization or creates it. Then we also create similar
    ruler objects for an npc liege (if we should have one),
    and npc vassals (if we should have any). We return a tuple of
    our ruler object, our liege's ruler object or None, and a list
    of vassals' ruler objects.
    """
    # setup_ruler / setup_vassals are module-level helpers that create the
    # ruler rows and npc organizations -- see their definitions elsewhere.
    vassals = []
    # create a liege only if we don't have one already
    if create_liege and not liege:
        name = "Liege of %s" % family
        liege = setup_ruler(name)
    ruler = setup_ruler(family, dompc, liege)
    if create_vassals:
        vassals = setup_vassals(family, ruler, region, character, srank, num=num_vassals)
    return ruler, liege, vassals
def normalize_line(line: dict, lang: str):
    """Apply normalization to a line of OCR.

    The normalization rules that are applied depend on the language in which
    the text is written. This normalization is necessary because Olive, unlike
    e.g. Mets, does not encode explicitly the presence/absence of whitespaces.

    :param dict line: A line of OCR text.
    :param str lang: Language of the text.
    :return: A new line of text.
    :rtype: dict
    """
    # Tokens carrying a `qid` are pieces of a multi-word ("pseudo") token.
    mw_tokens = [
        token
        for token in line["t"]
        if "qid" in token
    ]
    # apply normalization only to those lines that contain at least one
    # multi-word token (denoted by presence of `qid` field)
    if len(mw_tokens) > 0:
        line = merge_pseudo_tokens(line)
        line = normalize_hyphenation(line)
    for i, token in enumerate(line["t"]):
        # Drop bookkeeping fields that must not survive normalization.
        if "qid" not in token and "nf" in token:
            del token["nf"]
        if "qid" in token:
            del token["qid"]
        # Decide whether this token should be followed by whitespace, based
        # on its neighbours (next / previous token text) and the language.
        if i == 0 and i != len(line["t"]) - 1:
            # first token, with at least one token following
            insert_ws = insert_whitespace(
                token["tx"],
                line["t"][i + 1]["tx"],
                None,
                lang
            )
        elif i == 0 and i == len(line["t"]) - 1:
            # line consists of a single token
            insert_ws = insert_whitespace(
                token["tx"],
                None,
                None,
                lang
            )
        elif i == len(line["t"]) - 1:
            # last token of the line
            insert_ws = insert_whitespace(
                token["tx"],
                None,
                line["t"][i - 1]["tx"],
                lang
            )
        else:
            # interior token: both neighbours available
            insert_ws = insert_whitespace(
                token["tx"],
                line["t"][i + 1]["tx"],
                line["t"][i - 1]["tx"],
                lang
            )
        # "gn" ("glue next") marks tokens NOT followed by a whitespace.
        if not insert_ws:
            token["gn"] = True
    return line
def attribute_summary(attribute_value, item_type, limit=None):
    """Summarizes the information in fields attributes where content is
    written as an array of arrays like tag_cloud, items, etc.
    """
    if attribute_value is None:
        return None
    # Render each (item, count) pair as "item (count)".
    rendered = ["%s (%s)" % (name, count) for name, count in attribute_value]
    total = len(rendered)
    # Clamp the requested limit to the number of available items.
    if limit is None or limit > total:
        limit = total
    label = type_singular(item_type, total == 1)
    return "%s %s: %s" % (total, label, ", ".join(rendered[0: limit]))
import json
def root():
    """Base view."""
    # Serialize the module-level utc_value for the response body.
    payload = str(utc_value)
    return json.dumps(payload)
def plc_read_db(plc_client, db_no, entry_offset, entry_len):
    """
    Read specified amount of bytes at offset from a DB on a PLC

    :param plc_client: connected snap7-style client exposing db_read()
    :param db_no: data block number on the PLC
    :param entry_offset: byte offset within the data block
    :param entry_len: number of bytes to read (4 expected for a float)
    :return: the bytes decoded as a big-endian 32-bit float
    """
    try:
        db_var = plc_client.db_read(db_no, entry_offset, entry_len)
    except Exception as err:
        # Python-2 print statement; exits hard on any read failure.
        print "[-] DB read error:", err
        sys.exit(1)
    # NOTE(review): hexlify(...).decode('hex') round-trips back to the raw
    # bytes (Python 2 only); the intermediate step looks redundant -- verify.
    db_val = struct.unpack('!f', binascii.hexlify(db_var).decode('hex'))[0]
    return db_val
import os
def getPath(path=__file__):
    """
    Get standard path from path. It supports ~ as home directory.

    :param path: it can be to a folder or file. Default is __file__ or module's path.
        If the last path component has a file extension, its containing
        folder is selected.
    :return: dirname (path to a folder)

    .. note:: For file paths this is the same as
        os.path.dirname(os.path.abspath(path)).
    """
    # expanduser handles both "~" and "~user" forms (the old manual
    # replacement only handled a bare "~").
    path = os.path.expanduser(path)
    # BUG FIX: only an extension on the *last* component marks a file; the
    # old check ("." anywhere in the path) misclassified dotted directory
    # names such as "/home/user.name/project".
    if os.path.splitext(os.path.basename(path))[1]:  # check extension
        return os.path.dirname(os.path.abspath(path))
    return os.path.abspath(path)
def rank_adjust(t, c=None):
    """
    Currently limited to only Mean Order Number

    Room to expand to:
    Modal Order Number, and
    Median Order Number

    Uses mean order statistic to conduct rank adjustment
    For further reading see:
    http://reliawiki.org/index.php/Parameter_Estimation

    Above reference provides excellent explanation of how this method is
    derived; this function currently assumes good input.
    """
    # Total items in test/population
    n = len(t)
    # Adjusted ranks; right-censored entries end up as NaN.
    ranks = np.zeros(n)
    if c is None:
        c = np.zeros(n)
    # previous_mean_order: "Previous Mean Order Number" (PMON)
    # remaining: "Number of Items Before Present Suspended Set" (NIBPSS)
    previous_mean_order = 0
    for idx in range(n):
        if c[idx] == 0:
            remaining = n - idx
            ranks[idx] = previous_mean_order + \
                (n + 1 - previous_mean_order) / (1 + remaining)
            previous_mean_order = ranks[idx]
        elif c[idx] == 1:
            ranks[idx] = np.nan
        else:
            raise ValueError("Censoring flag must be 0 or 1 with rank_adjust")
    return ranks
from datetime import datetime
import requests
def login_captcha(username, password, sid):
    """
    bilibili login with captcha.
    depend on captcha recognize service, please do not use this as first choice.

    Args:
        username: plain text username for bilibili.
        password: plain text password for bilibili.
        sid: session id

    Returns:
        code: login response code (0: success, -105: captcha error, ...).
        access_token: token for further operation.
        refresh_token: token for refresh access_token.
        sid: session id.
        mid: member id.
        expires_in: access token expire time (30 days)
    """
    # Fetch a captcha image for this session and run it through the recognizer.
    jsessionid, captcha_img = get_capcha(sid)
    captcha_str = recognize_captcha(captcha_img)
    # Server-issued hash + RSA public key are used to encrypt the password.
    hash, pubkey, sid = get_key(sid, jsessionid)
    encrypted_password = cipher.encrypt_login_password(password, hash, pubkey)
    url_encoded_username = parse.quote_plus(username)
    url_encoded_password = parse.quote_plus(encrypted_password)
    post_data = {
        'appkey': APPKEY,
        'captcha': captcha_str,
        'password': url_encoded_password,
        'platform': "pc",
        'ts': str(int(datetime.now().timestamp())),
        'username': url_encoded_username
    }
    # The request signature is computed over the url-encoded field values.
    post_data['sign'] = cipher.sign_dict(post_data, APPSECRET)
    # avoid multiple url parse
    post_data['username'] = username
    post_data['password'] = encrypted_password
    post_data['captcha'] = captcha_str
    headers = {
        'Connection': 'keep-alive',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'User-Agent': '',
        'Accept-Encoding': 'gzip,deflate',
    }
    r = requests.post(
        "https://passport.bilibili.com/api/oauth2/login",
        headers=headers,
        data=post_data,
        cookies={
            'JSESSIONID': jsessionid,
            'sid': sid
        }
    )
    response = r.json()
    if response['code'] == 0:
        login_data = response['data']
        return response['code'], login_data['access_token'], login_data['refresh_token'], sid, login_data['mid'], login_data["expires_in"]
    else:
        # Non-zero code: propagate the code and leave the token fields empty.
        return response['code'], None, None, sid, None, None
def ml_transitions(game, attach=True, verbose=False):
    """
    dataframe to directional line movement arrays

    :param game: DataFrame with per-update moneyline columns "a_ml" / "h_ml"
    :param attach: when True, append one-hot transition columns (prefixed
        "trans_class_") to `game`; otherwise return the raw class list
    :param verbose: print a human-readable class name per row
    :return: DataFrame with transition columns, or list of one-hot vectors
    """
    transition_classes = []
    prev = [None, None]
    for i, row in game.iterrows():
        cur = list(row[["a_ml", "h_ml"]])
        # classify_transition encodes the movement direction between
        # consecutive (away, home) moneyline pairs -- one-hot, presumably.
        transition_class = analyze.classify_transition(prev, cur)
        transition_classes.append(transition_class)
        prev = cur
    if attach:
        trans_df = pd.DataFrame(transition_classes)
        trans_df = trans_df.add_prefix("trans_class_")
        ret = pd.concat([game, trans_df], axis=1)
    else:
        ret = transition_classes
    if verbose:
        # Map the argmax of each one-hot vector to its display string.
        strings = {i: s for i, s in enumerate(bm.TRANSITION_CLASS_STRINGS)}
        for i, t in enumerate(transition_classes):
            class_num = np.argmax(t)
            print(f"{i}: {strings[class_num]}")
    return ret
def plugin_uninstall(plugin, flags=None, kvflags=None):
    """
    Uninstall a Helm plugin.

    Return True if succeed, else the error message.

    plugin
        (string) The plugin to uninstall.

    flags
        (list) Flags in argument of the command without values. ex: ['help', '--help']

    kvflags
        (dict) Flags in argument of the command with values. ex: {'v': 2, '--v': 4}

    CLI Example:

    .. code-block:: bash

        salt '*' helm.plugin_uninstall PLUGIN
    """
    helm_command = ["plugin", "uninstall", plugin]
    return _exec_true_return(commands=helm_command, flags=flags, kvflags=kvflags)
def geom_to_tuple(geom):
    """
    Takes a lat/long point (or geom) from KCMO style csvs.
    Returns (lat, long) tuple

    :param geom: string of the form "POINT (<lat> <long>)"
    :return: tuple of floats (lat, long)
    """
    # SECURITY FIX: the original eval()'d the CSV field, which executes
    # arbitrary code from untrusted input. Parse the two numbers directly.
    coords = geom[6:].strip().lstrip("(").rstrip(")")
    return tuple(float(part) for part in coords.split())
def b_s_Poole(s, V_max, z, halo_type, bias_type):
    """ This function expresses Equation (2) of Poole et al (2014)
        and fetches the parameters needed to compute it.

    Args:
        s (numpy.ndarray) : scale values
        V_max (float)     : halo maximum circular velocity
        z (float)         : redshift of interest
        halo_type (str)   : halo type
        bias_type (str)   : bias type

    Returns:
        A list containing two arrays with the values of `b_s`
        and `b_x` at each scale
    """
    # Fetch the fitted bias parameters for this halo/bias configuration.
    b_x, s_o, V_SF = set_b_s_Poole_params(V_max, z, halo_type, bias_type)
    # Velocities are normalized by 220; the sign inside the square root
    # flips at the threshold velocity V_SF.
    v_norm = V_max / 220.
    if v_norm < V_SF:
        b_s = b_x * (1. - (s_o / s)) ** 0.5
    else:
        b_s = b_x * (1. + (s_o / s)) ** 0.5
    return [b_s, b_x]
from sys import path
def find_places_location():
    """Finds the location of the largest places.sqlite file

    Scans every Firefox profile of every user under /Users (macOS layout)
    and returns the path of the biggest places.sqlite found, or "" if none.
    """
    # BUG FIX: use os.path / os.listdir explicitly; this snippet previously
    # relied on `from sys import path` (sys.path has no .exists()) and an
    # unbound `listdir`.
    best_location = ""
    best_size = 0
    for user in os.listdir('/Users'):
        profile_dir = '/Users/' + user + '/Library/Application Support/Firefox/Profiles/'
        if os.path.exists(profile_dir):
            for profile in os.listdir(profile_dir):
                places_location = profile_dir + profile + "/places.sqlite"
                if os.path.exists(places_location):
                    size = os.path.getsize(places_location)
                    if size > best_size:
                        best_size = size
                        best_location = places_location
    return best_location
import sys
def argumentParser(listOfArguments):
    """Parses arguments

    Maps each flag's label to whether that flag appears in sys.argv.
    `listOfArguments` is a dict of {command-line flag: result label}.
    """
    present = set(sys.argv)
    return {label: flag in present
            for flag, label in listOfArguments.items()}
def rasterize_polygon(poly_as_array, shape, geo_ref):
    """
    Return a boolean numpy mask with 1 for cells within polygon.

    Args:
        poly_as_array: A polygon as returned by ogrpoly2array (list of numpy arrays / rings)
        shape: Shape (nrows, ncols) of output array
        geo_ref: GDAL style georeference of grid.
    Returns:
        Numpy boolean 2d array.
    """
    # Cell-center coordinates for the whole grid, then point-in-polygon test.
    cell_centers = mesh_as_points(shape, geo_ref)
    inside = points_in_polygon(cell_centers, poly_as_array)
    return inside.reshape(shape)
def _model_columns(ins):
    """ Get columns info

    :type ins: sqlalchemy.orm.mapper.Mapper
    :rtype: list[SaColumnDoc]
    """
    columns = []
    for c in ins.column_attrs:
        # Skip protected
        if c.key.startswith('_'):
            continue

        # Type
        column_type = c.columns[0].type  # FIXME: support multi-column properties

        # Compile it using a dialect if necessary
        try:
            column_type_str = str(column_type)
        except sa_exc.UnsupportedCompilationError:
            # Got to compile it using a dialect
            # TODO: support other dialects in addition to Postgres
            column_type_str = column_type.compile(dialect=postgresql.dialect())
        except sa_exc.CompileError:
            # Types that fail even with a dialect get a placeholder.
            column_type_str = '?'

        # Collect
        columns.append(SaColumnDoc(
            key=c.key,
            doc=c.doc or '',
            type=column_type_str,
            null=_is_attribute_nullable(c),
        ))
    return columns
def get_reg_part(reg_doc):
    """
    Depending on source, the CFR part number exists in different places. Fetch
    it, wherever it is. Returns None when no candidate is found.
    """
    candidates = []
    # FR notice
    candidates.extend(
        node.attrib['PART'] for node in reg_doc.xpath('//REGTEXT'))
    # e-CFR XML, under PART/EAR
    candidates.extend(
        node.text.replace('Pt.', '').strip()
        for node in reg_doc.xpath('//PART/EAR')
        if 'Pt.' in node.text)
    # e-CFR XML, under FDSYS/HEADING
    candidates.extend(
        node.text.replace('PART', '').strip()
        for node in reg_doc.xpath('//FDSYS/HEADING')
        if 'PART' in node.text)
    # e-CFR XML, under FDSYS/GRANULENUM
    candidates.extend(
        node.text.strip() for node in reg_doc.xpath('//FDSYS/GRANULENUM'))
    candidates = [c for c in candidates if c.strip()]
    if candidates:
        return candidates[0]
    return None
from pathlib import Path
import json
def load_configuration(module: str, configs_path=None) -> dict:
    """
    Load the configuration and return the dict of the configuration loaded

    :param module: The module name to load the configuration.
    :type module: str
    :param configs_path: path where to check configs. Default `configs/modules/`
    :type configs_path: str
    :return: Dict of the configuration if present.
    :rtype: dict
    :raise FileNotFoundError: If configuration file not found
    """
    # Validator() presumably raises on non-string input -- confirm its contract.
    Validator().string(module)
    module = module.lower()
    if configs_path:
        module_path = Path(f"{configs_path}{module}.json")  # search for config file
        if not module_path.exists():
            raise FileNotFoundError(
                f"Couldn't find the configuration file of the module {module_path.absolute()}"
            )
    else:
        # `sep` is presumably os.sep imported at module top -- verify.
        server_path = Path(
            f"configs{sep}modules{sep}server{sep}{module}.json"
        )  # search for config file in server
        android_path = Path(
            f"configs{sep}modules{sep}android{sep}{module}.json"
        )  # search for config file in android
        # Server config wins when both exist.
        if server_path.exists():
            module_path = server_path
        elif android_path.exists():
            module_path = android_path
        else:
            raise FileNotFoundError(
                f"Couldn't find the configuration file of the module {module}.json"
            )
    with module_path.open() as mod_file:
        mod_data = json.load(mod_file)
    return mod_data
def direct(input_writer, script_str, run_dir, prog,
           geo, charge, mult, method, basis, **kwargs):
    """ Generates an input file for an electronic structure job and
        runs it directly.

        :param input_writer: elstruct writer module function for desired job
        :type input_writer: elstruct function
        :param script_str: string of bash script that contains
            execution instructions electronic structure job
        :type script_str: str
        :param run_dir: name of directory to run electronic structure job
        :type run_dir: str
        :param prog: electronic structure program to run
        :type prog: str
        :param geo: cartesian or z-matrix geometry
        :type geo: tuple
        :param charge: molecular charge
        :type charge: int
        :param mult: spin multiplicity
        :type mult: int
        :param method: electronic structure method
        :type method: str
        :returns: the input string and the output string
        :rtype: (str, str)
    """
    # Build the input deck for the requested program / level of theory.
    input_str = input_writer(
        prog=prog,
        geo=geo, charge=charge, mult=mult, method=method, basis=basis,
        **kwargs)
    # Execute the job and keep only the primary output stream.
    output_str = from_input_string(script_str, run_dir, input_str)[0]
    return input_str, output_str
def get_ids(records, key):
    """Utility method to extract list of Ids from Bulk API insert/query result.

    Args:
        records (:obj:`list`): List of records from a Bulk API insert or SOQL query.
        key (:obj:`str`): Key to extract - 'Id' for queries or 'id' for inserted data.

    Returns:
        (:obj:`list`) of inserted record Ids in form [{'Id':'001000000000001'},...]
    """
    extracted = []
    for record in records:
        extracted.append({'Id': record[key]})
    return extracted
import codecs
import os
def read(*parts):
    """
    Build an absolute path from *parts* and return the contents of the
    resulting file. Assume UTF-8 encoding.
    """
    full_path = os.path.join(HERE, *parts)
    with codecs.open(full_path, "rb", "utf-8") as handle:
        return handle.read()
def auto_help(func):
    """Automatically registers a help command for this group."""
    # Only command groups can host a sub-command.
    if not isinstance(func, commands.Group):
        raise TypeError('Auto help can only be applied to groups.')
    help_command = commands.Command(_call_help, name='help', hidden=True)
    func.add_command(help_command)
    return func
import itertools
import operator
def unique_justseen(iterable, key=None):
    """
    List unique elements, preserving order. Remember only the element just seen.

    >>> [x for x in unique_justseen('AAAABBBCCDAABBB')]
    ['A', 'B', 'C', 'D', 'A', 'B']
    >>> [x for x in unique_justseen('ABBCcAD', str.lower)]
    ['A', 'B', 'C', 'A', 'D']
    """
    # BUG FIX: the original used itertools.imap, which only exists on
    # Python 2. A generator over groupby keeps the same lazy semantics and
    # works on both Python 2 and 3: take the first element of each run of
    # equal-keyed items.
    return (next(group) for _, group in itertools.groupby(iterable, key))
def unique_rows(arr, thresh=0.0, metric='euclidean'):
    """Returns subset of rows that are unique, in terms of Euclidean distance

    http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
    """
    # Pairwise distances between all rows; rows within `thresh` of one
    # another are treated as duplicates and collapse into one group.
    pairwise = squareform(pdist(arr, metric=metric))
    close_groups = {tuple(np.nonzero(row)[0]) for row in pairwise <= thresh}
    representatives = [group[0] for group in close_groups]
    return arr[representatives]
import json
def backdoors_listing(request,option=None):
    """
    Generate the Backdoor listing page.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', 'csv', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    # Force lazy user setup so ACL checks below see a populated user object.
    request.user._setup()
    user = request.user

    if user.has_access_to(BackdoorACL.READ):
        if option == "csv":
            return generate_backdoor_csv(request)
        # Deletion is gated behind a separate ACL even for readers.
        elif option== "jtdelete" and not user.has_access_to(BackdoorACL.DELETE):
            result = {'sucess':False,
                      'message':'User does not have permission to delete Backdoor.'}
            return HttpResponse(json.dumps(result,
                                           default=json_handler),
                                content_type="application/json")
        # All other options are handled by the jtable generator.
        return generate_backdoor_jtable(request, option)
    else:
        return render_to_response("error.html",
                                  {'error': 'User does not have permission to view backdoor listing.'},
                                  RequestContext(request))
def my_join(x):
    """
    Concatenate an iterable of strings into a single string.

    :param x: iterable of strings to join
    :return: the concatenation of all elements of ``x``
    """
    return ''.join(x)
import argparse
import configparser
def config_parse_args(configfile=None):
    """ Command line arguments and configuration file setting """
    parser = argparse.ArgumentParser(
        description='nanoping log importer to InfluxDB')
    # (flag, keyword-arguments) pairs for every supported option.
    option_specs = [
        ('--config', dict(type=str, required=True,
                          help='configuration file')),
        ('--interface', dict(type=str, required=True,
                             help='interface name')),
        ('--log', dict(type=str, required=False,
                       help='nanoping logfile location')),
        ('--db', dict(type=str, required=False,
                      help='database name of InfluxDB')),
        ('--debug', dict(action='store_true',
                         help='turn on debug output')),
        ('--dry', dict(action='store_true',
                       help='dry run mode')),
    ]
    for flag, options in option_specs:
        parser.add_argument(flag, **options)
    args = parser.parse_args()
    config = configparser.ConfigParser()
    config.read(args.config)
    # override by command line option
    if args.db is not None:
        config['influxdb']['db'] = args.db
    return args, config
def visualize_code_vectors(code_vectors, cmap='Paired', inter='none',
                           origin='upper', fontsize=16, aspect='auto',
                           colorbar=True):
    """
    Render a sequence of code vectors as a (time x sensor-cluster) image.

    :param code_vectors: 2-D array-like; rows are time steps -- TODO confirm
    :param cmap: matplotlib colormap name
    :param inter: imshow interpolation mode
    :param origin: imshow origin ('upper' or 'lower')
    :param fontsize: font size applied to title, labels and ticks
    :param aspect: imshow aspect mode
    :param colorbar: attach a colorbar when True
    :return: the created matplotlib Figure
    """
    to_plot = np.array(code_vectors)
    # First the parameters
    to_plot_title = 'Code Vectors in Time'
    cmap = cmap
    inter = inter
    origin = origin
    fontsize = fontsize  # The fontsize
    fig_size = (16, 12)
    axes_position = [0.1, 0.1, 0.8, 0.8]
    xlabel = 'Sensor Clusters'
    ylabel = 'Time'
    fig = plt.figure(figsize=fig_size)
    ax = fig.add_axes(axes_position)
    im = plt.imshow(to_plot, interpolation=inter, cmap=cmap,
                    origin=origin, aspect=aspect)
    # Se the labels and titles
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(to_plot_title)
    # Se the ticks names for x
    # x_labels = np.arange(Nseries * Nseries + 1)
    # ax.xaxis.set_major_formatter(plt.FixedFormatter(x_labels))
    # ax.xaxis.set_major_locator(plt.MultipleLocator(1))
    # Change the font sizes
    axes = fig.get_axes()
    for ax in axes:
        for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +
                     ax.get_xticklabels() + ax.get_yticklabels()):
            item.set_fontsize(fontsize)
    # Colorbar (This makes the axes to display proper)
    if colorbar:
        # make_axes_locatable presumably from mpl_toolkits.axes_grid1 -- confirm.
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        cbar = fig.colorbar(im, cax=cax)
        cbar.solids.set_edgecolor('face')
    return fig
def get_displacements_and_forces(disp_dataset):
    """Return displacements and forces of all atoms from displacement dataset.

    This is used to extract displacements and forces from displacement dataset.
    This method is considered more-or-less as a converter when the input is in
    type-1.

    Parameters
    ----------
    disp_dataset : dict
        Displacement dataset either in type-1 or type-2.

    Returns
    -------
    displacements : ndarray
        Displacements of all atoms in all supercells.
        shape=(snapshots, supercell atoms, 3), dtype='double', order='C'
    forces : ndarray or None
        Forces of all atoms in all supercells.
        shape=(snapshots, supercell atoms, 3), dtype='double', order='C'
        None is returned when forces don't exist.

    """
    if "first_atoms" in disp_dataset:
        # type-1: each snapshot displaces a single atom.
        natom = disp_dataset["natom"]
        snapshots = disp_dataset["first_atoms"]
        disps = np.zeros((len(snapshots), natom, 3), dtype="double", order="C")
        forces = None
        for i, snapshot in enumerate(snapshots):
            disps[i, snapshot["number"]] = snapshot["displacement"]
            if "forces" in snapshot:
                # Allocate the force array lazily, on the first snapshot
                # that actually carries forces.
                if forces is None:
                    forces = np.zeros_like(disps)
                forces[i] = snapshot["forces"]
        return disps, forces
    elif "displacements" in disp_dataset:
        # type-2: displacements (and optionally forces) stored directly.
        return disp_dataset["displacements"], disp_dataset.get("forces")
from typing import Dict
from datetime import datetime
def GetUserAllBasicData(user_url: str) -> Dict:
    """Fetch all of a user's basic profile information.

    Args:
        user_url (str): URL of the user's personal homepage

    Returns:
        Dict: the user's basic information
    """
    result = {}
    # Three data sources: JSON API, the PC HTML page, and the anniversary page.
    json_obj = GetUserJsonDataApi(user_url)
    html_obj = GetUserPCHtmlDataApi(user_url)
    anniversary_day_html_obj = GetUserNextAnniversaryDayHtmlDataApi(UserUrlToUserSlug(user_url))
    result["name"] = json_obj["nickname"]
    result["url"] = user_url
    result["uslug"] = UserUrlToUserSlug(user_url)
    result["gender"] = json_obj["gender"]
    result["followers_count"] = json_obj["following_users_count"]
    result["fans_count"] = json_obj["followers_count"]
    # NOTE(review): assigns the whole JSON object; a key lookup such as
    # json_obj["articles_count"] appears to be missing here -- confirm.
    result["articles_count"] = json_obj
    result["wordage"] = json_obj["total_wordage"]
    result["likes_count"] = json_obj["total_likes_count"]
    try:
        # Assets are scraped from the HTML; "1.2w" style numbers are expanded.
        result["assets_count"] = html_obj.xpath("//div[@class='info']/ul/li[6]/div[@class='meta-block']/p")[0].text
        result["assets_count"] = float(result["assets_count"].replace(".", "").replace("w", "000"))
    except IndexError:
        result["assets_count"] = None
    if json_obj["total_wordage"] == 0 and json_obj["jsd_balance"] == 0:
        result["FP_count"] = None
    else:
        result["FP_count"] = json_obj["jsd_balance"] / 1000
    # FTN is derived as the difference between total assets and FP.
    if result["assets_count"] and result["FP_count"]:
        result["FTN_count"] = result["assets_count"] - result["FP_count"]
        result["FTN_count"] = round(abs(result["FTN_count"]), 3)
    else:
        result["FTN_count"] = None
    result["badges_list"] = html_obj.xpath("//li[@class='badge-icon']/a/text()")
    result["badges_list"] = [item.replace(" ", "").replace("\n", "") for item in result["badges_list"]]  # strip spaces and newlines
    result["badges_list"] = [item for item in result["badges_list"] if item != ""]  # drop empty entries
    result["last_update_time"] = datetime.fromtimestamp(json_obj["last_updated_at"])
    try:
        result["vip_info"] = {
            "vip_type": {
                "bronze": "铜牌",
                "silver": "银牌",
                "gold": "黄金",
                "platina": "白金"
            }[json_obj["member"]["type"]],
            "expire_date": datetime.fromtimestamp(json_obj["member"]["expires_at"])
        }
    except KeyError:
        # Non-members have no "member" key; fall back to empty VIP info.
        result["vip_info"] = {
            "vip_type": None,
            "expire_date": None
        }
    result["introduction_html"] = json_obj["intro"]
    if not result["introduction_html"]:
        result["introduction_text"] = ""
    else:
        result["introduction_text"] = "\n".join(etree.HTML(result["introduction_html"]).xpath("//*/text()"))
    # The anniversary date is scraped as free text and rebuilt as ISO "Y-M-D".
    result["next_anniversary_day"] = anniversary_day_html_obj.xpath('//*[@id="app"]/div[1]/div/text()')[0]
    result["next_anniversary_day"] = datetime.fromisoformat("-".join(findall(r"\d+", result["next_anniversary_day"])))
    return result
def get_trace(session, trace_uuid):
    """Look up a stored performance trace by its UUID.

    Args:
        session: database session used to run the query.
        trace_uuid: uuid string identifying the trace.

    Returns:
        A 2-tuple ``(plop_input, flamegraph_input)`` for the trace.

    Raises:
        InvalidUUID: if no trace with that uuid exists (never stored,
            or already garbage collected).
    """
    match = (
        session.query(PerfProfile)
        .filter(PerfProfile.uuid == trace_uuid)
        .first()
    )
    if not match:
        raise InvalidUUID()
    return match.plop_input, match.flamegraph_input
def first_kind_discrete(orientations, order=4):
    """
    Calc orientation tensors of first kind for given discrete vectors.

    Parameters
    ----------
    orientations : sequence of array-like, each of dimension d
        Discrete direction vectors; normalized to unit length internally.
    order : int, default 4
        Tensor order; must be in 1..6.

    Returns
    -------
    np.ndarray
        Orientation tensor of rank ``order`` (shape ``(d,) * order``),
        the mean of the ``order``-fold outer products of the unit vectors.

    Raises
    ------
    Exception
        If ``order`` is not in 1..6.
    """
    # Normalize orientations
    orientations = [np.array(v) / np.linalg.norm(v) for v in orientations]
    # Symmetrize orientations
    # orientations_reversed = [-v for v in orientations]
    # orientations = orientations + orientations_reversed
    einsumStrings = {
        1: "ij -> j",
        2: "ij, ik -> jk",
        3: "ij, ik, il -> jkl",
        4: "ij, ik, il, im -> jklm",
        5: "ij, ik, il, im, in -> jklmn",
        6: "ij, ik, il, im, in, ip -> jklmnp",
    }
    if order not in einsumStrings:
        raise Exception("Not implemented")
    ori = np.array(orientations)
    # One generic contraction replaces the previous six duplicated
    # if/elif branches: the same operand appears `order` times.
    return 1.0 / len(orientations) * np.einsum(
        einsumStrings[order], *([ori] * order)
    )
def prep_pointcloud(input_dict,
                    root_path,
                    voxel_generator,
                    target_assigner,
                    db_sampler=None,
                    max_voxels=20000,
                    class_names=['Car'],
                    remove_outside_points=False,
                    training=True,
                    create_targets=True,
                    shuffle_points=False,
                    reduce_valid_area=False,
                    remove_unknown=False,
                    gt_rotation_noise=[-np.pi / 3, np.pi / 3],
                    gt_loc_noise_std=[1.0, 1.0, 1.0],
                    global_rotation_noise=[-np.pi / 4, np.pi / 4],
                    global_scaling_noise=[0.95, 1.05],
                    global_loc_noise_std=(0.2, 0.2, 0.2),
                    global_random_rot_range=[0.78, 2.35],
                    generate_bev=False,
                    without_reflectivity=False,
                    num_point_features=6,
                    anchor_area_threshold=1,
                    gt_points_drop=0.0,
                    gt_drop_max_keep=10,
                    remove_points_after_sample=True,
                    anchor_cache=None,
                    remove_environment=False,
                    random_crop=False,
                    reference_detections=None,
                    add_rgb_to_points=False,
                    lidar_input=False,
                    unlabeled_db_sampler=None,
                    out_size_factor=2,
                    min_gt_point_dict=None,
                    bev_only=False,
                    use_group_id=False,
                    out_dtype=np.float32):
    """convert point cloud to voxels, create targets if ground truths
    exists.

    Pipeline overview (KITTI-style preprocessing):
      1. optional point filtering (reference-detection frustums, image
         frustum, environment removal);
      2. training only: clean ground-truth boxes, paste database-sampled
         objects into the scene, then apply per-object and global
         augmentation noise;
      3. voxelize the points and assemble the example dict;
      4. generate (or reuse cached) anchors plus an occupancy-based
         anchors mask, optionally a BEV map;
      5. training only: assign classification / regression targets via
         ``target_assigner``.

    NOTE(review): several parameters (reduce_valid_area, gt_points_drop,
    gt_drop_max_keep, add_rgb_to_points, min_gt_point_dict, out_dtype,
    unlabeled_db_sampler, image_idx) are accepted or read but unused in
    this body — presumably kept for config compatibility; confirm before
    relying on them. Box/calibration conventions (rect, Trv2c, P2)
    follow the helper modules (box_np_ops, prep, kitti) — verify there.
    """
    points = input_dict["points"]
    if training:
        gt_boxes = input_dict["gt_boxes"]
        gt_names = input_dict["gt_names"]
        difficulty = input_dict["difficulty"]
        group_ids = None
        if use_group_id and "group_ids" in input_dict:
            group_ids = input_dict["group_ids"]
    # Camera/lidar calibration matrices (KITTI naming).
    rect = input_dict["rect"]
    Trv2c = input_dict["Trv2c"]
    P2 = input_dict["P2"]
    unlabeled_training = unlabeled_db_sampler is not None
    image_idx = input_dict["image_idx"]

    # Keep only points inside the camera frustums of external reference
    # detections (if supplied).
    if reference_detections is not None:
        C, R, T = box_np_ops.projection_matrix_to_CRT_kitti(P2)
        frustums = box_np_ops.get_frustum_v2(reference_detections, C)
        frustums -= T
        # frustums = np.linalg.inv(R) @ frustums.T
        frustums = np.einsum('ij, akj->aki', np.linalg.inv(R), frustums)
        frustums = box_np_ops.camera_to_lidar(frustums, rect, Trv2c)
        surfaces = box_np_ops.corner_to_surfaces_3d_jit(frustums)
        masks = points_in_convex_polygon_3d_jit(points, surfaces)
        points = points[masks.any(-1)]

    # Drop lidar points that project outside the camera image.
    if remove_outside_points and not lidar_input:
        image_shape = input_dict["image_shape"]
        points = box_np_ops.remove_outside_points(points, rect, Trv2c, P2,
                                                  image_shape)
    # Keep only points inside gt boxes of the target classes.
    if remove_environment is True and training:
        selected = kitti.keep_arrays_by_name(gt_names, class_names)
        gt_boxes = gt_boxes[selected]
        gt_names = gt_names[selected]
        difficulty = difficulty[selected]
        if group_ids is not None:
            group_ids = group_ids[selected]
        points = prep.remove_points_outside_boxes(points, gt_boxes)
    if training:
        # --- ground-truth cleanup ---
        # print(gt_names)
        selected = kitti.drop_arrays_by_name(gt_names, ["DontCare"])
        gt_boxes = gt_boxes[selected]
        gt_names = gt_names[selected]
        difficulty = difficulty[selected]
        if group_ids is not None:
            group_ids = group_ids[selected]
        gt_boxes = box_np_ops.box_camera_to_lidar(gt_boxes, rect, Trv2c)
        # Drop boxes annotated as difficulty == -1 ("unknown").
        if remove_unknown:
            remove_mask = difficulty == -1
            """
            gt_boxes_remove = gt_boxes[remove_mask]
            gt_boxes_remove[:, 3:6] += 0.25
            points = prep.remove_points_in_boxes(points, gt_boxes_remove)
            """
            keep_mask = np.logical_not(remove_mask)
            gt_boxes = gt_boxes[keep_mask]
            gt_names = gt_names[keep_mask]
            difficulty = difficulty[keep_mask]
            if group_ids is not None:
                group_ids = group_ids[keep_mask]
        # True for boxes belonging to the classes being trained on.
        gt_boxes_mask = np.array(
            [n in class_names for n in gt_names], dtype=np.bool_)
        # --- gt-database sampling (copy-paste augmentation) ---
        if db_sampler is not None:
            sampled_dict = db_sampler.sample_all(
                root_path,
                gt_boxes,
                gt_names,
                num_point_features,
                random_crop,
                gt_group_ids=group_ids,
                rect=rect,
                Trv2c=Trv2c,
                P2=P2)

            if sampled_dict is not None:
                sampled_gt_names = sampled_dict["gt_names"]
                sampled_gt_boxes = sampled_dict["gt_boxes"]
                sampled_points = sampled_dict["points"]
                sampled_gt_masks = sampled_dict["gt_masks"]
                # gt_names = gt_names[gt_boxes_mask].tolist()
                gt_names = np.concatenate([gt_names, sampled_gt_names], axis=0)
                # gt_names += [s["name"] for s in sampled]
                gt_boxes = np.concatenate([gt_boxes, sampled_gt_boxes])
                gt_boxes_mask = np.concatenate(
                    [gt_boxes_mask, sampled_gt_masks], axis=0)
                if group_ids is not None:
                    sampled_group_ids = sampled_dict["group_ids"]
                    group_ids = np.concatenate([group_ids, sampled_group_ids])

                if remove_points_after_sample:
                    # Avoid scene points overlapping the pasted objects.
                    points = prep.remove_points_in_boxes(
                        points, sampled_gt_boxes)

                points = np.concatenate([sampled_points, points], axis=0)
        # unlabeled_mask = np.zeros((gt_boxes.shape[0], ), dtype=np.bool_)
        if without_reflectivity:
            # Drop the reflectivity channel (index 3) from each point.
            used_point_axes = list(range(num_point_features))
            used_point_axes.pop(3)
            points = points[:, used_point_axes]
        pc_range = voxel_generator.point_cloud_range
        if bev_only:  # set z and h to limits
            gt_boxes[:, 2] = pc_range[2]
            gt_boxes[:, 5] = pc_range[5] - pc_range[2]
        # --- per-object augmentation noise (rotation / translation) ---
        prep.noise_per_object_v3_(
            gt_boxes,
            points,
            gt_boxes_mask,
            rotation_perturb=gt_rotation_noise,
            center_noise_std=gt_loc_noise_std,
            global_random_rot_range=global_random_rot_range,
            group_ids=group_ids,
            num_try=100)
        # should remove unrelated objects after noise per object
        gt_boxes = gt_boxes[gt_boxes_mask]
        gt_names = gt_names[gt_boxes_mask]
        if group_ids is not None:
            group_ids = group_ids[gt_boxes_mask]
        # 1-based class ids; 0 is implicitly background.
        gt_classes = np.array(
            [class_names.index(n) + 1 for n in gt_names], dtype=np.int32)
        # --- global augmentation: flip, rotate, scale, translate ---
        gt_boxes, points = prep.random_flip(gt_boxes, points)
        gt_boxes, points = prep.global_rotation(
            gt_boxes, points, rotation=global_rotation_noise)
        gt_boxes, points = prep.global_scaling_v2(gt_boxes, points,
                                                  *global_scaling_noise)

        # Global translation
        gt_boxes, points = prep.global_translate(gt_boxes, points, global_loc_noise_std)

        # Discard boxes whose centers fell outside the BEV range after
        # augmentation.
        bv_range = voxel_generator.point_cloud_range[[0, 1, 3, 4]]
        mask = prep.filter_gt_box_outside_range(gt_boxes, bv_range)
        gt_boxes = gt_boxes[mask]
        gt_classes = gt_classes[mask]
        if group_ids is not None:
            group_ids = group_ids[mask]

        # limit rad to [-pi, pi]
        gt_boxes[:, 6] = box_np_ops.limit_period(
            gt_boxes[:, 6], offset=0.5, period=2 * np.pi)

    if shuffle_points:
        # shuffle is a little slow.
        np.random.shuffle(points)

    # [0, -40, -3, 70.4, 40, 1]
    voxel_size = voxel_generator.voxel_size
    pc_range = voxel_generator.point_cloud_range
    grid_size = voxel_generator.grid_size
    # [352, 400]

    voxels, coordinates, num_points = voxel_generator.generate(
        points, max_voxels)

    example = {
        'voxels': voxels,
        'num_points': num_points,
        'coordinates': coordinates,
        "num_voxels": np.array([voxels.shape[0]], dtype=np.int64)
    }
    example.update({
        'rect': rect,
        'Trv2c': Trv2c,
        'P2': P2,
    })
    # if not lidar_input:
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]
    # --- anchors: reuse the supplied cache, else generate from scratch ---
    if anchor_cache is not None:
        anchors = anchor_cache["anchors"]
        anchors_bv = anchor_cache["anchors_bv"]
        matched_thresholds = anchor_cache["matched_thresholds"]
        unmatched_thresholds = anchor_cache["unmatched_thresholds"]
    else:
        ret = target_assigner.generate_anchors(feature_map_size)
        anchors = ret["anchors"]
        anchors = anchors.reshape([-1, 7])
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        anchors_bv = box_np_ops.rbbox2d_to_near_bbox(
            anchors[:, [0, 1, 3, 4, 6]])
    example["anchors"] = anchors
    # print("debug", anchors.shape, matched_thresholds.shape)
    # anchors_bv = anchors_bv.reshape([-1, 4])
    anchors_mask = None
    if anchor_area_threshold >= 0:
        # Mask out anchors whose BEV footprint covers too few occupied
        # voxels; the double cumsum builds an integral image so each
        # anchor's occupied area is a constant-time query.
        coors = coordinates
        dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(
            coors, tuple(grid_size[::-1][1:]))
        dense_voxel_map = dense_voxel_map.cumsum(0)
        dense_voxel_map = dense_voxel_map.cumsum(1)
        anchors_area = box_np_ops.fused_get_anchors_area(
            dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)
        anchors_mask = anchors_area > anchor_area_threshold
        # example['anchors_mask'] = anchors_mask.astype(np.uint8)
        example['anchors_mask'] = anchors_mask
    if generate_bev:
        # Rasterize a bird's-eye-view map at half the xy voxel size and
        # double the z voxel size.
        bev_vxsize = voxel_size.copy()
        bev_vxsize[:2] /= 2
        bev_vxsize[2] *= 2
        bev_map = points_to_bev(points, bev_vxsize, pc_range,
                                without_reflectivity)
        example["bev_map"] = bev_map
    if not training:
        return example
    if create_targets:
        # Assign per-anchor labels and box-regression targets.
        targets_dict = target_assigner.assign(
            anchors,
            gt_boxes,
            anchors_mask,
            gt_classes=gt_classes,
            matched_thresholds=matched_thresholds,
            unmatched_thresholds=unmatched_thresholds)
        example.update({
            'labels': targets_dict['labels'],
            'reg_targets': targets_dict['bbox_targets'],
            'reg_weights': targets_dict['bbox_outside_weights'],
        })
    return example
def fix_repo_url(repo_url, in_type='https', out_type='ssh', format_dict=format_dict):
    """Changes the repo_url format (e.g. https -> ssh).

    Each URL fragment of the ``in_type`` format is replaced, pairwise,
    by the corresponding fragment of the ``out_type`` format.

    Args:
        repo_url: the repository URL to rewrite.
        in_type: key in ``format_dict`` naming the current format.
        out_type: key in ``format_dict`` naming the desired format.
        format_dict: mapping of format name -> list of URL fragments;
            defaults to the module-level ``format_dict`` captured at
            definition time.

    Returns:
        The rewritten repository URL.
    """
    # Builtin zip replaces the Python-2-only itertools.izip; the
    # fragment lists are short, so eager pairing costs nothing.
    for old, new in zip(format_dict[in_type], format_dict[out_type]):
        repo_url = repo_url.replace(old, new)
    return repo_url
from typing import Optional
def add_nodes_to_graph(
    G: nx.Graph,
    protein_df: Optional[pd.DataFrame] = None,
    verbose: bool = False,
) -> nx.Graph:
    """Populate a protein graph with nodes and their intrinsic metadata.

    :param G: ``nx.Graph`` with metadata to populate with nodes.
    :type G: nx.Graph
    :param protein_df: DataFrame of the protein structure supplying node
        IDs and per-node metadata; falls back to ``G.graph["pdb_df"]``
        when omitted.
    :type protein_df: pd.DataFrame, optional
    :param verbose: Controls verbosity of this step.
    :type verbose: bool
    :returns: nx.Graph with nodes added.
    :rtype: nx.Graph
    """
    # Default to the dataframe already stashed on the Graph object.
    if protein_df is None:
        protein_df = G.graph["pdb_df"]

    node_ids = protein_df["node_id"]
    G.add_nodes_from(node_ids)

    # Intrinsic per-node attributes, pulled straight from the dataframe
    # columns (coords is the stacked x/y/z triple per node).
    attribute_values = {
        "chain_id": protein_df["chain_id"].apply(str),
        "residue_name": protein_df["residue_name"],
        "residue_number": protein_df["residue_number"],  # .apply(str)
        "atom_type": protein_df["atom_name"],
        "element_symbol": protein_df["element_symbol"],
        "coords": np.asarray(protein_df[["x_coord", "y_coord", "z_coord"]]),
        "b_factor": protein_df["b_factor"],
    }
    for attribute_name, values in attribute_values.items():
        nx.set_node_attributes(G, dict(zip(node_ids, values)), attribute_name)

    # TODO: include charge, line_idx for traceability?
    if verbose:
        print(nx.info(G))
        print(G.nodes())
    return G
def mc(dataset):
    """
    Modulus calculation.

    Returns sqrt(real^2 + imag^2) element-wise. Implemented with
    ``np.hypot``, which scales internally and therefore does not
    overflow to inf for components near the float max the way the naive
    squared-sum formula does.
    """
    # Equivalent to np.sqrt(dataset.real ** 2 + dataset.imag ** 2),
    # but overflow-safe for large components.
    return np.hypot(dataset.real, dataset.imag)
from typing import List
def _get_fields_list(data: Data) -> List[Field]:
    """Extracts all nested fields from the data as a flat list."""
    fields = []

    def _collect(value):
        # Graph pieces are containers: recurse into their private data
        # structure; anything else is a leaf field and is accumulated.
        if isinstance(value, GraphPieceBase):
            # pylint: disable=protected-access
            tf.nest.map_structure(_collect, value._data)
        else:
            fields.append(value)

    tf.nest.map_structure(_collect, data)
    return fields
def constructAdvancedQuery(qryRoot):
    """
    Turns a qry object into a complex Q object by handing the selected
    template's search tree to the helper.
    """
    selected_template = qryRoot["selectedtemplate"]
    search_tree = qryRoot["searches"][selected_template]["tree"]
    return constructAdvancedQueryHelper(search_tree)
def slice_and_dice(text=text):
    """Collect the last word of every line that starts lowercase.

    The text is stripped of surrounding whitespace and split on
    newlines; for each stripped line whose first character is lowercase,
    the line's final word is taken — with a single trailing '.' or '!'
    removed — and appended to the returned list.
    """
    results = []
    for raw_line in text.strip().split('\n'):
        stripped = raw_line.strip()
        if not stripped[0].islower():
            continue
        final_word = stripped.split()[-1]
        if final_word.endswith(('.', '!')):
            final_word = final_word[:-1]
        results.append(final_word)
    return results
def solution(data):
    """Solution to the problem: preprocess the input, then run the
    seat simulation to completion."""
    seats, visibility_map, height, width = preprocess(data)
    return Simulation(seats, visibility_map, height, width).solve()
from typing import Tuple
def add_received_ip_tags(
    rows: beam.pvalue.PCollection[Row],
    ips_with_metadata: beam.pvalue.PCollection[Tuple[DateIpKey, Row]]
) -> beam.pvalue.PCollection[Row]:
  """Add tags for answer ips (field received.ip) - asnum, asname, http, cert

  Args:
    rows: PCollection of measurement rows
    ips_with_metadata: PCollection of dated ips with geo metadata

  Returns:
    PCollection of measurement rows with tag information added to the
    received.ip row
  """
  # Key each row by (date, received ip) so it can be co-grouped with
  # the metadata collection, which is keyed the same way.
  # PCollection[Tuple[DateIpKey,Row]]
  received_keyed_by_ip_and_date = (
      rows | 'key by received ips and dates' >> beam.Map(
          lambda row: (_make_date_received_ip_key(row), row)).with_output_types(
              Tuple[DateIpKey, Row]))
  # NOTE(review): splitting into NUM_DOMAIN_PARTITIONS before the
  # CoGroupByKey presumably bounds per-key group size / avoids hot keys
  # — confirm against pipeline metrics.
  # Iterable[PCollection[Tuple[DateIpKey,Row]]]
  partition_by_domain = (
      received_keyed_by_ip_and_date | 'partition by domain' >> beam.Partition(
          _get_domain_partition, NUM_DOMAIN_PARTITIONS))
  collections = []
  for i in range(0, NUM_DOMAIN_PARTITIONS):
    elements = partition_by_domain[i]
    # Join metadata and measurement rows on (date, ip).
    # PCollection[Tuple[Tuple[date,ip],Dict[input_name_key,List[Row]]]]
    grouped_received_metadata_and_rows = (({
        IP_METADATA_PCOLLECTION_NAME: ips_with_metadata,
        ROWS_PCOLLECION_NAME: elements
    }) | f'group by received ip keys {i}' >> beam.CoGroupByKey())
    # Merge the joined metadata into each row's 'received' field.
    # PCollection[Row]
    domain_rows_with_tags = (
        grouped_received_metadata_and_rows | f'tag received ips {i}' >>
        beam.FlatMapTuple(lambda k, v: merge_metadata_with_rows(
            k, v, field='received')).with_output_types(Row))
    collections.append(domain_rows_with_tags)
  # Recombine the per-domain partitions into a single collection.
  # PCollection[Row]
  rows_with_tags = (
      collections |
      'merge domain collections' >> beam.Flatten().with_output_types(Row))
  return rows_with_tags
def set_selector(*args):
    """set_selector(sel_t selector, ea_t paragraph) -> int"""
    # SWIG-generated thin wrapper: forwards directly to the native
    # _idaapi implementation; see the IDA SDK docs for semantics.
    return _idaapi.set_selector(*args)
def other_features(tweet):
    """This function takes a tweet dict and returns a list of features.

    These include Sentiment scores, Text and Readability scores,
    as well as Twitter specific features.

    Args:
        tweet: Twitter API status dict with at least "text", "entities",
            and "user" keys.

    Returns:
        List of numeric features in a fixed order (see ``features``
        below for the exact layout).
    """
    tweet_text = tweet["text"]
    ##SENTIMENT
    sentiment = sentiment_analyzer.polarity_scores(tweet_text)

    words = local_tokenizer.tokenize(tweet_text) #Get text only

    num_chars = sum(len(w) for w in words) #num chars in words
    num_chars_total = len(tweet_text)
    num_terms = len(tweet_text.split())
    num_words = len(words)
    num_unique_terms = len(set([x.lower() for x in words]))
    caps_count = sum([1 if x.isupper() else 0 for x in tweet_text])
    # Guard against empty tweet text: the unguarded division previously
    # raised ZeroDivisionError when num_chars_total == 0.
    caps_ratio = caps_count / num_chars_total if num_chars_total else 0.0
    twitter_objs = count_twitter_objs(tweet_text) #Count #, @, and http://
    num_media = 0
    if "media" in tweet["entities"]:
        num_media = len(tweet["entities"]["media"])
    retweet = 0
    if "rt" in words or "retweeted_status" in tweet:
        retweet = 1
    has_place = 1 if "coordinates" in tweet else 0
    author = tweet["user"]
    is_verified = 1 if author["verified"] else 0
    # Zero-guarded logs; currently excluded from the feature vector
    # (see commented entries below).
    log_followers = 0 if author["followers_count"] == 0 else np.log(author["followers_count"])
    log_friends = 0 if author["friends_count"] == 0 else np.log(author["friends_count"])

    features = [num_chars, num_chars_total, num_terms, num_words,
                num_unique_terms, sentiment['neg'], sentiment['pos'],
                sentiment['neu'], sentiment['compound'],
                twitter_objs[2], twitter_objs[1],
                twitter_objs[0], retweet, num_media,
                is_verified,
                # log_followers, log_friends,
                # has_place,
                caps_ratio,
                ]
    return features
def template(spec_fn):
    """Build a decorator factory from a generic specification function.

    ``template(spec_fn)`` returns ``specialize``. Calling
    ``specialize(*bound_args, **bound_kwds)`` yields a decorator that
    discards the decorated function's body and substitutes a callable
    which forwards to ``spec_fn`` with the bound positional arguments
    prepended and the bound keyword arguments merged with the call-site
    arguments.

    Typical use: stamping out families of comparison dunders from one
    parameterized implementation, e.g.::

        @comp_on_metrics(op=operator.lt)
        def __lt__(self, other): ...
    """
    def specialize(*bound_args, **bound_kwds):
        def decorate(_ignored):
            def forwarder(*call_args, **call_kwds):
                return spec_fn(*bound_args, *call_args,
                               **bound_kwds, **call_kwds)
            return forwarder
        return decorate
    return specialize
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.