content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
import math
import tqdm
def SurfacePlot(Fields, save_plot, as_video=False, Freq=None, W=None, L=None, h=None, Er=None):
    """Plot a 3D surface over the theta/phi range in ``Fields``.

    Converts each (phi, theta) field magnitude from spherical to cartesian
    coordinates and renders the resulting surface.  Optionally saves a static
    image, or records a 360-degree rotating-view animation.

    Args:
        Fields: 2D array indexed as [phi][theta] of field magnitudes,
            with indices interpreted as angles in degrees.
        save_plot: output file path, or None to only display the plot.
        as_video: if True, save a rotation animation instead of a static image.
        Freq, W, L, h, Er: optional patch-antenna parameters, used only to
            build the plot title when W is given.
    """
    print("Processing SurfacePlot...")
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    phiSize = Fields.shape[0]  # Finds the phi & theta range
    thetaSize = Fields.shape[1]
    X = np.ones((phiSize, thetaSize))  # Prepare arrays to hold the cartesian coordinate data.
    Y = np.ones((phiSize, thetaSize))
    Z = np.ones((phiSize, thetaSize))
    for phi in range(phiSize):  # Iterate over all phi/theta range
        for theta in range(thetaSize):
            e = Fields[phi][theta]
            xe, ye, ze = sph2cart1(e, math.radians(theta), math.radians(phi))  # Calculate cartesian coordinates
            X[phi, theta] = xe  # Store cartesian coordinates
            Y[phi, theta] = ye
            Z[phi, theta] = ze

    def init():
        ax.plot_surface(X, Y, Z, color='b')  # Plot surface
        plt.ylabel('Y')
        plt.xlabel('X')  # Plot formatting
        if W is not None:  # was `W != None`; identity test is the correct None check
            plt.title("Patch: \nW=" + str(W) + " \nL=" + str(L) + "\nEr=" + str(Er) + " h=" + str(h) + " \n@" + str(Freq) + "Hz")
        return fig,

    def animate(i):
        ax.view_init(elev=10., azim=i)
        return fig,

    if save_plot is not None:
        # Animate
        init()
        if as_video:
            plt.show()
            print("Recording Radiation Video ...")
            # BUG FIX: the module is imported as `import tqdm`, so the progress
            # bar callable is tqdm.tqdm -- calling the module raised TypeError.
            anim = animation.FuncAnimation(fig, animate, init_func=init,
                                           frames=tqdm.tqdm(range(360)), interval=20, blit=True)
            # Save
            anim.save(save_plot, fps=30, extra_args=['-vcodec', 'libx264'])
        else:
            ax.view_init(elev=10., azim=45)
            plt.tight_layout()
            plt.savefig(save_plot)
            plt.show()
    else:
        init()
        plt.show()
def resource_method_wrapper(method):
    """
    Wrap a 0-ary resource method as a generic renderer backend.

    The returned function accepts ``(action, resource)`` but ignores the
    action and forwards only the resource to ``method``.

    >>> @resource_method_wrapper
    ... def func(resource):
    ...     return repr(resource)
    >>> func("abc", "def")
    "'def'"
    """
    def generic_renderer_backend(action, resource):
        # ``action`` is accepted for interface compatibility but unused.
        return method(resource)
    return generic_renderer_backend
def rotation_matrix(axis_vector, angle, degrees=True):
    """
    Build the 3x3 rotation matrix for a rotation about ``axis_vector``.

    Uses the axis-angle (Rodrigues) form; see
    https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle

    Parameters
    ----------
    axis_vector : 3 x 1 numpy array
        Unit vector along the rotation axis.
    angle : float
        Rotation angle, in degrees unless ``degrees`` is False.
    degrees : bool (optional)
        Interpret ``angle`` as degrees (default True) or radians.

    Returns
    -------
    rot_mat : 3 x 3 numpy array
        The rotation matrix.
    """
    theta = np.radians(angle) if degrees else angle
    # Rodrigues' formula: R = cos(t)*I + sin(t)*[k]_x + (1 - cos(t)) * k k^T
    identity_term = np.cos(theta) * np.identity(3)
    skew_term = np.sin(theta) * cross_product_matrix(axis_vector)
    outer_term = (1 - np.cos(theta)) * np.outer(axis_vector, axis_vector)
    return identity_term + skew_term + outer_term
def select_int(sql, *args):
    """
    Execute a SQL statement expected to yield a single column and return
    that single value.

    :param sql: SQL statement to execute.
    :param args: bind parameters for the statement.
    :return: the value of the one selected column.
    :raises ValueError: if the query produced no row.
    :raises MultiColumnsError: if the row has more than one column.
    """
    d = _select(sql, True, *args)
    # BUG FIX: `d == None` -> identity test; StandardError does not exist in
    # Python 3, so raise ValueError instead.
    if d is None:
        raise ValueError('Result is None')
    if len(d) != 1:
        raise MultiColumnsError('Expect only one column.')
    # BUG FIX: dict views are not indexable in Python 3 (was d.values()[0]).
    return next(iter(d.values()))
import os
import json
import logging
def create_workflow_from_json(
    name,
    access_token,
    workflow_json=None,
    workflow_file=None,
    parameters=None,
    workflow_engine="yadage",
    outputs=None,
):
    """Create a workflow from json specification.

    :param name: name or UUID of the workflow to be started.
    :param access_token: access token of the current user.
    :param workflow_json: workflow specification in json format.
    :param workflow_file: workflow specification file path.
        Ignores ``workflow_json`` if provided.
    :param parameters: workflow input parameters dictionary.
    :param workflow_engine: one of the workflow engines (yadage, serial, cwl)
    :param outputs: dictionary with expected workflow outputs.
    :raises ValueError: if ``name`` is a valid UUIDv4.
    :raises Exception: on missing token/configuration, unknown engine, or a
        server-side failure.

    :Example:

    .. code:: python

        create_workflow_from_json(
            workflow_json=workflow_json,
            name='workflow_name.1',
            access_token='access_token',
            parameters={'files': ['file.txt'], 'parameters': {'key': 'value'}},
            workflow_engine='serial')
    """
    # Validate inputs and environment before talking to the server.
    if is_uuid_v4(name):
        raise ValueError("Workflow name cannot be a valid UUIDv4")
    if not access_token:
        raise Exception(ERROR_MESSAGES["missing_access_token"])
    if os.environ.get("REANA_SERVER_URL") is None:
        raise Exception("Environment variable REANA_SERVER_URL is not set")
    workflow_engine = workflow_engine.lower()
    if workflow_engine not in REANA_WORKFLOW_ENGINES:
        raise Exception(
            "Workflow engine - {} not found. You must use one of "
            "these engines - {}".format(workflow_engine, REANA_WORKFLOW_ENGINES)
        )
    try:
        # Assemble a reana.yaml-shaped specification dictionary.
        reana_yaml = dict(workflow={})
        if workflow_file:
            reana_yaml["workflow"]["file"] = workflow_file
        else:
            reana_yaml["workflow"]["specification"] = workflow_json
        reana_yaml["workflow"]["type"] = workflow_engine
        if parameters:
            reana_yaml["inputs"] = parameters
        if outputs:
            reana_yaml["outputs"] = outputs
        _validate_reana_yaml(reana_yaml)
        reana_specification = reana_yaml
        # Round-trip through JSON with sorted keys for a canonical payload.
        (response, http_response) = current_rs_api_client.api.create_workflow(
            reana_specification=json.loads(
                json.dumps(reana_specification, sort_keys=True)
            ),
            workflow_name=name,
            access_token=access_token,
        ).result()
        if http_response.status_code == 201:
            return response
        else:
            raise Exception(
                "Expected status code 201 but replied with "
                "{status_code}".format(status_code=http_response.status_code)
            )
    except HTTPError as e:
        logging.debug(
            "Workflow creation failed: "
            "\nStatus: {}\nReason: {}\n"
            "Message: {}".format(
                e.response.status_code, e.response.reason, e.response.json()["message"]
            )
        )
        # Chain the cause so the HTTP error is preserved in the traceback.
        raise Exception(e.response.json()["message"]) from e
    # BUG FIX: the former `except Exception as e: raise e` clause was a no-op
    # that only truncated tracebacks; other exceptions now propagate untouched.
import logging
import torch
def compute_nas_score(any_plain_net, random_structure_str, gpu, args):
    """Compute a zero-shot NAS proxy score for a candidate network.

    :param any_plain_net: model class
    :param random_structure_str (str): model structure string
    :param gpu (int): gpu index
    :param args: parsed arguments providing num_classes, input_image_size,
        batch_size, gamma and zero_shot_score (one of Zen, TE-NAS, Syncflow,
        GradNorm, Flops, Params, Random, NASWOT)
    :return: the proxy score, or -9999 when scoring fails
    """
    # compute network zero-shot proxy score
    the_model = any_plain_net(num_classes=args.num_classes, plainnet_struct=random_structure_str,
                              no_create=False, no_reslink=True)
    the_model = the_model.cuda(gpu)
    try:
        if args.zero_shot_score == 'Zen':
            the_nas_core_info = compute_zen_score.compute_nas_score(model=the_model, gpu=gpu,
                                                                    resolution=args.input_image_size,
                                                                    mixup_gamma=args.gamma, batch_size=args.batch_size,
                                                                    repeat=1)
            the_nas_core = the_nas_core_info['avg_nas_score']
        elif args.zero_shot_score == 'TE-NAS':
            the_nas_core = compute_te_nas_score.compute_NTK_score(model=the_model, gpu=gpu,
                                                                  resolution=args.input_image_size,
                                                                  batch_size=args.batch_size)
        elif args.zero_shot_score == 'Syncflow':
            the_nas_core = compute_syncflow_score.do_compute_nas_score(model=the_model, gpu=gpu,
                                                                       resolution=args.input_image_size,
                                                                       batch_size=args.batch_size)
        elif args.zero_shot_score == 'GradNorm':
            the_nas_core = compute_gradnorm_score.compute_nas_score(model=the_model, gpu=gpu,
                                                                    resolution=args.input_image_size,
                                                                    batch_size=args.batch_size)
        elif args.zero_shot_score == 'Flops':
            the_nas_core = the_model.get_FLOPs(args.input_image_size)
        elif args.zero_shot_score == 'Params':
            the_nas_core = the_model.get_model_size()
        elif args.zero_shot_score == 'Random':
            the_nas_core = np.random.randn()
        elif args.zero_shot_score == 'NASWOT':
            the_nas_core = compute_NASWOT_score.compute_nas_score(gpu=gpu, model=the_model,
                                                                  resolution=args.input_image_size,
                                                                  batch_size=args.batch_size)
        else:
            # BUG FIX: an unknown score name previously left `the_nas_core`
            # unbound, crashing with NameError at the return below; raise here
            # so the failure is logged and handled like any other scoring error.
            raise ValueError('Unknown zero_shot_score: {}'.format(args.zero_shot_score))
    except Exception as err:  # pylint: disable=broad-except
        logging.info(str(err))
        logging.info('--- Failed structure: ')
        logging.info(str(the_model))
        # raise err
        the_nas_core = -9999
    # Always release GPU memory, even after a scoring failure.
    _ = the_model.cpu()
    del the_model
    torch.cuda.empty_cache()
    return the_nas_core
def _dict_values_match(*args, **kwargs):
    """
    Matcher that matches a dict where each of the keys match the matcher
    passed in. Similar to ``MatchesStructure``, but for dictionaries rather
    than python objects.
    """
    matchers = dict(*args, **kwargs)

    def extract_val(key):
        def extract_val_for_key(d):
            # .get returns None for absent keys, letting the matcher decide.
            return d.get(key)
        return extract_val_for_key
    # BUG FIX: dict.iteritems() was removed in Python 3; use items().
    return MatchesAll(*list(AfterPreprocessing(extract_val(key), value)
                            for key, value in matchers.items()))
def fib_lista(n):
    """
    Return the Fibonacci numbers strictly smaller than ``n``, in order.
    """
    sequence = []
    current, following = 0, 1
    while current < n:
        sequence.append(current)
        current, following = following, current + following
    return sequence
import importlib
import os
import re
import sys
import time
def migrate(db_file=None, migrations_dir=None, verbose=False):
    """Run the migrations.

    Read all the migrations in the migrations directory, and add them to
    the migrations table if they are not already there. They are first inserted
    with the status 'down'.
    Then, for all migrations with status 'down', taken in chronological order,
    call the 'up' method and set their status as 'up'.

    Args:
        db_file (str, optional): The path to the database file. If not
                    provided, a new database will be created with
                    the name 'database.db'.
        migrations_dir (str, optional): The path to the migrations directory.
                    If not provided, an empty directory will be
                    created with the name 'migrations'.
        verbose (bool, optional): Print progress information when True.

    Returns:
        True if a migration was run, False otherwise.
    """
    # Create required files if not existent
    if db_file is None:  # pragma: no cover
        db_file = 'database.db'
    if migrations_dir is None:  # pragma: no cover
        migrations_dir = 'migrations'
    if not os.path.isdir(migrations_dir):  # pragma: no cover
        os.mkdir(migrations_dir)
    # Connect to db
    conn, c = connect_database(db_file)
    # Create migrations table if not existent
    c.execute('''CREATE TABLE IF NOT EXISTS migrations
                 (file text, status text, created_at datetime)''')
    conn.commit()
    # Get all migrations
    query = "SELECT * FROM migrations"
    migrations = [row[0] for row in c.execute(query)]
    # Upload the new ones
    for mig in os.listdir(migrations_dir):
        if os.path.isdir(migrations_dir + '/' + mig):  # pragma: no cover
            continue
        if re.match(r'.*\.py$', mig) is None:  # pragma: no cover
            os.unlink(migrations_dir + '/' + mig)
        elif mig not in migrations:
            if verbose:
                print("Inserting " + mig + " into migrations table ...")
            c.execute("""INSERT INTO migrations
                         VALUES (?, ?, ?)""",
                      (mig, 'down', time.strftime('%Y-%m-%d %H:%M:%S')))
            conn.commit()
    # Add migrations to import path
    sys.path.append(migrations_dir)
    # For any migration that is down, run it
    migrations_run = 0
    # BUG FIX: order ASC so pending migrations run oldest-first, matching the
    # documented chronological order (was DESC).
    # BUG FIX: fetch all pending rows up-front -- issuing UPDATEs on the same
    # cursor while iterating its SELECT aborts the iteration.
    pending = c.execute("""SELECT * FROM migrations
                           WHERE status='down'
                           ORDER BY datetime(created_at) ASC""").fetchall()
    for row in pending:
        # Run the up method
        if verbose:
            print("Running " + row[0] + " ...")
        mig = __import__(row[0].split('.py')[0])
        # BUG FIX: the reload() builtin is Python 2 only; use importlib.reload.
        mig = importlib.reload(mig)
        mig_inst = mig.Migration()
        mig_inst.up(conn, c)
        # Mark it as applied
        c.execute("""UPDATE migrations SET status='up'
                     WHERE file=?""", (row[0],))
        conn.commit()
        migrations_run += 1
        if verbose:
            print("Successfully run " + row[0])
    # Return the boolean
    return migrations_run > 0
import os
def _get_json_file(module_path):
"""
Returns the path of the JSON file for a module, empty if doesn't exitst.
"""
json_file = '%s.json' % module_path.rsplit('.', 1)[0]
if os.path.isfile(module_path) and os.path.isfile(json_file):
return json_file
else:
return '' | 4a98fc9358d88817311fc0a09c44b8ea54529d74 | 29,809 |
def make_item_accessor(idx):
    """
    Build a property that delegates attribute access to ``self[idx]``.

    Reading the property returns the idx-th item; assigning to it stores
    into the idx-th slot.
    """
    def _read(self):
        return self[idx]

    def _write(self, value):
        self[idx] = value

    return property(_read, _write)
def calc_correlation(data, data2):
    """
    Compute column-wise correlations between two DataFrames.

    Parameters:
        - data: The first dataframe.
        - data2: The second dataframe.

    Returns:
        A Series() object; columns whose correlation is undefined (NaN)
        are dropped.
    """
    correlations = data.corrwith(data2)
    return correlations[correlations.notnull()]
def boschloo_swap(c1r1: int, c2r1: int, c1r2: int, c2r2: int) -> (int, int, int, int):
    """
    Canonicalize a 2x2 contingency table.

    The four tables ['abcd', 'badc', 'cdab', 'dcba'] always give the same
    p-value, so only one canonical arrangement needs to be computed/stored.
    """
    if c1r1 + c1r2 > c2r1 + c2r2:
        # Left column total exceeds the right one: swap the columns.
        c1r1, c1r2, c2r1, c2r2 = c2r1, c2r2, c1r1, c1r2
    if c1r1 + c2r1 > c1r2 + c2r2:
        # Top row total exceeds the bottom one: swap the rows.
        c1r1, c2r1, c1r2, c2r2 = c1r2, c2r2, c1r1, c2r1
    return c1r1, c2r1, c1r2, c2r2
def get_unit_scale(scale60, val30=1):
    """
    Return a UNIT_SCALE function whose scale depends on the 60fps flag.

    The returned callable reports ``val30`` normally and ``val30 * scale60``
    when the widget's tag window is being saved as 60fps.  Results are
    memoized per (val30, val60) pair in ``_func_unit_scales``.
    """
    assert 0 not in (val30, scale60), ("60fps scale and default 30fps " +
                                       "value must both be non-zero.")
    val60 = val30 * scale60
    cache_key = (val30, val60)
    if cache_key in _func_unit_scales:
        return _func_unit_scales[cache_key]

    def unit_scale(*args, **kwargs):
        widget = kwargs.get('f_widget')
        if kwargs.get('get_scales'):
            # used for getting both the 30 and 60 fps scales
            return (val30, val60)
        try:
            if widget.tag_window.save_as_60:
                return val60
        except AttributeError:
            return val30

    _func_unit_scales[cache_key] = unit_scale
    unit_scale.fps_60_scale = True
    return unit_scale
def sample_ingredient(user, name='cinnoan'):
    """Create and return a sample Ingredient owned by ``user``."""
    return Ingredient.objects.create(user=user, name=name)
import six
def before(action):
    """Decorator to execute the given action function *before* the responder.

    Can be applied either to a single responder function or to a whole
    resource class (in which case every ``on_<method>`` responder found on
    the class is wrapped).

    Args:
        action: A function with a similar signature to a resource responder
            method, taking (req, resp, params), where params includes values
            for URI template field names, if any. Hooks may also add
            pseudo-params of their own. For example:

                def do_something(req, resp, params):
                    try:
                        params['id'] = int(params['id'])
                    except ValueError:
                        raise falcon.HTTPBadRequest('Invalid ID',
                                                    'ID was not valid.')
                    params['answer'] = 42
    """
    def _before(responder_or_resource):
        # Class target: wrap every HTTP responder the resource defines.
        if isinstance(responder_or_resource, six.class_types):
            resource = responder_or_resource
            for method in HTTP_METHODS:
                responder_name = 'on_' + method.lower()
                try:
                    responder = getattr(resource, responder_name)
                except AttributeError:
                    # resource does not implement this method
                    pass
                else:
                    # Usually expect a method, but any callable will do
                    if hasattr(responder, '__call__'):
                        # This pattern is necessary to capture the current
                        # value of responder in the do_before_all closure;
                        # otherwise, they will capture the same responder
                        # variable that is shared between iterations of the
                        # for loop, above.
                        def let(responder=responder):
                            @wraps(responder)
                            def do_before_all(self, req, resp, **kwargs):
                                # Run the hook first, then the real responder.
                                action(req, resp, kwargs)
                                responder(self, req, resp, **kwargs)
                            # Preserve the responder's argspec so falcon's
                            # introspection still sees the original signature.
                            api_helpers._propagate_argspec(
                                do_before_all,
                                responder)
                            setattr(resource, responder_name, do_before_all)
                        let()
            return resource
        else:
            # Function target: wrap just this single responder.
            responder = responder_or_resource

            @wraps(responder)
            def do_before_one(self, req, resp, **kwargs):
                action(req, resp, kwargs)
                responder(self, req, resp, **kwargs)
            api_helpers._propagate_argspec(do_before_one, responder)
            return do_before_one
    return _before
def commandLine(Argv):
    """
    Convert a list of arguments/parameters to a command-line string
    (e.g. to include in the invocation of a program).

    The first element (the program name, argv[0]) is skipped; every remaining
    argument is followed by a single space, preserving the original
    trailing-space behaviour.

    list --> str
    """
    assert type(Argv) is list, "The argument of this method are the arguments to convert in the command line format. (type List)"
    # Join in one pass instead of quadratic repeated string concatenation;
    # also avoids shadowing the function name with a local variable.
    return "".join(arg + " " for arg in Argv[1:])
import sys
def select_diff_from_dic(dic, spacegroup_tuples, sample_key='Mat', drop_nan=None):
    """ Build a data frame of selected spacegroup_tuples from a dict of dicts.

    The resulting pandas data frame has one column of sample names plus one
    column per selected space group tuple (energy differences).

    Parameters:
    dic: dict {samples -> space group tuples -> energy differences.}
    spacegroup_tuples: tuple, list of tuples, tuples of tuples
        Each tuple has to contain two space groups numbers,
        to be looked up in the input dic.
    sample_key: string
        Will be the column title of the samples of the created data frame
    drop_nan: string, optional {'rows', 'SG_tuples'}
        Drops all rows or columns (SG_tuples) containing NaN.

    .. codeauthor:: Emre Ahmetcik <ahmetcik@fhi-berlin.mpg.de>
    """
    # A single tuple of numbers means one selection: wrap it in a list.
    is_single_selection = (isinstance(spacegroup_tuples, tuple)
                           and all(isinstance(item, (float, int)) for item in spacegroup_tuples))
    if is_single_selection:
        spacegroup_tuples = [spacegroup_tuples]
    df_out = pd.DataFrame(dic, index=spacegroup_tuples).T
    if drop_nan is not None:
        if drop_nan == 'rows':
            df_out.dropna(axis=0, inplace=True)
        elif drop_nan == 'SG_tuples':
            df_out.dropna(axis=1, inplace=True)
        else:
            raise ValueError("Argument 'drop_nan' has to be 'None', 'rows' or 'SG_tuples'.")
    # Abort with a helpful hint if the selection/drop left nothing behind.
    if len(df_out.columns) == 0 or len(df_out.index) == 0:
        string = 'rows' if len(df_out.index) == 0 else 'spacegroup_tuples'
        logger.error('Dropping {0} with NaNs leads to empty data frame.'.format(string))
        logger.error('Hint: Select different spacegroup_tuples or set drop_nan=None')
        sys.exit(1)
    df_out.reset_index(inplace=True)
    df_out.rename(columns={'index': sample_key}, inplace=True)
    return df_out
def prob(n: int, p: float) -> float:
    """
    Geometric probability: chance that the first success happens on trial n.

    Parameters:
    - n (int): number of trials performed
    - p (float): probability that a single trial succeeds

    Returns:
    - float: the geometric probability p * (1 - p)**(n - 1)
    """
    failures_before_success = n - 1
    return p * (1 - p) ** failures_before_success
from typing import Mapping
import pandas
import numpy
def to_overall_gpu_process_df(gpu_stats: Mapping) -> DataFrame:
    """Flatten per-machine GPU stats into one process-level DataFrame.

    :param gpu_stats: mapping of machine name -> {"devices": [...]} where each
        device dict carries a "processes" list of per-process dicts.
    :return: processes across all machines/devices, sorted by used_gpu_mem
        descending, with create_time converted, percents rounded and memory
        columns scaled to MB; an empty DataFrame when no processes exist.
    """
    per_device_frames = []
    columns = []
    for machine, stats in gpu_stats.items():
        for device in stats["devices"]:
            processes = device["processes"]
            if len(processes) > 0:
                columns = list(processes[0].keys())
                df = pandas.DataFrame(data=processes)
                df["machine"] = [machine] * len(processes)
                per_device_frames.append(df)
    # BUG FIX: pandas.concat() raises ValueError on an empty list; the
    # original only checked for emptiness after concatenating.
    if not per_device_frames:
        return pandas.DataFrame()
    out_df = pandas.concat(per_device_frames, sort=False)
    out_df.sort_values(by="used_gpu_mem", axis=0, ascending=False, inplace=True)
    if len(out_df) == 0:
        return pandas.DataFrame()
    # Put the machine column first, then the process columns in source order.
    idx = ["machine", *columns]
    out_df = out_df[idx]
    out_df.create_time = out_df.create_time.map(timestamp_to_datetime)
    for c in INT_COLUMNS:
        out_df[c] = out_df[c].astype(int)
    for c in PERCENT_COLUMNS:
        out_df[c] = numpy.round(out_df[c], 2)
    for c in MB_COLUMNS:
        out_df[c] = numpy.round(out_df[c] // MB_DIVISOR, 2)
    out_cols = [c for c in out_df.columns if c not in DROP_COLUMNS]
    return out_df[out_cols]
from teospy import liq5_f03
def genliq5():
    """Generate liq5_f03 Testers.

    Returns a pair: one Tester for the Gibbs-function derivatives and one
    for the derived thermodynamic property functions.
    """
    temp_pres = (300., 1e5)
    # Derivative cases: each entry of _DERS2 prefixes the (T, p) point.
    der_args = [(der + temp_pres) for der in _DERS2]
    der_refs = [-5265.05056073, -393.062597709, 0.100345554745e-2, -13.9354762020,
                0.275754520492e-6, -0.452067557155e-12]
    tester_ders = Tester(liq5_f03.liq_g, der_args, der_refs, 'liq_g',
                         '({0:3d},{1:3d})', header='Feistel 2003 g derivatives')
    # Property functions evaluated at the same (T, p) point.
    prop_funs = [liq5_f03.cp, liq5_f03.density, liq5_f03.expansion, liq5_f03.kappa_t,
                 liq5_f03.soundspeed]
    prop_refs = [4180.64286060, 996.556352243, 0.274804919056e-3, 0.450510795725e-9,
                 1501.52808421]
    prop_names = ['cp', 'density', 'expansion', 'kappa_t', 'soundspeed']
    tester_funs = Tester(prop_funs, temp_pres, prop_refs, prop_names,
                         '({0:3g},{1:3g})', header='Feistel 2003 functions')
    return (tester_ders, tester_funs)
import os
import logging
def retrieve_image(image_dir, message):
    """Replace the message content with JPEG-compressed image bytes.

    Looks up the raw image file for ``message`` in ``image_dir``, re-encodes
    it as JPEG, and returns the serialized protobuf message carrying the
    compressed payload.

    Args:
        image_dir: directory containing raw image files keyed by message id.
        message: source message providing timestamp, topic and payload.

    Returns:
        The serialized protobuf string, or None on any lookup/read/encode
        failure (failures are logged).
    """
    message_id = get_message_id(message.timestamp, message.topic)
    message_path = os.path.join(image_dir, message_id)
    if not os.path.exists(message_path):
        logging.error('message {} not found in image dir'.format(message_id))
        return None
    # cv2.imread returns None (rather than raising) on unreadable files.
    img_bin = cv2.imread(message_path)
    # Check by using NoneType explicitly to avoid ambitiousness
    if img_bin is None:
        logging.error('failed to read original message: {}'.format(message_path))
        return None
    # Re-encode as JPEG at 95% quality to shrink the payload.
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 95]
    result, encode_img = cv2.imencode('.jpg', img_bin, encode_param)
    if not result:
        logging.error('failed to encode message {}'.format(message_id))
        return None
    message_proto = image_message_to_proto(message)
    message_proto.format = '; jpeg compressed bgr8'
    # NOTE(review): swaps the whole payload for the encoded bytes via
    # bytes.replace on a full-slice -- assumes `data` is a bytes field;
    # confirm against the proto schema.
    message_proto.data = message_proto.data.replace(message_proto.data[:], bytearray(encode_img))
    return message_proto.SerializeToString()
def tridiagonalize_by_lanczos(P, m, k):
    """
    Reduce a matrix to tridiagonal form via the Lanczos iteration.

    Parameters
    ----------
    P : numpy array
        Target matrix.
    m : numpy array
        Initial vector for the iteration.
    k : int
        Size of the tridiagonal matrix.

    Returns
    -------
    T : numpy array
        k x k tridiagonal matrix.
    """
    T = np.zeros((k, k))
    residual = m
    beta_prev = 1
    q_prev = np.zeros(m.shape)
    for i in range(k):
        # Normalize the current Lanczos vector and expand the Krylov space.
        q_cur = residual / beta_prev
        w = np.dot(P, q_cur)
        alpha = np.dot(q_cur, w)
        # Orthogonalize against the two most recent Lanczos vectors.
        residual = w - alpha * q_cur - beta_prev * q_prev
        beta = np.linalg.norm(residual)
        T[i, i] = alpha
        if i + 1 < k:
            T[i, i + 1] = beta
            T[i + 1, i] = beta
        q_prev = q_cur
        beta_prev = beta
    return T
def Polygon(xpoints, ypoints, name="", visible=True, strfmt="{:.5f}"):
    """
    Build a Polygon element from vertex coordinate sequences.

    Each (x, y) pair becomes a child ``Point`` element whose coordinates are
    rendered through ``strfmt``.

    Returns
    ---------
    :class:`lxml.etree.Element`
    """
    polygon = Element("Polygon", name=str(name), visible=str(visible).lower())
    for x, y in zip(xpoints, ypoints):
        polygon.append(Element("Point", x=strfmt.format(x), y=strfmt.format(y)))
    return polygon
def fake_index_by_name(name, pattern, timezone='+08:00'):
    """
    generate a fake index name for index template matching

    ATTENTION:
    - rollover postfix is not supported in index template pattern
    - timezone is not supported cause tz is not well supported in python 2.x

    :param name: base index name.
    :param pattern: date pattern, one of 'YYYY.MM.dd', 'YYYY.MM', 'YYYY';
        any other value yields no date segment.
    :return: '<name>-<date>-000001', or '<name>-000001' without a pattern.
    """
    if pattern == 'YYYY.MM.dd':
        return '%s-%s-000001' % (name, now.strftime('%Y.%m.%d'))
    elif pattern == 'YYYY.MM':
        return '%s-%s-000001' % (name, now.strftime('%Y.%m'))
    elif pattern == 'YYYY':
        # BUG FIX: this branch previously used '%Y.%m', duplicating the
        # 'YYYY.MM' format instead of the year-only one.
        return '%s-%s-000001' % (name, now.strftime('%Y'))
    else:
        return '%s-000001' % name
def map_code(func):
    """
    Wrap ``func`` so it maps values to Ontology codes.

    The returned mapper passes None through unchanged; any other value is
    converted to its string form before being handed to ``func``.
    """
    def mapper(v):
        if v is None:
            return None
        return func(str(v))
    return mapper
def install_dvwa(instance, verbose: bool = True):
    """Install and configure a DVWA web server inside an lxc instance.

    instance (object): This argmument define the lxc instance.
    verbose (bool, optional): This argument define if the function prompt some
        informations during his execution. Default to True.

    Returns 0 on success, 1 on the first failed step.
    """
    if update(instance, verbose=False):
        return 1
    # Required packages, installed one by one; abort on the first failure.
    for package in ("nmap", "apache2", "mysql-server", "php", "php7.2-mysql",
                    "php-gd", "libapache2-mod-php", "git"):
        if install(instance, {"module": package}, verbose=False):
            return 1
    if delete_file(instance, {"instance_path": "/var/www/html/index.html"}, verbose=False):
        return 1
    git_clone(instance, {"branch": "", "repository": "https://github.com/ethicalhack3r/DVWA", "instance_path": "/var/www/html/"}, verbose=False)
    result = execute_command(instance, {"command": ["cp", "/var/www/html/config/config.inc.php.dist", "/var/www/html/config/config.inc.php"], "expected_exit_code": "0"}, verbose=False)
    if result.exit_code == 0:
        # Provision the DVWA database and its user account.
        execute_command(instance, {"command": ["mysql", "-e", "create database dvwa;"], "expected_exit_code": "0"}, verbose=False)
        execute_command(instance, {"command": ["mysql", "-e", "create user dvwa@localhost identified by 'p@ssw0rd';"], "expected_exit_code": "0"}, verbose=False)
        execute_command(instance, {"command": ["mysql", "-e", "grant all on dvwa.* to dvwa@localhost;"], "expected_exit_code": "0"}, verbose=False)
        execute_command(instance, {"command": ["mysql", "-e", "flush privileges;"], "expected_exit_code": "0"}, verbose=False)
        if result.exit_code == 0:
            result = execute_command(instance, {"command": ["chmod", "a+w", "/var/www/html/hackable/uploads/"], "expected_exit_code": "0"}, verbose=False)
            if restart_service(instance, {"service": "apache2"}, verbose=False):
                return 1
            if restart_service(instance, {"service": "mysql"}, verbose=False):
                return 1
            if result.exit_code == 0:
                if verbose:
                    print(Fore.GREEN + " Config file for dvwa is up" + Style.RESET_ALL)
                return 0
            print(Fore.RED + " Error while changing folder rights in dvwa " + " [" + result.stderr + "]" + Style.RESET_ALL)
            return 1
        print(Fore.RED + " Error during configuration of SQL in dvwa " + " [" + result.stderr + "]" + Style.RESET_ALL)
        return 1
    print(Fore.RED + " Error while copying config file of dvwa " + " [" + result.stderr + "]" + Style.RESET_ALL)
    return 1
import json
def card_update(handler, delete=False, review=False):
    """Update or Delete an exisiting Card.

    Routes handled: /api/card/<key> (update/delete) and
    /api/card/<key>/review (record a review grade).

    Args:
        handler: request handler carrying the current request path and body.
        delete: when True, delete the card instead of updating it.
        review: when True, record a review grade from the JSON body.

    Returns:
        The card's urlsafe key string on success, '{}' on any
        authorization/lookup failure, or None when no user is signed in.
    """
    user_data = get_current_user(handler)
    if not user_data:
        return
    path = handler.request.path
    route_root = '/api/card/'
    err_response = '{}'
    if not path.startswith(route_root) and len(path) > len(route_root):
        return err_response
    # Extract the datastore key from the URL, stripping the /review suffix.
    card_key = path[len(route_root):]
    if card_key.endswith('/review'):
        card_key = card_key[:-len('/review')]
    card = ndb.Key(urlsafe=card_key).get()
    if not card:
        return err_response
    if user_data.key != card.user_key:
        # Disallow modification of other people's cards
        return err_response
    # Finally ready to do the update
    card_tags_original = set(card.tags)
    if delete:
        card_tags_updated = set()
        card.key.delete()
        # Remove the card from the search index as well.
        search.delete_cards([card])
    elif review:
        data = json.loads(handler.request.body)
        card.record_review(data.get('grade'))
        card_tags_updated = set(card.tags)  # unchanged in this case
        card.put()
    else:
        data = json.loads(handler.request.body)
        card.update_from_dict(data)
        card_tags_updated = set(card.tags)
        card.put()
        search.insert_cards([card])
    # Update the list of all known tags for this user
    user_data.update_card_tags(card_tags_original, card_tags_updated)
    user_data.put()  # TODO(jace): only put if necessary
    # TODO(jace) Notify followers
    return card.key.urlsafe()
def get(role_arn, principal_arn, assertion, duration):
    """Use the assertion to get an AWS STS token using Assume Role with SAML"""
    # GovCloud ARNs must be served by an STS client bound to a GovCloud region.
    partition = role_arn.split(':')[1]
    if partition == 'aws-us-gov':
        session = boto3.session.Session(region_name='us-gov-west-1')
        sts = session.client('sts')
    else:
        sts = boto3.client('sts')
    return sts.assume_role_with_saml(
        RoleArn=role_arn,
        PrincipalArn=principal_arn,
        DurationSeconds=duration,
        SAMLAssertion=assertion)
import time
def put_keyless():
    """
    Handle PUT requests for key-less database insertions

    Rejects empty request bodies with HTTP 400; otherwise stores the payload
    under a server-generated key and returns that key.
    """
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for interval timing.
    start = time.perf_counter()
    data = request.body.getvalue()
    if not data:
        return abort(400, "The request body must contain data to store")
    # Reuse the already-read body instead of reading the stream a second time.
    key = store(key=None, data=data)
    elapsed = time.perf_counter() - start
    LOGGER.debug("put_keyless request took {:f} seconds".format(elapsed))
    return key
def find_nth(s, x, n):
    """
    Find the index of the nth non-overlapping occurrence of ``x`` in ``s``.

    takes string where to search, substring, nth-occurence (n >= 1);
    returns -1 when there are fewer than n occurrences.
    """
    if n <= 0:
        return -1
    # BUG FIX: seeding i = -1 made the first search start at len(x) - 1,
    # which missed a match at index 0 whenever len(x) > 1.
    i = -len(x)
    for _ in range(n):
        i = s.find(x, i + len(x))
        if i == -1:
            break
    return i
from typing import Union
from typing import Dict
from typing import Any
def parse_buffer(value: Union[Dict[str, Any], str]) -> str:
    """Parse value from a buffer data type.

    Dicts and JSON-encoded strings are delegated to the dedicated parsers;
    any other value is returned untouched.
    """
    if isinstance(value, dict):
        return parse_buffer_from_dict(value)
    return parse_buffer_from_json(value) if is_json_string(value) else value
def cancel_job(request):  # pylint: disable=unused-argument
    """Handler for `cancel_job/` request.

    Pops the oldest tracked job id (FIFO) and asks ``long_job`` to cancel it;
    responds with an empty HTTP 200 either way.
    """
    if not job_ids:
        print('No jobs are running, nothing to cancel!')
    else:
        # job_ids holds running job ids, oldest first (deque semantics).
        job_id = job_ids.popleft()
        print('CANCELING JOB:', job_id)
        long_job.cancel(job_id)
    return django.http.HttpResponse()
def LO_solver_multiprocessing(H, N, dis, args, pipe):
    """
    Solve the Lutchyn-Oreg Hamiltonian inside a worker process.

    Parameters
    ----------
    H: arr
        Discretized Lutchyn-Oreg Hamiltonian built with Lutchyn_builder.
    N: int or arr
        Number of sites. If it is an array, each element is the number of
        sites in each direction.
    dis: int or arr
        Distance (in nm) between sites. If it is an array, each element is
        the distance between sites in each direction.
    args: dictionary
        Dictionary with the keywords arguments of Lutchyn_solver.
    pipe: pipe
        Pipe to the corresponding process; receives the (E, U) result.
    """
    forwarded_keys = ('mu', 'B', 'aR', 'd', 'BdG', 'space', 'k_vec', 'm_eff',
                      'sparse', 'n_eig', 'near', 'section', 'method', 'Nxp',
                      'n_orb')
    solver_kwargs = {key: args[key] for key in forwarded_keys}
    # Run the solver on a single CPU within this process.
    eigvals, eigvecs = LO_solver(H, N, dis, 1, n_CPU=1, **solver_kwargs)
    # Ship the result back to the parent process.
    pipe.send((eigvals, eigvecs))
    # Close our end of the pipe.
    pipe.close()
    return True
def _clip_grad(clip_type, clip_value, grad):
    """
    Clip gradients.

    Inputs:
        clip_type(int): The way to clip, 0 for 'value', 1 for 'norm'.
        clip_value(float): Specifies how much to clip.
        grad (tuple[Tensor]): Gradients.

    Outputs:
        tuple[Tensor], clipped gradients.
    """
    # Unknown clip types pass the gradient through unchanged.
    if clip_type not in (0, 1):
        return grad
    dt = ops.dtype(grad)
    if clip_type == 0:
        # Clamp each element into [-clip_value, clip_value], with the bounds
        # cast to the gradient's dtype.
        new_grad = ops.clip_by_value(grad, ops.cast(ops.tuple_to_array((-clip_value,)), dt),
                                     ops.cast(ops.tuple_to_array((clip_value,)), dt))
    else:
        # Rescale the gradient so its L2 norm is at most clip_value.
        new_grad = nn.ClipByNorm()(grad, ops.cast(ops.tuple_to_array((clip_value,)), dt))
    return new_grad
def lightness_correlate(A, A_w, c, z):
    """
    Returns the *Lightness* correlate :math:`J`.

    Parameters
    ----------
    A : numeric or array_like
        Achromatic response :math:`A` for the stimulus.
    A_w : numeric or array_like
        Achromatic response :math:`A_w` for the whitepoint.
    c : numeric or array_like
        Surround exponential non linearity :math:`c`.
    z : numeric or array_like
        Base exponential non linearity :math:`z`.

    Returns
    -------
    numeric or ndarray
        *Lightness* correlate :math:`J`.

    Examples
    --------
    >>> A = 23.9394809667
    >>> A_w = 46.1882087914
    >>> c = 0.69
    >>> z = 1.9272135955
    >>> lightness_correlate(A, A_w, c, z)  # doctest: +ELLIPSIS
    41.7310911...
    """
    A, A_w, c, z = (as_float_array(value) for value in (A, A_w, c, z))
    # J = 100 * (A / A_w) ^ (c * z)
    return 100 * spow(A / A_w, c * z)
import requests
def get(url, body=None, cookies=None, auth_data=None):
    """Send a REST API GET request and print useful debugging details.

    ``body`` is sent as query parameters; returns the response object.
    """
    response = requests.get(url, cookies=cookies, params=body, auth=auth_data)
    print('GET request to {0}'.format(url))
    print('Status code: {0}'.format(response.status_code))
    print('RESPONSE: {0}'.format(response.text))
    return response
def fetch_file_from_guest(module, content, vm, username, password, src, dest):
    """ Use VMWare's filemanager api to fetch a file over http.

    Returns a result dict with at least 'failed'; on success it also carries
    the transfer 'size', 'url' and the metadata returned by fetch_url.
    On failure a 'msg' key describes the problem.
    """
    result = {'failed': False}
    # Guest operations require a working VMwareTools inside the VM.
    tools_status = vm.guest.toolsStatus
    if tools_status == 'toolsNotInstalled' or tools_status == 'toolsNotRunning':
        result['failed'] = True
        result['msg'] = "VMwareTools is not installed or is not running in the guest"
        return result
    # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
    creds = vim.vm.guest.NamePasswordAuthentication(
        username=username, password=password
    )
    # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/FileManager/FileTransferInformation.rst
    # The API hands back a one-shot HTTP URL from which the file is downloaded.
    fti = content.guestOperationsManager.fileManager. \
        InitiateFileTransferFromGuest(vm, creds, src)
    result['size'] = fti.size
    result['url'] = fti.url
    # Use module_utils to fetch the remote url returned from the api
    rsp, info = fetch_url(module, fti.url, use_proxy=False,
                          force=True, last_mod_time=None,
                          timeout=10, headers=None)
    # save all of the transfer data
    for k, v in iteritems(info):
        result[k] = v
    # exit early if xfer failed
    if info['status'] != 200:
        result['failed'] = True
        return result
    # attempt to read the content and write it
    try:
        with open(dest, 'wb') as f:
            f.write(rsp.read())
    except Exception as e:
        # Local write failure (permissions, disk full, ...) is reported, not raised.
        result['failed'] = True
        result['msg'] = str(e)
    return result
def method_wrapper(m):
    """Generates a method from a `GrpcMethod` definition.

    Exactly one flavour is produced depending on the definition:
    simple (no args), input-transformed, static (classmethod-style), or
    plain request-building. All flavours forward the gRPC result through
    ``apply_transform`` with ``m.output_transform``.
    """
    if m.is_simple:
        def simple_method(self):
            """Call the gRPC method with the unwrapped instance as request."""
            return apply_transform(
                self.__service__, m.output_transform,
                grpc_call(self.__service__, m, unwrap(self)))
        return simple_method
    elif m.input_transform is not None:
        def transform_method(self, *args, **kwargs):
            """Build the request via m.input_transform, then call the gRPC method."""
            request = m.input_transform(self, *args, **kwargs)
            return apply_transform(
                self.__service__, m.output_transform,
                grpc_call(self.__service__, m, request))
        return transform_method
    elif m.static:
        def static_method(cls, *args, **kwargs):
            """Call the gRPC method on the class stub without an instance."""
            # NOTE(review): `__server__` looks like a module-level global
            # resolved at call time — confirm it is defined in this module.
            request = make_static_request(m, *args, **kwargs)
            return apply_transform(
                cls.__stub__(__server__), m.output_transform,
                grpc_call(cls.__stub__(__server__), m, request))
        return static_method
    else:
        def request_method(self, *args, **kwargs):
            """Build the request via make_request, then call the gRPC method."""
            request = make_request(self, m, *args, **kwargs)
            return apply_transform(
                self.__service__, m.output_transform,
                grpc_call(self.__service__, m, request))
        return request_method
def ReadFile(filename):
    """
    description: Read program from file
    param {*} filename: path of the file to read
    return {*} list of lines, each with its trailing newline removed
    """
    # The context manager guarantees the file is closed even on error.
    with open(filename, "r") as input_file:
        # Strip only the line terminator. The previous code chopped the last
        # character of every line unconditionally, which corrupted a final
        # line that had no trailing '\n'.
        return [line.rstrip("\n") for line in input_file]
def item_url(item):
    """Return a Markdown URL for the WCAG item."""
    # The id looks like "<prefix>:<fragment>"; the fragment anchors into WCAG 2.1.
    fragment = item["id"].split(":")[1]
    return url(item["handle"], "https://www.w3.org/TR/WCAG21/#{}".format(fragment))
def gen_nested_prop_getter(val_name, throws, klass):
    """
    Generate a nested property getter: the returned callable lazily
    creates a ``klass`` instance under attribute ``val_name`` on first
    access and returns the cached instance thereafter.
    """
    def _lazy_getter(self):
        # Instantiate and cache the attribute only on first access.
        if not hasattr(self, val_name):
            setattr(self, val_name, klass())
        return getattr(self, val_name)
    return _lazy_getter
import numpy
import scipy
def quad_genz_keister(order, dist, rule=24):
    """
    Genz-Keister quadrature rule.

    Examples:
        >>> abscissas, weights = quad_genz_keister(
        ...     order=1, dist=chaospy.Iid(chaospy.Uniform(0, 1), 2))
        >>> abscissas.round(2)
        array([[0.04, 0.04, 0.04, 0.5 , 0.5 , 0.5 , 0.96, 0.96, 0.96],
               [0.04, 0.5 , 0.96, 0.04, 0.5 , 0.96, 0.04, 0.5 , 0.96]])
        >>> weights.round(2)
        array([0.03, 0.11, 0.03, 0.11, 0.44, 0.11, 0.03, 0.11, 0.03])
    """
    assert isinstance(rule, int)
    # Multivariate case: recurse per dimension, then tensor the 1-D rules.
    if len(dist) > 1:
        # `order` may be a single int (applied to each dim) or one per dim.
        if isinstance(order, int):
            values = [quad_genz_keister(order, d, rule) for d in dist]
        else:
            values = [quad_genz_keister(order[i], dist[i], rule)
                      for i in range(len(dist))]
        abscissas = [_[0][0] for _ in values]
        abscissas = combine(abscissas).T
        weights = [_[1] for _ in values]
        # Tensor-product weights are products of the 1-D weights.
        weights = numpy.prod(combine(weights), -1)
        return abscissas, weights
    # Univariate case: look up the generator for the requested rule.
    foo = GENS_KEISTER_FUNCTIONS[rule]
    abscissas, weights = foo(order)
    # Map standard-normal nodes onto the target distribution via its inverse CDF.
    abscissas = dist.inv(scipy.special.ndtr(abscissas))
    abscissas = abscissas.reshape(1, abscissas.size)
    return abscissas, weights
def get_maf(variant):
    """
    Get the MAF (minor allele frequency) tag from the variant's info field.

    Args:
        variant (cyvcf2.Variant)

    Returns:
        maf (float): Minor allele frequency, or None when the tag is absent.
    """
    info = variant.INFO
    return info.get("MAF")
import glob
def check_channels(file_path_address: str, image_type: str):
    """Manual verifier to determine which images to further clean or remove.

    This checks whether each image has a consistent third dimension
    (three colour channels).

    Paramters:
    ---------
    file_path_address: str
        Address (directory prefix) where all images are located.
    image_type: str
        Image extension, e.g. ``png`` or ``jpg`` (without the dot).

    Return:
    ---------
    List of image paths to address (wrong channel count or unreadable).
    """
    # BUGFIX: the module is imported as `glob`, so the function must be
    # called as `glob.glob(...)`; bare `glob(...)` raised "'module' object
    # is not callable".
    image_paths = glob.glob(f'{file_path_address}*.{image_type}')
    flagged = []
    for i, path in enumerate(image_paths):
        print(f"Starting {i} for filename: {path}")
        image = cv2.imread(path)
        try:
            # cv2.imread returns None for unreadable files; image.shape then
            # raises AttributeError, handled below so the file is flagged.
            if image.shape[2] != 3:
                flagged.append(path)
        except Exception as error:
            flagged.append(path)
            print(error)
    return flagged
import heapq
def break_large_contigs(contigs, break_t, verbose=False):
    """Break large contigs in half until all contigs are under
    the size threshold.

    Args:
        contigs: list of Contig-like objects exposing .chr/.start/.end
            (and optionally .genome).
        break_t: maximum allowed contig length in nt.
        verbose: print a message for every break performed.

    Returns:
        List of contigs, each no longer than break_t (order unspecified,
        reflecting the internal heap layout).
    """
    # Heap entries are (-length, tiebreak, contig). The monotonically
    # increasing tiebreak prevents heapq from ever comparing two contig
    # objects directly, which raised TypeError when lengths tied.
    tiebreak = 0
    contig_heapq = []
    for ctg in contigs:
        heapq.heappush(contig_heapq, (ctg.start - ctg.end, tiebreak, ctg))
        tiebreak += 1
    # Keep splitting while the largest contig exceeds the threshold.
    # Peeking at the heap top (instead of popping unconditionally) fixes a
    # bug where the first sufficiently-small contig popped was silently
    # dropped from the results, and avoids IndexError on an empty input.
    while contig_heapq and -contig_heapq[0][0] > break_t:
        ctg_nlen, _, ctg = heapq.heappop(contig_heapq)
        ctg_len = -ctg_nlen
        if verbose:
            print('Breaking %s:%d-%d (%d nt)' % (ctg.chr, ctg.start, ctg.end, ctg_len))
        # break in two
        ctg_mid = ctg.start + ctg_len // 2
        try:
            ctg_left = Contig(ctg.genome, ctg.chr, ctg.start, ctg_mid)
            ctg_right = Contig(ctg.genome, ctg.chr, ctg_mid, ctg.end)
        except AttributeError:
            # Contig variant without a genome attribute.
            ctg_left = Contig(ctg.chr, ctg.start, ctg_mid)
            ctg_right = Contig(ctg.chr, ctg_mid, ctg.end)
        for half in (ctg_left, ctg_right):
            heapq.heappush(contig_heapq, (half.start - half.end, tiebreak, half))
            tiebreak += 1
    # return to list
    return [entry[-1] for entry in contig_heapq]
def create_content(address, owner, content):
    """
    Create a new page with some content.

    Args:
        address (str): the new page's absolute address.
        owner (Account): the owner of the page to be created.
        content (str): the Markdown content of the first revision.

    Returns:
        page (Page): the newly-created page.
    """
    # Drop a single leading slash so the stored address is relative.
    normalized = address[1:] if address.startswith("/") else address
    return Page.objects.create_content(normalized, owner, content)
def update_op_dims_mapping_by_default_dist_impl(op_dist_attr):
    """Each operator has a default distributed operator, only allowed to be sharded in batch dimension.

    Collects the batch-dim mapping of every non-parameter input/output,
    computes a single compatible mapping, and writes it back everywhere.
    Returns True when any dims_mapping was changed.
    """
    changed = False
    op_desc = op_dist_attr.get_owner_op().desc
    # The following statement will be replaced by a more elegent way
    if op_desc.type() == "shape" or op_desc.type() == "slice":
        return False
    output_names = op_desc.output_names()
    # XShape outputs carry an extra leading dim, so their batch dim is index 1.
    xshape_arg_names = []
    if "XShape" in output_names:
        xshape_arg_names = op_desc.output("XShape")
    batch_dim_mappings = []
    # Pass 1: gather batch-dim mappings and assert nothing else is sharded.
    for arg_name in op_desc.input_arg_names():
        if op_dist_attr.is_parameter(arg_name):
            continue
        dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
        if len(dims_mapping) > 1:
            for idx, mapping in enumerate(dims_mapping[1:]):
                assert mapping == -1, \
                    "{} only the batch dimension (0-dim) can be sharded, but the dimension {} is sharded by {} part."\
                        .format(op_desc.type(), idx, mapping)
        batch_dim_mappings.append(dims_mapping[0])
    for arg_name in op_desc.output_arg_names():
        if op_dist_attr.is_parameter(arg_name):
            continue
        dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
        if arg_name not in xshape_arg_names:
            if len(dims_mapping) > 1:
                for idx, mapping in enumerate(dims_mapping[1:]):
                    assert mapping == -1, \
                        "{} only the batch dimension (0-dim) can be sharded, but the dimension {} is sharded by {} part."\
                            .format(op_desc.type(), idx, mapping)
            batch_dim_mappings.append(dims_mapping[0])
        else:
            # XShape: dim 0 must stay unsharded; the batch dim is dim 1.
            assert dims_mapping[0] == -1, \
                "{} only the batch dimension (1-dim) of XShape can be sharded, but the dimension 0 is sharded by {} part."\
                    .format(op_desc.type(), mapping)
            if len(dims_mapping) > 2:
                for idx, mapping in enumerate(dims_mapping[2:]):
                    assert mapping == -1, \
                        "{} only the batch dimension (1-dim) of XShape can be sharded, but the dimension {} is sharded by {} part."\
                            .format(op_desc.type(), idx, mapping)
            batch_dim_mappings.append(dims_mapping[1])
    compatible_dim_mapping = compute_compatible_dim_mapping(batch_dim_mappings)
    assert compatible_dim_mapping is not None, "There is no compatible dim mapping."
    # Pass 2: write the compatible batch-dim mapping back to every arg.
    for arg_name in op_desc.input_arg_names():
        if op_dist_attr.is_parameter(arg_name):
            continue
        dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
        if compatible_dim_mapping != dims_mapping[0]:
            dims_mapping[0] = compatible_dim_mapping
            changed = True
    for arg_name in op_desc.output_arg_names():
        if op_dist_attr.is_parameter(arg_name):
            continue
        dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
        if arg_name not in xshape_arg_names:
            if compatible_dim_mapping != dims_mapping[0]:
                dims_mapping[0] = compatible_dim_mapping
                changed = True
        else:
            if compatible_dim_mapping != dims_mapping[1]:
                dims_mapping[1] = compatible_dim_mapping
                changed = True
    return changed
import random
import torch
def perturb_box(box, min_iou=0.5, sigma_factor=0.1):
    """ Perturb the input box by adding gaussian noise to the co-ordinates
    args:
        box - input box, format presumably (x, y, w, h) — the code perturbs
              a center derived as (box[0] + w/2, box[1] + h/2)
        min_iou - minimum IoU overlap between input box and the perturbed box
        sigma_factor - amount of perturbation, relative to the box size. Can be either a single element, or a list of
                        sigma_factors, in which case one of them will be uniformly sampled. Further, each of the
                        sigma_factor element can be either a float, or a tensor
                        of shape (4,) specifying the sigma_factor per co-ordinate

    returns:
        torch.Tensor - the perturbed box
    """
    if isinstance(sigma_factor, list):
        # If list, sample one sigma_factor as current sigma factor
        c_sigma_factor = random.choice(sigma_factor)
    else:
        c_sigma_factor = sigma_factor

    if not isinstance(c_sigma_factor, torch.Tensor):
        # Broadcast a scalar factor to all four coordinates.
        c_sigma_factor = c_sigma_factor * torch.ones(4)

    # Noise scale is proportional to sqrt(area) of the box.
    perturb_factor = torch.sqrt(box[2] * box[3]) * c_sigma_factor

    # multiple tries to ensure that the perturbed box has iou > min_iou with the input box
    for i_ in range(100):
        c_x = box[0] + 0.5 * box[2]
        c_y = box[1] + 0.5 * box[3]
        c_x_per = random.gauss(c_x, perturb_factor[0])
        c_y_per = random.gauss(c_y, perturb_factor[1])

        w_per = random.gauss(box[2], perturb_factor[2])
        h_per = random.gauss(box[3], perturb_factor[3])

        # Guard against degenerate (<= 1 px) widths/heights by resampling
        # as a random fraction of the original size.
        if w_per <= 1:
            w_per = box[2] * rand_uniform(0.15, 0.5)

        if h_per <= 1:
            h_per = box[3] * rand_uniform(0.15, 0.5)

        box_per = torch.Tensor([c_x_per - 0.5 * w_per, c_y_per - 0.5 * h_per, w_per, h_per]).round()

        if box_per[2] <= 1:
            box_per[2] = box[2] * rand_uniform(0.15, 0.5)

        if box_per[3] <= 1:
            box_per[3] = box[3] * rand_uniform(0.15, 0.5)

        box_iou = iou(box.view(1, 4), box_per.view(1, 4))

        # if there is sufficient overlap, return
        if box_iou > min_iou:
            return box_per, box_iou

        # else reduce the perturb factor
        perturb_factor *= 0.9

    # After 100 tries the last candidate is returned even if below min_iou.
    return box_per, box_iou
import statistics
def coverageCalc(coverageList, minCov):
    """Summarise per-base coverage statistics.

    :param coverageList: List of pacbam coverage values
    :param minCov: Int of minimum passing coverage
    :return:
        covCount: Int of bases with any coverage
        minCovCount: Int of bases with at least minCov coverage
        meanDepth: Mean coverage, rounded to 2 decimals
    """
    covered = sum(1 for depth in coverageList if depth != 0)
    well_covered = sum(1 for depth in coverageList if depth >= minCov)
    mean_depth = round(statistics.mean(coverageList), 2)
    return (covered, well_covered, mean_depth)
def truncate_top_k_2(x, k):
    """Keep the k highest values in each row of a numpy array, zeroing the rest.

    Args:
        x (np.Array): numpy array (modified in place)
        k (int): number of elements to keep for each row

    Returns:
        np.Array: the processed array (same object as x)
    """
    n_rows = x.shape[0]
    # Indices of everything EXCEPT each row's top-k entries.
    drop_idx = np.argpartition(x, -k, axis=1)[:, :-k]
    row_idx = np.arange(n_rows)[:, None]
    x[row_idx, drop_idx] = 0
    return x
import glob
import os
import collections
def run():
    """
    read pipeline and do infer

    Reads per-sample input_ids files, runs inference for each, collects
    the best prediction text per question id, dumps them to
    infer_result.txt and finally computes metrics into output.json.
    """
    args = parse_args()
    # input_ids file list, every file content a tensor[1,128]
    file_list = glob.glob(os.path.join(os.path.realpath(args.data_dir), "10_data", "*.txt"))
    # Split each path into (dirname, basename) so files can be sorted by name.
    cwq_lists = []
    for i in range(len(file_list)):
        b = os.path.split(file_list[i])
        cwq_lists.append(b)

    def take_second(elem):
        # Sort key: the basename component of the (dir, name) tuple.
        return elem[1]

    cwq_lists.sort(key=take_second)
    # Reassemble the sorted (dir, name) tuples back into full paths.
    yms_lists = []
    for i in range(len(cwq_lists)):
        c = cwq_lists[i][0] + '/' + cwq_lists[i][1]
        yms_lists.append(c)
    file_list = yms_lists
    all_predictions = collections.OrderedDict()
    for input_ids in file_list:
        file_name = input_ids.split('/')[-1].split('.')[0] + '.bin'
        # Run the model (or read its dumped logits) for this sample.
        start_logits, end_logits = get_infer_logits(args, input_ids.split('/')[-1])
        prelim_predictions, features = get_prelim_predictions(args, file_name, start_logits=start_logits,
                                                             end_logits=end_logits, n_best_size=20,
                                                             max_answer_length=30)
        nbest, qas_id = get_nbest(args, prelim_predictions, features, n_best_size=20, do_lower_case=True)
        nbest_json = get_one_prediction(nbest)
        # Keep only the top-ranked answer text for this question id.
        all_predictions[qas_id] = nbest_json[0]["text"]
    file = open('infer_result.txt', 'w')
    file.write(str(all_predictions))
    file.close()
    print('done')
    post_process(args.eval_json_path, all_predictions, output_metrics="output.json")
def permute_dimensions(x, pattern):
    """Permutes axes in a tensor.

    # Arguments
        pattern: should be a tuple of
            dimension indices, e.g. (0, 2, 1).

    # Returns
        A tensor with its axes transposed according to ``pattern``.
    """
    transposed = mx.sym.transpose(x.symbol, axes=pattern)
    return KerasSymbol(transposed)
import copy
def relaxStructure(s, operation):
    """
    Performs a gulp relaxation (either relaxation of unit cell parameters only, or both unit cell and atomic positions.

    s: structure_class
    operation: string
        Specifies the gulp calculation to execute.

    Returns
    -------
    s : structure_class
        The relaxed configuration on success, or the pre-relaxation
        snapshot on failure.
    {True or False}:
        Relaxation succeed or failed (result == "converged").
    result: string
        The gulp output.
    """
    # Shallow snapshot taken BEFORE s is mutated below; its attributes still
    # reference the pre-relaxation structure/energy objects.
    s2 = copy.copy(s)
    s.structure, s.energy, result, calc_time = gulp_calc.gulp_relaxation(copy.copy(s), operation, gulp_shells=[])
    # On non-convergence, hand back the untouched snapshot instead of the
    # partially-updated object.
    return (s, True, result) if result == "converged" else (s2, False, result)
import time
def dos_gaussian_shift(energies, dos_total, projections, nGridpoints, smearing):
    """
    Produces a single gaussian function then shifts the gaussian around the grid

    Advantages:
        + Very fast compared to other methods
    Disadvantages:
        - Produces an edge effect, energy range should be larger than required
        - Very reliable, but not as accurate as addition method as mean needs to be on energy grid
        - Due to edge effect, grids produced will vary in size
        - Grids can be made consistent but edge effect will be shown in data

    Parameters
    -------------
    energies : list
        list of eigenvalues, floating point numbers
    dos_total : list
        Density of states weightings
    projections : list
        Per-eigenvalue component weightings (one list per eigenvalue)
    nGridpoints : float
        Number of grid points to perform this method on
    smearing : float
        Smearing value (gaussian sigma)

    Returns
    --------------
    list, list
        A list of energies and smeared eigenvalues

    NOTE(review): relies on module-level names `min_energy`, `max_energy`,
    `components` and `gaus_func` — confirm they are defined before calling.
    """
    # Start time for function:
    start = time.time()
    nComponents = len(projections[0])
    nColumns = nComponents + 1
    # Create grid for energy values:
    energyGrid = np.linspace(min_energy, max_energy, nGridpoints)
    # Final dos using np:
    final_dos = np.zeros( (nColumns, nGridpoints) )
    # Define gaussian function:
    func = gaus_func(energyGrid, 0, smearing)
    # Find max index of gaussian:
    maximum = func.argmax()
    if components:
        # Move gaussian around grid until mean of gaussian is nearest to the DOS value
        for index, item in enumerate(energies):
            maximum = func.argmax()
            idx = (np.abs(energyGrid - item)).argmin()
            # Roll the precomputed gaussian so its peak sits on the grid
            # point nearest this eigenvalue.
            rolled = np.roll(func, idx-maximum)
            final_dos[0] += rolled*dos_total[index]
            # Accumulate each projected component into its own row.
            for index2, projection in enumerate(projections[index]):
                final_dos[index2+1] += rolled*projection
    else:
        for index, item in enumerate(energies):
            maximum = func.argmax()
            idx = (np.abs(energyGrid - item)).argmin()
            rolled = np.roll(func, idx-maximum)
            final_dos[0] += rolled*dos_total[index]
    # Remove 20% of grid due to edge effects:
    n = int(0.2*func.size)
    final_dos = final_dos[:, n:-n]
    energyGrid = energyGrid[n:-n]
    # finish timing:
    end = time.time()
    print(f"Time elapsed (s), shift method: {end-start:.5f}")
    return energyGrid, final_dos
def temp_h5_file(tmpdir_factory):
    """A fixture that fetches a temporary output dir/file for a test
    file that we want to read or write (so it doesn't clutter up the test
    directory when the automated tests are run)."""
    data_dir = tmpdir_factory.mktemp('data')
    return str(data_dir.join('test.h5'))
def get_day_name(date):
    """
    Return the day name for a given date.

    @param      date    datetime
    @return             day name

    .. faqref::
        :tag: python
        :title: Get the name of the day from a date

        .. runpython::
            :showcode:

            import datetime
            dt = datetime.datetime(2016, 1, 1)
            print(dt.strftime("%A"))
    """
    # format() delegates to date.__strftime__-style formatting ("%A" = weekday name).
    return format(date, "%A")
def get_samples(n_samples, data, labels=None, use_random_transpose=False):
    """Return some random samples of the training data (without replacement)."""
    chosen = np.random.choice(len(data), n_samples, False)
    if np.issubdtype(data.dtype, np.bool_):
        # Map booleans {0, 1} onto {-1., 1.}.
        samples = data[chosen] * 2. - 1.
    else:
        samples = data[chosen]
    if use_random_transpose:
        samples = np.array([random_transpose(s) for s in samples])
    return samples if labels is None else (samples, labels[chosen])
import collections
import collections.abc
from typing import Callable
from typing import Union

import torch
def apply_to_tensor(
    x: Union[torch.Tensor, collections.abc.Sequence, collections.abc.Mapping, str, bytes],
    func: Callable,
) -> Union[torch.Tensor, collections.abc.Sequence, collections.abc.Mapping, str, bytes]:
    """Apply a function on a tensor or mapping, or sequence of tensors.

    Args:
        x: input tensor or mapping, or sequence of tensors.
        func: the function to apply on ``x``.
    """
    # BUGFIX: the aliases collections.Sequence / collections.Mapping were
    # removed in Python 3.10; the ABCs live in collections.abc.
    return apply_to_type(x, torch.Tensor, func)
def status():
    """Render a page showing the number of unprocessed certificate requests."""
    # `== None` is intentional: SQLAlchemy translates it to IS NULL.
    pending = db.session.query(Request).filter(Request.generation_date == None).count()  # noqa: E711
    return render_template('status.html', requests=pending)
def weak_connect(sender, signal, connector, attr, idle=False, after=False):
    """
    Connect a GObject signal through a weak callback and return the
    connection token.
    """
    callback = WeakCallback(connector, attr, idle)
    # connect_after fires the handler after the default signal handlers.
    connect = sender.connect_after if after else sender.connect
    callback.gobject_token = connect(signal, callback)
    return callback.gobject_token
def load_user(id):
    """
    Loader callback for login_manager: fetch a user by primary key.
    """
    user_id = int(id)
    return User.get_by_id(user_id)
def _compute_min_dfc(fnrs, fprs, thresholds, p_target, c_miss, c_fa):
"""
Computes the minimum of the detection cost function. The comments refer to
equations in Section 3 of the NIST 2016 Speaker Recognition Evaluation Plan.
:param fnrs: the list of false negative rates
:param fprs: the list of false positive rates
:param thresholds: the list of decision thresholds
:param p_target: a priori probability of the specified target speaker
:param c_miss: cost of a missed detection
:param c_fa: cost of a spurious detection
:return: the minimum detection cost and accompanying threshold
"""
min_c_det = float("inf")
min_c_det_threshold = thresholds[0]
for i in range(0, len(fnrs)):
# See Equation (2). it is a weighted sum of false negative
# and false positive errors.
c_det = c_miss * fnrs[i] * p_target + c_fa * fprs[i] * (1 - p_target)
if c_det < min_c_det:
min_c_det = c_det
min_c_det_threshold = thresholds[i]
# See Equations (3) and (4). Now we normalize the cost.
c_def = min(c_miss * p_target, c_fa * (1 - p_target))
min_dcf = min_c_det / c_def
return min_dcf, min_c_det_threshold | 23931070ad23f2dc8b1fdc63d0e4635f9fede535 | 29,862 |
def parse_rsync_url(location):
    """Parse a rsync-style URL into a (user, host, path) tuple.

    ``user`` is None when no ``user@`` prefix is present. Raises
    ValueError when the location contains no ``:`` separator.
    """
    if ':' not in location:
        raise ValueError('not a valid rsync-style URL')
    # Split off the path at the FIRST colon; the remainder may contain more.
    user_host, path = location.split(':', 1)
    if '@' in user_host:
        # user@host — split at the LAST '@' so users containing '@' survive.
        user, host = user_host.rsplit('@', 1)
    else:
        user = None
        host = user_host
    return (user, host, path)
def ThetaE(tempk, pres, e):
    """Calculate Equivalent Potential Temperature
        for lowest model level (or surface)

    INPUTS:
    tempk: Temperature [K]
    pres: Pressure [Pa]
    e: Water vapour partial pressure [Pa]

    OUTPUTS:
    theta_e: equivalent potential temperature

    References:
    Eq. (9.40) from Holton (2004)
    Eq. (22) from Bolton (1980)
    Michael P. Byrne and Paul A. O'Gorman (2013), 'Land-Ocean Warming
    Contrast over a Wide Range of Climates: Convective Quasi-Equilibrium
    Theory and Idealized Simulations', J. Climate

    NOTE(review): uses module-level constants degCtoK, Rs_da, Cp_da, Lv —
    confirm they are defined. `theta` below is computed but never used.
    """
    # tempc
    tempc = tempk - degCtoK
    # Calculate theta
    theta = Theta(tempk, pres)

    # T_lcl formula needs RH
    es = VaporPressure(tempc)
    RH = 100. * e / es

    # theta_e needs q (water vapour mixing ratio)
    qv = MixRatio(e, pres)

    # Calculate the temp at the Lifting Condensation Level
    # (Bolton 1980, Eq. 22)
    T_lcl = ((tempk-55)*2840 / (2840-(np.log(RH/100)*(tempk-55)))) + 55

    # print "T_lcl :%.3f"%T_lcl

    # DEBUG STUFF ####
    theta_l = tempk * \
        (100000./(pres-e))**(Rs_da/Cp_da)*(tempk/T_lcl)**(0.28*qv)
    # print "theta_L: %.3f"%theta_l

    # Calculate ThetaE
    theta_e = theta_l * np.exp((Lv * qv) / (Cp_da * T_lcl))

    return theta_e
import random
def generate_address_street():
    """Build a street address as "<number> <street> <suffix>"."""
    house_number = random.randint(1, 9999)
    # Street names are sampled from the last-names pool.
    street_name = last_names[random.randint(0, len(last_names) - 1)]
    street_suffix = address_street_suffix[random.randint(0, len(address_street_suffix) - 1)]
    return "{0} {1} {2}".format(house_number, street_name, street_suffix)
def try_all_eliza_transformations(doc):
    """
    Try every eliza transformation on *doc*, collecting the non-empty
    transformed strings into a responses list.
    """
    responses = []
    # The original order is preserved: like-question, rephrase, why-question.
    for transform in (ask_do_you_like_to, rephrase_question, ask_why):
        question = transform(doc)
        if question:
            responses.append(question)
    return responses
def get_jwt_subject():
    """Return the subject ("sub" claim) from a valid access token.

    Aborts with 401 when the decoded payload carries no subject.
    """
    payload = verify_decode_jwt(get_token_auth_header())
    if "sub" not in payload:
        abort(401)
    return payload["sub"]
import functools
def api(function):
    """
    Decorator of API functions that protects user code from
    unknown exceptions raised by gRPC or internal API errors.
    It will catch all exceptions and throw InternalError.
    :param function: function to be decorated
    :return: decorated function
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except (SyntaxError, TypeError, InternalError):
            # Programming errors and already-wrapped errors pass through.
            raise
        except _Rendezvous as e:
            # gRPC transport failure: surface code + details as InternalError.
            raise InternalError(str(e.code()), e.details())
        except Exception as e:
            # Anything else is wrapped, chaining the original cause.
            raise InternalError(details=str(e)) from e
    return wrapper
def mock_signal_receiver(signal, wraps=None, **kwargs):
    """
    Taken from mock_django as importing mock_django created issues with Django
    1.9+

    Temporarily attaches a receiver to the provided ``signal`` within the scope
    of the context manager.

    The mocked receiver is returned as the ``as`` target of the ``with``
    statement.

    To have the mocked receiver wrap a callable, pass the callable as the
    ``wraps`` keyword argument. All other keyword arguments provided are passed
    through to the signal's ``connect`` method.

    NOTE(review): this is a generator; to be usable directly in a ``with``
    statement it presumably needs a ``@contextlib.contextmanager`` decorator
    at its definition or call site — confirm how callers wrap it.
    """
    if wraps is None:
        # Default no-op target so Mock(wraps=...) always has a callable.
        def wraps(*args, **kwrags):
            return None

    receiver = mock.Mock(wraps=wraps)
    signal.connect(receiver, **kwargs)
    yield receiver
    # Cleanup after the with-block body finishes.
    signal.disconnect(receiver)
import urllib
def is_valid_cover(cover_metadata):
    """Fetch all sizes of cover from url and evaluate if they are valid.

    A cover is valid when every size URL responds with HTTP 200 and a
    Content-Length above MIN_CONTENT_LENGTH. Any failure (network error,
    missing header, bad status) yields False.
    """
    # `import urllib` alone does not expose the `request` submodule; make
    # sure it is actually loaded before use.
    import urllib.request

    syndetics_urls = build_syndetic_cover_urls(cover_metadata)
    if syndetics_urls is None:
        return False
    try:
        for size in ["small", "medium", "large"]:
            # Close each response promptly instead of leaking the socket.
            with urllib.request.urlopen(syndetics_urls[size]) as resp:
                has_error = resp.getcode() != 200
                less_than_1_pixel = (
                    int(resp.getheader("Content-Length")) <= MIN_CONTENT_LENGTH
                )
                if has_error or less_than_1_pixel:
                    return False
    except Exception:
        # Deliberate best-effort: any failure means "not a valid cover".
        return False
    return True
def setup_go_func(func, arg_types=None, res_type=None):
    """
    Set up a Go function so ctypes knows which types it takes and returns.

    :param func: Go function loaded from a shared library.
    :param arg_types: Optional list of ctypes argument types. Default: None.
    :param res_type: Optional ctypes result type. Default: None.
    :return: The same func, so setup and assignment can happen in one line.
    """
    for attribute, value in (("argtypes", arg_types), ("restype", res_type)):
        if value is not None:
            setattr(func, attribute, value)
    return func
import itertools
def compress_cubic(G):
"""Calculate the matricized cubic operator that operates on the compact
cubic Kronecker product.
Parameters
----------
G : (r,r**3) ndarray
The matricized cubic tensor that operates on the full cubic Kronecker
product. This should be a symmetric operator in the sense that each
layer of G.reshape((r,r,r,r)) is a symmetric (r,r,r) tensor, but it is
not required.
Returns
-------
Gc : (r,s) ndarray
The matricized cubic tensor that operates on the compact cubic
Kronecker product. Here s = r * (r+1) * (r+2) / 6.
"""
# TODO: only check that r3 is a perfect cube, not necessarily r**3
# (may be useful for cubic interactions of input or for systems).
r = G.shape[0]
r3 = G.shape[1]
if r3 != r**3:
raise ValueError(f"invalid shape (r,a) = {(r,r3)} with a != r**3")
s = r * (r+1) * (r+2) // 6
Gc = np.empty((r, s))
fj = 0
for i in range(r):
for j in range(i+1):
for k in range(j+1):
idxs = set(itertools.permutations((i,j,k),3))
Gc[:,fj] = np.sum([G[:,(a*r**2)+(b*r)+c] for a,b,c in idxs],
axis=0)
fj += 1
# assert fj == s
return Gc | 2058cf169695a3fbef41e78bc521577b191e6d08 | 29,872 |
import ctypes
def xonly_pubkey_tweak_add(
    xonly_pubkey: Secp256k1XonlyPubkey, tweak32: bytes
) -> Secp256k1Pubkey:
    """
    Tweak an x-only public key by adding the generator multiplied with tweak32
    to it.

    Note that the resulting point can not in general be represented by an x-only
    pubkey because it may have an odd Y coordinate. Instead, the output_pubkey
    is a normal Secp256k1Pubkey.

    :param xonly_pubkey: initialized xonly pubkey
    :param tweak32: 32-byte tweak
    :return: tweaked public key (ctypes buffer of a full, non-x-only pubkey)
    :raises ValueError: if tweak32 is not of type bytes and length 32
    :raises Libsecp256k1Exception: arguments are invalid or the resulting public
                                   key would be invalid (only when the tweak is
                                   the negation of the corresponding secret key)
    """
    # Output buffer for the full (not x-only) public key representation.
    tweaked_pubkey = ctypes.create_string_buffer(INTERNAL_PUBKEY_LENGTH)
    result = lib.secp256k1_xonly_pubkey_tweak_add(
        secp256k1_context_verify, tweaked_pubkey, xonly_pubkey, tweak32
    )
    # libsecp256k1 returns 1 on success; anything else is a failure.
    if result != 1:
        assert_zero_return_code(result)
        raise Libsecp256k1Exception(
            "arguments are invalid or the resulting public key "
            "would be invalid (only when the tweak is the negation "
            "of the corresponding secret key)"
        )
    return tweaked_pubkey
from typing import Optional
def get_organisms_df(url: Optional[str] = None) -> pd.DataFrame:
    """Load the KEGG organism list as a pandas DataFrame.

    :param url: url of the KEGG tab separated file; a cached local copy is
        used when not given.
    :return: dataframe with kegg_id, kegg_code and name columns
    :rtype: pandas.DataFrame
    """
    source = url or ensure_path(MODULE_NAME, KEGG_ORGANISM_URL, path='organisms.tsv')
    df = pd.read_csv(
        source,
        sep='\t',
        header=None,
        # fourth column is the taxonomy hierarchy and is dropped via usecols
        names=['kegg_id', 'kegg_code', 'name'],
        usecols=[0, 1, 2],
    )
    # "Homo sapiens (human)" -> ["Homo sapiens", "human"]
    df['name'] = df['name'].map(lambda name: name.replace(')', '').split(' ('))
    return df
import argparse
def positive_int(val):
    """
    argparse type-checker: parse *val* as a strictly positive int.

    :param val: command-line string to convert.
    :return: the parsed positive integer.
    :raises argparse.ArgumentTypeError: if val is not a positive integer.
    """
    # Explicit checks replace the old `assert`, which is stripped (and would
    # silently accept non-positive values) when Python runs with -O.
    try:
        ival = int(val)
    except ValueError:
        raise argparse.ArgumentTypeError("'%s' is not a valid positive int" % val)
    if ival <= 0:
        raise argparse.ArgumentTypeError("'%s' is not a valid positive int" % val)
    return ival
import random
def split(dataset: Dataset, count: int, shuffle=False):
    """Split a Dataset into sub-datasets of len(dataset) // count items each.

    When shuffle is true, indices are shuffled before slicing. Note that if
    len(dataset) is not evenly divisible, a final shorter slice is also
    produced by the stepped range below.
    """
    total = len(dataset)
    sub_size = total // count
    assert sub_size > 0
    indices = np.arange(total)
    if shuffle:
        random.shuffle(indices)
    return [
        dataset.slice(indices[start : start + sub_size])
        for start in range(0, total, sub_size)
    ]
import pathlib
def load_fixture(filename):
    """Load a fixture file from the sibling ``fixtures`` directory as text."""
    fixtures_dir = pathlib.Path(__file__).parent / "fixtures"
    return (fixtures_dir / filename).read_text(encoding="utf8")
import re
def natural_sort(l):
    """
    Sort strings so embedded numbers order numerically ("a2" < "a10").

    From
    http://stackoverflow.com/a/4836734
    """
    def alphanum_key(key):
        # Split into digit / non-digit chunks; digits compare as ints,
        # text compares case-insensitively.
        return [
            int(chunk) if chunk.isdigit() else chunk.lower()
            for chunk in re.split('([0-9]+)', key)
        ]
    return sorted(l, key=alphanum_key)
import inspect
def is_verifier(cls):
    """Determine if a class is a concrete Verifier that can be instantiated."""
    return (
        inspect.isclass(cls)
        and issubclass(cls, Verifier)
        and not inspect.isabstract(cls)
    )
import os
def get_config():
    """Parse the aliases and configuration.

    Reads every readable weatherrc in precedence order (later files
    override earlier ones) and normalises section names to lowercase.
    Returns the populated ConfigParser instance.
    """
    # Import the parser module under a py2/py3-neutral name.
    if pyversion("3"): import configparser
    else: import ConfigParser as configparser
    config = configparser.ConfigParser()
    # Candidate rc files, system-wide first so user files take precedence.
    rcfiles = [
        "/etc/weatherrc",
        "/etc/weather/weatherrc",
        os.path.expanduser("~/.weather/weatherrc"),
        os.path.expanduser("~/.weatherrc"),
        "weatherrc"
        ]
    for rcfile in rcfiles:
        if os.access(rcfile, os.R_OK):
            if pyversion("3"):
                config.read(rcfile, encoding="utf-8")
            else:
                config.read(rcfile)
    # Fold mixed-case section names into their lowercase equivalents,
    # merging options into any existing lowercase section.
    for section in config.sections():
        if section != section.lower():
            if config.has_section(section.lower()):
                config.remove_section(section.lower())
            config.add_section(section.lower())
            for option,value in config.items(section):
                config.set(section.lower(), option, value)
    return config
import os
def get_framework_sample(project_id: str, framework: str, sample: str):
    """
    Route for getting sample code for an ML framework
    for optimization of the projects model

    :param project_id: the project_id to get the available frameworks for
    :param framework: the ML framework to get available sample code types for
    :param sample: the type of sample code to get
    :return: a tuple containing (json response, http status code)
    """
    _LOGGER.info(
        (
            "getting the sample code for project_id {}, framework {}, and sample {}"
        ).format(project_id, framework, sample)
    )
    # make sure project exists; project_id is otherwise unused for now but
    # keeps the route flexible for per-project framework customization later
    optim_validate_and_get_project_by_id(project_id)
    samples_root = os.path.join(
        os.path.dirname(clean_path(__file__)), "code_samples"
    )
    if framework not in ML_FRAMEWORKS:
        raise HTTPNotFoundError(
            "could not find the given framework of {}".format(framework)
        )
    sample_path = os.path.join(samples_root, "{}__{}.py".format(framework, sample))
    if not os.path.exists(sample_path):
        raise HTTPNotFoundError(
            (
                "could not find sample code for project_id {}, "
                "framework {} and sample {}"
            ).format(project_id, framework, sample)
        )
    _LOGGER.info(
        (
            "retrieved available sample code for project_id {}, "
            "framework {}, and sample {} from {}"
        ).format(project_id, framework, sample, sample_path)
    )
    return send_file(sample_path, mimetype="text/plain")
def get_bleu_score(references, hypothesis):
    """
    Args:
        references: list(list(list(str)))
            # examples: list(examples)
        hypothesis: list(list(list(str)))
            # hypotheses: list(list(str))

    Returns corpus BLEU as a percentage (0-100).
    """
    # Each hypothesis entry is a list of candidate lists; keep only the
    # top candidate of each before scoring.
    best = [candidates[0][0] for candidates in hypothesis]
    return 100.0 * bleu_score.corpus_bleu(list_of_references=references, hypotheses=best)
import os
import tempfile
import re
def GetLicenseTypesFromEbuild(ebuild_path):
    """Returns a list of license types from the ebuild file.

    This function does not always return the correct list, but it is
    faster than using portageq for not having to access chroot. It is
    intended to be used for tasks such as presubmission checks.

    Args:
        ebuild_path: ebuild to read.

    Returns:
        list of licenses read from ebuild.

    Raises:
        ValueError: ebuild errors.
    """
    # Minimal shell environment that stubs out portage's has()/inherit()
    # so the ebuild can be sourced outside a real portage environment.
    # The template content is a runtime string; do not re-indent it.
    ebuild_env_tmpl = """
has() { [[ " ${*:2} " == *" $1 "* ]]; }
inherit() {
local overlay_list="%(overlay_list)s"
local eclass overlay f
for eclass; do
has ${eclass} ${_INHERITED_} && continue
_INHERITED_+=" ${eclass}"
for overlay in %(overlay_list)s; do
f="${overlay}/eclass/${eclass}.eclass"
if [[ -e ${f} ]]; then
source "${f}"
break
fi
done
done
}
source %(ebuild)s"""
    # TODO: the overlay_list hard-coded here should be changed to look
    # at the current overlay, and then the master overlays. E.g. for an
    # ebuild file in overlay-parrot, we will look at parrot overlay
    # first, and then look at portage-stable and chromiumos, which are
    # listed as masters in overlay-parrot/metadata/layout.conf.
    tmpl_env = {
        'ebuild': ebuild_path,
        'overlay_list': '%s %s' % (
            os.path.join(constants.SOURCE_ROOT,
                         'src/third_party/chromiumos-overlay'),
            os.path.join(constants.SOURCE_ROOT,
                         'src/third_party/portage-stable'))
    }
    # NOTE(review): bufsize=0 is the Python 2 spelling (Python 3 uses
    # buffering=0 and requires binary mode) — this code path assumes Py2.
    with tempfile.NamedTemporaryFile(bufsize=0) as f:
        osutils.WriteFile(f.name, ebuild_env_tmpl % tmpl_env)
        # Source the ebuild in a subshell and capture only LICENSE.
        env = osutils.SourceEnvironment(
            f.name, whitelist=['LICENSE'], ifs=' ', multiline=True)
    if not env.get('LICENSE'):
        raise ValueError('No LICENSE found in the ebuild.')
    if re.search(r'[,;]', env['LICENSE']):
        raise ValueError(
            'LICENSE field in the ebuild should be whitespace-limited.')
    return env['LICENSE'].split()
def load_json_link_index(out_dir, link):
    """Check for an existing link archive in the given directory,
    and load+merge it into the given link dict.

    Values in `link` take precedence over the stored index; `history`
    is always guaranteed to be a dict.
    """
    merged = dict(parse_json_link_index(out_dir))
    merged.update(link)
    merged['history'] = merged.get('history') or {}
    check_link_structure(merged)
    return merged
def relative_phase(input_phase: float, output_phase: float) -> float:
    """
    Calculates the relative phase between two phases.

    :param input_phase: the input phase.
    :param output_phase: the output phase.
    :return: the relative phase, single-step wrapped into [-pi, pi].
    """
    delta = output_phase - input_phase
    if delta > np.pi:
        delta -= 2 * np.pi
    elif delta < -np.pi:
        delta += 2 * np.pi
    return delta
def get_process_state(*args):
    """
    get_process_state() -> int

    Return the state of the currently debugged process. \sq{Type,
    Synchronous function, Notification, none (synchronous function)}

    @return: one of Debugged process states
    """
    # Thin SWIG-generated wrapper: defers entirely to the native IDA
    # debugger module.
    return _ida_dbg.get_process_state(*args)
def jaccard_similarity(x, y):
    """Return the Jaccard Similarity Coefficient (Jaccard Index) between two
    iterables.

    From http://en.wikipedia.org/wiki/Jaccard_index: the Jaccard coefficient
    measures similarity between finite sample sets, and is defined as the
    size of the intersection divided by the size of the union of the sample
    sets.

    Two empty inputs are considered identical and yield 1.0 (the original
    implementation raised ZeroDivisionError in that case).
    """
    a, b = set(x), set(y)
    union = a | b
    if not union:
        return 1.0  # both empty: identical by convention
    return len(a & b) / float(len(union))
def align_address_to_size(address, align):
    """Round `address` up to the next multiple of `align`
    (returned unchanged if already aligned)."""
    remainder = address % align
    if remainder == 0:
        return address
    return address + (align - remainder)
import shlex
def tokenizer_word(text_string, keep_phrases=False):
    """
    Tokenize a string of text on whitespace, treating newlines and '/' as
    separators.

    :param text_string: Python string object to be tokenized.
    :param keep_phrases: Boolean; when True, "quoted" text is kept as a
        single token.
    :return: Array of strings, each is a word.
    """
    normalized = str(text_string).replace('\n', ' ').replace('/', ' ')
    if keep_phrases:
        return shlex.split(normalized)
    return normalized.split()
def getiso():
    """Return the ISO tint level derived from the camera's analog gain."""
    global camera
    max_tint = 4
    gain = float(camera.analog_gain)  # current ambient brightness, 0..8
    tint = gain * max_tint            # scale gain by the max tint level
    # clear value (256) minus the maximum possible tint, plus the ISO tint
    return int((256 - (max_tint * 8)) + tint)
import logging
import traceback
def _get_credentials(rse, endpoint):
    """
    Pass an endpoint and return its credentials.

    :param endpoint: URL endpoint string.
    :param rse: RSE name.
    :returns: Dictionary of credentials.
    :raises CannotAuthenticate: if the RSE/endpoint is missing from the cfg.
    :raises RucioException: on any other failure while loading credentials.
    """
    key = '%s_%s' % (rse, endpoint)
    result = REGION.get(key)
    if type(result) is NoValue:  # cache miss sentinel from dogpile.cache
        try:
            logging.debug("Loading account credentials")
            result = config.get_rse_credentials(None)
            if result and rse in result:
                result = result[rse]
                result['is_secure'] = result['is_secure'][endpoint]
                REGION.set(key, result)
            else:
                raise Exception("Failed to load account credentials")
            logging.debug("Loaded account credentials")
        except KeyError as e:
            raise exception.CannotAuthenticate('RSE %s endpoint %s not in rse account cfg: %s' % (rse, endpoint, e))
        except Exception:
            # Was a bare ``except:`` which would also swallow SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            raise exception.RucioException("Failed to load credentials for RSE(%s) endpoint(%s), error: %s" % (rse, endpoint, traceback.format_exc()))
    return result
from typing import Union
from typing import Sequence
from typing import List
from typing import Dict
def get_manual_comparisons(db: cosem_db.MongoCosemDB,
                           cropno: Union[None, str, int, Sequence[Union[str, int]]] = None,
                           mode: str = "across_setups") -> \
        List[Union[Dict[str, str], Dict[str, Union[str, Sequence[str]]]]]:
    """
    Translate the manually optimized configurations (read from csv files by
    the ``_get_*_queries`` helpers) into query dictionaries usable against
    the database of automatic evaluations.

    Args:
        db: Database with crop information.
        cropno: Specific crop number, or list of crop numbers, to include.
        mode: "per_setup" for queries specifying the optimized manual
            iteration per setup, "across_setups" (default) for queries
            specifying iteration and setup per label, or "all" for both.

    Returns: List of corresponding queries.

    Raises:
        ValueError: if ``mode`` is not one of the recognized values.
    """
    if isinstance(cropno, (int, str)):
        cropno = [cropno]
    if mode == "across_setups":
        return _get_setup_queries(cropno, db)
    if mode == "per_setup":
        return _get_iteration_queries(cropno, db)
    if mode == "all":
        return _get_iteration_queries(cropno, db) + _get_setup_queries(cropno, db)
    raise ValueError("Unknown mode {mode:}".format(mode=mode))
def draw_circle(center_x:float, center_y:float, radius:float = 0.3, segments:int = 360, fill:bool=False):
    """
    Returns an Object2D that draws a circle.

    Arguments:
        center_x : float : The x cord for the center of the circle.
        center_y : float : The y cord for the center of the circle.
        radius   : float : The radius of the circle.
        segments : int   : How many segments to make the circle from.
        fill     : bool  : Should the shape be filled.
    """
    points = []
    for i in range(segments):
        angle = (2 * pi * i) / segments             # current angle on the rim
        points.append([radius * cos(angle) + center_x,
                       radius * sin(angle) + center_y])
    if fill:
        # Triangle fan anchored at the center vertex (index 0).
        points.insert(0, [center_x, center_y])
        faces = [[0, i + 1, i + 2] for i in range(len(points) - 2)]
        faces.append([0, segments, 1])  # close the fan back to the first rim vertex
        return Object2D(points, faces, draw_type='triangles')
    edges = [[i, i + 1] for i in range(len(points) - 1)]
    edges.append([segments - 1, 0])  # close the loop
    return Object2D(points, edges, draw_type='lines')
def weighted_mean(values, weights):
    """Calculate the weighted mean.

    :param values: Array of values
    :type values: numpy.ndarray
    :param weights: Array of weights
    :type weights: numpy.ndarray
    :rtype: float
    """
    weighted_sum = (values * weights).sum()
    return weighted_sum / weights.sum()
def shared_cluster():
    """Create (once) and return a shared PseudoHdfs4 test cluster.

    The cluster is started lazily on first call, cached in a module-level
    global, and registered with atexit for shutdown.  Hadoop config entries
    are patched to point at the live cluster; a shutdown hook restores them.
    """
    global _shared_cluster
    if _shared_cluster is None:
        cluster = PseudoHdfs4()
        atexit.register(cluster.stop)
        try:
            cluster.start()
        except Exception as ex:  # was Py2-only "except Exception, ex" syntax
            LOG.exception("Failed to fully bring up test cluster: %s" % (ex,))
        # Fix config to reflect the cluster setup.
        webhdfs_url = "http://localhost:%s/webhdfs/v1" % (cluster.dfs_http_port,)
        closers = [
            hadoop.conf.HDFS_CLUSTERS['default'].FS_DEFAULTFS.set_for_testing(cluster.fs_default_name),
            hadoop.conf.HDFS_CLUSTERS['default'].WEBHDFS_URL.set_for_testing(webhdfs_url),
            hadoop.conf.MR_CLUSTERS['default'].HOST.set_for_testing('localhost'),
            hadoop.conf.MR_CLUSTERS['default'].PORT.set_for_testing(cluster._jt_port),
            hadoop.conf.MR_CLUSTERS['default'].JT_THRIFT_PORT.set_for_testing(cluster.jt_thrift_port),
        ]
        old = hadoop.cluster.clear_caches()
        def restore_config():
            # Undo the set_for_testing() overrides and restore cached clusters.
            hadoop.cluster.restore_caches(old)
            for x in closers:
                x()
        cluster.shutdown_hook = restore_config
        _shared_cluster = cluster
    return _shared_cluster
import reprlib
def _format_args(args):
"""Format function arguments.
Special case for a single parameter: ('hello',) is formatted as ('hello').
"""
# use reprlib to limit the length of the output
args_repr = reprlib.repr(args)
if len(args) == 1 and args_repr.endswith(',)'):
args_repr = args_repr[:-2] + ')'
return args_repr | a54f06358b629340c1f16ecc86eff15b8fca3bd3 | 29,896 |
import requests
import logging
import json
def request(config, url_params=None):
    """Wrapper for sending GET to Facebook.

    Args:
        config: YAML object of config file.
        url_params: Dictionary of parameters to add to GET.

    Returns:
        HTTP response or error.
    """
    # Mutable default argument replaced by a None sentinel (the shared-dict
    # pitfall), keeping the call signature backward-compatible.
    if url_params is None:
        url_params = {}
    host = HOST + f"/{config['user_id']}/"
    params = {"fields": "id,name",
              "access_token": config['user_token']}
    params.update(url_params)
    try:
        response = requests.get(host, params=params)
        logging.info(f"Sending to Facebook: {response.status_code}")
        response.encoding = "utf-8"
        # NOTE(review): response.text is already a str, so json.dumps encodes
        # it as a JSON string literal rather than pretty-printing the payload
        # — confirm this is the intended return format.
        return json.dumps(response.text, indent=4)
    except HTTPError as e:
        return e
import miniupnpc
def setup(hass, config):
    """Register a port mapping for Home Assistant via UPnP."""
    upnp = miniupnpc.UPnP()
    hass.data[DATA_UPNP] = upnp
    upnp.discoverdelay = 200
    upnp.discover()
    try:
        upnp.selectigd()
    except Exception:
        _LOGGER.exception("Error when attempting to discover an UPnP IGD")
        return False
    # Expose the traffic sensor regardless of port-mapping configuration.
    unit = config[DOMAIN].get(CONF_UNITS)
    discovery.load_platform(hass, 'sensor', DOMAIN, {'unit': unit}, config)
    if not config[DOMAIN].get(CONF_ENABLE_PORT_MAPPING):
        return True
    base_url = urlsplit(hass.config.api.base_url)
    port = base_url.port
    upnp.addportmapping(
        port, 'TCP', base_url.hostname, port, 'Home Assistant', '')
    def deregister_port(event):
        """De-register the UPnP port mapping."""
        upnp.deleteportmapping(hass.config.api.port, 'TCP')
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, deregister_port)
    return True
def load_formatted_objects_json(fp):
    """Load formatted object data from a json file.

    The function assumes the input json is of the form::

        [
            {
                id: <number>,
                regions: [
                    {
                        x: <number>,
                        y: <number>,
                        height: <number>,
                        width: <number>,
                        id: <number>,
                        phrase: "something cool",
                        image: <image id>,
                    }
                ]
            }
        ]

    Returns a dictionary keyed by image name (id + .jpg) whose values are
    lists of bounding-box dicts (x, y, h, w, names).

    Parameters
    -----------
    fp : string
        path to a json file

    Returns
    --------
    dictionary
    """
    parsed = json_load(fp)
    out = {}
    for entry in parsed:
        # The formatted input only stores a numeric id that densecap later
        # turns into a filename; reconstruct that filename here.
        image_name = "{0}.jpg".format(entry['id'])
        out[image_name] = [
            dict(x=region['x'], y=region['y'], h=region['height'],
                 w=region['width'], names=[region['phrase']])
            for region in entry['regions']
        ]
    return out
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.