content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
def _strategies(min_difficulty=None, max_difficulty=None):
    """Draw one of the registered strategies whose difficulty lies in the given range."""
return draw(one_of(
_global_strategy_lookup.items()[min_difficulty : max_difficulty]
)) | a447a63d9739897e9a3e9d2987542bb4ccf485f4 | 3,635,100 |
def imag(z):
"""
Returns the imaginary part of z.
>>> imag(2+3j)
3.0
If the input is a number, a number is returned:
>>> isinstance(imag(2+3j), float)
True
Can be used with arrays, too:
>>> imag(np.array([1+10j, 2+20j, 3+30j]))
array([ 10., 20., 30.])
"""
return content_if_0d_array(np.imag(z)) | 82385e71479e09b4676daedb26c6a82d2d51d9fd | 3,635,101 |
from typing import Callable
from typing import Sequence
def apply(f: Callable[[str], B_monoid], description: str) -> Parser[B_monoid]:
"""
A shortcut for ``item(description).apply(f)``.
In contrast to :py:meth:`Parser.apply`, this function spares ``f``
the trouble of outputting a :py:class:`Result<dollar_lambda.result.Result>` object.
Here is an example of usage. First we define a simple :py:func:`argument` parser:
>>> p1 = argument("foo")
>>> p1.parse_args("bar")
{'foo': 'bar'}
Here we use ``f`` to directly manipulate the binding generated by :py:func:`argument`:
>>> from dollar_lambda import apply
>>> p2 = apply(lambda bar: Output.from_dict(**{bar + "e": bar + "f"}), description="baz")
>>> p2.parse_args("bar")
{'bare': 'barf'}
"""
def g(out: Output[Sequence[KeyValue[str]]]) -> Result[B_monoid]:
*_, (_, v) = map(astuple, out.get)
assert v is not None # because item produces output
try:
y = f(v)
except Exception as e:
usage = f"argument {v} raised exception {e}"
return Result(ArgumentError(usage))
return Result.return_(y)
return item(description).apply(g) | 0de89f6b9db794ace5244e03d37e3b233bcfe385 | 3,635,102 |
def fit_normalized_gaussian_process(X, y, nu=1.5):
"""
    We fit a Gaussian process, but first subtract the mean and divide by stddev.
    To undo at prediction time, call y_pred = gp.predict(X) * y_stddev + y_mean
"""
gp = gaussian.GaussianProcessRegressor(
kernel=gaussian.kernels.Matern(nu=nu), n_restarts_optimizer=2, alpha=0.0000001, random_state=2
)
if len(y) == 1:
y = np.array(y)
y_mean = y[0]
y_stddev = 1
else:
y_mean = np.mean(y)
y_stddev = np.std(y) + 0.0001
y_norm = (y - y_mean) / y_stddev
gp.fit(X, y_norm)
return gp, y_mean, y_stddev | 7a2a17fcaaf79f8395d697cf8cf91a883da125be | 3,635,103 |
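A minimal usage sketch of the normalize/fit/denormalize round trip described in the docstring; the `gaussian` alias shown below is an assumption (the function body presumably relies on a module-level `from sklearn import gaussian_process as gaussian`):
import numpy as np
from sklearn import gaussian_process as gaussian  # presumed module-level alias used inside the function
X = np.array([[0.0], [1.0], [2.0]])
y = np.array([1.0, 3.0, 5.0])
gp, y_mean, y_stddev = fit_normalized_gaussian_process(X, y, nu=1.5)
y_pred = gp.predict(X) * y_stddev + y_mean  # undo the normalization at prediction time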
def choices_on_ballots(L, printing_wanted=False):
"""
Return a dict of the choices shown on ballot list L, with counts.
    Args:
        L (list): list of ballots
        printing_wanted (bool): if True, print the ballot in which each
            choice is first seen.
    Returns:
        C (dict): dict of distinct strings appearing in ballots in L,
            each with count of number of occurrences.
    Example:
        >>> choices_on_ballots([('A', 'B'), ('B',)])
        {'A': 1, 'B': 2}
"""
C = dict()
ballot_no = 0
for ballot in L:
ballot_no += 1
for choice in ballot:
            if printing_wanted and choice not in C:
print("Choice {} first seen in ballot {}"
.format(choice, ballot_no))
C[choice] = 1 + C.get(choice, 0)
return C | e489eef70ee0efd0f40f5163c2135a5549c8893e | 3,635,104 |
def prime_mask(n: int) -> np.ndarray:
"""Generate boolean array of length N, where prime indices are True."""
primes = np.ones(n, dtype=bool)
primes[:2] = False
for i in range(2, n):
if primes[i]:
# Mark all multiples of i as composite
composite = 2 * i
while composite < n:
primes[composite] = False
composite += i
return primes | cd8d64e35b440e92727a76508f52e1b6540bc461 | 3,635,105 |
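For illustration, a quick check of the sieve on a small n (assuming numpy is imported as np, as in the function body):
mask = prime_mask(10)
# mask -> [False, False, True, True, False, True, False, True, False, False]
np.flatnonzero(mask)  # -> array([2, 3, 5, 7])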
from django_pg_returning import ReturningQuerySet
def _bulk_update_no_validation(model, values, conn, key_fds, upd_fds, ret_fds, where):
# type: (Type[Model], TUpdateValuesValid, TDatabase, Tuple[FieldDescriptor], Tuple[FieldDescriptor], Optional[Tuple[FieldDescriptor]], Tuple[str, tuple]) -> Union[int, 'ReturningQuerySet'] # noqa: F821
"""
Does bulk update, skipping parameters validation.
    It is used to speed up bulk_update_or_create, where parameters are already formatted.
:param model: Model to update, a subclass of django.db.models.Model
:param values: Data to update. All items must update same fields!!!
Dict of key_values_tuple: update_fields_dict
:param conn: Database connection used
:param key_fds: Field names, by which items would be selected (tuple)
:param upd_fds: FieldDescriptor objects to update
:param ret_fds: Optional fds to return as ReturningQuerySet
:param where: A sql, params tuple to filter query data before update
:return: Number of records updated if ret_fds not given. ReturningQuerySet otherwise
"""
    # No values to update. Report that everything is done.
if not upd_fds or not values:
return len(values) if ret_fds is None else ReturningQuerySet(None)
values_sql, values_params = _with_values_query_part(model, values, conn, key_fds, upd_fds)
upd_sql, upd_params = _bulk_update_query_part(model, conn, key_fds, upd_fds, where)
ret_sql, ret_params = _returning_query_part(model, conn, ret_fds)
sql = "%s %s %s" % (values_sql, upd_sql, ret_sql)
params = values_params + upd_params + ret_params
return _execute_update_query(model, conn, sql, params, ret_fds) | 424613c09a9b3a7697bce01ae1e576bfe6c01f39 | 3,635,106 |
def dmm_exitcell(subidxs_ds, subuparea, subshape, shape, cellsize, mv=_mv):
"""Returns exit highres cell indices of lowres cells according to the
double maximum method (DMM).
Parameters
----------
subidxs_ds : 1D-array of int
highres linear indices of downstream cells
subuparea : 1D-array of int
highres flattened upstream area array
subshape : tuple of int
highres raster shape
shape : tuple of int
lowres raster shape
cellsize : int
        size of lowres cell measured in highres cells
Returns
-------
1D array of int
highres indices of representative cells
"""
_, subncol = subshape
nrow, ncol = shape
# allocate output
subidxs_rep = np.full(nrow * ncol, mv, dtype=subidxs_ds.dtype)
uparea = np.zeros(nrow * ncol, dtype=subuparea.dtype)
# loop over valid indices
for subidx in range(subidxs_ds.size):
subidx_ds = subidxs_ds[subidx]
if subidx_ds == mv:
continue
# NOTE including pits in the edge area is different from the original
ispit = subidx_ds == subidx
edge = cell_edge(subidx, subncol, cellsize)
# check upstream area if cell ispit or at effective area
if ispit or edge:
idx = subidx_2_idx(subidx, subncol, cellsize, ncol)
upa = subuparea[subidx]
upa0 = uparea[idx]
# cell with largest upstream area is representative cell
if upa > upa0:
uparea[idx] = upa
subidxs_rep[idx] = subidx
return subidxs_rep | e8c10e39c07cdcdd245653c05ed456f049b58e89 | 3,635,107 |
import re
from decimal import Decimal as D  # assumed: the original `D` helper resolves to decimal.Decimal
def cast_to_decimal(amount: str):
"""Cast the amount to either an instance of Decimal or None.
Args:
amount: A string of amount. The format may be '¥1,000.00', '5.20', '200'
Returns:
The corresponding Decimal of amount.
"""
if amount is None:
return None
amount = "".join(amount.split(","))
numbers = re.findall(r"\d+\.?\d*", amount)
assert len(numbers) == 1
return D(numbers[0]) | c043f7449b42154e7dcfba1b2ed9b64feb9ffece | 3,635,108 |
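A few illustrative calls, assuming `D` resolves to decimal.Decimal (the original helper behind `D` is not shown in this snippet):
cast_to_decimal("¥1,000.00")  # -> Decimal('1000.00')
cast_to_decimal("200")        # -> Decimal('200')
cast_to_decimal(None)         # -> None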
import json
from flask import Blueprint
def test_csrf_exempt(csrf_app, csrf):
"""Test before CSRF protect decorator."""
# Test `exempt` as a function passing the name of the view as string
csrf.exempt('conftest.csrf_test')
with csrf_app.test_client() as client:
res = client.post(
'/csrf-protected',
data=json.dumps(dict(foo='bar')),
content_type='application/json'
)
assert res.status_code == 200
# Test `exempt` as a decorator on a view
@csrf_app.route('/another-csrf-protect', methods=['POST'])
@csrf.exempt
def another_csrf_test():
return 'another test'
with csrf_app.test_client() as client:
res = client.post(
'/another-csrf-protect',
data=json.dumps(dict(foo='bar')),
content_type='application/json'
)
assert res.status_code == 200
# Test `exempt` as a decorator on a blueprint
blueprint = Blueprint("test_csrf_bp", __name__, url_prefix="")
@blueprint.route('/csrf-protect-bp', methods=['POST'])
def csrf_bp():
return 'csrf bp test'
@blueprint.route('/csrf-protect-bp-2', methods=['POST'])
def csrf_bp_2():
return 'csrf bp test 2'
csrf_app.register_blueprint(blueprint)
csrf.exempt(blueprint)
with csrf_app.test_client() as client:
res = client.post(
'/csrf-protect-bp',
data=json.dumps(dict(foo='bar')),
content_type='application/json'
)
assert res.status_code == 200
res = client.post(
'/csrf-protect-bp-2',
data=json.dumps(dict(foo='bar')),
content_type='application/json'
)
assert res.status_code == 200 | fcc7bd34add8da223b5a89d644949d5fe930718f | 3,635,109 |
def ITERATIVETEST_Variation_Of_Input_Parameter_inUseSecondPlane():
"""Tests variation of input value at field "inUseSecondPlane"."""
Logging.infoHTML( "<b>ToDo: Add more \"TestData\" objects to maximize variations!</b>" )
testDataVariations = {
"Variation_1" : __getTestDataVariation1_inUseSecondPlane(),
}
return ( testDataVariations, executeTestOfUpdate ) | 5d53797367154cc0315c6b5ded3fab8cc22cea5d | 3,635,110 |
def wave_energy(F, df, rhow=1000, g=9.8):
"""Returns total wave energy."""
return rhow * g * np.sum(F * df) | 663299aa6732c034fe494cc7353d05f673329ec5 | 3,635,111 |
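A worked example with made-up numbers; the defaults assume SI units (water density in kg/m^3, g in m/s^2), so the result is an energy per unit surface area in J/m^2:
F = np.array([2.0, 1.0])  # spectral density for two frequency bins
df = 0.1                  # bin width in Hz
wave_energy(F, df)        # 1000 * 9.8 * (2.0 * 0.1 + 1.0 * 0.1) ≈ 2940.0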
def get_x_vector(N, K):
"""
    Return a binary vector x of length N (the order of the WH matrix) with K ones placed at random positions
:param N: Order of WH matrix
:param K: Number of ones
:return: numpy.ndarray
"""
x = np.zeros(N)
random_pos = np.random.choice(
np.arange(0, N), K, replace=False
)
x[random_pos] = 1
return x | 90f90f09d6d2516c9558938514bd965d719f65dc | 3,635,112 |
import random
def person_split(whole_data, train_names, valid_names, test_names):
"""Split data by person."""
random.seed(30)
random.shuffle(whole_data)
train_data = []
valid_data = []
test_data = []
for idx, data in enumerate(whole_data): # pylint: disable=unused-variable
if data["name"] in train_names:
train_data.append(data)
elif data["name"] in valid_names:
valid_data.append(data)
elif data["name"] in test_names:
test_data.append(data)
print("train_length:" + str(len(train_data)))
print("valid_length:" + str(len(valid_data)))
print("test_length:" + str(len(test_data)))
return train_data, valid_data, test_data | ef0475fbc515af1352401c576be27351cda81a35 | 3,635,113 |
def budget_delete(request, slug):
"""
Delete a budget object.
"""
budget = get_object_or_404(Budget.active.all(), slug=slug)
if request.POST:
if request.POST.get('confirmed'):
budget.delete()
return HttpResponseRedirect(reverse('budget:budget_budget_list'))
context = {'budget': budget}
return render(request, 'budgets/delete.html', context) | 706e578bba7d33049188f2c9f62a87f39d528c1b | 3,635,114 |
def get_connection_name(db_connection_id):
"""
To give data base connection name if data base exist.
Args:
db_connection_id(int):data base connection id.
Returns:
Returns data base name if exist or return message saying that db not
exist.
"""
if db_connection_id == APIMessages.DB_NOT_EXIST:
return APIMessages.DB_NOT_EXIST
else:
db_obj = DbConnection.query.filter(
DbConnection.db_connection_id == db_connection_id,
DbConnection.is_deleted == False).first()
if db_obj:
return db_obj.db_connection_name
else:
return APIMessages.DB_NOT_EXIST | 8617d83012548daa7f8691899c1b8ec208dce101 | 3,635,115 |
import torch
def cal_area(group_xyz):
"""
Calculate Area of Triangle
:param group_xyz: [B, N, K, 3] / [B, N, G, K, 3]; K = 3
:return: [B, N, 1] / [B, N, G, 1]
"""
pad_shape = group_xyz[..., 0, None].shape
det_xy = torch.det(torch.cat([group_xyz[..., 0, None], group_xyz[..., 1, None], torch.ones(pad_shape)], dim=-1))
det_yz = torch.det(torch.cat([group_xyz[..., 1, None], group_xyz[..., 2, None], torch.ones(pad_shape)], dim=-1))
det_zx = torch.det(torch.cat([group_xyz[..., 2, None], group_xyz[..., 0, None], torch.ones(pad_shape)], dim=-1))
area = torch.sqrt(det_xy ** 2 + det_yz ** 2 + det_zx ** 2).unsqueeze(-1)
return area | bbafa626c1833b5bde81303b4038081dae7bc965 | 3,635,116 |
from copy import deepcopy
def detect_edges_better(img: Image, threshold: int) -> Image:
    """
    Returns a copy of an image with each pixel changed to either black
    or white, based on the contrast between the pixel and the pixel below
    or to the right of it, compared against the given threshold.
    Author: Anita Ntomchukwu
    >>> detect_edges_better(img, 5)
    """
    new_img = deepcopy(img)  # deep copy so the original image is left untouched
height = get_height(img)
width = get_width(img)
black = create_color(0, 0, 0)
white = create_color(255, 255, 255)
set_color(new_img, width - 1, height - 1, white)
for x in range(0, width):
set_color(new_img, x, height - 1, white)
for y in range(0, height - 1):
for x in range(0, width):
color1 = get_color(img, x, y)
color2 = get_color(img, x, y + 1)
brightness1 = (color1[0] + color1[1] + color1[2]) // 3
brightness2 = (color2[0] + color2[1] + color2[2]) // 3
difference1 = abs(brightness1 - brightness2)
if difference1 > threshold:
set_color(new_img, x, y, black)
else:
set_color(new_img, x, y, white)
for y in range(0, height):
for x in range(0, width - 1):
color3 = get_color(img, x, y)
color4 = get_color(img, x + 1, y)
brightness3 = (color3[0] + color3[1] + color3[2]) // 3
brightness4 = (color4[0] + color4[1] + color4[2]) // 3
difference2 = abs(brightness3 - brightness4)
if difference2 > threshold:
set_color(new_img, x, y, black)
else:
set_color(new_img, x, y, white)
return new_img | 97ed5a1404599586ac427a6e54a6d1f9f91ff53b | 3,635,117 |
import functools
from time import monotonic
def lru_cache(timeout=10, maxsize=128, typed=False):
    """Least Recently Used Cache- cache the result of a function.
    Args:
        timeout
            How many seconds to cache results for.
        maxsize
            The maximum number of results to keep in the cache.
        typed
            When `True` argument types will be taken into consideration, for example `3` and `3.0` will be treated as different keys.
    """
def wrapper_cache(func):
func = functools.lru_cache(maxsize=maxsize, typed=typed)(func)
func.expiration = monotonic() + timeout
@functools.wraps(func)
def wrapped_func(*args, **kwargs):
if monotonic() >= func.expiration:
func.expiration = monotonic() + timeout
func.cache_clear()
return func(*args, **kwargs)
wrapped_func.cache_info = func.cache_info
wrapped_func.cache_clear = func.cache_clear
return wrapped_func
return wrapper_cache | 82fb0732583707064d773e6264d612ee4cd61b76 | 3,635,118 |
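A hypothetical usage sketch of the expiring cache: results are reused until `timeout` seconds have elapsed, after which the whole cache is cleared and repopulated:
@lru_cache(timeout=5, maxsize=32)
def slow_square(x):
    return x * x
slow_square(3)  # computed and cached
slow_square(3)  # served from the cache
# once 5 seconds have passed, the next call clears the cache and recomputes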
def _mutator_plugins_bucket_name():
"""Mutator plugins bucket name."""
return environment.get_value('MUTATOR_PLUGINS_BUCKET') | 1d5aabc949947a8b5ca5c89a9c709f8575ecd063 | 3,635,119 |
def densenet_imagenet_169(inputs, is_training=True, num_classes=1001):
"""DenseNet 121."""
depths = [6, 12, 32, 32]
growth_rate = 32
return densenet_imagenet_model(inputs, growth_rate, depths, num_classes,
is_training) | 1fdb578b09d6ad54301cce67beccaf22e1e221e7 | 3,635,120 |
import logging
import os
def delete_todo(request, todo_id):
"""Delete one to_do task"""
logging.info(f'{os.getenv("ID_LOG", "")} Deleting the element with id={todo_id}')
TodoItem.objects.get(id=todo_id).delete()
logging.info(f'{os.getenv("ID_LOG", "")} Todo task with id={todo_id} successfully deleted')
# Redirect to the app_todo page
return HttpResponseRedirect('/todo/') | d772610e1d30120b21ba1ba7b724d24eb2736391 | 3,635,121 |
def get_jquery_min_js():
"""
Return the location of jquery.min.js. It's an entry point to adapt the path
when it changes in Django.
"""
return 'admin/js/vendor/jquery/jquery.min.js' | 86315a0992dc181435f6899b24eb93abc0a47941 | 3,635,122 |
def getLines_from_file(path, clean=False):
""" returns the table of lines from text file """
text = getText_from_file(path)
if not text:
return None
text = text.split("\n")
if clean:
text = [t.strip(' \t') for t in text]
return text | 26bc682c58c09cc875a071b735304bdba320e4db | 3,635,123 |
def channel_shift(img, random_state):
"""
Adds random brightness to image.
Parameters
------
img: np.array
Image array [CWH].
random_state: np.random
Randomized state.
Returns
------
img: np.array
Image array [CWH].
"""
shift_val = int(random_state.uniform(10, 20))
img = np.int16(img)
img = img + shift_val
img = np.clip(img, 0, 255)
img = np.uint8(img)
return img | d490d3cdd49ba0e918e5cbef76ed14b13cb9f4a4 | 3,635,124 |
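A usage sketch with a seeded random state so the shift is reproducible (numpy assumed imported as np):
rs = np.random.RandomState(0)
img = np.zeros((3, 4, 4), dtype=np.uint8)
out = channel_shift(img, rs)
# every pixel is raised by the same integer drawn from uniform(10, 20), then clipped back to uint8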
def quicksort(inputArray):
"""input: array
output: new sorted array
features: stable
    efficiency O(n^2) (worst case), O(n log(n)) (avg and best case)
space complexity: O(n)
method:
Pick the last element in the array as the pivot.
Separate values into arrays based on whether they are
greater than, less than, or equal to the pivot.
Recursively sort the greater than and less than arrays.
    Return a new array merging the sorted arrays and the pivot.
"""
if len(inputArray) <= 1:
return inputArray
pivot = inputArray[-1]
lesser = []
greater = []
equal = []
for value in inputArray[:-1]:
if value > pivot:
greater.append(value)
elif value < pivot:
lesser.append(value)
elif value == pivot:
equal.append(value)
lesser = quicksort(lesser)
greater = quicksort(greater)
return lesser + equal + [pivot] + greater | 2a8036ba038f4f7a8e817175d9a810184911ce4b | 3,635,125 |
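A quick example confirming the behaviour stated in the docstring: a new sorted list is returned and the input is left untouched:
data = [5, 3, 8, 3, 1]
quicksort(data)  # -> [1, 3, 3, 5, 8]
data             # -> [5, 3, 8, 3, 1] (unchanged)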
def get_paypal_currency_code(iso_currency_code):
"""
Function will map the currency code to paypal currency code
"""
if iso_currency_code == 124:
return 'CAD'
if iso_currency_code == 840:
return 'USD'
if iso_currency_code == 484:
return 'MXN'
return 'CAD' | af9579a6d12e44dd3263956eb41ece9eadeacaee | 3,635,126 |
def get_num_audio_tracks(mpeg4_file, in_fh):
""" Returns the number of audio track in the input mpeg4 file. """
num_audio_tracks = 0
for element in mpeg4_file.moov_box.contents:
if (element.name == mpeg.constants.TAG_TRAK):
for sub_element in element.contents:
if (sub_element.name != mpeg.constants.TAG_MDIA):
continue
for mdia_sub_element in sub_element.contents:
if (mdia_sub_element.name != mpeg.constants.TAG_HDLR):
continue
position = mdia_sub_element.content_start() + 8
in_fh.seek(position)
if (in_fh.read(4) == mpeg.constants.TAG_SOUN):
num_audio_tracks += 1
return num_audio_tracks | fcd650290bc041d9db61912ec654d88dbcdd6955 | 3,635,127 |
import logging
def pandas_pivot(filename):
"""Used to import a csv file to a pandas data frame,
so the data can be pivoted and aggregated by date"""
    if filename is None:
        raise FileNotFoundError("File not found.")
else:
logging.info("Reading .csv and writing to .xlsx file...")
cols_to_use = ["impressions", "clicks", "costs USD", "costs EUR", "conversions"]
pandas_csv = pd.read_csv(filename)
pandas_pivot = pd.pivot_table(
pandas_csv,index="date",aggfunc=np.sum
).reindex(cols_to_use, axis=1)
# Data frame is stored in a dict which is unordered,
# reindex is needed to display the columns in the desired order
filename = (f"{filename}_pivot.xlsx") #To distinguish the name from the .csv file
writer = pd.ExcelWriter(filename)
pandas_pivot.to_excel(writer,"Sheet1")
writer.save()
logging.info("Success!")
return filename | c9d2d4b738ee57b7e7b3689c884c004e21eaebd5 | 3,635,128 |
from base64 import b64encode
from uuid import uuid4
def add_user_session(username):
    """Generates a token for a user and adds that token and username to the sessions."""
    token = b64encode(uuid4().bytes).decode()
con, cur = create_con()
cur.execute('INSERT INTO sessions(username, token) VALUES (?, ?);', (username, token))
con.commit()
cur.close()
con.close()
return token | bd4c57a06a1a2da500e43266bf3b96a0833f6070 | 3,635,129 |
from functools import wraps
def array_input(f):
    """ Decorator to provide the __call__ methods with an array. """
@wraps(f)
def wrapped(self, t):
t = np.atleast_1d(t)
r = f(self, t)
return r
return wrapped | 58cb8c3fb1ef5b50c6f983646efea2410a0e84a7 | 3,635,130 |
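A minimal sketch of how the decorator might be used; `Doubler` is a hypothetical class and numpy is assumed to be imported as np:
class Doubler:
    @array_input
    def __call__(self, t):
        return 2 * t
Doubler()(3)          # -> array([6]); scalars are promoted to 1-d arrays
Doubler()([1, 2, 3])  # -> array([2, 4, 6])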
import socket
def get_own_ip():
"""
returns own ip
original from:
https://stackoverflow.com/a/25850698/3990615
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 1)) # connect() for UDP doesn't send packets
local_ip_address = s.getsockname()[0]
return local_ip_address | 53195ee3880a9025ba525c120f2e4ccc0e676a93 | 3,635,131 |
def get_sle_after_datetime(self):
"""get Stock Ledger Entries after a particular datetime, for reposting"""
return get_stock_ledger_entries(self.previous_sle or frappe._dict({
"item_code": self.args.get("item_code"), "warehouse": self.args.get("warehouse")}),
">", "asc", for_update=True, check_serial_no=False) | 29c813507eab3c1f76df0acab49b374681f95f01 | 3,635,132 |
import time
def chaperone(method):
"""
Wraps all write, read and query methods of the adapters; monitors and handles communication issues
:param method: (callable) method to be wrapped
:return: (callable) wrapped method
"""
def wrapped_method(self, *args, validator=None, **kwargs):
if not self.connected:
raise ConnectionError(f'Adapter is not connected for instrument at address {self.instrument.address}')
while self.busy: # wait for turn to talk to the instrument
time.sleep(0.05)
self.busy = True # block other methods from talking to the instrument
# Catch communication errors and either try to repeat communication or reset the connection
attempts = 0
reconnects = 0
while reconnects <= self.max_reconnects:
while attempts < self.max_attempts:
try:
response = method(self, *args, **kwargs)
if validator:
valid_response = validator(response)
else:
                        valid_response = (response != '') and (response is not None) and (response == response)  # NaN != NaN, so the last check rejects NaN
if not valid_response:
raise ValueError(f'invalid response, {response}, from {method.__name__} method')
elif attempts > 0 or reconnects > 0:
print('Resolved')
self.busy = False
return response
except BaseException as err:
print(f'Encountered {err} while trying to talk to {self.instrument.name}')
print('Trying again...')
attempts += 1
# repeats have maxed out, so try reconnecting with the instrument
print('Reconnecting...')
self.disconnect()
time.sleep(self.delay)
self.connect()
attempts = 0
reconnects += 1
# Getting here means that both repeats and reconnects have been maxed out
raise ConnectionError(f'Unable to communicate with {self.instrument.name}!')
wrapped_method.__doc__ = method.__doc__ # keep method doc string
return wrapped_method | ec224565208428c9daacdb2e3d15ae0dbb4ee9b1 | 3,635,133 |
def mc_wheeny_purification(p,s):
""" The McWheeny Prurification for an idempotent matrix p in a basis with
overlaps S
"""
return (3 * np.dot(np.dot(p, s), p) - np.dot(np.dot(np.dot(np.dot(p, s), p), s), p)) / 2 | 10e95ca413340262b2209a28da4e29032f0ae722 | 3,635,134 |
def shutdown():
"""
Shuts down the Pi
"""
auth = auth_active()
if auth["status"] and "username" not in session:
flash(auth["msg"], "error")
return redirect(url_for("index"))
shutdown_pi()
return redirect(url_for("index")) | d8f4ecf7a7ac23e012ab02d35b418bc203b387a4 | 3,635,135 |
import os
def _set_up_model_thermo_rxns(base_df: pd.DataFrame, rxns_order: list, rxn_list: list, use_equilibrator:bool,
file_bigg_kegg_ids: str = None, pH: float = 7.0, ionic_strength: float = 0.1) \
-> pd.DataFrame:
"""
Fills in the thermoRxns sheet on the excel GRASP input file.
If use_equilibrator is set to True, it first gets all standard Gibbs energies from eQuilibrator, then it copies any
values that may be defined in base_df.
Args:
base_df: dictionary with base excel input file.
rxns_order: list with reaction IDs.
rxn_list: list with reaction strings.
use_equilibrator: flag determining whether or not to get the standard Gibbs energies from eQuilibrator.
pH : pH value to use to get the standard Gibbs energies from eQuilibrator.
ionic_strength: ionic strength value to use to get the standard Gibbs energies from eQuilibrator.
file_bigg_kegg_ids: path to the file containing the metabolites mapping from BiGG to KEGG ids,
Returns:
thermoRxns dataframe for the output excel file.
"""
columns = ['∆Gr\'_min (kJ/mol)', '∆Gr\'_max (kJ/mol)']
thermo_rxns_df = pd.DataFrame(index=rxns_order, columns=columns, data=np.zeros([len(rxns_order), len(columns)]))
thermo_rxns_df.index.name = 'reaction ID'
if use_equilibrator:
if file_bigg_kegg_ids and not os.path.isfile(file_bigg_kegg_ids):
raise FileNotFoundError(f'Didn\'t find {file_bigg_kegg_ids}. Please provide a valid ' +
'path to the file with metabolite mappings from BiGG to KEGG ids.')
elif not file_bigg_kegg_ids:
this_dir, this_filename = os.path.split(__file__)
file_bigg_kegg_ids = os.path.join(this_dir, '..', '..', 'data', 'map_bigg_to_kegg_ids.csv')
if not os.path.isfile(file_bigg_kegg_ids):
raise FileNotFoundError(f'Didn\'t find map_bigg_to_kegg_ids.csv in the data folder. Please provide ' +
'the path to the file with metabolite mappings from BiGG to KEGG ids.')
rxn_dG_dict = get_dGs(rxn_list, file_bigg_kegg_ids, pH=pH, ionic_strength=ionic_strength, digits=2)
rxn_dG_df = pd.DataFrame().from_dict(rxn_dG_dict, orient='index')
rxn_dG_df.columns = ['average', 'stdev']
rxn_dG_df['min'] = rxn_dG_df['average'] - 2 * rxn_dG_df['stdev']
rxn_dG_df['max'] = rxn_dG_df['average'] + 2 * rxn_dG_df['stdev']
thermo_rxns_df.loc[rxn_dG_df.index.values, '∆Gr\'_min (kJ/mol)'] = rxn_dG_df.loc[rxn_dG_df.index.values, 'min']
thermo_rxns_df.loc[rxn_dG_df.index.values, '∆Gr\'_max (kJ/mol)'] = rxn_dG_df.loc[rxn_dG_df.index.values, 'max']
if 'thermoRxns' in base_df.keys():
index_intersection = set(base_df['thermoRxns'].index.values).intersection(thermo_rxns_df.index.values)
thermo_rxns_df.loc[index_intersection, :] = base_df['thermoRxns'].loc[index_intersection, :]
return thermo_rxns_df | 587376614ea7dc5f0a2a033814e8ea53700878ca | 3,635,136 |
def load_ref_system():
""" Returns cyclopentane as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C -0.8201 -1.0104 -0.1068
C -1.2133 0.4696 0.0650
C 0.0767 1.2934 -0.0981
H -1.9775 0.7710 -0.6688
H -1.6556 0.6382 1.0619
C 1.2384 0.3351 0.2162
H 0.1607 1.6760 -1.1302
H 0.0840 2.1729 0.5646
C 0.7195 -1.0852 -0.0760
H 2.1318 0.5711 -0.3826
H 1.5369 0.4262 1.2751
H 1.1066 -1.4485 -1.0430
H 1.0676 -1.7990 0.6877
H -1.2614 -1.6277 0.6925
H -1.2089 -1.4114 -1.0576
""") | 3691043f20a9313d2db67881d75d9558ba68370f | 3,635,137 |
def load_audio_channel(delay, attenuation, pytorch=True):
"""
Return an art LFilter object for a simple delay (multipath) channel
If attenuation == 0 or delay == 0, return an identity channel
Otherwise, return a channel with length equal to delay + 1
NOTE: lfilter truncates the end of the echo, so output length equals input length
"""
delay = int(delay)
attenuation = float(attenuation)
if delay < 0:
raise ValueError(f"delay {delay} must be a nonnegative number (of samples)")
if delay == 0 or attenuation == 0:
logger.warning("Using an identity channel")
numerator_coef = np.array([1.0])
denominator_coef = np.array([1.0])
else:
if not (-1 <= attenuation <= 1):
logger.warning(f"filter attenuation {attenuation} not in [-1, 1]")
# Simple FIR filter with a single multipath delay
numerator_coef = np.zeros(delay + 1)
numerator_coef[0] = 1.0
numerator_coef[delay] = attenuation
denominator_coef = np.zeros_like(numerator_coef)
denominator_coef[0] = 1.0
if pytorch:
try:
return LFilterPyTorch(
numerator_coef=numerator_coef, denominator_coef=denominator_coef
)
except ImportError:
logger.exception("PyTorch not available. Resorting to scipy filter")
logger.warning("Scipy LFilter does not currently implement proper gradients")
return LFilter(numerator_coef=numerator_coef, denominator_coef=denominator_coef) | 684490c3fe7416f6059263eb64934b7473efe364 | 3,635,138 |
def slices(img, shape=[3, 4]):
"""
    Create a tiled image from multiple slices of a 3D volume.
    :param img: 3D array; slices are taken along the first axis
    :param shape: [rows, cols] grid of the output tiling
    :return: 2D array containing the tiled slices
"""
sh = np.asarray(shape)
i_max = np.prod(sh)
allimg = np.zeros(img.shape[-2:] * sh)
for i in range(0, i_max):
# i = 0
islice = round((img.shape[0] / float(i_max)) * i)
# print(islice)
imgi = img[islice, :, :]
coords = index_to_coords(i, sh)
aic = np.asarray(img.shape[-2:]) * coords
allimg[
aic[0] : aic[0] + imgi.shape[-2], aic[1] : aic[1] + imgi.shape[-1]
] = imgi
# plt.imshow(imgi)
# print(imgi.shape)
# print(img.shape)
return allimg | d2223a2f7a6b1a704288b682b878c189d6538262 | 3,635,139 |
def getRawInput(display):
"""
Wrapper around raw_input; put into separate function so that it
can be easily mocked for tests.
"""
return raw_input(display) | 9eaf45446caa8794b79b908ef8e9eec50cbb646a | 3,635,140 |
import os
def path_completer(text, state):
"""
Completer method for system paths.
"""
return [x if os.path.isfile(x) else (x + os.sep)
for x in sorted_glob(text + '*')][state] | 873a7ff489c9004fc9ae03b129b4c60aceabf34d | 3,635,141 |
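A sketch of wiring the completer into readline; `sorted_glob` is assumed to be a glob helper defined elsewhere in the same module:
import readline
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind('tab: complete')
readline.set_completer(path_completer)
# pressing <Tab> at an input() prompt now completes filesystem paths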
import PIL
def prepare_input_image(img_fpath):
"""Read and prepare input image as AlexNet input."""
# Read input image as 3-channel 8-bit values
pil_img = PIL.Image.open(img_fpath)
# Resize to AlexNet input size
res_img = pil_img.resize((IMG_SIZE, IMG_SIZE), PIL.Image.LANCZOS)
# Convert to NumPy array and float values
img = np.array(res_img, dtype=np.float32)
# Change HWC to CHW
img = img.swapaxes(1, 2).swapaxes(0, 1)
# Change RGB to BGR
img = img[(2, 1, 0), :, :]
# Mean subtraction
img = img - MEAN
# Change CHW to NCHW by adding batch dimension at front
img = img[np.newaxis, :, :, :]
return img | 1e177ddd17a3858a6f6c063728037c0b641e693f | 3,635,142 |
def sparse_spectral_matrix(c, ell, em, ess=-2):
"""
Combine functions to create the sparse matrix to be solved.
Inputs:
c (float): a * omega
ell (int): swsh mode number
em (int): mode number
ess (int) [-2]: spin number
Returns:
band_matrix (sparse<float>): sparse matrix to be solved
"""
lmin = max(abs(ess), abs(em))
nmax = 50 + np.ceil(abs((3*c)/2. - c*c/250.))
if nmax % 2 == 0:
nmax += 1
nmax = int(nmax)
nmin = min(ell - lmin, nmax)
i = np.arange(0, nmax + nmin + 1) # iterator
spec_ii = -kHat(c, ell - nmin - 1 + i + 1, em, ess) # good
i = np.delete(i, 0)
spec_ijm1 = -kTilde2(c, ell - nmin + i - 2 + 1, em, ess) # good
spec_ijp1 = -kTilde2(c, ell - nmin + i - 2 + 1, em, ess) # good
i = np.delete(i, 0)
spec_ijp2 = -k2(c, ell - nmin + i - 3 + 1, em, ess) # good
spec_ijm2 = -k2(c, ell - nmin - 3 + i + 1, em, ess) # good
diagonals = [spec_ijm2, spec_ijm1, spec_ii, spec_ijp1, spec_ijp2]
# print(diagonals)
band_matrix = sparse.diags(diagonals, [-2, -1, 0, 1, 2])
return band_matrix | cb2708c2a95a11e0f7d0129d6343886b202fb9ed | 3,635,143 |
import re
def preprocess_text(text, lower=True):
    """ Preprocess text.
    """
    text = text.replace("ä", "ae").replace("ö", "oe").replace("ü", "ue").replace("ß", "ss")
    # Remove punctuation and numbers
    text = re.sub("[^a-zA-Z]+", " ", text)
    # Single character removal
    text = re.sub(r"\b[a-zA-Z]\b", "", text)
    # Removing multiple spaces
    text = re.sub(r"\s+", " ", text)
    if lower:
text = text.lower()
return text | fb0c982b8ce3dce2d78918dd8a6ce469a33c93eb | 3,635,144 |
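An example call covering the cleanup steps above (umlaut expansion, punctuation removal, whitespace collapsing, lowercasing):
preprocess_text("Müller und Söhne")  # -> 'mueller und soehne'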
def decode_aes256_base64_auto(data, encryption_key):
"""Guesses AES cipher (EBC or CBD) from the length of the base64 encoded data."""
assert isinstance(data, bytes)
length = len(data)
if length == 0:
return b''
if data[0] == b'!'[0]:
return decode_aes256_cbc_base64(data, encryption_key)
return decode_aes256_ecb_base64(data, encryption_key) | 71292d967cce08fc344ac787dc2bbcc7fbbd72a2 | 3,635,145 |
def get_TS(
norm_sh,
mass,
# Msh,
# csh,
bsh,
vsh,
spec_sh_interp,
# bsh_range,
N_samples,
n_nu,
nthetas,
ext_bool,
ext_unc,
# N_track_bins,
spec_halo_interp,
spec_1a,
spec_neutrons,
spec_nu,
indep_index,
wimp_masses,
Gaussian_likelihood=False,
Relative_background_systematics=0.0,
use_minimizer_minuit=True,
include_bkg_nu_solar=True,
include_bkg_nu_GSNB=True,
include_bkg_nu_DSNB=True,
include_bkg_nu_atm=True,
include_bkg_rad_1a=True,
include_bkg_rad_neutrons=True,
mass_active=True,
):
"""
Function to compute test statistic
- Input parameters same as get_spec_asimov
- Works by first trying a few guesses for the WIMP mass (and
corresponding guesses for norm_halo), and then using the
best one as the initial value for the optimization
- Bounds all optimization parameters to be positive, except the
WIMP mass, which must lie within the bounds of the interpolator
- Note that technically, the test statistic is the ratio of the
maximum likelihood under H1 to the maximum likelihood under H0.
Since we know the maximum likelihood under H1 exactly, here we will
compute the ratio of this maximum likelihood to the likelihood
under H0 for fixed thetas, and minimize this ratio over all thetas.
"""
# compute asimov data set
spec_asimov_func = lambda norm_sh, WIMP_m, bsh_temp: get_spec_asimov(
norm_sh,
WIMP_m,
bsh_temp,
spec_sh_interp,
spec_halo_interp,
spec_1a,
spec_neutrons,
spec_nu,
indep_index,
Neutron_background=include_bkg_rad_neutrons,
SingleAlpha_background=include_bkg_rad_1a,
)
spec_asimov = spec_asimov_func(norm_sh, mass, bsh)
spec_H0_partial = lambda thetas_temp: get_spec_H0(
thetas_temp,
n_nu,
N_samples,
spec_halo_interp,
spec_nu,
spec_neutrons,
spec_1a,
Optimize_mass=mass_active,
SingleAlpha_background=include_bkg_rad_1a,
Neutron_background=include_bkg_rad_neutrons,
)
# ----------------------
    # make the function for the likelihood ratio
# ----------------------
if not Gaussian_likelihood:
fun_logLR = lambda l10_thetas: 2.0 * (
get_Poisson_logLR(l10_thetas, spec_asimov, spec_H0_partial)
- get_Gaussian_constraints_logL_H0(l10_thetas, ext_bool, ext_unc)
)
else:
spec_asimov_bkg = spec_asimov_func(0.0, mass, vsh)
fun_logLR = lambda l10_thetas: 2.0 * (
get_Gaussian_logLR(
l10_thetas,
spec_asimov,
spec_asimov_bkg,
spec_H0_partial,
rel_sys=Relative_background_systematics,
)
- get_Gaussian_constraints_logL_H0(l10_thetas, ext_bool, ext_unc)
)
# ----------------------
# get initial values of parameters for optimizer
# ----------------------
logLRs = []
l10_guess_thetas = []
# array of WIMP mass guesses to try
l10_guess_masses = np.linspace(
np.log10(wimp_masses[0]), np.log10(wimp_masses[-1]), 100
)
for l10_guess_mass in l10_guess_masses:
# corresponding guess for norm_halo
l10_guess_norm_halo = np.log10(
get_init_norm_halo(
10 ** l10_guess_mass,
n_nu,
nthetas,
N_samples,
spec_halo_interp,
spec_nu,
spec_neutrons,
spec_1a,
spec_asimov,
Optimize_mass=mass_active,
SingleAlpha_background=include_bkg_rad_1a,
Neutron_background=include_bkg_rad_neutrons,
)
)
# guess for all parameters (use 1s for all parameters other than
# last two)
l10_guess_thetas.append(
np.concatenate((np.zeros(nthetas), [l10_guess_norm_halo], [l10_guess_mass]))
)
logLRs.append(fun_logLR(l10_guess_thetas[-1]))
# choose best mass guess
l10_init_thetas = l10_guess_thetas[np.argmin(logLRs)]
# ----------------------
# set up list of parameter bounds
# ----------------------
l10_bounds = []
for val in l10_init_thetas[:-1]:
l10_bounds.append((val - 10.0, val + 20.0))
# WIMP mass bounds
l10_bounds.append((np.log10(wimp_masses[0]), np.log10(wimp_masses[-1])))
l10_bounds = np.array(l10_bounds)
# ----------------------
# run the optimizer
# ----------------------
if use_minimizer_minuit:
"""
Optimize using iminuit. Strategy:
1. Fix the parameters controlling each component of the
null-hypothesis model except for the sample masses and ages
(i.e., the normalizations of the neutrino parameters, the
uranium concentration, and the two parameters controlling
the MW-halo contribution)
2. Order these parameters by the effect a small change (EPS)
in the parameter has in the likelihood (DELTA_LOGLR) around
the inital guess (INIT_THETAS)
3. Release the parameter causing the largest change. (or both
the halo mass and normalization if either of these causes
the largest change) and run the optimizer (MIGRAD)
4. Release the next component, run the optimizer (it automatically
starts from the endpoint of the previous optimization)
5. Repeat until all parameters are free
"""
# initialize optimizer
optimizer = iminuit.Minuit(fun_logLR, l10_init_thetas)
optimizer.limits = l10_bounds
optimizer.errordef = optimizer.LIKELIHOOD
# get indices of the positions of the background normalization
# parameters in theta
theta_inds = np.append(np.arange(nthetas), np.array([-666])) # np.concatenate((
# np.linspace(0, n_nu-1, n_nu),
# np.linspace(-run_params.N_samples-2, -2-1, run_params.N_samples*(run_params.Neutron_background or run_params.SingleAlpha_background)),
# np.array([-666])
# ))
# get change of test statistic with each parameter
eps = 1e-3
logLR_ref = np.min(logLRs)
Delta_logLR = np.zeros(theta_inds.shape)
for i, ind in enumerate(theta_inds):
if ind == -666:
l10_thetas1 = np.copy(l10_init_thetas)
l10_thetas1[-2] += eps
l10_thetas2 = np.copy(l10_init_thetas)
l10_thetas2[-1] += eps
Delta_logLR[i] = np.max(
[
np.abs(fun_logLR(l10_thetas1) - fun_logLR(l10_init_thetas)),
np.abs(fun_logLR(l10_thetas2) - fun_logLR(l10_init_thetas)),
]
)
else:
l10_thetas = np.copy(l10_init_thetas)
l10_thetas[int(ind)] += eps
Delta_logLR[i] = np.abs(
fun_logLR(l10_thetas) - fun_logLR(l10_init_thetas)
)
# lock parameters
for ind in theta_inds:
if ind == -666:
optimizer.fixed[-2] = True
optimizer.fixed[-1] = True
else:
optimizer.fixed[int(ind)] = True
# release parameters one by one and optimize
for i in np.argsort(-Delta_logLR):
if i == len(Delta_logLR) - 1:
optimizer.fixed[-2] = False
optimizer.fixed[-1] = False
optimizer.migrad()
else:
optimizer.fixed[int(theta_inds[i])] = False
optimizer.migrad()
TS = optimizer.fval
else:
# default scipy optimizer
optimizer_output = optimize.minimize(
fun_logLR,
l10_init_thetas,
bounds=l10_bounds,
)
TS = optimizer_output.fun
return TS | 0f5d90b92412d75550c5bc6936dbdfc27f7e3951 | 3,635,146 |
import collections
def rotate(start):
"""Rotate the orientation clockwise one increment from the starting
orientation.
Args:
start: The starting orientation.
Returns:
The orientation one increment clockwise from the start.
"""
orientations = collections.deque([NE, NW, W, SW, SE, E])
if start not in orientations:
raise ValueError('Invalid orientation {}'.format(start))
while True:
orientations.rotate()
if orientations[1] == start:
break
return orientations[0] | 70a7bdc6fc28355d9bdc0462bd089d52b08d9453 | 3,635,147 |
def make_wcs(shape, galactic=False):
"""
Create a simple celestial `~astropy.wcs.WCS` object in either the
ICRS or Galactic coordinate frame.
Parameters
----------
shape : 2-tuple of int
The shape of the 2D array to be used with the output
`~astropy.wcs.WCS` object.
galactic : bool, optional
If `True`, then the output WCS will be in the Galactic
coordinate frame. If `False` (default), then the output WCS
will be in the ICRS coordinate frame.
Returns
-------
wcs : `astropy.wcs.WCS` object
The world coordinate system (WCS) transformation.
See Also
--------
make_gwcs, make_imagehdu
Notes
-----
The `make_gwcs` function returns an equivalent WCS transformation to
this one, but as a `gwcs.wcs.WCS` object.
Examples
--------
>>> from photutils.datasets import make_wcs
>>> shape = (100, 100)
>>> wcs = make_wcs(shape)
>>> print(wcs.wcs.crpix) # doctest: +FLOAT_CMP
[50. 50.]
>>> print(wcs.wcs.crval) # doctest: +FLOAT_CMP
[197.8925 -1.36555556]
"""
wcs = WCS(naxis=2)
rho = np.pi / 3.
scale = 0.1 / 3600. # 0.1 arcsec/pixel in deg/pix
wcs.pixel_shape = shape
wcs.wcs.crpix = [shape[1] / 2, shape[0] / 2] # 1-indexed (x, y)
wcs.wcs.crval = [197.8925, -1.36555556]
wcs.wcs.cunit = ['deg', 'deg']
wcs.wcs.cd = [[-scale * np.cos(rho), scale * np.sin(rho)],
[scale * np.sin(rho), scale * np.cos(rho)]]
if not galactic:
wcs.wcs.radesys = 'ICRS'
wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']
else:
wcs.wcs.ctype = ['GLON-CAR', 'GLAT-CAR']
return wcs | fec4b247875f6bbecc61f8db9973da3f88fc6ff3 | 3,635,148 |
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
Note that -1 in `expected_shape` is recognized as unknown dimension.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
New tensor.
"""
with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _shape_tensor_compatible(expected_shape, actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope) | e3ae49991e3f224ef58b3f41cc5d520fd04449cf | 3,635,149 |
def separate_last_day(df_):
"""
    Takes a dataset which has the target and features built
    and splits it into the last period and the earlier training data.
"""
# take the last period
last_period = df_.iloc[-1]
# the last period is now a series, so it's name will be the timestamp
training_data = df_.loc[df_.index < last_period.name]
return last_period, training_data | 0e7e7ea31a55c6f648e218b44845290689e344ab | 3,635,150 |
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import LeaveOneOut
def KDEbounded(x_d,x,bandwidth=np.nan,lowbnd=np.nan,uppbnd=np.nan,kernel = 'gaussian'):
"""Estimate the probability by Kernel Density Estimation
If bandwidth is np.nan, calculate the optimal kernel width, aka bandwidth.
Be careful, this can take a while.
Mirrors the data at either or both of the edges if the domain is bounded.
Args:
| x_d (np.array[N,]): domain over which values must be returned
        | x (np.array[N,]): input sample data set
| bandwidth (float): the bandwidth width to be used, np.nan if to be calculated
| lowbnd (float): lower mirror fold boundary, np.nan means no lower bound and mirror
| uppbnd (float): upper mirror fold boundary, np.nan means no upper bound and mirror
| kernel (str): kernel to be used ['gaussian'|'tophat'|'epanechnikov'|'exponential'|'linear'|'cosine']
Returns:
| x_d (np.array[N,]): input vector used as domain for the calculations
| x (np.array[N,]): probability density over x_d, the range of the PDF
| bandwidth (float): bandwidth used in the KDE
| kernel (str): kernel used
Raises:
| No exception is raised.
See here for more detail and examples:
https://github.com/NelisW/PythonNotesToSelf/tree/master/KernelDensityEstimation
Other references:
https://jakevdp.github.io/PythonDataScienceHandbook/05.13-kernel-density-estimation.html#Motivating-KDE:-Histograms
https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
https://scikit-learn.org/stable/auto_examples/neighbors/plot_kde_1d.html
https://stats.stackexchange.com/questions/405357/how-to-choose-the-bandwidth-of-a-kde-in-python
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KernelDensity.html
https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
https://stats.stackexchange.com/questions/405357/how-to-choose-the-bandwidth-of-a-kde-in-python
https://scikit-learn.org/stable/modules/cross_validation.html#cross-validation
https://towardsdatascience.com/how-to-find-probability-from-probability-density-plots-7c392b218bab
https://github.com/admond1994/calculate-probability-from-probability-density-plots/blob/master/cal_probability.ipynb
"""
# find optimal bandwidth if not supplied
if np.isnan(bandwidth):
bandwidths = 10 ** np.linspace(-1, 1, 100)
kd = KernelDensity(kernel=kernel)
grid = GridSearchCV(kd,param_grid={'bandwidth': bandwidths},
cv=LeaveOneOut())
grid.fit(x[:, None]); # create additional axes of length one
bandwidth = grid.best_params_['bandwidth']
X = []
Xo = []
# base data, and if required lower flipped, upper flipped
X.append(x)
if not np.isnan(lowbnd):
X.append(-x + 2 * lowbnd)
if not np.isnan(uppbnd):
X.append(-x + 2 * uppbnd)
# do for base, and if present lower and upper flipped
for i,x in enumerate(X):
# instantiate and fit the KDE model
kde = KernelDensity(bandwidth=bandwidth, kernel=kernel)
kde.fit(x[:, None]) # create additional axes of length one
# score_samples returns the log of the probability density
prob = np.exp(kde.score_samples(x_d[:, None])) # create additional axes of length one
Xo.append(prob)
# add base and flipped together
x = np.zeros_like(x_d)
for xi in Xo:
x += xi
# only cut out the base domain
if not np.isnan(lowbnd):
x = np.where(x_d<=lowbnd,0,x)
if not np.isnan(uppbnd):
x = np.where(x_d>=uppbnd,0,x)
return x_d,x,bandwidth, kernel | 5970a59ee7c38b56e86d44a684baf660b36dba3c | 3,635,151 |
def update(movie_id, **options):
"""
updates the info of given movie.
it returns a value indicating that update is done.
:param uuid.UUID movie_id: movie id.
:keyword bool content_rate: update content rate.
defaults to True if not provided.
:keyword bool country: update country.
defaults to True if not provided.
:keyword bool genre: update genre.
defaults to True if not provided.
:keyword bool imdb_rate: update imdb rate.
defaults to True if not provided.
:keyword bool language: update language.
defaults to True if not provided.
:keyword bool meta_score: update meta score.
defaults to True if not provided.
:keyword bool movie_poster: update movie poster.
defaults to True if not provided.
:keyword bool original_title: update original title.
defaults to True if not provided.
:keyword bool production_year: update production year.
defaults to True if not provided.
:keyword bool runtime: update runtime.
defaults to True if not provided.
:keyword bool storyline: update storyline.
defaults to True if not provided.
:keyword bool title: update title.
defaults to True if not provided.
:keyword bool actors: update actors.
defaults to True if not provided.
:keyword bool directors: update directors.
defaults to True if not provided.
:keyword str imdb_page: an imdb movie page to be used to fetch data from.
if not provided the movie page will be fetched
automatically if possible.
:keyword bool force: force update data even if a category already
has valid data. defaults to False if not provided.
:raises ValidationError: validation error.
:raises MovieIMDBPageNotFoundError: movie imdb page not found error.
:rtype: bool
"""
return get_component(UpdaterPackage.COMPONENT_NAME).update(movie_id, **options) | f7efffac6ca0cbd8d49d65135ae284b78d6b5dd6 | 3,635,152 |
from typing import Tuple
def validate_sig_integrity(signer_info: cms.SignerInfo,
cert: x509.Certificate,
expected_content_type: str,
actual_digest: bytes) -> Tuple[bool, bool]:
"""
Validate the integrity of a signature for a particular signerInfo object
inside a CMS signed data container.
.. warning::
This function does not do any trust checks, and is considered
"dangerous" API because it is easy to misuse.
.. warning::
This function currently does not deal with the case where signed
attributes are absent.
:param signer_info:
A :class:`cms.SignerInfo` object.
:param cert:
The signer's certificate.
.. note::
This function will not attempt to extract certificates from
the signed data.
:param expected_content_type:
The expected value for the content type attribute (as a Python string,
see :class:`cms.ContentType`).
:param actual_digest:
The actual digest to be matched to the message digest attribute.
:return:
A tuple of two booleans. The first indicates whether the provided
digest matches the value in the signed attributes.
The second indicates whether the signature of the digest is valid.
"""
signature_algorithm: cms.SignedDigestAlgorithm = \
signer_info['signature_algorithm']
digest_algorithm_obj = signer_info['digest_algorithm']
md_algorithm = digest_algorithm_obj['algorithm'].native
signature = signer_info['signature'].native
# signed_attrs comes with some context-specific tagging
# because it's an implicit field. This breaks validation
signed_attrs = signer_info['signed_attrs'].untag()
# TODO if there are no signed_attrs, we should validate the signature
    # against actual_digest. Find some real-world examples to test this
# Also, signed_attrs is mandatory if content_type is not id-data
# check the CMSAlgorithmProtection attr, if present
try:
cms_algid_protection, = find_cms_attribute(
signed_attrs, 'cms_algorithm_protection'
)
signed_digest_algorithm = \
cms_algid_protection['digest_algorithm'].native
if signed_digest_algorithm != digest_algorithm_obj.native:
raise SignatureValidationError(
"Digest algorithm does not match CMS algorithm protection "
"attribute."
)
signed_sig_algorithm = \
cms_algid_protection['signature_algorithm'].native
if signed_sig_algorithm is None:
raise SignatureValidationError(
"CMS algorithm protection attribute not valid for signed data"
)
elif signed_sig_algorithm != signature_algorithm.native:
raise SignatureValidationError(
"Signature mechanism does not match CMS algorithm "
"protection attribute."
)
except KeyError:
pass
except SignatureValidationError:
raise
except ValueError:
raise SignatureValidationError(
'Multiple CMS protection attributes present'
)
signed_blob = signed_attrs.dump(force=True)
try:
content_type, = find_cms_attribute(signed_attrs, 'content_type')
content_type = content_type.native
if content_type != expected_content_type:
raise SignatureValidationError(
'Content type did not match expected value'
)
except (KeyError, ValueError):
raise SignatureValidationError(
'Content type not found in signature, or multiple content-type '
'attributes present.'
)
try:
embedded_digest, = find_cms_attribute(signed_attrs, 'message_digest')
embedded_digest = embedded_digest.native
except (KeyError, ValueError):
raise SignatureValidationError(
'Message digest not found in signature, or multiple message '
'digest attributes present.'
)
intact = actual_digest == embedded_digest
try:
_validate_raw(
signature, signed_blob, cert, signature_algorithm, md_algorithm
)
valid = True
except SignatureError:
valid = False
return intact, valid | 9cf3164ffb9b1c4952d15537e22313e82fe797ba | 3,635,153 |
def get_c_header_path(*args):
"""get_c_header_path(char buf) -> ssize_t"""
return _idaapi.get_c_header_path(*args) | d9cca8050dac372953ef59f4df54787e7c2e3591 | 3,635,154 |
import datetime
def str_to_datetime(str_datetime: str) -> datetime.datetime:
"""
    Converts an ISO 8601 string (as returned by the Web API) into a datetime object.
    The returned datetime object is timezone-aware.
    Args:
        str_datetime (str): ISO 8601 string (e.g. ``2021-04-01T01:23:45.678Z``)
    Returns:
        datetime object
    """
    # A trailing 'Z' cannot be parsed by datetime.fromisoformat, so use strptime instead
return datetime.datetime.strptime(str_datetime, "%Y-%m-%dT%H:%M:%S.%fZ").replace(tzinfo=datetime.timezone.utc) | aa1324beb7889dc5a390e46678cdac1515091805 | 3,635,155 |
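An example round trip for a timestamp with the trailing 'Z' that fromisoformat cannot handle:
dt = str_to_datetime("2021-04-01T01:23:45.678Z")
# dt == datetime.datetime(2021, 4, 1, 1, 23, 45, 678000, tzinfo=datetime.timezone.utc)
dt.isoformat()  # -> '2021-04-01T01:23:45.678000+00:00'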
def uncertainty_separation_variance(predicted_distribution, true_labels):
"""Total, epistemic and aleatoric uncertainty based on a variance measure
B = batch size, N = num predictions
Note: if a batch with B samples is given,
then the output is a tensor with B values
The true targets argument is simply there for conformity
so that the entropy metric functions like any metric.
Args:
NOT USED true_labels: torch.tensor((B, 1))
predicted_distribution: torch.tensor((B, N, 2))
Returns:
Tuple of uncertainties (relative the maximum uncertainty):
Total uncertainty: torch.tensor(B,)
Epistemic uncertainty: torch.tensor(B,)
Aleatoric uncertainty: torch.tensor(B,)
"""
true_labels = None
total_uncertainty = np.var(predicted_distribution[:, :, 0], axis=-1)
aleatoric_uncertainty = np.mean(predicted_distribution[:, :, 1], axis=-1)
epistemic_uncertainty = total_uncertainty - aleatoric_uncertainty
return total_uncertainty, epistemic_uncertainty, aleatoric_uncertainty | 16dfb2260b972e1615e0e9b62ddd11edf3ff4e52 | 3,635,156 |
def cubic_spline_breaksToknots(bvec):
"""
Given breakpoints generated from _cubic_spline_breaks,
[x0, x0, x0, x0, x1, x2, ..., xN-2, xf, xf, xf, xf],
return the spline knots [x0, x1, ..., xN-1=xf].
This function ``undoes" _cubic_spline_breaks:
knot_vec = _cubic_spline_breaks2knots(_cubic_spline_breaks(knot_vec))
"""
return bvec[3:-3] | 15a73dea4b001e05bd67075ec21e15247db1f031 | 3,635,157 |
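A small check of the slicing (numpy assumed imported as np): the repeated end breakpoints are trimmed off, leaving the original knot vector:
bvec = np.array([0., 0., 0., 0., 1., 2., 3., 3., 3., 3.])
cubic_spline_breaksToknots(bvec)  # -> array([0., 1., 2., 3.])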
import datetime
def as_iso_date(wx_date):
""" Convert a QDate object into and iso date string.
"""
day = wx_date.GetDay()
month = wx_date.GetMonth() + 1 # wx peculiarity!
year = wx_date.GetYear()
return datetime.date(year, month, day).isoformat() | 2c74aa2a16ff46089d1dfab30abfef6396c304e9 | 3,635,158 |
from datetime import datetime
def should_certificate_be_visible(
certificates_display_behavior,
certificates_show_before_end,
has_ended,
certificate_available_date,
self_paced
):
"""
Returns whether it is acceptable to show the student a certificate download
link for a course, based on provided attributes of the course.
Arguments:
certificates_display_behavior (str): string describing the course's
certificate display behavior.
See CourseFields.certificates_display_behavior.help for more detail.
certificates_show_before_end (bool): whether user can download the
course's certificates before the course has ended.
has_ended (bool): Whether the course has ended.
certificate_available_date (datetime): the date the certificate is available on for the course.
self_paced (bool): Whether the course is self-paced.
"""
if settings.FEATURES.get("ENABLE_V2_CERT_DISPLAY_SETTINGS"):
show_early = (
certificates_display_behavior == CertificatesDisplayBehaviors.EARLY_NO_INFO
or certificates_show_before_end
)
past_available_date = (
certificates_display_behavior == CertificatesDisplayBehaviors.END_WITH_DATE
and certificate_available_date
and certificate_available_date < datetime.now(utc)
)
ended_without_available_date = (
certificates_display_behavior == CertificatesDisplayBehaviors.END
and has_ended
)
else:
show_early = (
certificates_display_behavior in ('early_with_info', 'early_no_info')
or certificates_show_before_end
)
past_available_date = (
certificate_available_date
and certificate_available_date < datetime.now(utc)
)
ended_without_available_date = (certificate_available_date is None) and has_ended
return any((self_paced, show_early, past_available_date, ended_without_available_date)) | 76ebaa5f924d5c4209859a6047f5866c7eb4e6a6 | 3,635,159 |
def svn_repos_invoke_freeze_func(*args):
"""svn_repos_invoke_freeze_func(svn_repos_freeze_func_t _obj, void * baton, apr_pool_t pool) -> svn_error_t"""
return _repos.svn_repos_invoke_freeze_func(*args) | 5145309e8ab7c1d8c7ab22ebe9b463f6f7c1f5b2 | 3,635,160 |
from typing import Union
from pathlib import Path
def load_results(
files_or_dir: Union[str, list, Path],
scoring_key: str = "balanced_accuracy",
average_results: bool = True,
) -> pd.DataFrame:
"""Load prediction results from *results.csv"""
# Create Dataframes from Files
files_or_dir = _handle_files_or_dir(
files_or_dir=files_or_dir, extensions="results.csv"
)
results = []
for file in files_or_dir:
data_raw = pd.read_csv(file, index_col=[0], header=[0])
data: pd.DataFrame = pd.melt(
data_raw, id_vars=["channel_name"], value_vars=[scoring_key]
)
accuracies = []
for ch_name in data["channel_name"].unique():
accuracies.append(
[
"LFP" if "LFP" in ch_name else "ECOG",
data[data.channel_name == ch_name]
.mean(numeric_only=True)
.value, # type: ignore
]
)
df_acc = pd.DataFrame(accuracies, columns=["Channels", scoring_key])
df_lfp = df_acc[df_acc["Channels"] == "LFP"]
df_ecog = df_acc[df_acc["Channels"] == "ECOG"]
subject = mne_bids.get_entities_from_fname(file, on_error="ignore")[
"subject"
]
values = [
file,
subject,
"OFF" if "MedOff" in file else "ON",
"OFF" if "StimOff" in file else "ON",
]
results.extend(
[
values + ["LFP", df_lfp[scoring_key].max()],
values + ["ECOG", df_ecog[scoring_key].max()],
]
)
columns = [
"Filename",
"Subject",
"Medication",
"Stimulation",
"Channels",
scoring_key,
]
columns = _normalize_columns(columns)
df_raw = pd.DataFrame(results, columns=columns)
if not average_results:
return df_raw
scoring_key = _normalize_columns([scoring_key])[0]
results_average = []
for ch_name in df_raw["Channels"].unique():
df_ch = df_raw.loc[df_raw["Channels"] == ch_name]
for subject in df_ch["Subject"].unique():
df_subj = df_ch.loc[df_ch["Subject"] == subject]
series_single = pd.Series(
df_subj.iloc[0].values, index=df_subj.columns
).drop("Filename")
series_single[scoring_key] = df_subj[scoring_key].mean()
results_average.append(series_single)
df_average = pd.DataFrame(results_average)
return df_average | e4668b0e2881a5acff8156f1e1d65687105dc840 | 3,635,161 |
def _tflite_convert_verify_op(tflite_convert_function, *args, **kwargs):
"""Verifies that the result of the conversion contains Gelu op."""
result = tflite_convert_function(*args, **kwargs)
tflite_model_binary = result[0]
if not result[0]:
tf.compat.v1.logging.error(result[1]) # stderr from running tflite_convert.
raise RuntimeError("Failed to build model: \n\n" + result[1])
interpreter = tf.lite.Interpreter(model_content=tflite_model_binary)
interpreter.allocate_tensors()
for op in interpreter._get_ops_details(): # pylint: disable=protected-access
if op["op_name"] == "GELU":
return result
raise RuntimeError("Expected to generate GELU op node in graph.") | 430cf0068f3c144fc26a09f56b0c90bcd1c8fd35 | 3,635,162 |
def tamper_nt_response(data, vars):
"""The connection is sometimes terminated if NTLM is successful, this prevents that"""
print("Tamper with NTLM response")
nt_response = vars["nt_response"]
fake_response = bytes([(nt_response[0] + 1 ) % 0xFF]) + nt_response[1:]
return data.replace(nt_response, fake_response) | cf2acad343f457b5ea5529d91653169d2093d500 | 3,635,163 |
def get_banned_per_category(cat: int, w_id: int) -> bool:
"""
Determine if a weapon is banned.
:param cat: Category of the weapon.
:param w_id: Id of the weapon.
:return: True if the weapon is banned, False if not.
"""
d = dict()
# Edit this function to change the ruleset:
# To ban a weapon, add it to the dict, to unban it remove it.
# Knife
if cat == 2:
d = {
271: "Carver",
285: "Ripper",
286: "Lumine Edge",
1082: "MAX Punch",
1083: "MAX Punch",
1084: "MAX Punch",
6005451: "Lumine Edge AE",
6005452: "Ripper AE",
6005453: "Carver AE",
6008687: "Defector Claws",
600946: "NS Icebreaker",
6009515: "NS Icebreaker",
6009516: "NS Icebreaker",
6009517: "NS Icebreaker",
6009518: "NS Icebreaker",
6009600: "NS Firebug"
}
# Pistol
elif cat == 3:
d = {
1889: "The Executive",
1954: "The President",
1959: "The Immortal",
7390: "NC08 Mag-Scatter",
802733: "NS-44L Blackhand",
802781: "NS-44LB Blackhand",
802782: "NS-44LG Blackhand",
804960: "NS-44LP Blackhand",
6002661: 'NS-44L "Ravenous" Blackhand',
6003793: "NS-44L Showdown",
6003943: "NS-357 IA",
6004714: "Soldier Soaker",
6004995: "Ectoblaster",
6005969: "NSX-A Yawara",
6009652: 'NS-357 "Endeavor" Underboss',
6009902: "U-100 Lastly",
6009903: "U-150 Recall",
6009904: "U-200 Harbinger"
}
# SMG
elif cat == 5:
d = {
1899: "Tempest",
1944: "Shuriken",
1949: "Skorpios",
27000: "AF-4 Cyclone",
27005: "AF-4G Cyclone",
28000: "SMG-46 Armistice",
28005: "SMG-46G Armistice",
29000: "Eridani SX5",
29005: "Eridani SX5G",
6002772: "Eridani SX5-AE",
6002800: "SMG-46AE Armistice",
6002824: "AF-4AE Cyclone",
6003850: "MGR-S1 Gladius",
6003879: "MG-S1 Jackal",
6003925: "VE-S Canis",
6005968: "NSX-A Kappa",
6009203: "NS-66 Punisher"
}
# LMG
elif cat == 6:
d = {
1879: "NC6A GODSAW",
1894: "Betelgeuse 54-A",
1924: 'T9A "Butcher"'
}
# Assault Rifle
elif cat == 7:
d = {
1904: "T1A Unity",
1909: "Darkstar",
77822: "Gauss Prime",
6009864: "AR-100",
6009891: "AR-101",
6009892: "AR-N203",
6009893: "CB-100",
6009894: "CB-X75",
6009895: "CB-200",
6009896: "PMG-100",
6009897: "PMG-200",
6009898: "PMG-3XB",
6009899: "XMG-100",
6009900: "XMG-155",
6009901: "XMG-200"
}
# Carbine
elif cat == 8:
d = {
1869: "19A Fortuna",
1914: "TRAC-Shot",
1919: "Eclipse VE3A"
}
elif cat == 13:
d = {
1964: "The Kraken"
}
# Sniper Rifle
elif cat == 11:
d = {
1969: "The Moonshot",
1974: "Bighorn .50M",
1979: "Parsec VX3-A"
}
# Grenades
elif cat == 17:
d = {
6050: "Decoy Grenade",
6003418: "NSX Fujin",
6004742: "Water Balloon",
6004743: "Water Balloon",
6004744: "Water Balloon",
6004750: "Flamewake Grenade",
6005304: "Smoke Grenade",
6005472: "NSX Raijin",
6007252: "Water Balloon",
6009459: "Lightning Grenade",
6009524: "Condensate Grenade",
6009583: "Infernal Grenade"
}
# Battle Rifle
elif cat == 19:
d = {
1984: "GD Guardian",
1989: "DMR-99",
1994: "Revenant",
6004209: "MGR-M1 Bishop",
6004214: "VE-LR Obelisk",
6004216: "MG-HBR1 Dragoon",
6005970: "NSX-A Sesshin",
6009101: "NS-30 Tranquility"
}
return w_id in d.keys() | e55ec72e1b672f6f31f296df749b179614f08a42 | 3,635,164 |
def pascal_classes():
"""Get Pascal VOC classes
:return: mapping from class name to an integer
"""
return {
'aeroplane': 1, 'bicycle' : 2, 'bird' : 3, 'boat' : 4,
'bottle' : 5, 'bus' : 6, 'car' : 7, 'cat' : 8,
'chair' : 9, 'cow' : 10, 'diningtable': 11, 'dog' : 12,
'horse' : 13, 'motorbike': 14, 'person' : 15, 'potted-plant': 16,
'sheep' : 17, 'sofa' : 18, 'train' : 19, 'tv/monitor' : 20
} | e6f488df00075ed6977024466e0eebb995b98605 | 3,635,165 |
def get_duty_cate_score(chosen_duty_list: list) -> pmag.MagicDict:
"""
Get duty score of each category.
We don't calculate each post score, we think what a man like can be
described on category level.
Parameters
----------
chosen_duty_list: list
Duty list chosen by user, each word with category together.
Returns
-------
Duty score for each category the user have chosen.
It is a dict that each item with category as key and score as value.
Notes
-----
"""
res = pmag.MagicDict()
for w, cate in chosen_duty_list:
freq = MODEL[cate]['duty'][w]['freq']
prob = MODEL[cate]['duty'][w]['prob']
score = prob # freq * prob / DUTY_NF[cate]
if cate in res:
res[cate] += score
else:
res[cate] = score
return res | 0b4fe97499be40f6058465aa3454a2e2654e9549 | 3,635,166 |
from typing import Optional
def offset(xs: Optional[ColumnSize] = None,
sm: Optional[ColumnSize] = None,
md: Optional[ColumnSize] = None,
lg: Optional[ColumnSize] = None,
xl: Optional[ColumnSize] = None) -> Optional[str]:
"""
Arguments:
xs: Offset (in column count) size for extra small screens.
sm: Offset (in column count) size for small screens.
md: Offset (in column count) size for mid-sized screens.
lg: Offset (in column count) size for large screens.
xl: Offset (in column count) size for extra large screens.
Returns:
The formatted class string or `None` if no sizes were specified.
"""
return " ".join(f"offset-{c}-{w}" for c, w in zip(_size_names, (xs, sm, md, lg, xl)) if w) or None | 7b2cc1c96deda1cdbea02e44415c98ef3ba1a34d | 3,635,167 |
def play_sound(data):
"""
Parameters
----------
data: dict
Returns
-------
"""
if 'sound_name' in data:
clientUtils.sound(data.get('sound_name'))
return ""
return "Je ne trouve pas le son demandé" | 3206d44682581458d62ebda2c90597d10aff9fab | 3,635,168 |
async def async_setup(hass, config):
"""Start the Fortigate component."""
conf = config[DOMAIN]
host = conf[CONF_HOST]
user = conf[CONF_USERNAME]
api_key = conf[CONF_API_KEY]
devices = conf[CONF_DEVICES]
is_success = await async_setup_fortigate(hass, config, host, user, api_key, devices)
return is_success | 0ed909bc2e18a242131bee458a4d7b0082576fb4 | 3,635,169 |
from sqlalchemy import Table, and_, select
def get_snapshot_usages_project(meta, project_id):
"""Return the snapshot resource usages of a project"""
snapshots_t = Table('snapshots', meta, autoload=True)
snapshots_q = select(columns=[snapshots_t.c.id,
snapshots_t.c.volume_size,
snapshots_t.c.volume_type_id],
whereclause=and_(
snapshots_t.c.deleted == 0,
snapshots_t.c.project_id == project_id))
return snapshots_q.execute() | 8185cd670d595f07d739d9c9e9e52b23eace432c | 3,635,170 |
import contextlib
def compute_patch_embeddings(
samples,
model,
patches_field,
embeddings_field=None,
force_square=False,
alpha=None,
handle_missing="skip",
batch_size=None,
num_workers=None,
skip_failures=True,
):
"""Computes embeddings for the image patches defined by ``patches_field``
of the samples in the collection using the given :class:`Model`.
This method supports all the following cases:
- Using an image model to compute patch embeddings for an image
collection
- Using an image model to compute frame patch embeddings for a video
collection
The ``model`` must expose embeddings, i.e., :meth:`Model.has_embeddings`
must return ``True``.
If an ``embeddings_field`` is provided, the embeddings are saved to the
samples; otherwise, the embeddings are returned in-memory.
Args:
samples: a :class:`fiftyone.core.collections.SampleCollection`
model: a :class:`Model`
patches_field: the name of the field defining the image patches in each
sample to embed. Must be of type
:class:`fiftyone.core.labels.Detection`,
:class:`fiftyone.core.labels.Detections`,
:class:`fiftyone.core.labels.Polyline`, or
:class:`fiftyone.core.labels.Polylines`. When computing video frame
embeddings, the "frames." prefix is optional
embeddings_field (None): the name of a field in which to store the
embeddings. When computing video frame embeddings, the "frames."
prefix is optional
force_square (False): whether to minimally manipulate the patch
bounding boxes into squares prior to extraction
alpha (None): an optional expansion/contraction to apply to the patches
before extracting them, in ``[-1, inf)``. If provided, the length
and width of the box are expanded (or contracted, when
``alpha < 0``) by ``(100 * alpha)%``. For example, set
``alpha = 1.1`` to expand the boxes by 10%, and set ``alpha = 0.9``
to contract the boxes by 10%
handle_missing ("skip"): how to handle images with no patches.
Supported values are:
- "skip": skip the image and assign its embedding as ``None``
- "image": use the whole image as a single patch
- "error": raise an error
batch_size (None): an optional batch size to use, if the model supports
batching
num_workers (None): the number of workers to use when loading images.
Only applicable for Torch models
skip_failures (True): whether to gracefully continue without raising an
error if embeddings cannot be generated for a sample
Returns:
one of the following:
- ``None``, if an ``embeddings_field`` is provided
- a dict mapping sample IDs to ``num_patches x num_dim`` arrays of
patch embeddings, when computing patch embeddings for image
collections and no ``embeddings_field`` is provided. If
``skip_failures`` is ``True`` and any errors are detected, this
dictionary will contain ``None`` values for any samples for which
embeddings could not be computed
- a dict of dicts mapping sample IDs to frame numbers to
``num_patches x num_dim`` arrays of patch embeddings, when
computing patch embeddings for the frames of video collections and
no ``embeddings_field`` is provided. If ``skip_failures`` is
``True`` and any errors are detected, this nested dict will contain
missing or ``None`` values to indicate uncomputable embeddings
"""
if not isinstance(model, Model):
raise ValueError(
"Model must be a %s instance; found %s" % (Model, type(model))
)
if not model.has_embeddings:
raise ValueError(
"Model must expose embeddings; found model.has_embeddings = %s"
% model.has_embeddings
)
if model.media_type != "image":
raise ValueError(
"This method only supports image models; found "
"model.media_type = %s" % model.media_type
)
_handle_missing_supported = {"skip", "image", "error"}
if handle_missing not in _handle_missing_supported:
raise ValueError(
"Unsupported handle_missing = '%s'; supported values are %s"
% (handle_missing, _handle_missing_supported)
)
use_data_loader = (
isinstance(model, TorchModelMixin) and samples.media_type == fom.IMAGE
)
if num_workers is not None and not use_data_loader:
logger.warning(
"Ignoring `num_workers` parameter; only supported for Torch models"
)
if samples.media_type == fom.VIDEO:
patches_field, _ = samples._handle_frame_field(patches_field)
if embeddings_field is not None:
embeddings_field, _ = samples._handle_frame_field(embeddings_field)
fov.validate_collection_label_fields(
samples,
samples._FRAMES_PREFIX + patches_field,
_ALLOWED_PATCH_TYPES,
)
else:
fov.validate_collection_label_fields(
samples, patches_field, _ALLOWED_PATCH_TYPES
)
batch_size = _parse_batch_size(batch_size, model, use_data_loader)
with contextlib.ExitStack() as context:
if use_data_loader:
# pylint: disable=no-member
context.enter_context(fou.SetAttributes(model, preprocess=False))
# pylint: disable=no-member
context.enter_context(model)
if samples.media_type == fom.VIDEO:
return _embed_frame_patches(
samples,
model,
patches_field,
embeddings_field,
force_square,
alpha,
handle_missing,
batch_size,
skip_failures,
)
if use_data_loader:
return _embed_patches_data_loader(
samples,
model,
patches_field,
embeddings_field,
force_square,
alpha,
handle_missing,
batch_size,
num_workers,
skip_failures,
)
return _embed_patches(
samples,
model,
patches_field,
embeddings_field,
force_square,
alpha,
handle_missing,
batch_size,
skip_failures,
) | 371d4ae24e1e451b0e9970b78d43c4b8791d0e3c | 3,635,171 |
import os
import logging
def get_exact_file_name(file_name, file_path):
"""
:param file_name:
:param file_path:
:return:
"""
for root, dirs, files in os.walk(file_path):
for name in files:
logging.debug("Checking {0}".format(name))
if str(name).startswith(file_name):
return os.path.join(root, name)
return None | 7b7e72a5d2d5001f7452347f4bebcff9a9a9d0c3 | 3,635,172 |
import numpy as np
from copy import deepcopy
def getRetRange( rets, naLower, naUpper, naExpected = "False", s_type = "long"):
"""
@summary Returns the range of possible returns with upper and lower bounds on the portfolio participation
@param rets: Expected returns
@param naLower: List of lower percentages by stock
@param naUpper: List of upper percentages by stock
@return tuple containing (fMin, fMax)
"""
# Calculate theoretical minimum and maximum theoretical returns """
fMin = 0
fMax = 0
rets = deepcopy(rets)
if naExpected == "False":
naExpected = np.average( rets, axis=0 )
na_signs = np.sign(naExpected)
indices, = np.where(na_signs == 0)
na_signs[indices] = 1
if s_type == "long":
na_signs = np.ones(len(na_signs))
elif s_type == "short":
na_signs = np.ones(len(na_signs))*(-1)
rets = na_signs*rets
naExpected = na_signs*naExpected
naSortInd = naExpected.argsort()
# First add the lower bounds on portfolio participation """
for i, fRet in enumerate(naExpected):
fMin = fMin + fRet*naLower[i]
fMax = fMax + fRet*naLower[i]
# Now calculate minimum returns"""
# allocate the max possible in worst performing equities """
# Subtract min since we have already counted it """
naUpperAdd = naUpper - naLower
fTotalPercent = np.sum(naLower[:])
for i, lInd in enumerate(naSortInd):
fRetAdd = naUpperAdd[lInd] * naExpected[lInd]
fTotalPercent = fTotalPercent + naUpperAdd[lInd]
fMin = fMin + fRetAdd
# Check if this additional percent puts us over the limit """
if fTotalPercent > 1.0:
fMin = fMin - naExpected[lInd] * (fTotalPercent - 1.0)
break
# Repeat for max, just reverse the sort, i.e. high to low """
naUpperAdd = naUpper - naLower
fTotalPercent = np.sum(naLower[:])
for i, lInd in enumerate(naSortInd[::-1]):
fRetAdd = naUpperAdd[lInd] * naExpected[lInd]
fTotalPercent = fTotalPercent + naUpperAdd[lInd]
fMax = fMax + fRetAdd
# Check if this additional percent puts us over the limit """
if fTotalPercent > 1.0:
fMax = fMax - naExpected[lInd] * (fTotalPercent - 1.0)
break
return (fMin, fMax) | 7e51851ab82d9da6ff670ef52b09464190e8ee3c | 3,635,173 |
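A usage sketch with synthetic daily returns, relying on the `numpy`/`deepcopy` imports added above; the asset bounds are arbitrary illustration values:
np.random.seed(0)
rets = np.random.normal(loc=0.0005, scale=0.01, size=(250, 3))  # 250 days of returns, 3 assets
na_lower = np.array([0.05, 0.05, 0.05])                          # min 5% participation per asset
na_upper = np.array([0.60, 0.60, 0.60])                          # max 60% participation per asset
f_min, f_max = getRetRange(rets, na_lower, na_upper, s_type="long")
print(f_min, f_max)  # lowest and highest achievable expected portfolio return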
def landing():
"""
Landing page - either shows login/sign-up or redirects to dashboard
"""
if g.uid:
return redirect("/dashboard")
else:
return render_template("landing.html", menu_item="login") | 4ca27c76ca76bd762dc24ba2fa6a64cc06b355d3 | 3,635,174 |
from math import pi, sqrt
from random import uniform
def pi_estimator(iterations: int):
"""An implementation of the Monte Carlo method used to find pi.
1. Draw a 2x2 square centred at (0,0).
2. Inscribe a circle within the square.
3. For each iteration, place a dot anywhere in the square.
3.1 Record the number of dots within the circle.
4. After all the dots are placed, divide the dots in the circle by the total.
5. Multiply this value by 4 to get your estimate of pi.
6. Print the estimated and numpy value of pi
"""
circle_dots = 0
# A local function to see if a dot lands in the circle.
def circle(x: float, y: float):
distance_from_centre = sqrt((x ** 2) + (y ** 2))
# Our circle has a radius of 1, so a distance greater than 1 would land outside the circle.
return distance_from_centre <= 1
circle_dots = sum(
int(circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for i in range(iterations)
)
# The proportion of guesses that landed within the circle
proportion = circle_dots / iterations
# The ratio of the area for circle to square is pi/4.
pi_estimate = proportion * 4
print("The estimated value of pi is ", pi_estimate)
print("The numpy value of pi is ", pi)
print("The total error is ", abs(pi - pi_estimate)) | 65475038c3655ea9e093b82b64eb1086f81a1ba8 | 3,635,175 |
from typing import Optional
from typing import Sequence
def get_projects(filters: Optional[Sequence[pulumi.InputType['GetProjectsFilterArgs']]] = None,
sorts: Optional[Sequence[pulumi.InputType['GetProjectsSortArgs']]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetProjectsResult:
"""
Retrieve information about all DigitalOcean projects associated with an account, with
the ability to filter and sort the results. If no filters are specified, all projects
will be returned.
Note: You can use the `Project` data source to
obtain metadata about a single project if you already know the `id` to retrieve or the unique
`name` of the project.
:param Sequence[pulumi.InputType['GetProjectsFilterArgs']] filters: Filter the results.
The `filter` block is documented below.
:param Sequence[pulumi.InputType['GetProjectsSortArgs']] sorts: Sort the results.
The `sort` block is documented below.
"""
__args__ = dict()
__args__['filters'] = filters
__args__['sorts'] = sorts
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('digitalocean:index/getProjects:getProjects', __args__, opts=opts, typ=GetProjectsResult).value
return AwaitableGetProjectsResult(
filters=__ret__.filters,
id=__ret__.id,
projects=__ret__.projects,
sorts=__ret__.sorts) | fa282a9420eb30633c4f354963343d46d77d4c2f | 3,635,176 |
import pandas as pd
from datetime import timedelta
def get_market_impact(
portfolio_name,
start_date,
end_date,
denominator="reference_equity",
model_id=DEFAULT_MODEL_ID,
):
"""Get market impact for each daily change of a portfolio in terms of dollars and percent of a denominator, either reference_equity or gmv.
Note that this ignores changes in position due to price changes, and assumes all changes are from trading and calculates market impact on that basis.
A more correct approach would be to calculate a synthetic position for each day assuming no trading from the prior day, and then calculate market impact
on the delta from synthetic to actual position.
"""
prev_date = start_date + timedelta(days=-4)
df_pos = get_exposure_contributors(portfolio_name, prev_date, end_date)
df_composition = get_composition(portfolio_name, start_date, end_date)
nav = df_composition.set_index("date").to_dict()["reference_equity"]
denominators = df_composition.set_index("date").to_dict()[denominator]
prev_pos = None
df_total, df_contrib = None, None
for dt in [dt for dt in sorted(df_pos.date.unique()) if dt >= start_date]:
curr_pos = df_pos[df_pos.date == dt]
if prev_pos is None:
prev_pos = curr_pos
continue
deltas = get_position_deltas(curr_pos, prev_pos)
total_date, contrib_date = get_market_impact_date(
dt, deltas, nav[dt], denominators[dt]
)
if df_total is None:
df_total, df_contrib = total_date, contrib_date
else:
df_total, df_contrib = (
pd.concat([df_total, total_date]),
pd.concat([df_contrib, contrib_date]),
)
prev_pos = curr_pos
return df_total, df_contrib | fa74d8aad26bd4bb759a115cfc51a8fbc21b9353 | 3,635,177 |
import h5py
import numpy as np
def importH5(name, df):
    """Load dataset `df` from the HDF5 file `name`, swap axes 1 and 2,
    and return the result as a NumPy array.
    """
f = h5py.File(name,'r')
data = f.get(df)
data = np.array(data)
oldShape = data.shape
data = np.swapaxes(data, 1, 2)
    print('convert shape %s to %s' % (oldShape, data.shape))
return data | 4c6f14fec8497f965c2b14a153bd2f6f21f429c7 | 3,635,178 |
def svc_longi_u_polar(vr,vpsi,vz,gamma_l=-1,R=1,m=0,ntheta=180,polar_out=False):
""" Raw function, not intended to be exported.
Induced velocity from a skewed semi infinite cylinder of longitudinal vorticity.
Takes polar coordinates as inputs, returns velocity either in Cartesian (default) or polar.
The cylinder axis is defined by x=m.z, m=tan(chi). The rotor is in the plane z=0.
INPUTS:
vr,vpsi,vz : control points in polar coordinates, may be of any shape
gamma_t : tangential vorticity of the vortex sheet (circulation per unit of length oriented along psi). (for WT rotating positively along psi , gamma psi is negative)
R : radius of cylinder
m =tan(chi): tangent of wake skew angle
ntheta : number of points used for integration
Reference: [1,2]"""
EPSILON_AXIS=1e-7; # relative threshold for using axis formula
vtheta = np.linspace(0,2 * np.pi,ntheta) + np.pi / ntheta
# Flattening, and dimensionless!
shape_in=vr.shape
vr = np.asarray(vr/R).ravel()
vpsi = np.asarray(vpsi).ravel()
vz = np.asarray(vz/R).ravel()
u_z = np.zeros(vr.shape)
if polar_out:
u_r = np.zeros(vr.shape)
u_psi = np.zeros(vr.shape)
for i,(r,psi,z) in enumerate(zip(vr,vpsi,vz)):
Den1 = np.sqrt(1 + r**2 + z**2 - 2*r* np.cos(vtheta - psi))
Den2 = - z + m * np.cos(vtheta) + np.sqrt(1 + m ** 2) * np.sqrt(1 + r ** 2 + z ** 2 - 2 * r * np.cos(vtheta - psi)) - m * r * np.cos(psi)
DenInv = gamma_l/(4*np.pi)/np.multiply(Den1,Den2)
u_r[i] = np.trapz(( - m*z*np.sin(psi) + np.sin(vtheta-psi))*DenInv,vtheta)
u_psi[i] = np.trapz((r - m*z*np.cos(psi) - np.cos(vtheta-psi))*DenInv,vtheta)
u_z[i] = np.trapz(m * (-np.sin(vtheta) + r*np.sin(psi)) *DenInv,vtheta)
# Reshaping to input shape
        u_r = u_r.reshape(shape_in)
u_psi = u_psi.reshape(shape_in)
u_z = u_z.reshape(shape_in)
return (u_r,u_psi,u_z)
else:
u_x = np.zeros(vr.shape)
u_y = np.zeros(vr.shape)
for i,(r,psi,z) in enumerate(zip(vr,vpsi,vz)):
Den1 = np.sqrt(1 + r**2 + z**2 - 2*r* np.cos(vtheta - psi))
Den2 = - z + m * np.cos(vtheta) + np.sqrt(1 + m ** 2) * np.sqrt(1 + r ** 2 + z ** 2 - 2 * r * np.cos(vtheta - psi)) - m * r * np.cos(psi)
DenInv = gamma_l/(4*np.pi)/np.multiply(Den1,Den2)
u_x[i] = np.trapz( (np.sin(vtheta) - r*np.sin(psi)) *DenInv,vtheta)
u_y[i] = np.trapz((- m*z - np.cos(vtheta) + r*np.cos(psi)) *DenInv,vtheta)
u_z[i] = np.trapz(m * (-np.sin(vtheta) + r*np.sin(psi)) *DenInv,vtheta)
# Reshaping to input shape
u_x = u_x.reshape(shape_in)
u_y = u_y.reshape(shape_in)
u_z = u_z.reshape(shape_in)
return (u_x,u_y,u_z) | c4d1713ef08c4672bfefba0e11d2daca121032f4 | 3,635,179 |
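An evaluation sketch for the function above, assuming it lives in a module that imports NumPy as `np` (the snippet does not show its imports):
import numpy as np

# Three control points in polar coordinates (r, psi, z), one radius downstream of the rotor plane.
vr   = np.array([0.5, 1.0, 1.5])
vpsi = np.array([0.0, np.pi / 2, np.pi])
vz   = np.array([1.0, 1.0, 1.0])

# Straight (non-skewed) wake: m = tan(chi) = 0, unit radius and unit negative circulation.
u_x, u_y, u_z = svc_longi_u_polar(vr, vpsi, vz, gamma_l=-1.0, R=1.0, m=0.0, ntheta=180)
print(u_x, u_y, u_z)  # Cartesian induced velocities at the three points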
import requests
import json
import logging
def _unremediate_email_o365_EWS(emails):
"""Remediates the given emails specified by a list of tuples of (message-id, recipient email address)."""
assert emails
assert all([len(e) == 2 for e in emails])
result = [] # tuple(message_id, recipient, result_code, result_text)
# get the hostname and port for our EWS proxy system
# this system receives requests for remediation and restorations and submits them to EWS on our behalf
ews_host = saq.CONFIG['remediation']['ews_host']
ews_port = saq.CONFIG['remediation'].getint('ews_port')
# the format of each request is a POST to
    # https://host:port/restore
# with JSON as the POST data content
# note that we make a separate request for each one
url = 'https://{}:{}/restore'.format(saq.CONFIG['remediation']['ews_host'], saq.CONFIG['remediation']['ews_port'])
session = requests.Session()
data = { 'recipient': None, 'message_id': None }
headers = { 'Content-Type': 'application/json' }
for message_id, recipient in emails:
try:
if recipient.startswith('<'):
recipient = recipient[1:]
if recipient.endswith('>'):
recipient = recipient[:-1]
data['recipient'] = recipient
data['message_id'] = message_id
json_data = json.dumps(data)
logging.info("restoring message_id {} to {}".format(message_id, recipient))
r = session.post(url, headers=headers, data=json_data, verify=False)
logging.info("got result {} text {} for message_id {} to {}".format(r.status_code, r.text, message_id, recipient))
result.append((message_id, recipient, r.status_code, r.text))
except Exception as e:
error_message = 'unable to restore message_id {} to {}: {}'.format(message_id, recipient, str(e))
logging.error(error_message)
report_exception()
result.append((message_id, recipient, 'N/A', str(e)))
return result | 15c4b07b0f45d0792f9632cdf3312f8ab6025e44 | 3,635,180 |
def compute_feedback_score(user_search, result):
"""
Compute the feedback score
Input: user_search: keyword entered by the user
result: proposed result to the user
Output: Feedback score, default value to 0.4 if no feedbacks available
"""
    # This old version takes into account when a user doesn't choose a keyword; I changed it so that a keyword not chosen doesn't get as much of a penalty
"""
# get feedback for that particular search_id -> result_url sequence (TODO: check for similar search?)
feedbacks = sql_query.get_feedback_for_reranking(user_search, result)
if feedbacks != None and len(feedbacks) > 0:
# Normalize mean of all feedbacks (-1->1 to 0->1)
feedback_score = (np.mean(feedbacks) - (-1)) / (1 - (-1))
else:
# Default value if no feedbacks available
feedback_score = 0
return feedback_score
"""
# get feedback for that particular keyword1 -> keyword2 sequence (TODO: check for similar search?)
feedbacks = sql_query.get_feedback_for_reranking(user_search, result)
chosen = 0
ignored = 0
base_score = 0.4
if feedbacks is not None and len(feedbacks) > 0:
for feedback in feedbacks:
if feedback == 1:
chosen += 1
if feedback == -1:
ignored += 1
# remove a point for every 10 users that didn't choose it
chosen -= int(ignored / 10)
feedback_score = base_score + (chosen / len(feedbacks))
else:
feedback_score = base_score
# print(result.title, ":", max(0, min(feedback_score, 1)))
return max(0, min(feedback_score, 1)) | 33e9e70111824e6eff60214cc0b55778a0cc5108 | 3,635,181 |
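The scoring rule described above (base score 0.4, plus the share of positive feedbacks, minus one point per ten ignores, clamped to [0, 1]) can be exercised without the `sql_query` dependency; `raw_feedback_score` below is a hypothetical standalone name, not part of the original module:
def raw_feedback_score(feedbacks, base_score=0.4):
    """Standalone sketch of the feedback scoring rule used above."""
    if not feedbacks:
        return base_score
    chosen = sum(1 for f in feedbacks if f == 1)
    ignored = sum(1 for f in feedbacks if f == -1)
    chosen -= ignored // 10                    # remove a point for every 10 users that ignored it
    score = base_score + chosen / len(feedbacks)
    return max(0.0, min(score, 1.0))

print(raw_feedback_score([1, 1, -1, -1]))  # 0.4 + 2/4 = 0.9
print(raw_feedback_score([]))              # 0.4 (no feedback available yet)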
def filenames_per_batch (gen):
""" arg = name of the data generator (datagen.flow_from_dataframe) """
img_paths_per_batch=[]
batches_per_epoch = gen.samples // gen.batch_size + (gen.samples % gen.batch_size > 0)
for i in range(batches_per_epoch):
batch = next(gen)
current_index = ((gen.batch_index-1) * gen.batch_size)
if current_index < 0:
if gen.samples % gen.batch_size > 0:
current_index = max(0, gen.samples - gen.samples % gen.batch_size)
else:
current_index = max(0,gen.samples - gen.batch_size)
index_array = gen.index_array[current_index:current_index + gen.batch_size].tolist()
img_paths = [gen.filepaths[idx] for idx in index_array]
img_paths_per_batch.append(img_paths)
return img_paths_per_batch | 23ea9dfbbfe64fc51796af22c83a470847c9f698 | 3,635,182 |
def coarsemask_head_generator(params):
"""Generator function for ShapeMask coarse mask head architecture."""
head_params = params.shapemask_head
return heads.ShapemaskCoarsemaskHead(
head_params.num_classes,
head_params.num_downsample_channels,
head_params.mask_crop_size,
head_params.use_category_for_mask,
head_params.num_convs,
batch_norm_activation=batch_norm_activation_generator(
params.batch_norm_activation)) | 6dea83780e2b0169f71401ca3577d9c4383280f0 | 3,635,183 |
def ethtype_to_int_priv_pubv(priv, pubv):
"""
    Convert priv and pubv into the decimal-string format supported by weidentity.
    :param priv: private key, type: bytes
    :param pubv: public key, type: hex string
    :return: dict with "priv" and "pubv" as decimal strings
"""
private_key = int.from_bytes(priv, byteorder='big', signed=False)
public_key = eval(pubv)
return {"priv": str(private_key), "pubv": str(public_key)} | 763a284015029a43257061818634b50d69417de5 | 3,635,184 |
import argparse
def build_arg_parse() -> argparse.ArgumentParser:
"""Builds the arguments parser."""
parser = argparse.ArgumentParser(
description="This script updates the python extension micro version based on the release or pre-release channel."
)
parser.add_argument(
"--release",
action="store_true",
help="Treats the current build as a release build.",
)
parser.add_argument(
"--build-id",
action="store",
type=int,
default=None,
help="If present, will be used as a micro version.",
required=False,
)
parser.add_argument(
"--for-publishing",
action="store_true",
help="Removes `-dev` or `-rc` suffix.",
)
return parser | 8151e3366c7a2acecb7f40cb05684af368dc9e1f | 3,635,185 |
import time
from numpy import linspace, zeros
from scipy.sparse import spdiags
from scipy.sparse.linalg import spsolve
def solver(I, a, L, Nx, F, T, theta=0.5, u_L=0, u_R=0,
user_action=None):
"""
Solve the diffusion equation u_t = a*u_xx on (0,L) with
boundary conditions u(0,t) = u_L and u(L,t) = u_R,
for t in (0,T]. Initial condition: u(x,0) = I(x).
Method: (implicit) theta-rule in time.
Nx is the total number of mesh cells; mesh points are numbered
from 0 to Nx.
F is the dimensionless number a*dt/dx**2 and implicitly specifies the
time step. No restriction on F.
T is the stop time for the simulation.
I is a function of x.
user_action is a function of (u, x, t, n) where the calling code
can add visualization, error computations, data analysis,
store solutions, etc.
The coefficient matrix is stored in a scipy data structure for
sparse matrices. Input to the storage scheme is a set of
diagonals with nonzero entries in the matrix.
"""
    t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
x = linspace(0, L, Nx+1) # mesh points in space
dx = x[1] - x[0]
dt = F*dx**2/a
Nt = int(round(T/float(dt)))
print('Number of time steps:', Nt)
t = linspace(0, T, Nt+1) # mesh points in time
u = zeros(Nx+1) # solution array at t[n+1]
u_n = zeros(Nx+1) # solution at t[n]
# Representation of sparse matrix and right-hand side
diagonal = zeros(Nx+1)
lower = zeros(Nx+1)
upper = zeros(Nx+1)
b = zeros(Nx+1)
# Precompute sparse matrix (scipy format)
Fl = F*theta
Fr = F*(1-theta)
diagonal[:] = 1 + 2*Fl
lower[:] = -Fl # 1
upper[:] = -Fl # 1
# Insert boundary conditions
# (upper[1:] and lower[:-1] are the active values)
upper[0:2] = 0
lower[-2:] = 0
diagonal[0] = 1
diagonal[Nx] = 1
diags = [0, -1, 1]
A = spdiags([diagonal, lower, upper], diags, Nx+1, Nx+1)
# print(A.todense())
# Set initial condition
for i in range(0, Nx+1):
u_n[i] = I(x[i])
if user_action is not None:
user_action(u_n, x, t, 0)
# Time loop
for n in range(0, Nt):
b[1:-1] = u_n[1:-1] + Fr*(u_n[:-2] - 2*u_n[1:-1] + u_n[2:])
b[0] = u_L
b[-1] = u_R # Boundary conditions
u[:] = spsolve(A, b)
if user_action is not None:
user_action(u, x, t, n+1)
# Switch variables before next step
u_n, u = u, u_n
    t1 = time.perf_counter()
return u, x, t, t1-t0 | 99da0c06fcbbc36515b6b4aee23b4b3b4eff0032 | 3,635,186 |
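A usage sketch for the theta-rule solver above, checking a single decaying sine mode against the exact solution of u_t = a*u_xx with homogeneous boundaries (assumes NumPy and the imports noted above):
import numpy as np

a, L, T = 1.0, 1.0, 0.1

def I(x):
    return np.sin(np.pi * x / L)                       # initial condition: one sine mode

def exact(x, t):
    return np.exp(-a * (np.pi / L) ** 2 * t) * np.sin(np.pi * x / L)

def check(u, x, t, n):                                 # user_action hook called every step
    print("t=%.4f  max error=%.2e" % (t[n], np.max(np.abs(u - exact(x, t[n])))))

u, x, t, cpu = solver(I, a, L, Nx=20, F=0.5, T=T, theta=0.5, user_action=check)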
def create_clean_df_from_cloud_json(js):
""" Given a json downloaded on the cloud from get_datasource_data or get_location_data, returns a corrected df"""
df = json_to_df(js)
df_data = set_timestamp_df_index(df)
for col in df_data.columns:
if col[:6] == 'values':
df_data[col] = __pd.to_numeric(df_data[col], errors='coerce')
return df_data | cdad4f51dd3014a05e9c1e5f5068d344220bccd5 | 3,635,187 |
from sklearn.feature_extraction.text import TfidfVectorizer
def tfidf_corpus(docs=CORPUS):
    """ Count the words in a corpus and return a TfidfVectorizer() as well as all the TF-IDF vectors for the corpus
Args:
docs (iterable of strs): a sequence of documents (strings)
Returns:
(TfidfVectorizer, tfidf_vectors)
"""
vectorizer = TfidfVectorizer()
vectorizer = vectorizer.fit(docs)
return vectorizer, vectorizer.transform(docs) | 401b9e0ed9321e7a20f9efeb303b5c8f51b70a75 | 3,635,188 |
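A small usage sketch with a toy corpus; `docs` is passed explicitly because `CORPUS` is defined elsewhere, and `get_feature_names_out` assumes scikit-learn >= 1.0:
docs = [
    "the cat sat on the mat",
    "the dog chased the cat",
    "dogs and cats make good pets",
]
vectorizer, tfidf_vectors = tfidf_corpus(docs)
print(tfidf_vectors.shape)                     # (3, number of distinct terms)
print(vectorizer.get_feature_names_out()[:5])  # first few vocabulary terms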
def svn_utf_cstring_from_utf8_string(*args):
"""svn_utf_cstring_from_utf8_string(svn_string_t const * src, apr_pool_t pool) -> svn_error_t"""
return _core.svn_utf_cstring_from_utf8_string(*args) | 63be2c2624a66d7221845e8c0546f58c5f2f952a | 3,635,189 |
def squint(t, r, orbit, attitude, side, angle=0.0, dem=None, **kw):
"""Find squint angle given imaging time and range to target.
"""
assert orbit.reference_epoch == attitude.reference_epoch
p, v = orbit.interpolate(t)
R = attitude.interpolate(t).to_rotation_matrix()
axis = R[:,1]
# In NISAR coordinate frames (see D-80882 and REE User Guide) left/right is
# implemented as a 180 yaw flip, so the left/right flag can just be
# inferred by the sign of axis.dot(v). Verify this convention.
inferred_side = "left" if axis.dot(v) > 0 else "right"
if side.lower() != inferred_side:
raise ValueError(f"Requested side={side.lower()} but "
f"inferred side={inferred_side} based on orientation "
f"(Y_RCS.dot(V) = {axis.dot(v)})")
if dem is None:
dem = isce3.geometry.DEMInterpolator()
# NOTE Here "left" means an acute, positive look angle by right-handed
# rotation about `axis`. Since axis will flip sign, always use "left" to
# get the requested side in the sense of velocity vector.
xyz = isce3.geometry.rdr2geo_cone(p, axis, angle, r, dem, "left", **kw)
look = (xyz - p) / np.linalg.norm(xyz - p)
vhat = v / np.linalg.norm(v)
return np.arcsin(look.dot(vhat)) | 0172b4f525e5738740eb0c948b650bf3c1abbb11 | 3,635,190 |
def get_cmfgenNoRot_atmosphere(metallicity=0, temperature=30000, gravity=4.14):
"""
metallicity = [M/H] (def = 0)
temperature = Kelvin (def = 30000)
gravity = log gravity (def = 4.14)
"""
sp = pysynphot.Icat('cmfgenF15_noRot', temperature, metallicity, gravity)
# Do some error checking
idx = np.where(sp.flux != 0)[0]
if len(idx) == 0:
print( 'Could not find CMFGEN non-rotating atmosphere model (Fierro+15) for')
print( ' temperature = %d' % temperature)
print( ' metallicity = %.1f' % metallicity)
print( ' log gravity = %.1f' % gravity)
return sp | a188661f24083b4ab29f83559dac6011eee4d7aa | 3,635,191 |
def _SanitizeDoc(doc, leader):
"""Cleanup the doc string in several ways:
* Convert None to empty string
* Replace new line chars with doxygen comments
* Strip leading white space per line
"""
if doc is None:
return ''
return leader.join([line.lstrip() for line in doc.split('\n')]) | 7ca6f17296c9b23c05239092e28c8d6b4df7c725 | 3,635,192 |
import json
import yaml
def load_file(file_path: str):
"""Loads a file using a serializer which guesses based on the file extension"""
if file_path.lower().endswith('.json'):
with open(file_path) as input_file:
return json.load(input_file)
elif file_path.lower().endswith('.yaml') or file_path.lower().endswith('.yml'):
with open(file_path) as input_file:
return yaml.safe_load(input_file)
raise _ValidatorError("Unknown file format. Supported extension: '.yaml', '.json'") | 1b9c3278bd40a23e142590952d95876401c5f99b | 3,635,193 |
import json
def list_assets(event, context):
"""
Get a list of assets of the given type.
Query string parameters
-----------------------
asset_type (required):
The type of asset to get. Allowed values are found in the
``asset_map`` dict.
"""
query_params = event.get('queryStringParameters', {})
try:
asset_type = query_params['asset_type']
except KeyError:
return {
'statusCode': StatusCode.bad_request,
'body': json.dumps({
'error': 'Query string parameter `asset_type` is required'
})
}
try:
asset = asset_map[asset_type]
except KeyError:
return {
'statusCode': StatusCode.bad_request,
'body': json.dumps({
'error': f'Invalid value for `asset_type`. '
f'Allowed values are {set(asset_map.keys())}'
})
}
return asset.list() | e0cf59d2ee275dc9c6610f30daacc91a9042723a | 3,635,194 |
def pop_execute_query_kwargs(keyword_arguments):
""" pop the optional execute query arguments from arbitrary kwargs;
return non-None query kwargs in a dict
"""
query_kwargs = {}
for key in ('transaction', 'isolate', 'pool'):
val = keyword_arguments.pop(key, None)
if val is not None:
query_kwargs[key] = val
return query_kwargs | d4ae2df3158660f62e21153d943922692f633b76 | 3,635,195 |
import time
import json
from bitstring import BitArray
def decompress(target_file):
"""This is the decompression section"""
# extract binary string from a file
start_decompress = float(time.process_time()) # start measure time in this line to check processing time
binary_file_name = target_file
filename = target_file
with open(binary_file_name, 'rb') as f:
binary_content = BitArray(f.read())
binary_string = binary_content.bin
# print("content: ",binary_string) # test code
# print(type(binary_string)) # test code
# identify dictionary string section
def Invalid():
"""This function display a warning if not catchphrase is found
which indicate the compressed .bin file could be random"""
print("Decompression Error! Invalid compressed file!")
exit()
pos = 0 # position counter
catch_found = False
while catch_found is False:
if binary_string[len(binary_string) - 87 - pos:len(
binary_string) - pos] == "110001101100001011101000110001101101000011100000110100001110010011000010111001101100101":
# print("found!") # test code
catch_found = True
else:
pos += 1
if pos > len(binary_string):
Invalid() # wrong file
# print(pos) # test code
# dict_binary = binary_string[(pos + 87):] # the binary string of the dictionary ####
dict_binary = binary_string[0:len(binary_string) - pos - 87] ####
# dict_binary = binary_string[pos:]
# print("dict: ", dict_binary) # test code
# print("dict")
# print("catch: ",binary_string[pos:(pos+87)]) # test code
# convert to dictionary text
# convert dict binary back to dict text
binary_int3 = int(dict_binary, 2)
    byte_number3 = (binary_int3.bit_length() + 7) // 8  # parentheses added: ceil-divide the bit length into bytes
binary_array3 = binary_int3.to_bytes(byte_number3, "big")
dict_ascii_text = str(binary_array3.decode())
# print(type(dict_ascii_text)) # test code
# print(dict_ascii_text) # test code
# print("len: ",len(str(dict_ascii_text))) # test code
# the binary conversion to string creates a ton of spaces before the actual dictionary string
cpos = 0
cfound = False
# remove space before dictionary string
while cfound is False:
if dict_ascii_text[cpos] == "{":
cfound = True
cpos += 1
# print(cpos) # test code
dict_text = dict_ascii_text[cpos - 1:]
# print(len(dict_text)) # test code
# convert dictionary string into a dictionary
huffman_feq = json.loads(dict_text)
# print("freq dictionary: ", huffman_feq) # test code
# print(type(huffman_hash)) # test code
# identify binary content
binary_content = binary_string[len(binary_string) - pos:]
# print("bin: ", binary_content) # test code
# print("bin")
# decompress to a new file: reconstruct the huffman tree and traverse
# Class for creating node objects in a tree
class Node(object):
"""This is the class that creates an object that holds 2 daughter objects"""
# constructor
def __init__(self, left_node=None, right_node=None):
"""
In a binary tree, each node can only have up to 2 daughters.
The variable 'left' and 'right' stores the daughter nodes when initialised.
The node stored in the daughters can either be a character (leaf) or another node object.
"""
self.left_node = left_node # left-hand nodes
self.right_node = right_node # right-hand node
# getter method for left and right item
def daughters(self):
"""This is a method that returns the contents of both nodes stored from their parent node"""
return self.left_node, self.right_node
# a ascending sorted array list of nodes (can be either a node object or a character node) in tuple
# sort all tuple item by their second element (at position 1) in the array
# convert evey item in the letter frequency dictionary 'letter_freq' into a tuple consist of a character and number
list_nodes_objects = sorted(huffman_feq.items(), key=lambda x: x[1], reverse=False)
# print(list_nodes_objects) # test code
# this while loop iterates
while len(list_nodes_objects) > 1:
"""
This while loop iterates the list of node tuple until one item is left.
The goal of this loop is to generate 1 node, which contains all the combined nodes
"""
# take the 2 least weight nodes
(character_1, freq_1) = list_nodes_objects[0] # tuple (character, freq)
(character_2, freq_2) = list_nodes_objects[1]
list_nodes_objects = list_nodes_objects[2:] # list of nodes updated after 2 nodes are taken out
# print("list_nodes_objects ", list_nodes_objects) # test code
new_node = Node(character_1, character_2) # new node, contains the 2 combined smallest node in an object
list_nodes_objects.append(
(new_node, freq_1 + freq_2)) # put the combined node back to the array of node with new weight
# (combined node, sum of weight)
# sort the array in order after a new node is append
list_nodes_objects = sorted(list_nodes_objects, key=lambda x: x[1], reverse=False)
# print("sorted list_nodes_objects ", list_nodes_objects) # test code
# tree traversal
print("decompressing...%..")
decompress_string = "" # store the decoding result
print(decompress_string)
current_node = list_nodes_objects[0][0] # root node
for i in binary_content:
# if a node is a string, that means it is a leaf
if type(current_node) is str:
decompress_string += current_node # the leaf contains the character, add to string
current_node = list_nodes_objects[0][0] # reset traversal from root
# create a tuple, extract items (left_item, right_item) from a nodes' daughter
(left_item, right_item) = current_node.daughters()
if int(i) == 0:
# go to 'left'
current_node = left_item
elif int(i) == 1:
# go to 'right'
current_node = right_item
else:
print("Error!\nNon_binary content detected, abort decompression.")
exit()
# print("result: ", decompress_string) # test code
# write file
file_name = filename[:-4] # assuming the file to be decompressed is .bin
open_file = file_name + '-Decompressed' + '.txt'
f = open(open_file, "w", encoding="utf_8_sig") # create new file
f.write(decompress_string) # write text
f.close()
print("decompression completed!")
end_decompress = float(
time.process_time()) # end measure time in this line to check processing time for this section
    print(str(end_decompress - start_decompress) + " seconds taken to decompress the file!")
main() | 0491bdcb3b4555b42705b2a3a02fe3bf98ca6bb6 | 3,635,196 |
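The rebuild-tree-and-traverse idea in `decompress` can be exercised on a toy frequency table without any file I/O; the names below are mine, not part of the snippet:
# Rebuild a Huffman tree from a frequency table (same pairing of the two lightest nodes),
# derive the codes by walking it, then round-trip a short message.
freq = {"a": 5, "b": 2, "r": 2, "c": 1, "d": 1}        # frequencies for "abracadabra"

nodes = sorted(freq.items(), key=lambda x: x[1])
while len(nodes) > 1:
    (left, w1), (right, w2) = nodes[0], nodes[1]
    nodes = sorted(nodes[2:] + [((left, right), w1 + w2)], key=lambda x: x[1])
root = nodes[0][0]

def codes(node, prefix=""):
    """Left edge = '0', right edge = '1', matching the traversal in decompress()."""
    if isinstance(node, str):
        return {node: prefix or "0"}
    left, right = node
    table = codes(left, prefix + "0")
    table.update(codes(right, prefix + "1"))
    return table

table = codes(root)
bits = "".join(table[ch] for ch in "abracadabra")

out, node = "", root
for bit in bits:                                       # traverse from the root, reset at each leaf
    node = node[0] if bit == "0" else node[1]
    if isinstance(node, str):
        out, node = out + node, root
print(out)                                             # "abracadabra"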
import numpy as np
import skimage.transform as skt
from scipy.ndimage import zoom
def resize(img, new_shape, interpolation=1):
"""
img: [H, W, D, C] or [H, W, D]
new_shape: [H, W, D]
"""
type = 1
if type == 0:
new_img = skt.resize(img, new_shape, order=interpolation, mode='constant', cval=0, clip=True, anti_aliasing=False)
else:
shp = tuple(np.array(new_shape) / np.array(img.shape[:3]))
# Multichannel
data = []
for i in range(img.shape[-1]):
d0 = zoom(img[..., i].astype(np.uint8).copy(), shp, order=interpolation)
data.append(d0.copy())
new_img = np.stack(data, axis=3)
return new_img | 5754f8e61cca50927fc7ad65d4f3122c1718bcc1 | 3,635,197 |
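A quick usage sketch on a random multi-channel volume, relying on the imports added above (`numpy`, `skimage.transform`, `scipy.ndimage.zoom`):
img = (np.random.rand(32, 32, 16, 3) * 255).astype(np.uint8)   # [H, W, D, C] volume
out = resize(img, new_shape=(64, 64, 32), interpolation=1)      # upsample H, W and D by 2x
print(img.shape, "->", out.shape)                               # (32, 32, 16, 3) -> (64, 64, 32, 3)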
import logging
def get_oneview_client(session_id=None, is_service_root=False):
"""Establishes a OneView connection to be used in the module
    Establishes an OV connection if one does not exist.
    If one exists, does a single OV access to check if it's still
    valid. If not, tries to establish a new connection.
Sets the connection on the ov_conn global var
Args:
session_id: The ID of a valid authenticated session, if the
authentication_mode is session. Defaults to None.
is_service_root: Informs if who is calling this function is the
ServiceRoot blueprint. If true, even if authentication_mode is
set to session it will use the information on the conf file to
return a connection. This is a workaround to allow ServiceRoot
to retrieve the appliance UUID before user logs in.
Returns:
OneViewClient object
Exceptions:
HPOneViewException if can't connect or reconnect to OV
"""
config = globals()['config']
auth_mode = config["redfish"]["authentication_mode"]
if auth_mode == "conf" or is_service_root:
# Doing conf based authentication
ov_client = globals()['ov_client']
ov_config = globals()['ov_config']
# Check if connection is ok yet
try:
ov_client.connection.get('/rest/logindomains')
return ov_client
# If expired try to make a new connection
except Exception:
try:
logging.exception('Re-authenticated')
ov_client.connection.login(ov_config['credentials'])
return ov_client
# if failed abort
except Exception:
raise
else:
# Auth mode is session
oneview_config = dict(config.items('oneview_config'))
oneview_config['credentials'] = {"sessionID": session_id}
oneview_config['api_version'] = int(oneview_config['api_version'])
try:
oneview_client = OneViewClient(oneview_config)
oneview_client.connection.get('/rest/logindomains')
return oneview_client
except Exception:
logging.exception("Failed to recover session based connection")
raise | eeb5c80bbced6deb6188878ff646ef0a7a54a184 | 3,635,198 |
import numpy as np
def SegmentByPeaks(data, peaks, weights=None):
"""Average the values of the probes within each segment.
Parameters
----------
data : array
the probe array values
peaks : array
Positions of copy number breakpoints in the original array
Source: SegmentByPeaks.R
"""
segs = np.zeros_like(data)
for seg_start, seg_end in zip(np.insert(peaks, 0, 0),
np.append(peaks, len(data))):
if weights is not None and weights[seg_start:seg_end].sum() > 0:
# Weighted mean of individual probe values
val = np.average(data[seg_start:seg_end],
weights=weights[seg_start:seg_end])
else:
# Unweighted mean of individual probe values
val = np.mean(data[seg_start:seg_end])
segs[seg_start:seg_end] = val
return segs | 97acb45c320d4da9a3094188239a6442cafe48b1 | 3,635,199 |
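A short usage sketch with a synthetic two-level signal, relying on the `numpy` import added above:
data = np.concatenate([np.random.normal(0.0, 0.1, 50),
                       np.random.normal(1.0, 0.1, 50)])   # step from ~0.0 to ~1.0 at index 50
segs = SegmentByPeaks(data, peaks=np.array([50]))
print(segs[:3], segs[-3:])                                # first segment ~0.0, second segment ~1.0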