content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def createSMAbasis(delta, pistonMode, pistonProj):
    """
    Build an actuator basis orthogonal to piston.

    Input args:
    <delta> : geometric covariance matrix of the actuators, computed
        elsewhere.  Square, symmetric, 60x60.
    <pistonMode> : piston mode (will be used in sparta).
    <pistonProj> : piston projection used by filterOutPiston().

    The returned basis has pistonMode as its first column; the remaining
    columns diagonalise the piston-filtered covariance, so the last modes
    have large voltages and only small phase variance.
    """
    # Project piston out of the identity, then diagonalise the filtered
    # covariance in that subspace.
    filtered = filterOutPiston(np.identity(60), pistonMode, pistonProj)
    _, modes = diagonalisation(np.dot(filtered.T, np.dot(delta, filtered)))
    modes = np.dot(filtered, modes)
    # Assemble the output basis: piston first, then all but the last mode.
    basis = np.zeros(delta.shape)
    basis[:, 0] = pistonMode
    basis[:, 1:] = modes[:, :-1]
    return basis
def s3_put_bucket_website(s3_obj, bucketname, website_config):
    """
    Boto3 client based Put bucket website function

    Args:
        s3_obj (obj): MCG or OBC object
        bucketname (str): Name of the bucket
        website_config (dict): Website configuration info

    Returns:
        dict : PutBucketWebsite response
    """
    # Delegate directly to the underlying boto3 client.
    client = s3_obj.s3_client
    response = client.put_bucket_website(
        Bucket=bucketname,
        WebsiteConfiguration=website_config,
    )
    return response
from pma_api.manage.db_mgmt import list_cloud_datasets, download_dataset, \
from pma_api.models import ApiMetadata, Task
from pma_api.task_utils import upload_dataset
def admin_route():
    """Route to admin portal for uploading and managing datasets.
    .. :quickref: admin; Route to admin portal for uploading and managing
    datasets.
    Notes:
    - flash() uses Bootstrap 4.0 alert categories,
    https://getbootstrap.com/docs/4.0/components/alerts/
    # GET REQUESTS
    Args: n/a
    Query Args: n/a
    Returns:
        flask.render_template(): A rendered HTML template.
    Examples: n/a
    # POST REQUESTS
    Receives a file uploaded, which is of the type:
    ImmutableMultiDict([('file', <FileStorage: 'FILENAME' ('FILETYPE')>)])
    """
    # NOTE(review): bare-name statement; 'delete_dataset' appears to belong to
    # the truncated import list at the top of the file — verify the import.
    delete_dataset
    # upload
    if request.method == 'POST':
        try:
            file = request.files['file']
            # Sanitize the client-supplied filename before use.
            filename = secure_filename(file.filename)
            file_url: str = upload_dataset(filename=filename, file=file)
            return jsonify({'success': bool(file_url)})
        except ExistingDatasetError as err:
            return jsonify({'success': False, 'message': str(err)})
        except Exception as err:
            msg = 'An unexpected error occurred.\n' + \
                err.__class__.__name__ + ': ' + str(err)
            return jsonify({'success': False, 'message': msg})
    elif request.method == 'GET':
        if request.args:
            args = request.args.to_dict()
            if 'download' in args:
                # TODO: Delete tempfile after sending
                tempfile_path: str = download_dataset(
                    version_number=int(args['download']))
                return send_file(
                    filename_or_fp=tempfile_path,
                    attachment_filename=os.path.basename(tempfile_path),
                    as_attachment=True)
            if 'delete' in args:
                try:
                    delete_dataset(version_number=int(args['delete']))
                except FileNotFoundError as err:
                    msg = 'FileNotFoundError: ' + str(err)
                    flash(message=msg, category='danger')
                return redirect(url_for('root.admin_route'))
        active_api_dataset: Dict = \
            ApiMetadata.get_current_api_data(as_json=True)
        # TODO 2019.04.18-jef: active_dataset_version seems messy / breakable
        active_dataset_version: str = \
            re.findall(r'-v[0-9]*', active_api_dataset['name'])[0]\
            .replace('-v', '')
        # Map each in-flight task id to its status-polling URL.
        present_task_list: List[str] = Task.get_present_tasks()
        task_id_url_map: Dict[str, str] = {
            task_id: url_for('root.taskstatus', task_id=task_id)
            for task_id in present_task_list}
        present_tasks: str = json.dumps(task_id_url_map)
        try:
            datasets: List[Dict[str, str]] = list_cloud_datasets()
        except EndpointConnectionError:
            # Degrade gracefully: render the page with an empty dataset list.
            msg = 'Connection Error: Unable to connect to data storage ' \
                'server to retrieve list of datasets.'
            datasets: List[Dict[str, str]] = []
            flash(message=msg, category='danger')
        return render_template(
            'admin.html',
            datasets=datasets,  # List[Dict[str, str]]
            active_dataset_version=active_dataset_version,  # int
            active_tasks=present_tasks,  # str(json({id: url}))
            this_env=os.getenv('ENV_NAME', 'development'))
from typing import List
from typing import Tuple
def check_assignment(tokenlist : List[str], current_line : int) -> Tuple[bool, List[Token.Token]]:
"""Checks if the given construction is of the type 'assignment'. If it is, the first value will return True and the second value will return a list of tokens.
If it isn't of the type 'assignment', the first value will return False and the second value wil return None or an error token.
Args:
tokenlist (List[str]): A list of strings consisting of an instruction and their parameters
Returns(either):
bool, List[Token.Token]: Returns a bool(whether the token is of this type) and a list of tokens, which is the instruction and the parameters.
bool, None : Returns a bool(whether the token is of this type) and None
"""
variable_keywords = {
"int": int
}
assignment_operators = ['=']
variable_keyword,tokenlist = tokenlist.next()
if variable_keyword not in variable_keywords:
return False, [Token.Token('ERROR', "Token is not of type 'location'", current_line)]
name,tokenlist = tokenlist.next()
assignment_operator,tokenlist = tokenlist.next()
if assignment_operator not in assignment_operators:
return True, [Token.Token('ERROR', "Unknown assignment operator", current_line)]
value,tokenlist = tokenlist.next()
if type(eval(value)) != variable_keywords[variable_keyword]:
return True, [Token.Token('ERROR', 'Error: Value does not match type', current_line)]
tokens = [Token.Token('TYPE', variable_keyword, current_line), Token.Token('IDENTIFIER', name, current_line),
Token.Token('ASSIGNMENT', assignment_operator, current_line), Token.Token('VALUE', value, current_line)]
return True, tokens | 2faa56afe89c7d89ff4ec6f4443d8542073bcdaa | 24,903 |
def load_nifc_fires():
    """load nifc data for 2020/2021 fire season

    NB this is a bit of an undocumented NIFC feature -- the data supposedly only cover 2021
    but there are definitely 2020 fires included at the endpoint.
    This might not be true in the future.
    https://data-nifc.opendata.arcgis.com/datasets/nifc::wfigs-wildland-fire-perimeters-full-history/about

    Returns:
        geopandas.GeoDataFrame with columns name, acres, ignite_at, geometry.
    """
    nifc_uri = "https://storage.googleapis.com/carbonplan-data/raw/nifc/WFIGS_-_Wildland_Fire_Perimeters_Full_History.geojson"
    fires = geopandas.read_file(nifc_uri)
    # Rename NIFC column names to short local names.
    nifc_colnames = {"poly_IncidentName": "name", "poly_Acres_AutoCalc": "acres"}
    fires = fires.rename(columns=nifc_colnames)
    # Keep only fires discovered in 2020 or 2021 (string-prefix year match).
    fires = fires[fires["irwin_FireDiscoveryDateTime"].str[:4].isin(["2020", "2021"])]
    # Truncate the discovery timestamp to midnight of the discovery date.
    fires["ignite_at"] = (
        fires["irwin_FireDiscoveryDateTime"]
        .apply(pd.Timestamp)
        .apply(lambda x: pd.Timestamp(x.date()))
    )
    # NOTE(review): `crs` is a module-level constant — confirm it matches the
    # CRS expected downstream.
    return fires.to_crs(crs)[["name", "acres", "ignite_at", "geometry"]]
from datetime import datetime
from unittest.mock import call
from unittest.mock import patch
def test_api_query_paginated_trades_pagination(mock_bitstamp):
    """Test pagination logic for trades works as expected.

    First request: 2 results, 1 valid trade (id 2)
    Second request: 2 results, no trades
    Third request: 2 results, 1 valid trade (id 5) and 1 invalid trade (id 6)
    Trades with id 2 and 5 are expected to be returned.
    """
    # Not a trade
    user_transaction_1 = """
    {
        "id": 1,
        "type": -1,
        "datetime": "2020-12-02 09:00:00"
    }
    """
    # First trade, buy BTC with USD, within timestamp range
    user_transaction_2 = """
    {
        "id": 2,
        "type": 2,
        "datetime": "2020-12-02 09:30:00",
        "btc": "0.50000000",
        "usd": "-10000.00000000",
        "btc_usd": "0.00005000",
        "fee": "20.00000000",
        "order_id": 2
    }
    """
    # Not a trade
    user_transaction_3 = """
    {
        "id": 3,
        "type": -1,
        "datetime": "2020-12-02 18:00:00"
    }
    """
    # Not a trade
    user_transaction_4 = """
    {
        "id": 4,
        "type": -1,
        "datetime": "2020-12-03 9:00:00"
    }
    """
    # Second trade, sell EUR for USD, within timestamp range
    user_transaction_5 = """
    {
        "id": 5,
        "type": 2,
        "datetime": "2020-12-03 11:30:00",
        "eur": "-1.00000000",
        "usd": "1.22000000",
        "eur_usd": "0.81967213",
        "fee": "0.00610000",
        "order_id": 3
    }
    """
    # Third trade, buy ETH with USDC, out of timestamp range
    user_transaction_6 = """
    {
        "id": 6,
        "type": 2,
        "datetime": "2020-12-03 12:00:01",
        "eth": "1.00000000",
        "usdc": "-750.00000000",
        "eth_usdc": "0.00133333",
        "fee": "3.75000000",
        "order_id": 1
    }
    """
    # Force two results per page so three requests are needed.
    api_limit = 2
    now = datetime.now()
    now_ts = int(now.timestamp())
    options = {
        'since_id': USER_TRANSACTION_MIN_SINCE_ID,
        'limit': api_limit,
        'sort': USER_TRANSACTION_SORTING_MODE,
        'offset': 0,
    }
    # Expected pagination: since_id advances past the last seen trade id (2 ->
    # since_id 3); when a page yields no trades, offset advances instead.
    expected_calls = [
        call(
            endpoint='user_transactions',
            method='post',
            options={
                'since_id': 1,
                'limit': 2,
                'sort': 'asc',
                'offset': 0,
            },
        ),
        call(
            endpoint='user_transactions',
            method='post',
            options={
                'since_id': 3,
                'limit': 2,
                'sort': 'asc',
                'offset': 0,
            },
        ),
        call(
            endpoint='user_transactions',
            method='post',
            options={
                'since_id': 3,
                'limit': 2,
                'sort': 'asc',
                'offset': 2,
            },
        ),
    ]
    def get_paginated_response():
        # Yields one JSON page per API request, in order.
        results = [
            f'[{user_transaction_1},{user_transaction_2}]',
            f'[{user_transaction_3},{user_transaction_4}]',
            f'[{user_transaction_5},{user_transaction_6}]',
        ]
        for result_ in results:
            yield result_
    def mock_api_query_response(endpoint, method, options):  # pylint: disable=unused-argument
        # Each call consumes the next canned page.
        return MockResponse(HTTPStatus.OK, next(get_response))
    get_response = get_paginated_response()
    with patch(
        'rotkehlchen.exchanges.bitstamp.API_MAX_LIMIT',
        new_callable=MagicMock(return_value=api_limit),
    ):
        with patch.object(
            mock_bitstamp,
            '_api_query',
            side_effect=mock_api_query_response,
        ) as mock_api_query:
            result = mock_bitstamp._api_query_paginated(
                start_ts=Timestamp(0),
                end_ts=Timestamp(now_ts),
                options=options,
                case='trades',
            )
    assert mock_api_query.call_args_list == expected_calls
    # Trade 6 is excluded: its datetime is past end_ts handling in the
    # paginator's timestamp filter.
    expected_result = [
        Trade(
            timestamp=1606901400,
            location=Location.BITSTAMP,
            pair=TradePair('BTC_USD'),
            trade_type=TradeType.BUY,
            amount=FVal("0.50000000"),
            rate=FVal("0.00005000"),
            fee=FVal("20.00000000"),
            fee_currency=Asset('USD'),
            link='2',
            notes='',
        ),
        Trade(
            timestamp=1606995000,
            location=Location.BITSTAMP,
            pair=TradePair('EUR_USD'),
            trade_type=TradeType.SELL,
            amount=FVal("1.22000000"),
            rate=FVal("0.81967213"),
            fee=FVal("0.00610000"),
            fee_currency=Asset('EUR'),
            link='5',
            notes='',
        ),
    ]
    assert result == expected_result
def eigenvalue_nonunitary_entanglement_infidelity(a, b, mx_basis):
    """
    Returns (d^2 - 1)/d^2 * (1 - sqrt(U)), where U is the eigenvalue-unitarity of a*b^{-1}

    Parameters
    ----------
    a : numpy.ndarray
        The first process (transfer) matrix.
    b : numpy.ndarray
        The second process (transfer) matrix.
    mx_basis : Basis or {'pp', 'gm', 'std'}
        the basis that `a` and `b` are in.  (Not used by this metric; kept
        for a uniform metric-function signature.)

    Returns
    -------
    float
    """
    dim_sq = a.shape[0]
    unitarity = eigenvalue_unitarity(a, b)
    prefactor = (dim_sq - 1.0) / dim_sq
    return prefactor * (1.0 - _np.sqrt(unitarity))
import sys
import os
def upload_to_s3(local_filepath, file_name, s3_path, bucket_name=BUCKET_NAME):
    """
    Returns
    ----------
    Uploads local file to appropriate s3 key, and prints status

    Parameters
    ----------
    local_filepath : str
        ex. 'my/local/path'
    file_name : str
        ex. 'cleaned_data.csv' or 'model.pkl'
    bucket_name : str
        ex. 'dsapp-edu-data'
    s3_path : str
        ex. 'NC-Cabarrus/cleaned_data'
    """
    def percent_cb(complete, total):
        """
        Helper function that prints progress
        """
        # One dot per callback invocation; flush so progress shows immediately.
        sys.stdout.write('.')
        sys.stdout.flush()
    # NOTE(review): `boto` and the BUCKET_NAME default come from module level;
    # this is the legacy boto (v2) API, not boto3 — confirm before porting.
    conn = boto.connect_s3()
    bucket = conn.get_bucket(bucket_name)
    full_key_name = os.path.join(s3_path, file_name)
    k = bucket.new_key(full_key_name)
    full_filepath = os.path.join(local_filepath, file_name)
    # num_cb=10: invoke the progress callback roughly 10 times per upload.
    k.set_contents_from_filename(full_filepath, cb=percent_cb, num_cb=10)
    return None
def morph(word, rootlist, Indo = False, n = 5):
    """
    For a given word ("word"), return the n most likely morphological
    analyses based on the root list ("rootlist").

    Output format: root, word, proclitic/prefix, suffix/enclitic, circumfix,
    reduplication ("0" marks an absent slot).

    @param Indo: If True, the prefix N- and the suffix -in (Indonesian
        affixes) are also included in the analysis.
    @param n: Number of candidates returned.
    """
    cand = set()
    check = set()
    # Candidate analyses from stripping circumfixes (Apitan), prefixes
    # (Awalan) and suffixes (Akhiran).
    cand1 = NyahApitan(word, rootlist)
    cand2 = NyahAwalan(word, rootlist)
    cand3 = NyahAkhiran(word, rootlist)
    if Indo:
        cand1 = NyahApitan(word, rootlist, Indo = True)
        cand2 = NyahAwalan(word, rootlist, Indo = True)
        cand3 = NyahAkhiran(word, rootlist, Indo = True)
    # Without affixes
    for (c1, c2, c3) in [(c1, c2, c3) for c1 in cand1 for c2 in cand2 for c3 in cand3]:
        if c1[0] == c2[0] == c3[0] and (c1[4], c2[4], c3[4]) == ("0", "0", "0"):
            cand.add((c1[0], c1[1], "0", "0", "0", c1[5]))
        # With affixes
        else:
            for c1 in cand1:
                # No prefix, no suffix
                if not c1[2] and not c1[3]:
                    cand.add((c1[0], c1[1], "0", "0", c1[4], c1[5]))
                # No prefix
                elif not c1[2]:
                    temp = c1[1] + c1[3]  # form without the circumfix letters
                    cand3c = NyahAkhiran(temp, rootlist)
                    if Indo:
                        cand3c = NyahAkhiran(temp, rootlist, Indo = True)
                    for c3 in cand3c:
                        if c1[1] == c3[0][0] and c1[3] == c3[0][2] and not c3[3]:
                            cand.add((c1[0], c1[1], "0", c3[4], c1[4], c1[5]))
                # No suffix
                elif not c1[3]:
                    temp = c1[2] + c1[1]  # form without the circumfix letters
                    cand2c = NyahAwalan(temp, rootlist)
                    if Indo:
                        cand2c = NyahAwalan(temp, rootlist, Indo = True)
                    for c2 in cand2c:
                        if c1[1] == c2[0][0] and c1[2] == c2[0][1] and not c2[2]:
                            cand.add((c1[0], c1[1], c2[4], "0", c1[4], c1[5]))
                # With prefix and suffix
                else:
                    temp = c1[2] + c1[1] + c1[3]  # form without the circumfix letters
                    cand2c = NyahAwalan(temp, rootlist)
                    cand3c = NyahAkhiran(temp, rootlist)
                    if Indo:
                        cand2c = NyahAwalan(temp, rootlist, Indo = True)
                        cand3c = NyahAkhiran(temp, rootlist, Indo = True)
                    for c2 in cand2c:
                        if c1[1] == c2[0][0] and c1[2] == c2[0][1] and not c2[2]:# and c1[3] == c2[0][2]:
                            for c3 in cand3c:
                                if c1[1] == c3[0][0] and c1[3] == c3[0][2] and not c3[3]:
                                    cand.add((c1[0], c1[1], c2[4], c3[4], c1[4], c1[5]))
    # Prefer roots that already exist in the root list
    cand4 = set([c for c in cand if c[1] in rootlist])
    if cand4:
        cand = cand4
    # If no analysis was found, retry with the lowercased word
    if not cand:
        if not word.islower():
            kecil = morph(word.lower(), rootlist)
            for k in kecil:
                check.add((k[0], word, k[2], k[3], k[4], k[5]))
        else:
            # NOTE(review): c1 here is the last value of a loop variable and
            # may be unbound when no candidates were produced — verify.
            check.add((word, word, "0", "0", "0", c1[5]))
    # Sort by syllable count (2 > 3 > 1 > 4 ...) and root length
    cand = sorted(cand, key = lambda x: SylCount(x[1], root = True, mono = True) + len(x[1])/100)
    # Add the n most likely results to the check set
    for c in cand[:n]:
        check.add((c[1], word, c[2], c[3], c[4], c[5]))
    return check
def get_logger(name):
    """
    Returns a logger from the registry

    Parameters
    ----------
    name : str
        the name indicating the logger to return

    Returns
    -------
    :class:`delira.logging.base_logger.Logger`
        the specified logger object

    Raises
    ------
    KeyError
        if no logger was registered under ``name``
    """
    # _AVAILABLE_LOGGERS is the module-level logger registry.
    return _AVAILABLE_LOGGERS[name]
from typing import Union
from pathlib import Path
import yaml
def load_yaml(path: Union[str, Path], pure: bool = False) -> dict:
    """config.yaml file loader.

    This function converts the config.yaml file to `dict` object.

    Args:
        path: .yaml configuration filepath
        pure: If True, just load the .yaml without converting to EasyDict
            and exclude extra info.

    Returns:
        `dict` object containing configuration parameters.

    Example:
        .. code-block:: python

            from dlp import CNF_PATH
            config = load_yaml(CNF_PATH)
            print(config["project_name"])
    """
    path = str(Path(path).absolute().resolve())
    # * Load config file
    with open(path) as file:
        # Fix: yaml.load() without an explicit Loader is unsafe on untrusted
        # input and is rejected by PyYAML >= 6.0; safe_load is the correct
        # choice for plain configuration data.
        config = yaml.safe_load(file)
    if not pure:  # Add extra features
        # Convert dict to attribute-accessible EasyDict
        config = edict(config)
    return config
def InterpolatedCurveOnSurfaceUV1(thisSurface, points, tolerance, closed, closedSurfaceHandling, multiple=False):
    """
    Returns a curve that interpolates points on a surface. The interpolant lies on the surface.

    Args:
        points (System.Collections.Generic.IEnumerable<Point2d>): List of at least two UV parameter locations on the surface.
        tolerance (double): Tolerance used for the fit of the push-up curve. Generally, the resulting interpolating curve will be within tolerance of the surface.
        closed (bool): If false, the interpolating curve is not closed. If true, the interpolating curve is closed, and the last point and first point should generally not be equal.
        closedSurfaceHandling (int): If 0, all points must be in the rectangular domain of the surface. If the surface is closed in some direction,
            then this routine will interpret each point and place it at an appropriate location in the covering space.
            This is the simplest option and should give good results.
            If 1, then more options for more control of handling curves going across seams are available.
            If the surface is closed in some direction, then the points are taken as points in the covering space.
            Example, if srf.IsClosed(0)=True and srf.IsClosed(1)=False and srf.Domain(0)=srf.Domain(1)=Interval(0,1)
            then if closedSurfaceHandling=1 a point(u, v) in points can have any value for the u coordinate, but must have 0<=v<=1.
            In particular, if points = { (0.0,0.5), (2.0,0.5) } then the interpolating curve will wrap around the surface two times in the closed direction before ending at start of the curve.
            If closed=True the last point should equal the first point plus an integer multiple of the period on a closed direction.
        multiple (bool): If True, the call is batched: every positional argument is a sequence and the arguments are zipped element-wise.

    Returns:
        NurbsCurve: A new NURBS curve if successful, or None on error.
    """
    # This is a remote call to the Rhino Compute service; Util handles
    # serialization and the HTTP round trip.
    url = "rhino/geometry/surface/interpolatedcurveonsurfaceuv-surface_point2darray_double_bool_int"
    if multiple: url += "?multiple=true"
    args = [thisSurface, points, tolerance, closed, closedSurfaceHandling]
    if multiple: args = list(zip(thisSurface, points, tolerance, closed, closedSurfaceHandling))
    response = Util.ComputeFetch(url, args)
    response = Util.DecodeToCommonObject(response)
    return response
def show(request, url, alias_model, template):
    """List all vouched users with this group."""
    group_alias = get_object_or_404(alias_model, url=url)
    if group_alias.alias.url != url:
        # Requested via an old alias URL: redirect to the canonical one.
        return redirect('groups:show_group', url=group_alias.alias.url)
    group = group_alias.alias
    in_group = group.members.filter(user=request.user).exists()
    profiles = group.members.vouched()
    page = request.GET.get('page', 1)
    paginator = Paginator(profiles, settings.ITEMS_PER_PAGE)
    try:
        people = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page parameter: fall back to the first page.
        people = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        people = paginator.page(paginator.num_pages)
    show_pagination = paginator.count > settings.ITEMS_PER_PAGE
    profile = request.user.userprofile
    # A group's steward cannot leave their own group.
    hide_leave_group_button = (hasattr(group, 'steward') and
                               profile == group.steward)
    data = dict(people=people,
                group=group,
                in_group=in_group,
                show_pagination=show_pagination,
                hide_leave_group_button=hide_leave_group_button)
    if isinstance(group, Group) and group.steward:
        """ Get the most globally popular skills that appear in the group
            Sort them with most members first
        """
        skills = (Skill.objects
                  .filter(members__in=profiles)
                  .annotate(no_users=Count('members'))
                  .order_by('-no_users'))
        data.update(skills=skills)
        data.update(irc_channels=group.irc_channel.split(' '))
        data.update(members=profiles.count())
    return render(request, template, data)
import re
def index():
    """ Home page. Displays subscription info and smart-sorted episodes. """
    client = JsonClient(session["username"], session["password"])
    subs = get_subscriptions(client, session["username"])
    recent_episodes = smart_sort(client, session["username"])
    for ep in recent_episodes:
        # Strip URLs from the description and keep only the date part of the
        # ISO release timestamp.
        ep['description'] = re.sub(r'http\S+', '', ep['description'])
        ep['released'] = ep['released'].split('T', 1)[0]
    if request.method == 'POST':
        if request.form['submit'] == 'fetch':
            if not request.form['queryvalue']:
                # Empty search box: just re-render the home page.
                return render_template('index.html', subs=subs)
            else:
                return redirect(url_for('searchresults', query=request.form['queryvalue']))
        elif request.form['submit'] == 'advanced':
            return redirect(url_for('advancedsearch'))
        elif request.form['submit'] == 'sugg':
            return redirect(url_for('suggestions'))
    return render_template('index.html', subs=subs, recent_episodes=recent_episodes)
from typing import Optional
def range(
    lower: int, upper: int, step: Optional[int] = None, name: Optional[str] = None
) -> Series:
    """
    Create a Series that ranges from lower bound to upper bound.

    Parameters
    ----------
    lower
        Lower bound value.
    upper
        Upper bound value.
    step
        Optional step size. If none given, the step size will be 1.
    name
        Name of the Series
    """
    if step is None:
        # Apply the documented default explicitly rather than forwarding None
        # to np.arange, whose step argument expects a number.
        step = 1
    if name is None:
        name = ""
    return Series(name, np.arange(lower, upper, step), nullable=False)
def local_gpu_masked_careduce(node):
    """
    Detects eligible CAReduce{add}(GpuElemwise{Switch}) instances and replaces
    them with a masked CAReduce.

    Returns a one-element list with the replacement variable on success, or
    False when the rewrite does not apply to `node`.
    """
    # TODO: Probably don't need this hack checking for both GpuCAReduce and its
    # non-gpu counterpart anymore. Just the GPU should be fine.
    if not isinstance(node.op, GpuCAReduce):
        # Send this off to local_gpu_careduce first.
        # HACK: This happens outside of the standard optimization sequence.
        ret = local_gpu_careduce.transform(node)
        if not ret:
            return False
        # Fix: converted from a Python 2 print statement so this module is
        # valid Python 3.
        print("local_gpu_careduce returned with", ret)
        if isinstance(ret[0].owner.op, HostFromGpu):
            ret = ret[0].owner.inputs[0].owner
        else:
            ret = ret[0].owner
        node = ret
    if node.op.scalar_op.__class__ != theano.scalar.Add:
        return False
    above = node.inputs[0].owner
    if above is None or not isinstance(above.op, GpuElemwise):
        return False
    # The graph looks okay. Check the dims.
    if node.op.reduce_mask != (1, 0, 0):
        return False
    if node.op.pre_scalar_op:
        return False
    # Check switch op.
    # TODO: Check that it's actually a switch .. !
    if len(above.inputs) != 3:
        return False
    mask, ift, iff = above.inputs
    if not mask.broadcastable:
        return False
    # Mask must broadcast over every axis except the first (the reduced one).
    if not (not mask.broadcastable[0] and all(mask.broadcastable[1:])):
        return False
    if any(ift.broadcastable) or any(iff.broadcastable):
        return False
    new_op = GpuMaskedCAReduce()
    return [new_op(mask, ift, iff)]
import bisect
def ticks_lt(exact_price):
    """
    Returns a generator for all the ticks below the given price.

    >>> list(ticks_lt(Decimal('0.35')))
    [Decimal('0.34'), Decimal('0.33'), Decimal('0.20'), Decimal('0.10'), Decimal('0.01')]
    >>> list(ticks_lt(Decimal('0.20')))
    [Decimal('0.10'), Decimal('0.01')]
    >>> list(ticks_lt(Decimal('0.0001')))
    []
    """
    # Index of the highest tick strictly below exact_price (-1 when none).
    highest_below = bisect.bisect_left(_ALL_TICKS, exact_price) - 1
    # Walk downwards from that tick to index 0.
    return (_ALL_TICKS[idx] for idx in range(highest_below, -1, -1))
def _decode_to_string(to_decode):
"""
This function is needed for Python 3,
because a subprocess can return bytes instead of a string.
"""
try:
return to_decode.decode("utf-8")
except AttributeError: # bytesToDecode was of type string before
return to_decode | 3a9f4ef2719f74e259e119dc1e43a9cbdd655dd5 | 24,917 |
def nn(x_dict):
    """ Implementation of a shallow neural network.

    Args:
        x_dict: feature dict; the flattened input images are under "images".

    Returns:
        The 10-unit output logits tensor (TF1 `tf.layers` API).
    """
    # Extract Input.
    x = x_dict["images"]
    # First Hidden Layer.
    layer_1 = tf.layers.dense(x, 256)
    # Second Hidden Layer.
    layer_2 = tf.layers.dense(layer_1, 256)
    # Output Layer.
    output_layer = tf.layers.dense(layer_2, 10)
    return output_layer
def find_negamax_move_alphabeta(game_state, valid_moves, depth, alpha, beta, turn_multiplier):
    """
    NegaMax algorithm with alpha beta pruning.

    Alpha beta pruning eliminates the need to check all moves within the game_state tree when
    a better branch has been found or a branch has too low of a score.
    alpha: upper bound (max possible); beta: lower bound (min possible)
    If max score is greater than alpha, that becomes the new alpha value.
    If alpha becomes >= beta, break out of branch.
    White is always trying to maximise score and black is always
    trying to minimise score. Once the possibility of a higher max or lower min
    has been eliminated, there is no need to check further branches.

    Side effects: sets the module-level `next_move` when called at the root
    depth (`depth == set_depth`).  `set_depth`, `checkmate_points` and
    `score_board` are module-level — confirm their definitions.
    """
    global next_move
    if depth == 0:
        # Leaf: evaluate from the current player's perspective.
        return turn_multiplier * score_board(game_state)
    max_score = -checkmate_points
    for move in valid_moves:
        game_state.make_move(move)
        next_moves = game_state.get_valid_moves()
        # Negate: the opponent's best score is our worst, with bounds swapped.
        score = -find_negamax_move_alphabeta(game_state, next_moves, depth - 1, -beta, -alpha, -turn_multiplier)
        if score > max_score:
            max_score = score
            if depth == set_depth:
                # Record the best move only at the root of the search.
                next_move = move
        game_state.undo_move()
        # Pruning
        if max_score > alpha:
            alpha = max_score
        if alpha >= beta:
            break
    return max_score
def sum_kernel(X, Y, kernels = None):
    """
    Meta Kernel for summing multiple kernels.

    Args:
        X, Y: inputs forwarded unchanged to every underlying kernel.
        kernels: iterable of dicts with keys "class" (name of a kernel
            function resolvable in this module's globals) and "parameters"
            (kwargs for that function).  None is treated as no kernels.

    Returns:
        The sum of all kernel evaluations (0 when no kernels are given).
    """
    if kernels is None:
        # Guard: the default of None would otherwise crash the loop below.
        kernels = []
    _sum = 0
    for kernel in kernels:
        print("Doing", kernel["class"], "with parameters:", kernel["parameters"])
        # Resolve the kernel function by name from module globals.
        _sum = _sum + globals()[kernel["class"]](X, Y, **kernel["parameters"])
    return _sum
def pretvori_v_sekunde(niz):
"""
Pretvori niz, ki predstavlja dolžino skladbe v formatu hh:mm:ss v število sekund.
"""
h, m, s = map(int, niz.split(":"))
return s + m*60 + h*3600 | db0cc5872109b15e635b2b1e8731a5343d63f518 | 24,921 |
import logging
def _get_profiling_data(filename):
"""Read a given file and parse its content for profiling data."""
data, timestamps = [], []
try:
with open(filename, "r") as f:
file_data = f.readlines()
except Exception:
logging.error("Could not read profiling data.", exc_info=True)
raise SystemExit(1)
for line in file_data:
if line == "\n":
continue
line = line.strip()
line_data = line.split(" ")
if len(line_data) != 3:
continue
_, mem_usage, timestamp = line.split(" ")
data.append(float(mem_usage))
timestamps.append(float(timestamp))
if not data:
logging.error("No samples to parse in {}.".format(filename))
raise SystemExit(1)
return {"data": data, "timestamp": timestamps} | 85f434c9aa22d60bae06205162623cde83e5a716 | 24,922 |
def parse_dataset_name(dataset_name: str) -> (str, str):
    """
    Split the string of the dataset name into two parts: dataset source name
    (e.g., cnc_in_domain) and dataset part (e.g., train).

    :param dataset_name: full dataset name, source and part joined by '_'.
    :return: dataset source name (e.g., cnc_in_domain) and dataset part
        (e.g., train).
    """
    # Split on the last underscore: everything before is the source name,
    # the trailing token names the DatasetPart member.
    source, part_token = dataset_name.rsplit('_', 1)
    return source, DatasetPart[part_token.upper()]
import os
import glob
def get_analytics_zoo_classpath():
    """
    Get and return the jar path for analytics-zoo if exists.
    """
    # An explicitly configured classpath always wins.
    env_classpath = os.getenv("BIGDL_CLASSPATH")
    if env_classpath:
        return os.environ["BIGDL_CLASSPATH"]
    # Otherwise look for the single bundled jar under share/lib.
    package_root = os.path.abspath(__file__ + "/../../")
    jars = glob.glob(os.path.join(package_root, "share/lib/*.jar"))
    if not jars:
        return ""
    assert len(jars) == 1, "Expecting one jar: %s" % len(jars)
    return jars[0]
def plot_pq(df_pq, df_pq_std=None, columns=('mae', 'r2s'),
            title='Performance-Quantile'):
    """Plot the quantile performance plot from the prepared metrics table.

    Args:
        df_pq (pd.DataFrame): The QP table information with mean values.
        df_pq_std (pd.DataFrame): The QP table information with std values.
        columns (tuple): Which column of the qp table to be plotted, limited
            to 2 items.
        title (str): An optional name of the figure.

    Returns:
        plt.Figure: A figure of the resulting QP plot.

    Raises:
        ValueError: if more than two columns are requested.
    """
    fig, ax1 = plt.subplots(figsize=(5, 3))
    if len(columns) == 1:
        ax1.plot(df_pq['quantile'], df_pq[columns[0]], 'r', label=columns[0])
        ax1.set_ylabel(columns[0].upper())
        ax1.legend(loc=1)
        if df_pq_std is not None:
            # Shade +/- one std around the mean curve.
            ax1.fill_between(df_pq['quantile'],
                             df_pq[columns[0]] - df_pq_std[columns[0]],
                             df_pq[columns[0]] + df_pq_std[columns[0]],
                             color='r',
                             alpha=0.5
                             )
    elif len(columns) == 2:
        # Two metrics share the x axis; the second gets its own y axis.
        _ = ax1.plot(df_pq['quantile'], df_pq[columns[0]], 'r',
                     label=columns[0])
        ax1.set_ylabel(columns[0].upper())
        ax2 = ax1.twinx()
        _ = ax2.plot(df_pq['quantile'], df_pq[columns[1]], 'g',
                     label=columns[1])
        ax2.set_ylabel(columns[1].upper())
        ax1.legend(loc=1)
        ax2.legend(loc=4)
        if df_pq_std is not None:
            ax1.fill_between(df_pq['quantile'],
                             df_pq[columns[0]] - df_pq_std[columns[0]],
                             df_pq[columns[0]] + df_pq_std[columns[0]],
                             color='r',
                             alpha=0.5
                             )
            ax2.fill_between(df_pq['quantile'],
                             df_pq[columns[1]] - df_pq_std[columns[1]],
                             df_pq[columns[1]] + df_pq_std[columns[1]],
                             color='g',
                             alpha=0.5
                             )
    else:
        raise ValueError('Too many columns. Currently only two are allowed.')
    ax1.set_xlabel('Quantile')
    ax1.set_title(title)
    plt.show()
    return fig
def protoToOpenAPISchemaRecursive(lines, schemas, schemaPrefix, basename):
    """
    Recursively create a schema from lines read from a proto file.
    This method is recursive because proto messages can contain internal messages and enums.
    If this is the case the method will call itself recursively.
    :param lines: list of lines read from a proto file.
    :param schemas: dictionary of schemas to which the new definitions will be added.
    :param schemaPrefix: prefix applied to top-level (non-nested) schema names.
    :param basename: basename respectively prefix which is added before the name of a schema.
    This is used to prefix internal messages/enums with the name of the message containing it.
    :return: the filled schemas dictionary and the current processing index. The return value should not be used
    because it deals with parameters only required for the recursion.
    """
    # create a new schema
    schema = {}
    # save the current name for the schema
    name = ""
    # index for the current line parsed
    i = 0;
    # NOTE(review): `isMessage` is only assigned when a message/enum header is
    # seen; a field line before any header would raise UnboundLocalError —
    # verify inputs are well-formed proto files.
    # iterate till end of file
    while (i < len(lines)):
        # get current line and remove whitespaces at front and end
        line = lines[i].strip()
        # replace multiple whitepaces with a single one, see https://stackoverflow.com/questions/2077897/substitute-multiple-whitespace-with-single-whitespace-in-python
        line = ' '.join(line.split())
        # increase index
        i += 1
        # if the line is irrelevant for parsing, continue the loop
        if skipLine(line):
            continue
        # closing curly brackets indicate that a message/enum definition has ended
        if line.startswith('}'):
            # return schemas and current index so that loop which recursively called this can resume at the correct location
            return schemas, i
        # test if line indicates an internal message/enum
        if name != "" and (line.startswith('message') or line.startswith('enum')):
            # name is already specified but there is a message/enum, so it is internal
            # recursively call this method but splice the lines to begin at the definition of the internal type
            _, processedLines = protoToOpenAPISchemaRecursive(lines[(i-1):len(lines)-1], schemas, schemaPrefix, basename=(name + '.'))
            # move the index of this iteration after the definition of the internal type
            i += processedLines
            continue
        # type is a message
        if line.startswith('message'):
            # set message flag
            isMessage = True
            # extract name
            name = basename + line.split(' ')[1]
            if basename == '':
                name = schemaPrefix + name
            # create schema and add to schemas
            schemas[name] = schema
            schema['type'] = 'object'
            schema['properties'] = {}
            continue
        # type is an enum
        if line.startswith('enum'):
            # set message flag to false
            isMessage = False
            # extract name
            name = basename + line.split(' ')[1]
            if basename == '':
                name = schemaPrefix + name
            # create schema for enum and add to schemas
            schemas[name] = schema
            schema['type'] = 'string'
            schema['enum'] = []
            continue
        # if item is an enum, parse lines as its values
        if not isMessage:
            enumValue = line.split('=')[0].strip()
            # ignore values called unknown
            if enumValue == "UNKNOWN":
                continue
            else:
                schema['enum'].append(enumValue)
            continue
        # extract information for field
        split = line.split(' ')
        option = split[0]  # option is repeated, optional, ...
        fieldType = split[1]  # fieldType is string, uint64, reference to another type, ...
        fieldName = split[2]  # the name of the field
        # create a property for the field
        prop = {}
        # if the field option is repeated add the property as an array, else normally
        if option == "repeated":
            properties = schema['properties']
            properties[fieldName] = {}
            properties[fieldName]['type'] = 'array'
            properties[fieldName]['items'] = prop
        else:
            schema['properties'][fieldName] = prop
        # add property fields based on field type and print an error if it could not be done
        if not addTypeToProp(fieldType, prop, schemaPrefix, schemas):
            print('Could not parser fieldType[' + fieldType + '] into an openAPI property')
    return schemas, i
import typing
def to_binary(s: typing.Union[str, bytes], encoding='utf8') -> bytes:
    """Coerce *s* to ``bytes``.

    Input that is already ``bytes`` is returned unchanged; ``str`` input
    is encoded using *encoding* (UTF-8 by default).

    :param s: object to be converted to bytes.
    :param encoding: codec used when *s* is a ``str``.
    """
    if isinstance(s, bytes):
        return s
    return s.encode(encoding)
import gzip
def load_numpy(data_path, save_disk_flag=True):
    """Load a NumPy array from disk.

    Args:
        data_path: Path of the ``.npy`` file. When *save_disk_flag* is
            true, the gzip-compressed variant ``<data_path>.gz`` is read
            instead.
        save_disk_flag: If True, read the gzip-compressed file (smaller
            on disk, but slower to load); otherwise read the plain file.

    Returns:
        The loaded array.
    """
    if save_disk_flag:
        # Save space but slow.
        # BUGFIX: close the GzipFile deterministically instead of leaking
        # the file handle.
        with gzip.GzipFile(f'{data_path}.gz', "r") as f_data:
            data = np.load(f_data)
    else:
        data = np.load(data_path)
    return data
def mock_weather_for_coordinates(*args, **kwargs):  # noqa: F841
    """Return canned mock data for the requested weather product type."""
    requested_product = args[2]
    # Check the product types in the same order as before; fall through to
    # an implicit None when nothing matches.
    for mode_name, canned_response in (
        (MODE_ASTRONOMY, astronomy_response),
        (MODE_HOURLY, hourly_response),
        (MODE_DAILY, daily_response),
        (MODE_DAILY_SIMPLE, daily_simple_forecasts_response),
        (MODE_OBSERVATION, observation_response),
    ):
        if requested_product == aiohere.WeatherProductType[mode_name]:
            return canned_response
def get_user(request, project_key):
    """Return the ID of the current user for the given project.

    Reads the ``projects`` cookie (a JSON object mapping project keys to
    user ids) and returns the id for *project_key* as an int, or None when
    the cookie is absent, malformed, or has no entry for the project.
    """
    projects = request.cookies.get('projects')
    if projects is None:
        return None
    try:
        projects = json.loads(projects)
    except (ValueError, KeyError, TypeError):
        # BUGFIX: removed the Python 2 `print` statement (a syntax error
        # under Python 3); logging already records the failure.
        logging.exception("Cookie json could not be decoded")
        return None
    user_id = projects.get(project_key)
    if user_id is None:
        return None
    return int(user_id)
def extract_month(cube, month):
    """
    Slice cube to get only the data belonging to a specific month.

    Parameters
    ----------
    cube: iris.cube.Cube
        Original data
    month: int
        Month to extract as a number from 1 to 12

    Returns
    -------
    iris.cube.Cube
        data cube for specified month.
    """
    # Membership in range(1, 13) deliberately rejects non-integer values too.
    if month not in range(1, 13):
        raise ValueError('Please provide a month number between 1 and 12.')
    # Lazily add the 'month_number' categorisation the first time.
    if not cube.coords('month_number'):
        iris.coord_categorisation.add_month_number(cube, 'time',
                                                   name='month_number')
    month_constraint = iris.Constraint(month_number=month)
    return cube.extract(month_constraint)
def is_insert_grad_of_statement(node):
    """Check whether a context manager calls `insert_grad_of`.

    Args:
      node: The context manager node.

    Returns:
      Whether or not this node contains `insert_grad_of` calls.

    Raises:
      ValueError: If the `insert_grad_of` calls are mixed with other calls.
    """
    tangent_calls = [anno.getanno(item.context_expr, 'func', None)
                     is utils.insert_grad_of for item in node.items]
    if all(tangent_calls):
        return True
    if any(tangent_calls):
        # BUGFIX: raise with a message instead of a bare ValueError so the
        # failure is diagnosable, matching the documented Raises clause.
        raise ValueError(
            'Cannot mix `insert_grad_of` calls with other context managers.')
    return False
import typing
import random
import itertools
def get_word(count: typing.Union[int, typing.Tuple[int]] = 1,
             sep: str = ' ',
             func: typing.Optional[typing.Union[str, typing.Callable[[str], str]]] = None,
             args: typing.Tuple[str] = (),
             kwargs: typing.Optional[typing.Dict[str, str]] = None) -> str:
    """Return random words.
    .. code:: python
        >>> get_word(count=3)
        'anim voluptate non'
        >>> get_word(count=3, func='capitalize')
        'Non Labore Ut'
        >>> get_word(count=3, func=lambda s: s.upper())
        'NISI TEMPOR CILLUM'
    Args:
        count (:obj:`Union[int, Tuple[int]]`): Number of random words. To generate random
            number of words, supply a 2-element tuple of :obj:`int`, the function will use
            :func:`random.randint` to choose a random integer as the number of random words.
        sep (str): Seperator between each word.
        func (:obj:`Optional[Union[str, Callable[[str], str]]]`): Filter function. It can be
            a function name of :obj:`str`, or a customised function that takes the original
            :obj:`str` and returns the modified :obj:`str`.
        args (:obj:`Tuple[str]`): Additional positional arguments for ``func``.
        kwargs (:obj:`Dict[str, Any]`): Additional keyword arguments for ``func``.
            ``None`` (the default) is treated as an empty dict.
    Returns:
        :obj:`str`: Random words.
    """
    # BUGFIX: the previous `kwargs={}` mutable default was shared across
    # calls; use None as the sentinel and create a fresh dict per call.
    if kwargs is None:
        kwargs = {}
    if isinstance(count, tuple):
        count = random.randint(*count)
    return sep.join(itertools.islice(word(count, func, args, kwargs), count))
def button_debug():
    """Render the debugger page for testing websocket signals sent from
    RPi buttons (currently simulated in the browser).
    """
    page = render_template('button_debug.html')
    return page
import codecs
def get_int(b):
    """Interpret the byte string *b* as a big-endian unsigned integer.

    Uses ``int.from_bytes`` instead of the previous hex-encode round-trip;
    as a robustness improvement, empty input now yields 0 instead of
    raising ValueError.
    """
    return int.from_bytes(b, "big")
def chrom_exp_cusp(toas, freqs, log10_Amp=-7, sign_param=-1.0,
                   t0=54000, log10_tau=1.7, idx=2):
    """
    Chromatic exponential-cusp delay term in TOAs.

    The cusp is symmetric about t0: the amplitude decays as
    exp(-|t - t0| / tau) on both sides of the minimum.

    :param t0: time of exponential minimum [MJD]
    :param tau: 1/e time of exponential [s]
    :param log10_Amp: amplitude of cusp
    :param sign_param: sign of waveform
    :param idx: index of chromatic dependence
    :return wf: delay time-series [s]
    """
    t0 *= const.day
    tau = 10**log10_tau * const.day
    # BUGFIX: the previous two-sided Heaviside expression evaluated exp()
    # of a large *positive* argument on the masked-out side of the cusp,
    # which can overflow to inf and turn the 0-weighted term into
    # 0 * inf = NaN.  The single symmetric form exp(-|dt|/tau) is
    # mathematically identical and cannot overflow.
    wf = 10**log10_Amp * np.exp(-np.abs(toas - t0) / tau)
    return np.sign(sign_param) * wf * (1400 / freqs) ** idx
def powerspectrum_t(flist, mMax=30, rbins=50, paramname=None, parallel=True,
                    spacing='linear'):
    """
    Calculates the power spectrum along the angular direction for a whole
    simulation (see powerspectrum).
    Loops through snapshots in a simulation, in parallel. Uses the same radial
    and angular bins for every timestep
    Parameters
    ----------
    flist : list
        A list of filenames OR of SimSnaps for a simulation
    mMax : int
        Maximum fourier mode to calculate
    rbins : int or array
        Number of radial bins or the binedges to use
    paramname : str
        Filename of .param file.  Used for loading if flist is a list of
        filenames
    parallel : bool
        Flag to perform this in parallel or not
    spacing : str
        If rbins is an int, this defines whether to use 'log' or 'linear'
        binspacing
    Returns
    -------
    m : array
        Number of the fourier modes
    power : SimArray
        Power spectrum vs time along the angular direction
    """
    # Set-up radial bins (use the same ones at all time steps)
    f = flist[0]
    if isinstance(f, str):
        f = pynbody.load(f, paramname=paramname)
    r = f.g['rxy']
    rbins = setupbins(r, rbins, spacing)
    nFiles = len(flist)
    # Calculate power
    if parallel:
        # Replicate the shared arguments once per file for pool.map, without
        # rebinding the scalar names (see BUGFIX below).
        arg_list = zip(flist, [mMax] * nFiles, [rbins] * nFiles,
                       [paramname] * nFiles)
        pool = Pool(cpu_count())
        try:
            results = pool.map(_powerspectrum, arg_list, chunksize=1)
        finally:
            pool.close()
            pool.join()
    else:
        # BUGFIX: previously mMax/rbins/paramname had already been rebound
        # to per-file *lists* before this branch, so powerspectrum() was
        # called with lists instead of the intended scalar arguments.
        results = []
        for f in flist:
            results.append(powerspectrum(f, mMax, rbins, paramname))
    # Format returns
    m = results[0][0]
    power_units = results[0][1].units
    nr = len(results[0][1])
    power = SimArray(np.zeros([nFiles, nr]), power_units)
    for i, result in enumerate(results):
        power[i] = result[1]
    return m, power
def score(scores, main_channel, whiten_filter):
    """
    Whiten scores using a per-channel whitening filter.

    Parameters
    ----------
    scores: np.array (n_data, n_features, n_neigh)
        n_data spikes, n_features features, n_neigh neighboring channels
    main_channel: np.array (n_data,)
        The main channel of each spike
    whiten_filter: np.array (n_channels, n_neigh, n_neigh)
        Whitening filter applied to the scores of each main channel

    Returns
    -------
    whiten_scores: np.array (n_data, n_features, n_neigh)
        Scores after applying the whitening filter
    """
    n_data, n_features, n_neigh = scores.shape
    n_channels = whiten_filter.shape[0]
    whitened = np.zeros(scores.shape)
    for chan in range(n_channels):
        # Select spikes whose main channel is `chan` and whiten them
        # with that channel's filter.
        on_chan = main_channel == chan
        flat = np.reshape(scores[on_chan], [-1, n_neigh])
        whitened[on_chan] = np.reshape(np.matmul(flat, whiten_filter[chan]),
                                       [-1, n_features, n_neigh])
    return whitened
def make_title(raw_input):
    """Title-case the input and strip surrounding whitespace."""
    titled = raw_input.title()
    return titled.strip()
def perform(target, write_function=None):
    """
    Perform an HTTP request against a given target gathering some basic
    timing and content size values.

    :param target: URL to request (must be truthy).
    :param write_function: optional callable receiving response body
        chunks; defaults to a no-op sink.
    :return: dict with 'response' (status code), 'rtt' (connect time),
        'response_time' (total time), 'content_size' (bits) and 'bps'
        (throughput in bits/second, 0 when response_time is 0).
    """
    fnc = write_function or (lambda x: None)
    assert target
    connection = pycurl.Curl()
    try:
        connection.setopt(pycurl.URL, target)
        connection.setopt(pycurl.FOLLOWLOCATION, True)
        connection.setopt(pycurl.WRITEFUNCTION, fnc)
        connection.perform()
        result = {
            'response': connection.getinfo(pycurl.RESPONSE_CODE),
            'rtt': round(connection.getinfo(pycurl.CONNECT_TIME), 5),
            'response_time': round(connection.getinfo(pycurl.TOTAL_TIME), 5),
            'content_size': (
                int(connection.getinfo(pycurl.SIZE_DOWNLOAD)) +
                int(connection.getinfo(pycurl.HEADER_SIZE))
            ) * 8
        }
    finally:
        # BUGFIX: release the curl handle; it was previously leaked.
        connection.close()
    try:
        result['bps'] = round(
            result['content_size'] / result['response_time'],
            5
        )
    except ZeroDivisionError:
        result['bps'] = 0
    return result
import xml.etree.ElementTree as ET
import os
def load_xml_images(renderer, filename, _filter=None, by_name=False):
    """
    Load images from a TextureAtlas XML file. Images may be filtered
    and are returned in a list, or optionally, a dict of images indexed by
    the name found in the xml file.

    :param renderer: renderer to attach texture to
    :param filename: path to a valid TextureAtlas XML file
    :param _filter: list of file names or numbers to include in the list;
        None (the default) or an empty list means "include everything"
    :param by_name: set true to return a dict with image names
    :rvalue: list of images as ordered in filename by default
             optionally returns dict with image names instead
    """
    # BUGFIX: the previous `_filter=[]` mutable default is an anti-pattern;
    # None is the sentinel now (empty and None behave identically below).
    _filter = _filter or []
    root = ET.parse(filename).getroot()
    if root.tag != 'TextureAtlas':
        raise ValueError("'{}' is not a TextureAtlas XML file".format(filename))
    path = os.path.join(os.path.dirname(filename), root.attrib['imagePath'])
    # Per-renderer texture cache; load the texture only once per path.
    renderer_cache = loaded_textures.setdefault(renderer, {})
    texture = renderer_cache.get(path)
    if texture is None:
        texture = load_texture(renderer, path)
        renderer_cache[path] = texture
    numbers = []
    names = {}
    for n, child in enumerate(root):
        if not _filter or n in _filter or child.attrib.get('n') in _filter:
            rect = pg.Rect(
                int(child.attrib['x']), int(child.attrib['y']),
                int(child.attrib['w']), int(child.attrib['h']))
            img = Image(texture, srcrect=rect)
            numbers.append(img)
            names[child.attrib.get('n')] = img
    return names if by_name else numbers
import typing
def merge_property_into_method(
    l: Signature, r: typing.Tuple[Metadata, OutputType]
) -> Signature:
    """Merge a property into a method.

    Currently implemented by keeping the method signature *l* unchanged
    and discarding the property information *r*.
    """
    return l
import os
def get_image_paths(dir_path, image_filename_pattern="img_{:05d}.jpg", fps=15):
    """each dir contains the same number of flow_x_{:05d}.jpg, flow_y_{:05d}.jpg, img_{:05d}.jpg.
    Index starts at 1, not 0, thus there is no img_00000.jpg, etc.
    """
    # One third of the directory entries are RGB frames (the other two
    # thirds are flow_x / flow_y images).
    num_rgb_images = len(os.listdir(dir_path)) // 3
    second_starts = np.arange(0, num_rgb_images, fps)
    # Original frames were extracted for [1-5], [11-15], [21-25] + 30*n
    # (video fps=30); pick the middle of each extracted 5-frame run.
    middle_offsets = [3, 8, 13]
    selected = np.concatenate(
        [second_starts + off for off in middle_offsets], axis=0)
    selected = selected[selected <= num_rgb_images]
    return [image_filename_pattern.format(idx) for idx in selected]
def _serve_archive(content_hash, file_name, mime_type):
    """Serve a file from the archive or by generating an external URL."""
    external_url = archive.generate_url(content_hash,
                                        file_name=file_name,
                                        mime_type=mime_type)
    if external_url is not None:
        # The blob is reachable directly (e.g. signed storage URL).
        return redirect(external_url)
    try:
        local_path = archive.load_file(content_hash, file_name=file_name)
        if local_path is None:
            return Response(status=404)
        return send_file(local_path,
                         as_attachment=True,
                         conditional=True,
                         attachment_filename=file_name,
                         mimetype=mime_type)
    finally:
        # Always drop the temporary local copy, even on error paths.
        archive.cleanup_file(content_hash)
def preprocess(path, scale=3):
    """
    Prepare the (input, label) pair for a single image.

    Modcrop is applied to the label so its height and width are divisible
    by *scale*; the input is the label bicubically downscaled by 1/scale.

    Inputs:
      path: the image directory path
      scale: scale to downscale
    Outputs:
      input_: downscaled version of image
      label_: label after applying modcrop
    """
    img = imread(path)
    # Crop so both dimensions are divisible by scale before resizing.
    label_ = modcrop(img, scale)
    # Resize by the scaling factor (bicubic interpolation).
    # NOTE: removed long-dead commented-out Gaussian-blur / debug code.
    input_ = cv2.resize(label_, None, fx=1.0 / scale, fy=1.0 / scale,
                        interpolation=cv2.INTER_CUBIC)
    return input_, label_
import math
def get_goal_sample_rate(start, goal):
    """Adapt the probability of sampling the goal directly as the next
    point, based on the distance between start and goal.  Helps both
    precision and performance.

    Falls back to a default rate of 5 when start/goal are missing or not
    indexable.
    """
    try:
        # math.hypot is the idiomatic (and overflow-safe) Euclidean norm.
        d = math.hypot(goal[0] - start[0], goal[1] - start[1])
    except TypeError:
        # start or goal is None / not subscriptable: use the default rate.
        return 5
    if d < 600:
        return (10 - d / 140) ** 2
    return 30
import warnings
def sample_cov(prices, returns_data=False, frequency=252, log_returns=False, **kwargs):
    """
    Calculate the annualised sample covariance matrix of (daily) asset returns.

    :param prices: adjusted closing prices of the asset, each row is a date
                   and each column is a ticker/id.
    :type prices: pd.DataFrame
    :param returns_data: if true, the first argument is returns instead of prices.
    :type returns_data: bool, defaults to False.
    :param frequency: number of time periods in a year, defaults to 252 (the number
                      of trading days in a year)
    :type frequency: int, optional
    :param log_returns: whether to compute using log returns
    :type log_returns: bool, defaults to False
    :return: annualised sample covariance matrix
    :rtype: pd.DataFrame
    """
    if not isinstance(prices, pd.DataFrame):
        warnings.warn("data is not in a dataframe", RuntimeWarning)
        prices = pd.DataFrame(prices)
    returns = prices if returns_data else returns_from_prices(prices, log_returns)
    annualised_cov = returns.cov() * frequency
    fix_method = kwargs.get("fix_method", "spectral")
    return fix_nonpositive_semidefinite(annualised_cov, fix_method)
import argparse
def str2bool(v):
    """Transform a string flag into a boolean.

    :param v: boolean as type or string
    :type v: str
    :return: bool, or an argparse error if the value is not recognized
    :rtype: bool
    """
    if isinstance(v, bool):
        return v
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
from typing import Dict
def read_prev_timings(junit_report_path: str) -> Dict[str, float]:
    """Read the JUnit XML report in `junit_report_path` and
    returns its timings grouped by class name.

    Aborts the pytest run with a message when the report cannot be read
    or parsed.
    """
    # BUGFIX: ET.parse never returns None -- it raises on failure -- so the
    # previous `if tree is None` guard (and the assert) were dead code.
    # Catch the real errors to actually trigger the documented exit.
    try:
        tree = ET.parse(junit_report_path)
    except (OSError, ET.ParseError):
        pytest.exit(f"Could not find timings in JUnit XML {junit_report_path}")
    return group_prev_timings(tree.getroot())
def update_model():
    """Update a model from the JSON request body.

    Expects a payload of the form ``{"params": {"model_id": ..., ...}}``;
    looks up the model by id, applies the remaining params, and returns
    the updated entry as JSON (or an error when the model is missing).
    """
    data = request.get_json()
    params = data.get('params', {'model_id': 1})
    # BUGFIX: `params` is a plain dict, so the previous attribute access
    # (params.model_id) raised AttributeError; use dict lookup instead.
    model_id = params.get('model_id')
    entry = Model.objects(model_id=model_id).first()
    if not entry:
        return {'error': ModelNotFoundError()}
    entry.update(**params)
    return entry.to_json()
def SmartConnect(protocol='https',
                 host='localhost',
                 port=443,
                 user='root',
                 pwd='',
                 service="hostd",
                 path="/sdk",
                 preferredApiVersions=None,
                 keyFile=None,
                 certFile=None,
                 thumbprint=None,
                 sslContext=None,
                 httpConnectionTimeout=None,
                 connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
                 token=None,
                 disableSslCertValidation=False,
                 customHeaders=None):
    """
    Determine the most preferred API version supported by the specified server,
    then connect to the specified server using that API version, login and return
    the service instance object.

    Throws any exception back to caller. The service instance object is
    also saved in the library for easy access.

    Clients should modify the service parameter only when connecting to
    a VMOMI server other than hostd/vpxd. For both of the latter, the
    default value is fine.

    @param protocol: What protocol to use for the connection (e.g. https or http).
    @type protocol: string
    @param host: Which host to connect to.
    @type host: string
    @param port: Port
    @type port: int
    @param user: User
    @type user: string
    @param pwd: Password
    @type pwd: string
    @param service: Service
    @type service: string
    @param path: Path
    @type path: string
    @param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version9)
                                 If a list of versions is specified the versions should
                                 be ordered from most to least preferred.  If None is
                                 specified, the list of versions support by pyVmomi will
                                 be used.
    @type preferredApiVersions: string or string list
    @param keyFile: ssl key file path
    @type keyFile: string
    @param certFile: ssl cert file path
    @type certFile: string
    @param thumbprint: host cert thumbprint
    @type thumbprint: string
    @param sslContext: SSL Context describing the various SSL options. It is only
                       supported in Python 2.7.9 or higher.
    @type sslContext: SSL.Context
    @param httpConnectionTimeout: Timeout in secs for http requests.
    @type httpConnectionTimeout: int
    @param connectionPoolTimeout: Timeout in secs for idle connections to close, specify
                                  negative numbers for never closing the connections
    @type connectionPoolTimeout: int
    @type token: string
    @param token: Authentication and Authorization token to use for the connection.
                  The presence of this token overrides the user and pwd parameters.
    @type disableSslCertValidation: bool
    @param disableSslCertValidation: Creates an unverified SSL context when True.
    """
    if preferredApiVersions is None:
        preferredApiVersions = GetServiceVersions('vim25')

    sslContext = getSslContext(host, sslContext, disableSslCertValidation)

    supportedVersion = __FindSupportedVersion(protocol, host, port, path,
                                              preferredApiVersions, sslContext)
    if supportedVersion is None:
        raise Exception("%s:%s is not a VIM server" % (host, port))

    # A negative port number signals plain HTTP to the lower layers.
    # BUGFIX (idiom): replaced the fragile `cond and a or b` construct with
    # a conditional expression; the old form silently falls through to the
    # `or` branch whenever the `and` result is falsy.
    portNumber = -int(port) if protocol == "http" else int(port)

    return Connect(host=host,
                   port=portNumber,
                   user=user,
                   pwd=pwd,
                   service=service,
                   adapter='SOAP',
                   version=supportedVersion,
                   path=path,
                   keyFile=keyFile,
                   certFile=certFile,
                   thumbprint=thumbprint,
                   sslContext=sslContext,
                   httpConnectionTimeout=httpConnectionTimeout,
                   connectionPoolTimeout=connectionPoolTimeout,
                   token=token,
                   disableSslCertValidation=disableSslCertValidation,
                   customHeaders=customHeaders)
def _pseudoArrayFromScalars(scalarvalues, type):
    """Wrap a scalar (or list/tuple of scalars) in a pooled buffer so it
    can be used as an array.

    Args:
        scalarvalues: a single scalar, or a list/tuple of scalars.
        type: numeric type descriptor; must expose a ``bytes`` attribute
            giving the element size.  NOTE: shadows the ``type`` builtin.

    Returns:
        A buffer from ``_bufferPool`` dressed up with array attributes
        (_shape, _type, _itemsize, _strides) and filled with the value(s).
    """
    arr = _bufferPool.getBuffer()
    arr._check_overflow = 1
    newtype = type # _numtypedict[type]
    # NOTE(review): _strides is assigned a 1-tuple here and then overwritten
    # with None a few lines below; the first assignment looks dead unless
    # the item assignments in between depend on it -- confirm intent.
    arr._strides = (newtype.bytes,)
    arr._type = newtype
    arr._itemsize = newtype.bytes
    arr._strides = None
    if isinstance(scalarvalues, (list, tuple)):
        # Sequence input becomes a rank-1 array of the same length.
        arr._shape = (len(scalarvalues),)
        for i in xrange(len(scalarvalues)):
            arr[i] = scalarvalues[i]
    else:
        # Single scalar becomes a rank-0 array.
        arr._shape = ()
        arr[()] = scalarvalues
    # Modify block buffer attributes to look like vector/vector setup.
    return arr
def triangulate_ellipse(corners, num_segments=100):
    """Determines the triangulation of a path. The resulting `offsets` can
    multiplied by a `width` scalar and be added to the resulting `centers`
    to generate the vertices of the triangles for the triangulation, i.e.
    `vertices = centers + width*offsets`. Using the `centers` and `offsets`
    representation thus allows for the computed triangulation to be
    independent of the line width.

    Parameters
    ----------
    corners : np.ndarray
        4xD array of four bounding corners of the ellipse. The ellipse will
        still be computed properly even if the rectangle determined by the
        corners is not axis aligned. D in {2,3}
    num_segments : int
        Integer determining the number of segments to use when triangulating
        the ellipse

    Returns
    -------
    vertices : np.ndarray
        Mx2/Mx3 array coordinates of vertices for triangulating an ellipse.
        Includes the center vertex of the ellipse, followed by `num_segments`
        vertices around the boundary of the ellipse (M = `num_segments`+1)
    triangles : np.ndarray
        Px3 array of the indices of the vertices for the triangles of the
        triangulation. Has length (P) given by `num_segments`,
        (P = M-1 = num_segments)

    Notes
    -----
    Despite it's name the ellipse will have num_segments-1 segments on their outline.
    That is to say num_segments=7 will lead to ellipses looking like hexagons.

    The behavior of this function is not well defined if the ellipse is degenerate
    in the current plane/volume you are currently observing.
    """
    if not corners.shape[0] == 4:
        raise ValueError(
            trans._(
                "Data shape does not match expected `[4, D]` shape specifying corners for the ellipse",
                deferred=True,
            )
        )
    assert corners.shape in {(4, 2), (4, 3)}
    centroid = corners.mean(axis=0)
    offsets = corners - centroid
    # Differences of consecutive corners give the 1/2 minor and major axes.
    half_axis_a = (offsets[1] - offsets[0]) / 2
    half_axis_b = (offsets[2] - offsets[1]) / 2
    # The two half axes form the rows of the unit-circle -> ellipse map:
    # a 2x2 matrix for 2D -> 2D, or 2x3 for 2D -> 3D.
    basis = np.stack((half_axis_a, half_axis_b))
    if corners.shape == (4, 2):
        assert basis.shape == (2, 2)
    else:
        assert basis.shape == (2, 3)
    # Discretize the unit circle in 2D; index 0 stays at the center vertex.
    unit_pts = np.zeros((num_segments + 1, 2), dtype=np.float32)
    angles = np.linspace(0, np.deg2rad(360), num_segments)
    unit_pts[1:, 0] = np.cos(angles)
    unit_pts[1:, 1] = np.sin(angles)
    # Map onto the ellipse (result is Mx2 or Mx3), then shift to the center.
    vertices = np.matmul(unit_pts, basis)
    vertices = vertices + centroid
    triangles = (
        np.arange(num_segments) + np.array([[0], [1], [2]])
    ).T * np.array([0, 1, 1])
    triangles[-1, 2] = 1
    return vertices, triangles
def is_dbenv_loaded():
    """
    Return True if the dbenv was already loaded (with a call to load_dbenv),
    False otherwise.
    """
    # Read-only accessor for the flag set by load_dbenv().
    return settings.LOAD_DBENV_CALLED
def parse_python_settings_for_dmlab2d(
    lab2d_settings: config_dict.ConfigDict) -> Settings:
  """Flatten lab2d_settings into Lua-friendly properties."""
  # Since config_dicts disallow "." in keys, "$" is used in our configs and
  # converted back to "." here.  This matters for levels with keys such as
  # 'player.%default' in DMLab2D.
  flattened = settings_helper.flatten_args(_config_dict_to_dict(lab2d_settings))
  return {key.replace("$", "."): str(value)
          for key, value in flattened.items()}
import IPython
import re
def get_new_name(x: str) -> str:
    """
    Obtains a new name for the given site.

    Args:
        x: The original name.

    Returns:
        The new name.

    Raises:
        ValueError: If the site name cannot be parsed.
    """
    y = x.lower()
    if y == "cervical":
        return "cervical_spine"
    m = re.match(r"^([lr])\s+(.+)$", y)
    if not m:
        raise ValueError("cannot parse site name: {!r}".format(x))
    side = "left" if m.group(1) == "l" else "right"
    site = m.group(2)
    # Normalize a few site aliases.
    if site == "ip":
        site = "pip1"
    elif site == "si":
        site = "sacroiliac"
    elif re.match(r"tm[jl]", site):
        site = "tmj"
    m_toe = re.match(r"^toe(\d)$", site)
    if m_toe:
        site = "toe_ip{}".format(m_toe.group(1))
    # BUGFIX: removed unreachable leftover debug code that followed this
    # return (IPython.embed(); raise Exception()).
    return "{}_{}".format(site, side)
from typing import Dict
from typing import Any
def u2f_from_dict(data: Dict[str, Any]) -> U2F:
    """
    Build a U2F credential instance from database parameters.

    :param data: Credential parameters from database
    """
    return U2F.from_dict(data)
import pandas
import sys
def get_sequences(datafile, seq_column="sequence_1D", test=False, test_size=100):
    """
    Read the dataframe and return only the sequence column as a numpy
    array, so we do not hold the full DF in memory.

    :param datafile: path to a .csv (read lazily with only seq_column) or
        any file handled by get_dataframe_from_json_csv_pkl.
    :param seq_column: name of the sequence column.
    :param test: if True, return only the first test_size sequences.
    :param test_size: number of sequences returned in test mode.
    :return: numpy array of sequences.
    """
    if datafile.endswith(".csv"):
        df = pandas.read_csv(datafile, usecols=[seq_column])
    else:
        # BUGFIX: previously referenced the global `options.datafile`
        # instead of this function's `datafile` argument.
        df = get_dataframe_from_json_csv_pkl(datafile)
    print("Read DF")
    if seq_column not in df.columns:
        # BUGFIX: previously reported the global `options.seq_column`.
        sys.exit("Column not found in dataframe:" + seq_column)
    if test:
        return df[seq_column][:test_size].to_numpy()
    return df[seq_column].to_numpy()
def filterForDoxygen(contents):
    """
    filterForDoxygen(contents) -> contents

    Massage the content of a python file to better suit Doxygen's
    expectations: filter the general contents first, then the docstrings.
    """
    return filterDocStrings(filterContents(contents))
from typing import List
from typing import Callable
def calculate_quantum_volume(
        *,
        num_qubits: int,
        depth: int,
        num_circuits: int,
        seed: int,
        device: cirq.google.xmon_device.XmonDevice,
        samplers: List[cirq.Sampler],
        compiler: Callable[[cirq.Circuit], cirq.Circuit] = None,
        repetitions=10_000,
) -> List[QuantumVolumeResult]:
    """Run the quantum volume algorithm.

    This algorithm should compute the same values as Algorithm 1 in
    https://arxiv.org/abs/1811.12926. To summarize, we generate a random model
    circuit, compute its heavy set, then transpile an implementation onto our
    architecture. This implementation is run a series of times and if the
    percentage of outputs that are in the heavy set is greater than 2/3, we
    consider the quantum volume test passed for that size.

    Args:
        num_qubits: The number of qubits for the circuit.
        depth: The number of gate layers to generate.
        num_circuits: The number of random circuits to run.
        seed: A seed to pass into the RandomState.
        device: The device to run the compiled circuit on.
        samplers: The samplers to run the algorithm on.
        compiler: An optional function to compiler the model circuit's
            gates down to the target devices gate set and the optimize it.
        repetitions: The number of bitstrings to sample per circuit.

    Returns: A list of QuantumVolumeResults that contains all of the information
        for running the algorithm and its results.
    """
    prng = np.random.RandomState(seed)
    model_circuits = prepare_circuits(num_qubits=num_qubits,
                                      depth=depth,
                                      num_circuits=num_circuits,
                                      random_state=prng)
    return execute_circuits(circuits=model_circuits,
                            device=device,
                            compiler=compiler,
                            samplers=samplers,
                            repetitions=repetitions)
import os
def get_plugin_translator(plugin_path):
    """Returns a new ui.Translator object for the plugin specified by the
    plugin_path argument.  If a file is passed, the last path component
    is removed.
    """
    if os.path.isfile(plugin_path):
        plugin_path = os.path.split(plugin_path)[0]
    # BUGFIX: build the language path with os.path.join components instead
    # of concatenating a hard-coded Windows backslash ('lang\\'), which is
    # not portable to POSIX systems.
    path = os.path.join(plugin_path, 'lang', app.language)
    trans = ui.Translator()
    trans.try_to_load(path)
    return trans
def get_key(item, key_length):
    """
    key + value = item
    number of words of key = key_length
    function returns key
    """
    words = item.strip().split()
    # Both a zero key length and an exact word-count match return the raw
    # item unchanged (including surrounding whitespace).
    if key_length == 0 or len(words) == key_length:
        return item
    return ' '.join(words[:key_length])
from typing import Tuple
def identity(shape: Tuple[int, ...], gain: float = 1) -> JaxArray:
    """Returns the identity matrix. This initializer was proposed in
    `A Simple Way to Initialize Recurrent Networks of Rectified Linear Units
    <https://arxiv.org/abs/1504.00941>`_.

    Args:
        shape: Shape of the tensor. It should have exactly rank 2.
        gain: optional scaling factor.

    Returns:
        Tensor initialized to the identity matrix.
    """
    assert len(shape) == 2
    eye_matrix = jn.eye(*shape)
    return gain * eye_matrix
from typing import Optional
from typing import Dict
def pyreq_nlu_trytrain(httpreq_handler: HTTPRequestHandler, project_id: int, locale: str) -> Optional[Dict]:
    """
    Get try-annotation on utterance with the latest run-time NLU model for a
    Mix project and locale, by POSTing to the Mix API endpoint with the
    Python 'requests' package.

    API endpoint: POST /nlu/api/v1/nlu/<PROJ_ID>/annotations/train?sources=nuance_custom_data&locale=<LOCALE>
    Request payload: None

    :param httpreq_handler: HTTPRequestHandler to process requests and responses
    :param project_id: Mix project ID
    :param locale: Mix project NLU locale
    :return: JSON response payload from API endpoint
    """
    endpoint = (f'/nlu/api/v1/nlu/{project_id}/annotations/train'
                f'?sources=nuance_custom_data&locale={locale}')
    return httpreq_handler.request(url=endpoint, method=POST_METHOD, data='{}',
                                   default_headers=True, json_resp=True)
from typing import Tuple
def month_boundaries(month: int, year: int) -> Tuple[datetime_.datetime, datetime_.datetime]:
    """
    Return the boundary datetimes (midnight of the first day of the month,
    midnight of the first day of the following month).
    """
    first_day = datetime_.date(year, month, 1)
    next_month_first_day = first_day + relativedelta(months=1)
    return (midnight(first_day), midnight(next_month_first_day))
def calculate_Hubble_flow_velocity_from_cMpc(cMpc, cosmology="Planck15"):
    """
    Calculates the Hubble flow recession velocity from comoving distance
    Parameters
    ----------
    cMpc : array-like, shape (N, )
        The distance in units of comoving megaparsecs. Must be 1D or scalar.
    cosmology : string or astropy.cosmology.core.FLRW
        The cosmology to assume whilst calculating distance. Default: Planck15.
    Returns
    -------
    velocity : array-like, shape (N, )
        The Hubble flow recession velocity (v = H0 * proper distance).
        NOTE: the previous docstring wrongly described the return value as
        the scale factor.
    """
    cosmo = get_cosmology_from_name(cosmology)
    H0 = cosmo.H0
    # Convert comoving -> proper distance via the scale factor, then apply
    # Hubble's law v = H0 * d_proper.
    scale_factor = calculate_scale_factor_from_cMpc(cMpc, cosmology=cosmology)
    proper_dist = cMpc * apu.Mpc / scale_factor
    velocity = proper_dist * H0
    return velocity
def join_items(
    *items, separator="\n", description_mode=None,
    start="", end="", newlines=1
):
    """
    joins items using separator, ending with end and newlines

    Args:
        *items - the things to join
        separator - what seperates items
        description_mode - what mode to use for description
        start - what to start the string with
        end - what to end the string with
        newlines - how many newlines to add after end

    Returns a string
    """
    if description_mode:
        rendered = [
            description(*(item if item else ""), mode=description_mode,
                        newlines=0)
            for item in items
        ]
    else:
        rendered = convert_items(list(items), type_=str)
    body = separator.join(piece.strip() for piece in rendered).strip()
    if not body.endswith(end):
        body += end
    return start + newline(body, newlines)
def JacobianSpace(Slist, thetalist):
    """Computes the space Jacobian for an open chain robot
    :param Slist: The joint screw axes in the space frame when the
                  manipulator is at the home position, in the format of a
                  matrix with axes as the columns
    :param thetalist: A list of joint coordinates
    :return: The space Jacobian corresponding to the inputs (6xn real
             numbers)
    Example Input:
        Slist = np.array([[0, 0, 1, 0, 0.2, 0.2],
                          [1, 0, 0, 2, 0, 3],
                          [0, 1, 0, 0, 2, 1],
                          [1, 0, 0, 0.2, 0.3, 0.4]]).T
        thetalist = np.array([0.2, 1.1, 0.1, 1.2])
    Output:
        np.array([[ 0, 0.98006658, -0.09011564, 0.95749426]
                  [ 0, 0.19866933, 0.4445544, 0.28487557]
                  [ 1, 0, 0.89120736, -0.04528405]
                  [ 0, 1.95218638, -2.21635216, -0.51161537]
                  [0.2, 0.43654132, -2.43712573, 2.77535713]
                  [0.2, 2.96026613, 3.23573065, 2.22512443]])
    """
    # Column 0 is S1 unchanged: the space frame is unaffected by joint 1.
    Js = Slist.copy()
    # Running product of exponentials for joints 1..i-1.
    # NOTE(review): `*` is used for matrix products below, implying
    # np.matrix-like operands rather than plain ndarrays -- confirm callers.
    T = eye(4)
    for i in range(1, len(thetalist)):
        T = T * MatrixExp6(VecTose3(Slist[:, i - 1] \
            * thetalist[i - 1]))
        # Map screw axis i into the current frame via the adjoint map.
        Js[:, i] = Adjoint(T) * Slist[:, i]
    return Js | e0f3fba57b2d1595a59b708a452fd2b57c6011e7 | 24,968
async def delete_bank(org_id: str, bank_id: str,
                      user: users_schemas.User = Depends(is_authenticated),
                      db: Session = Depends(get_db)):
    """Delete a given bank of id bank_id.

    Args:
        org_id: organisation identifier (kept for route compatibility).
        bank_id: a unique identifier of the bank object.
        user: authenticates that the user is a logged in user.
        db (Session): The database session used to delete the bank object.

    Returns:
        HTTP_200_OK (success response)

    Raises:
        HTTP_424_FAILED_DEPENDENCY: failed to delete bank details
        HTTP_404_NOT_FOUND: Bank does not exist.
    """
    # fetch_bank is expected to raise for a missing/foreign bank -- TODO
    # confirm its error contract covers the 404 documented above.
    bank = await fetch_bank(user=user, id=bank_id, db=db)
    db.delete(bank)
    db.commit()
    return JSONResponse(
        {"detail": f"bank details with {bank_id} successfully deleted"},
        status_code=status.HTTP_200_OK,
    )
from typing import Union
from typing import Iterable
from typing import Any
def label_encode(
    df: pd.DataFrame, column_names: Union[str, Iterable[str], Any]
) -> pd.DataFrame:
    """
    Convert labels into numerical data.

    This method will create a new column with the string "_enc" appended
    after the original column's name. Consider this to be syntactic sugar.

    This method behaves differently from `encode_categorical`. This method
    creates a new column of numeric data. `encode_categorical` replaces the
    dtype of the original column with a "categorical" dtype.

    This method mutates the original DataFrame.

    Functional usage example:

    .. code-block:: python

        label_encode(df, column_names="my_categorical_column")  # one way

    Method chaining example:

    .. code-block:: python

        import pandas as pd
        import janitor
        categorical_cols = ['col1', 'col2', 'col4']
        df = pd.DataFrame(...).label_encode(column_names=categorical_cols)

    :param df: The pandas DataFrame object.
    :param str/iterable column_names: A column name or an iterable (list or
        tuple) of column names.
    :returns: A pandas DataFrame.
    :raises JanitorError: if `column_names` is not a string, list or tuple,
        or if any requested column is not present in `df`.
    """
    le = LabelEncoder()
    # Normalize the single-column case so both paths share one code path.
    if isinstance(column_names, str):
        column_names = [column_names]
    elif not isinstance(column_names, (list, tuple)):
        raise JanitorError(
            "kwarg `column_names` must be a string or iterable!"
        )
    for col in column_names:
        if col not in df.columns:
            raise JanitorError(f"{col} missing from column_names")
        df[f"{col}_enc"] = le.fit_transform(df[col])
    return df
import tempfile
import os
from functools import wraps
def chainable(func):
    """
    If no output_path is specified, generate an intermediate file and pass it
    to the function. Record the intermediate file's path on the resulting
    Video object before returning it. If an output_path is specified, use it
    and delete any previously recorded intermediate file.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        """Function wrapper."""
        intermediate_required = "output_path" not in kwargs
        self = args[0]
        if intermediate_required:
            fd, output_path = tempfile.mkstemp(
                dir=self.dirname, prefix=func.__name__, suffix=self.ext)
            # mkstemp returns an open OS-level descriptor; close it so we
            # don't leak it (and so the file can be unlinked on Windows).
            os.close(fd)
            kwargs["output_path"] = output_path
        ret = None
        try:
            ret = func(*args, **kwargs)
        except BaseException:
            # On failure, don't leave the freshly created temp file behind.
            if intermediate_required:
                os.unlink(output_path)
            raise
        finally:
            if self.intermediate_file is not None:
                os.unlink(self.intermediate_file)
                # Reset so a later call can't try to unlink the same file twice.
                self.intermediate_file = None
        if intermediate_required and ret is not None:
            ret.intermediate_file = output_path
        return ret
    return wrapper
def get_server_now_with_delta_str(timedelta):
    """Return the server's current time, shifted by *timedelta*, formatted
    with DATE_FORMAT_NAMEX_SEARCH."""
    shifted = get_server_now_with_delta(timedelta)
    return shifted.strftime(DATE_FORMAT_NAMEX_SEARCH)
def GetPrimaryKeyFromURI(uri):
    """
    Extract the primary key from a mujin-scheme URI.

    example:
    GetPrimaryKeyFromURI(u'mujin:/\u691c\u8a3c\u52d5\u4f5c1_121122.mujin.dae')
    returns u'%E6%A4%9C%E8%A8%BC%E5%8B%95%E4%BD%9C1_121122'
    """
    # Thin wrapper: delegate to uriutils with '@' selected as both the
    # fragment separator and the primary-key separator.
    return uriutils.GetPrimaryKeyFromURI(uri, fragmentSeparator=uriutils.FRAGMENT_SEPARATOR_AT, primaryKeySeparator=uriutils.PRIMARY_KEY_SEPARATOR_AT) | 49a489d02af3195ea7ed0a7f41b9fbb19bb16407 | 24,973
import math
def normalize(score, alpha=15):
    """
    Normalize the score to be between -1 and 1 using an alpha that
    approximates the max expected value.
    """
    norm_score = score / math.sqrt((score * score) + alpha)
    # Mathematically |norm_score| < 1 for alpha > 0; clamp defensively
    # against numerical drift.
    return max(-1.0, min(1.0, norm_score))
from datetime import datetime
import pytz
def get_timestamp(request):
    """Return the request's start timestamp.

    hhs_oauth_server.request_logging.RequestTimeLoggingMiddleware adds
    request._logging_start_dt; we grab it when present, otherwise we fall
    back to "now" in UTC as an ISO-8601 string.
    """
    if hasattr(request, '_logging_start_dt'):
        return request._logging_start_dt
    return datetime.now(pytz.utc).isoformat()
from typing import Optional
from typing import List
def human_size(bytes: int | float, units: Optional[List[str]] = None) -> str:
"""
Convert bytes into a more human-friendly format
:param bytes: int
Number of bytes
:param units: Optional[List[str]]
units used
:return: str
Return size in human friendly format: <number> <size_unit>
"""
if units is None:
units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
return f"{round(bytes, 2)} " + units[0] if bytes < 1024 else human_size(bytes / 1024, units[1:]) | 9b652f0a09024c22dcefa5909c17f7b14d0183f4 | 24,976 |
def ray_casting_2d(p: Point, poly: Poly) -> bool:
    """Implements ray-casting algorithm to check if a point p is inside a (closed) polygon poly"""
    # A point is inside iff a ray cast from it crosses the polygon boundary
    # an odd number of times; rayintersectseg tests one edge per call.
    intersections = [int(rayintersectseg(p, edge)) for edge in poly.edges]
    return _odd(sum(intersections)) | 149f797bdcecce483cf87ed012fe7a21370c2ab6 | 24,977
def average_price(offers):
    """Returns the average price of a set of items.

    The first item is ignored as this is hopefully underpriced. When there
    are more than three offers, the last item is also ignored as it is often
    greatly overpriced.

    IMPORTANT: It is important to only trade items which are represented on
    the market in great numbers. This is due to the fact that with lower
    competition between sellers, the prices are often non-competitive.

    Keyword arguments:
    offers -- A list of offers (dicts with a 'price' key), assumed sorted by
              ascending price.

    Returns the average price as a float, or None for fewer than two offers.
    """
    if len(offers) <= 1:
        # Matches the original implicit None for 0 or 1 offers.
        return None
    # Drop the cheapest offer; also drop the dearest when enough offers exist.
    trimmed = offers[1:-1] if len(offers) > 3 else offers[1:]
    return sum(offer['price'] for offer in trimmed) / len(trimmed)
import sys
def main(argv=sys.argv):
    """Main point of Entry"""
    # Delegate parsing and dispatch to the pbcommand-style runner: it
    # supports both a plain argparse path (_args_runner) and a resolved
    # tool-contract path (_resolved_tool_contract_runner).
    return pbparser_runner(
        argv=argv[1:],
        parser=_get_parser(),
        args_runner_func=_args_runner,
        contract_runner_func=_resolved_tool_contract_runner,
        alog=log,
        setup_log_func=setup_log) | 76978672961efd1c9aa60af98f6f602aabdd270d | 24,979
def get_train_data():
    """get all the train data from some paths
    Returns:
        X: Input data
        Y_: Compare data
        maxvalue: scaling factor taken from the first input image
            (max pixel value / 10000)
    """
    # Indices of example directories under PATH to include in training.
    TrainExamples = [8, 9, 10, 11, 12, 14]
    # from path set_22 to set_35
    path = PATH_SIMPLE + str(5) + '/'
    X, Y_ = generate(path, isNormalize=True)
    # NOTE(review): presumably used later to undo normalization -- confirm
    # with callers.
    maxvalue = (get_image(path, isInput=True)[0].max() / 10000)
    for train in TrainExamples:
        path = PATH + str(train) + '/'
        temp_X, temp_Y = generate(path, isNormalize=True)
        # Accumulate every example set along the sample axis.
        X = np.append(X, temp_X, axis=0)
        Y_ = np.append(Y_, temp_Y, axis=0)
    print("Finish generating all the train data!")
    return X, Y_, maxvalue | 01517db3cb4b5987b895c2b435771c745837bf65 | 24,980
def effective_dimension_vector(emb, normalize=False, is_cov=False):
    """Eigenvectors and normalized eigenvalue spectrum of an embedding's covariance.

    Helper underlying the effective-dimensionality computation.
    See Del Giudice, M. (2020). Effective Dimensionality: A Tutorial.
    _Multivariate Behavioral Research, 0(0), 1-16.
    https://doi.org/10.1080/00273171.2020.1743631.

    :param emb: embedding vectors, or a covariance matrix when is_cov=True
    :type emb: numpy.ndarray (num_entities, dim)
    :param normalize: Set True to standardize the data before computing the
        covariance. For spherical or quasi-spherical data (such as the
        embedding by word2vec), normalize=False is recommended, defaults to False
    :type normalize: bool, optional
    :param is_cov: Set True if `emb` is the covariance matrix, defaults to False
    :type is_cov: bool, optional
    :return: (v, p) where v holds the eigenvectors (as columns, sorted by
        descending eigenvalue) and p is the eigenvalue spectrum normalized
        to sum to 1
    :rtype: (numpy.ndarray, numpy.ndarray)
    """
    if is_cov:
        Cov = emb
    else:
        if normalize:
            # NOTE(review): StandardScaler both centers and scales each
            # dimension, not centering alone.
            emb = StandardScaler().fit_transform(emb)
        Cov = (emb.T @ emb) / emb.shape[0]
    lam, v = linalg.eig(Cov)
    # Sort the spectrum (and the matching eigenvectors) in descending order.
    order = np.argsort(lam)[::-1]
    lam = lam[order]
    v = v[:, order]
    # The covariance is symmetric, so eigenvalues are real up to numerical
    # noise; clamp tiny/negative values before normalizing to a distribution.
    lam = np.real(lam)
    lam = np.maximum(lam, 1e-10)
    p = lam / np.sum(lam)
    p = p[p > 0]
    return v, p | 5d4247b92216bc9e77eabbd35746c06fec22161c | 24,981
def getProductMinInventory(db, productID):
    """
    Gives back the minimum inventory for a given product.

    :param db: database pointer (object exposing a SQLAlchemy session)
    :param productID: int
    :return: int
    """
    # first() returns a single Product row.
    # NOTE(review): an unknown id yields None here and an AttributeError
    # below -- confirm callers guarantee the product exists.
    product = db.session.query(Product).filter(Product.id == productID).first()
    return product.min_inventory
import re
def read_config6(section, option, filename='', verbosity=None): #format result: {aaa:[bbb, ccc], ddd:[eee, fff], ggg:[hhh, qqq], xxx:[yyy:zzz]}
    """
    option: section, option, filename=''
    format result: {aaa:bbb, ccc:ddd, eee:fff, ggg:hhh, qqq:xxx, yyy:zzz}
    """
    filename = get_config_file(filename, verbosity)
    data = {}
    # MultiOrderedDict keeps duplicate option values; allow_no_value lets
    # bare keys through.
    cfg = ConfigParser.RawConfigParser(allow_no_value=True, dict_type=MultiOrderedDict)
    cfg.read(filename)
    cfg = cfg.get(section, option)
    for i in cfg:
        if ":" in i:
            # Entry looks like "<int key>: [val1, val2]"; split off the key.
            d1 = str(i).split(":")
            d2 = int(str(d1[0]).strip())
            # NOTE(review): this inner loop recomputes identical d3/d4/d5 for
            # every character of d1[1]; only the final assignment matters.
            for j in d1[1]:
                # "['|','|']" is a regex character class splitting on any of
                # ' , | [ ] -- presumably to peel values out of a
                # "[a, b]"-style literal. TODO confirm against real configs.
                d3 = re.split("['|','|']", d1[1])
                d4 = str(d3[1]).strip()
                d5 = str(d3[-2]).strip()
                data.update({d2:[d4, d5]})
        else:
            pass
    return data | e80c80b2033b10c03c7ee0b2a5e25c5739777f3f | 24,983
def bboxes_iou(boxes1, boxes2):
    """
    Intersection-over-union of axis-aligned boxes.

    boxes: [xmin, ymin, xmax, ymax] format coordinates. Inputs may be single
    boxes or broadcastable arrays of boxes (last axis of size 4).

    Returns the IoU value(s), clipped to be non-negative.
    """
    boxes1 = np.array(boxes1)
    boxes2 = np.array(boxes2)
    boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
    boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
    # Intersection rectangle: max of top-left corners, min of bottom-right
    # corners; clamp negative extents to zero (no overlap).
    left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
    right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
    inter_section = np.maximum(right_down - left_up, 0.0)
    inter_area = inter_section[..., 0] * inter_section[..., 1]
    union_area = boxes1_area + boxes2_area - inter_area
    ious = np.maximum(1.0 * inter_area / union_area, 0.0)
    return ious
import re
import os
import zipfile
import html
def zip_all_downloadables(getZipped_n_clicks, value, session_data):
    """Create a downloadable zip of USER selected set of output files.
    Args:
        getZipped_n_clicks: int -- Dash click counter for the trigger button
        value: str -- key into output_filetype_genres selecting a file genre
        session_data: Dash.dcc.Store(type='session')
    Returns:
        html.Div([]): Dash HTML div component↦ itself an array of Dash HTML components
        (implicitly None when the button has not been clicked yet)
    """
    # Only act once the button has been clicked and session data exists.
    if getZipped_n_clicks > 0 and any(session_data):
        selection = output_filetype_genres[value]
        session_output = session_data["PATH_TO_SESSION_OUTPUT"]
        RUN_ID = session_data["RUN_ID"]
        # "REF" pulls from the shared plasmid archive rather than this session.
        source = session_output if value != "REF" else PLASMIDS_ARCHIVE
        files = get_output_files(selection, final_output_dir=source)
        # Strip quotes/spaces/parens from the archive name; clock()[-4:]
        # appends a short time suffix to keep names unique per click.
        zipped = re.sub("['\ \(\)]", "", f"{session_output}{RUN_ID}_{clock()[-4:]}.zip")
        for i, filename in enumerate(files):
            fbn = f"{os.path.basename(filename)}"
            app.logger.info(f"{i}: Adding file (unknown) to new zipped archive: {fbn}...")
            # Append mode so every file lands in the same archive.
            with zipfile.ZipFile(zipped, "a") as zipf:
                zipf.write(filename, fbn)
        return html.Div(
            [
                html.H4(f"Zip⇝⇶🗃⇨Download All:"),
                html.H4([html.Li(file_download_link(zipped), className="zip-dl-link")]),
            ]
        ) | 385dc9a2d37adf26655dd11fb72a020470a104d3 | 24,985
import torch
import collections
def predict_all_task(trained_model_path, config, subject_specific):
    """Predict labels for every subject/trial combination.

    Parameters
    ----------
    trained_model_path : str
        Path to the serialized (torch.save'd) model.
    config : dict
        A dictionary of hyper-parameters used in the network; must contain
        'subjects' and 'trials'.
    subject_specific : bool
        When True, evaluate each subject/trial on its own data iterator;
        otherwise use the collective iterator.

    Returns
    -------
    dict
        Nested mapping {subject: {trial: predicted labels}}.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    trained_model = torch.load(trained_model_path, map_location=device)
    labels = collections.defaultdict(dict)
    for subject in config['subjects']:
        for trial in config['trials']:
            # Pick the data source once, then run a single prediction path
            # (previously the calculate_predictions call was duplicated).
            if subject_specific:
                data_iterator = subject_specific_data_iterator(
                    subject, trial, config)
            else:
                data_iterator = collective_data_iterator(config)
            labels[subject][trial] = calculate_predictions(
                trained_model, data_iterator, config)
    return labels
def hjorth(X):
    """ Compute Hjorth mobility and complexity of a time series.
    Notes
    -----
    To speed up, it is recommended to compute D before calling this function
    because D may also be used by other functions whereas computing it here
    again will slow down.
    Parameters
    ----------
    X : array_like, shape(N,)
        a 1-D real time series.
    Returns
    -------
    HM : float
        Hjorth mobility
    Comp : float
        Hjorth complexity
    References
    ----------
    .. [1] B. Hjorth, "EEG analysis based on time domain properties,"
       Electroencephalography and Clinical Neurophysiology, vol. 29,
       pp. 306-310, 1970.
    """
    # Compute the first order difference
    D = np.diff(X)
    # pad the first difference
    D = np.hstack([X[0], D])
    n = X.shape[0]
    # np.float was removed in NumPy 1.24 (deprecated since 1.20); use the
    # builtin float instead.
    M2 = float(np.sum(D ** 2)) / n
    TP = np.sum(X ** 2)
    M4 = np.sum((D[1:] - D[:-1]) ** 2) / n
    # Hjorth Mobility and Complexity
    HM = np.sqrt(M2 / TP)
    Comp = np.sqrt(float(M4) * TP / M2 / M2)
    return HM, Comp
def check_and_set_owner(func):
    """
    Decorator that applies to view functions expecting the "owner" name as a
    second argument.

    It will check if a user exists with this name and if so add to the
    request instance a member variable called owner_user pointing to the
    User instance corresponding to the owner.

    If the owner doesn't exist, the visitor is redirected to 404.
    """
    from functools import wraps

    # wraps preserves the wrapped view's name/docstring, which Django's
    # introspection and debugging rely on.
    @wraps(func)
    def _check_and_set_owner(request, owner_name, *args, **kwargs):
        try:
            owner_user = User.objects.get(username=owner_name)
        except User.DoesNotExist:
            return HttpResponseNotFound()
        else:
            request.owner_user = owner_user
            return func(request, owner_name, *args, **kwargs)
    return _check_and_set_owner
def schedule(self: Client) -> ScheduleProxy:
    """Delegates to a
    :py:class:`mcipc.rcon.je.commands.schedule.ScheduleProxy`
    """
    # NOTE(review): appears intended to be attached to Client (self-typed
    # free function); the proxy prefixes issued commands with 'schedule'.
    return ScheduleProxy(self, 'schedule') | a8402088fa9fa697e988b2bdb8f185b14f012873 | 24,989
def check_permission(permission):
    """Returns true if the user has the given permission."""
    if 'permissions' not in flask.session:
        return False
    granted = flask.session['permissions']
    # Admins always have access to everything; otherwise the permission must
    # be explicitly present in the user's permission list.
    return Permissions.ADMIN in granted or permission in granted
from datetime import datetime
def get_current_max_change_version(context, start_after, school_year: int, use_change_queries: bool):
    """
    If job is configured to use change queries, get
    the newest change version number from the target Ed-Fi API.
    Upload data to data lake.

    Returns the newest change version when use_change_queries is True,
    otherwise None.
    """
    if use_change_queries:
        # Timestamp this run's record with the job's launch time.
        stats = context.instance.event_log_storage.get_stats_for_run(context.run_id)
        launch_datetime = datetime.utcfromtimestamp(stats.launch_time)
        response = context.resources.edfi_api_client.get_available_change_versions(
            school_year
        )
        context.log.debug(response)
        # Persist the change-version window for this run so downstream steps
        # can delta-load against it.
        path = context.resources.data_lake.upload_json(
            path=f"edfi_api/school_year_{school_year}/{launch_datetime}.json",
            records=[{
                "school_year": school_year,
                "run_id": context.run_id,
                "oldest_change_version": response["OldestChangeVersion"],
                "newest_change_version": response["NewestChangeVersion"]
            }],
        )
        context.log.debug(path)
        return response["NewestChangeVersion"]
    else:
        context.log.info("Will not use change queries")
        return None | 49f8a2e1c1f38daa70d950c614ce711942c8a3ca | 24,991
import subprocess
def number_of_jobs_in_queue():
    """
    Return the number of SLURM jobs currently queued for the current user.

    Runs ``squeue -u <user>`` and counts output lines mentioning the user.
    """
    # Initialize #
    user_name = get_username()
    # check_output returns bytes on Python 3; decode before splitting,
    # otherwise splitting bytes on a str separator raises TypeError.
    output = subprocess.check_output(["squeue", "-u", user_name]).decode()
    return len([line for line in output.split("\n") if user_name in line])
def Energy_value(x):
    """
    Energy of an input signal: the sum of its squared samples.
    """
    y = np.sum(x**2)
    return y
import re
def get_int():
    """Read a line of text from standard input and return the equivalent int.

    Re-prompts ("Retry: ") until the input is a well-formed base-10 integer;
    returns None on end of input (when get_string() yields None).
    """
    while True:
        s = get_string();
        if s is None:
            return None
        # Accept only an optional sign followed by digits.
        if re.search(r"^[+-]?\d+$", s):
            try:
                i = int(s, 10)
                if type(i) is int: # could become long in Python 2
                    return i
            except ValueError:
                pass
        print("Retry: ", end="") | e6f4e1c49f4b4bc0306af50283728f016db524d7 | 24,994
def create_save_featvec_homogenous_time(yourpath, times, intensities, filelabel, version=0, save=True):
    """Produces the feature vectors for each light curve and saves them all
    into a single fits file. requires all light curves on the same time axis
    parameters:
        * yourpath = folder you want the file saved into
        * times = a single time axis for all
        * intensities = array of all light curves (NOT normalized)
        * filelabel = label used in the output FITS file name
        * version = what version of feature vector to calculate for all.
            default is 0
        * save = whether or not to save into a fits file
    returns: list of feature vectors + fits file containing all feature vectors
    requires: featvec()
    modified: [lcg 08212020]"""
    fname_features = yourpath + "/"+ filelabel + "_features_v"+str(version)+".fits"
    feature_list = []
    if version == 0:
        #median normalize for the v0 features
        intensities = normalize(intensities)
    elif version == 1:
        #mean normalize the intensity so goes to 1
        intensities = mean_norm(intensities)
    print("Begining Feature Vector Creation Now")
    for n in range(len(intensities)):
        feature_vector = featvec(times, intensities[n], v=version)
        feature_list.append(feature_vector)
        # Progress report every 25 light curves.
        if n % 25 == 0: print(str(n) + " completed")
    feature_list = np.asarray(feature_list)
    if save == True:
        # Record the feature-vector version in the FITS header for provenance.
        hdr = fits.Header()
        hdr["VERSION"] = version
        hdu = fits.PrimaryHDU(feature_list, header=hdr)
        hdu.writeto(fname_features)
    else:
        print("Not saving feature vectors to fits")
    return feature_list | 29250aad4cfa1aa5bbd89b880f93a7a70a775dfb | 24,995
import asyncio
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Eaton xComfort Bridge from a config entry."""
    ip_address = entry.data.get(CONF_IP_ADDRESS)
    auth_key = entry.data.get("authkey")
    bridge = Bridge(ip_address, auth_key)
    # Run the bridge's event loop in the background for the entry's lifetime.
    asyncio.create_task(bridge.run())
    # Store the bridge per entry so platforms (and unload) can find it.
    hass.data.setdefault(DOMAIN, {})[entry.entry_id] = bridge
    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    return True
def _cpu_string(platform_type, settings):
    """Generates a <platform>_<arch> string for the current target based on the given parameters.

    Args:
        platform_type: one of "ios", "macos", "tvos", "watchos".
        settings: dict of Bazel command-line option values.

    Returns:
        A string like "ios_arm64"; falls back to a default arch when no
        platform-specific --*_cpus flag was provided. Fails the analysis for
        an unknown platform_type.
    """
    if platform_type == "ios":
        ios_cpus = settings["//command_line_option:ios_multi_cpus"]
        if ios_cpus:
            return "ios_{}".format(ios_cpus[0])
        # Fall back to --cpu if it already names an iOS arch.
        cpu_value = settings["//command_line_option:cpu"]
        if cpu_value.startswith("ios_"):
            return cpu_value
        return "ios_x86_64"
    if platform_type == "macos":
        macos_cpus = settings["//command_line_option:macos_cpus"]
        if macos_cpus:
            return "darwin_{}".format(macos_cpus[0])
        return "darwin_x86_64"
    if platform_type == "tvos":
        tvos_cpus = settings["//command_line_option:tvos_cpus"]
        if tvos_cpus:
            return "tvos_{}".format(tvos_cpus[0])
        return "tvos_x86_64"
    if platform_type == "watchos":
        watchos_cpus = settings["//command_line_option:watchos_cpus"]
        if watchos_cpus:
            return "watchos_{}".format(watchos_cpus[0])
        return "watchos_i386"
    fail("ERROR: Unknown platform type: {}".format(platform_type)) | 7cf483c45bb209a9e3e0775538934945324281a7 | 24,997
def Check2DBounds(atomMatch,mol,pcophore):
    """ checks to see if a particular mapping of features onto
    a molecule satisfies a pharmacophore's 2D restrictions
    >>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
    ... ChemicalFeatures.FreeChemicalFeature('Donor',Geometry.Point3D(0.0, 0.0, 0.0))]
    >>> pcophore= Pharmacophore.Pharmacophore(activeFeats)
    >>> pcophore.setUpperBound2D(0,1,3)
    >>> m = Chem.MolFromSmiles('FCC(N)CN')
    >>> Check2DBounds(((0,),(3,)),m,pcophore)
    True
    >>> Check2DBounds(((0,),(5,)),m,pcophore)
    False
    """
    # Topological (bond-count) distance matrix of the molecule.
    dm = Chem.GetDistanceMatrix(mol,False,False,False)
    nFeats = len(atomMatch)
    for i in range(nFeats):
        for j in range(i+1,nFeats):
            # Bounds live in one matrix: lower triangle holds lower bounds,
            # upper triangle holds upper bounds.
            lowerB = pcophore._boundsMat2D[j,i] #lowerB = pcophore.getLowerBound2D(i,j)
            upperB = pcophore._boundsMat2D[i,j] #upperB = pcophore.getUpperBound2D(i,j)
            # Use the shortest topological distance between any pair of atoms
            # realizing features i and j.
            dij=10000
            for atomI in atomMatch[i]:
                for atomJ in atomMatch[j]:
                    try:
                        dij = min(dij,dm[atomI,atomJ])
                    except IndexError:
                        # Dump diagnostics before re-raising: the match refers
                        # to atom indices outside the distance matrix.
                        print('bad indices:',atomI,atomJ)
                        print(' shape:',dm.shape)
                        print(' match:',atomMatch)
                        print(' mol:')
                        print(Chem.MolToMolBlock(mol))
                        raise IndexError
            if dij<lowerB or dij>upperB:
                return False
    return True | d119645de037eeaf536e290766d4dacf9c2e2f08 | 24,998
def get_sdk_dir(fips_dir):
    """Return the platform-specific SDK directory for this workspace."""
    return util.get_workspace_dir(fips_dir) + '/fips-sdks/' + util.get_host_platform()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.