content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def element_png_display(element, max_frames):
    """Render an element to PNG when 'png' is an enabled display format.

    Returns the rendered PNG data, or None when PNG output is disabled,
    the element type is unsupported by the current backend, or the
    renderer lacks PNG support.
    """
    if 'png' not in Store.display_formats:
        return None

    message = process_object(element)
    if message:
        # The object failed validation: show the diagnostic HTML instead.
        IPython.display.display(IPython.display.HTML(message))
        return

    active_backend = Store.current_backend
    if type(element) not in Store.registry[active_backend]:
        return None

    renderer = Store.renderers[active_backend]
    if 'png' not in renderer.params('fig').objects:
        # Current renderer does not support PNG.
        return None

    data, _ = renderer(element, fmt='png')
    return data
def list_documents(connection, name: str = None, to_dictionary: bool = False,
                   to_dataframe: bool = False, limit: int = None, **filters):
    """Get all Documents available in the project specified within the
    `connection` object.

    Args:
        connection(object): MicroStrategy connection object returned
            by 'connection.Connection()'
        name: exact name of the document to list
        to_dictionary(bool, optional): if True, return Documents as
            list of dicts
        to_dataframe(bool, optional): if True, return Documents as
            pandas DataFrame
        limit: limit the number of elements returned to a sample of
            documents. If `None`, all objects are returned.
        **filters: Available filter parameters: ['name', 'id', 'type',
            'subtype', 'date_created', 'date_modified', 'version', 'acg',
            'owner', 'ext_type', 'view_media', 'certified_info', 'project_id']

    Returns:
        List of documents.

    Raises:
        ValueError: if no project is selected on the connection.
    """
    # A project must be selected on the connection; listing across the
    # whole environment has a dedicated helper.
    # TODO: consider adding Connection.project_selected attr/method
    if connection.project_id is None:
        raise ValueError("Please log into a specific project to load documents within it. "
                         f"To load all documents across the whole environment use {list_documents_across_projects.__name__} function")
    return Document._list_all(connection,
                              name=name,
                              to_dictionary=to_dictionary,
                              to_dataframe=to_dataframe,
                              limit=limit,
                              **filters)
def test_read_metric_thresholds():
    """Test that the metric thresholds can be read from a csv file."""
    csv_content = StringIO(
        """Quadrant, Complexity, Function size, Coverage
Q1, < 5,< 15,> 80
Q2, < 8,< 30,> 95
Q3, < 16,< 50,> 70
Q4, < 20,< 100,> 60"""
    )
    threshold_file = r"../src/riskmatrix/quadrant_metric_thresholds.csv"
    reader = csv.DictReader(csv_content, delimiter=",", quotechar='"',
                            quoting=csv.QUOTE_ALL, skipinitialspace=True)
    matrix = RiskMatrix()
    # Patch open() so the real file is never touched; the injected reader
    # supplies the parsed rows instead.
    with patch("src.riskmatrix.risk_matrix.open", mock_open()) as mocked_file:
        matrix.add_metric_thresholds(threshold_file, reader)
        mocked_file.assert_called_once_with(threshold_file, encoding="utf-8")
    assert len(matrix.metric_thresholds) == 4
    q1_thresholds = matrix.metric_thresholds["Q1"]
    assert q1_thresholds["Complexity"] == "< 5"
    assert q1_thresholds["Function size"] == "< 15"
    assert q1_thresholds["Coverage"] == "> 80"
def rsa_obj(key_n, key_e, key_d=None, key_p=None, key_q=None):
    """Wrapper for the RSAObj constructor.

    Builds an RSA key from whichever components are supplied: (n, e)
    yields a public key; adding d yields a private key whose prime
    factors are recovered when not given.  The main reason for its
    existence is to compute the prime factors when only the private
    exponent d is set: RSA.construct's own factor computation threw
    exceptions in testing, while recover_prime_factors works better.

    :param key_n: modulus n
    :param key_e: public exponent e
    :param key_d: private exponent d (optional)
    :param key_p: first prime factor p (optional)
    :param key_q: second prime factor q (optional)
    :return: the constructed RSA key object
    :raises ValueError: if n or e is missing
    """
    # `is None` instead of `== None`, per PEP 8; the original also fell
    # through to a NameError when no branch matched.
    if key_n is None or key_e is None:
        raise ValueError("key_n and key_e are required")
    if key_d is None:
        # Public key only.
        return RSA.construct((key_n, key_e))
    if key_p is None and key_q is None:
        # Private exponent without primes: recover the factors explicitly.
        key_p, key_q = recover_prime_factors(key_n, key_e, key_d)
        # int() replaces the Python-2-only long().
        return RSA.construct((key_n, key_e, key_d, int(key_p), int(key_q)))
    if key_q is None:
        # Only p given: q follows from the modulus.  Integer division --
        # the original '/' produces a float on Python 3.
        return RSA.construct((key_n, key_e, key_d, key_p, key_n // key_p))
    return RSA.construct((key_n, key_e, key_d, key_p, key_q))
def create_surface_and_gap(surf_data, radius_mode=False, prev_medium=None,
                           wvl=550.0, **kwargs):
    """Create a surface and gap from a surface-data list.

    surf_data is a list that contains:
        [curvature, thickness, refractive_index, v-number]
    where the last two entries are optional (air is assumed), and may
    instead be strings ('REFL' for a mirror, or a glass name/catalog).

    :param surf_data: surface definition list (see above)
    :param radius_mode: if True, surf_data[0] is a radius, not curvature
    :param prev_medium: medium of the preceding gap (reused for 'REFL')
    :param wvl: wavelength used to evaluate the refractive index
    :return: tuple (surface, gap, refractive index at wvl, transform)
    """
    s = surface.Surface()

    if radius_mode:
        # Convert radius to curvature, guarding against a flat (r == 0).
        if surf_data[0] != 0.0:
            s.profile.cv = 1.0/surf_data[0]
        else:
            s.profile.cv = 0.0
    else:
        s.profile.cv = surf_data[0]

    if len(surf_data) > 2:
        if isanumber(surf_data[2]):  # assume all args are numeric
            # Bug fix: the original tested `len(surf_data) < 3`, which can
            # never be true inside this `len(surf_data) > 2` branch, so a
            # bare refractive index (3-element input) fell through to the
            # Glass branch and raised IndexError.  Test for the absence of
            # a v-number instead.
            if len(surf_data) < 4:
                if surf_data[2] == 1.0:
                    mat = m.Air()
                else:
                    mat = m.Medium(surf_data[2])
            else:
                mat = m.Glass(surf_data[2], surf_data[3], '')
        else:  # string args
            if surf_data[2].upper() == 'REFL':
                s.interact_mode = 'reflect'
                mat = prev_medium
            else:
                num_args = len(surf_data[2:])
                if num_args == 2:
                    name, cat = surf_data[2], surf_data[3]
                else:
                    name, cat = surf_data[2].split(',')
                try:
                    mat = gfact.create_glass(name, cat)
                except ge.GlassNotFoundError as gerr:
                    logging.info('%s glass data type %s not found',
                                 gerr.catalog,
                                 gerr.name)
                    logging.info('Replacing material with air.')
                    mat = m.Air()
    else:  # only curvature and thickness entered, set material to air
        mat = m.Air()

    thi = surf_data[1]
    g = gap.Gap(thi, mat)
    rndx = mat.rindex(wvl)
    tfrm = np.identity(3), np.array([0., 0., thi])

    return s, g, rndx, tfrm
def deprecate_build(id):
    """Mark a build as deprecated.

    Handles ``DELETE /builds/<id>`` and responds with an empty JSON body.

    **Authorization**

    User must be authenticated and have ``deprecate_build`` permissions.

    :reqheader Authorization: Include the token in a username field with a
        blank password; ``<token>:``.
    :param id: ID of the build.
    :statuscode 200: No error.
    :statuscode 404: Build not found.
    """
    # 404s automatically when the id does not exist.
    build_record = Build.query.get_or_404(id)
    build_record.deprecate_build()
    db.session.commit()
    return jsonify({}), 200
def link_set_addr(devname, macaddr):
    """Set the mac address of a network link.

    :param ``str`` devname:
        The name of the network device.
    :param ``str`` macaddr:
        The mac address.
    """
    cmd = ['ip', 'link', 'set', 'dev', devname, 'address', macaddr]
    subproc.check_call(cmd)
def project_task_list_layout(list_id, item_id, resource, rfields, record,
                             icon="tasks"):
    """
        Default dataList item renderer for Tasks on Profile pages

        Args:
            list_id: the HTML ID of the list
            item_id: the HTML ID of the item
            resource: the CRUDResource to render
            rfields: the S3ResourceFields to render
            record: the record as dict
            icon: the ICON name for the card header (default "tasks")

        Returns:
            the list item (DIV) for this task record
    """
    # `raw` holds the unrepresented DB values (IDs, URLs); `record` holds
    # the represented, human-readable values.
    raw = record._row
    record_id = raw["project_task.id"]
    item_class = "thumbnail"
    author = record["project_task.modified_by"]
    name = record["project_task.name"]
    assigned_to = record["project_task.pe_id"] or ""
    description = record["project_task.description"]
    date_due = record["project_task.date_due"]
    source_url = raw["project_task.source_url"]
    status = raw["project_task.status"]
    priority = raw["project_task.priority"]
    # Link to the parent project's profile page, if the task belongs to one
    project_id = raw["project_task.project_id"]
    if project_id:
        project = record["project_task.project_id"]
        project = SPAN(A(project,
                         _href = URL(c="project", f="project",
                                     args=[project_id, "profile"])
                         ),
                       " > ",
                       _class="task_project_title"
                       )
    else:
        project = ""
    # Priority indicator: only urgent/high (1, 2) and low (4) get an icon
    if priority in (1, 2):
        # Urgent / High
        priority_icon = DIV(ICON("exclamation"),
                            _class="task_priority")
    elif priority == 4:
        # Low
        priority_icon = DIV(ICON("arrow-down"),
                            _class="task_priority")
    else:
        priority_icon = ""
    # @ToDo: Support more than just the Wrike/MCOP statuses
    status_icon_colour = {2: "#AFC1E5",
                          6: "#C8D571",
                          7: "#CEC1FF",
                          12: "#C6C6C6",
                          }
    active_statuses = current.s3db.project_task_active_statuses
    # Status dot: colour-coded when the status is in the map above,
    # transparent otherwise
    status_icon = DIV(ICON("active" if status in active_statuses else "inactive"),
                      _class="task_status",
                      _style="background-color:%s" % (status_icon_colour.get(status, "none"))
                      )
    location = record["project_task.location_id"]
    # Organisation logo rendering is currently disabled (kept for reference)
    org_logo = ""
    #org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
    #org_logo = raw["org_organisation.logo"]
    #if org_logo:
    #    org_logo = A(IMG(_src=URL(c="default", f="download", args=[org_logo]),
    #                     _class="media-object",
    #                     ),
    #                 _href=org_url,
    #                 _class="pull-left",
    #                 )
    #else:
    #    # @ToDo: use a dummy logo image
    #    org_logo = A(IMG(_class="media-object"),
    #                 _href=org_url,
    #                 _class="pull-left",
    #                 )
    # Edit Bar: update/delete buttons shown only when the user has the
    # corresponding permission on this record
    # @ToDo: Consider using S3NavigationItem to hide the auth-related parts
    permit = current.auth.s3_has_permission
    table = current.db.project_task
    if permit("update", table, record_id=record_id):
        edit_btn = A(ICON("edit"),
                     _href=URL(c="project", f="task",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id},
                               ),
                     _class="s3_modal",
                     _title=get_crud_string(resource.tablename,
                                            "title_update"),
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(ICON("delete"),
                       _class="dl-item-delete",
                       _title=get_crud_string(resource.tablename,
                                              "label_delete_button"),
                       )
    else:
        delete_btn = ""
    # External link when the task was imported from another system
    if source_url:
        source_btn = A(ICON("link"),
                       _title=source_url,
                       _href=source_url,
                       _target="_blank"
                       )
    else:
        source_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   source_btn,
                   _class="edit-bar fright",
                   )
    # Render the item
    item = DIV(DIV(ICON(icon),
                   SPAN(location, _class="location-title"),
                   SPAN(date_due, _class="date-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(org_logo,
                   priority_icon,
                   DIV(project,
                       name, _class="card-title task_priority"),
                   status_icon,
                   DIV(DIV((description or ""),
                           DIV(author,
                               " - ",
                               assigned_to,
                               #A(organisation,
                               #  _href=org_url,
                               #  _class="card-organisation",
                               #  ),
                               _class="card-person",
                               ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               #docs,
               _class=item_class,
               _id=item_id,
               )
    return item
def subtractNums(x, y):
    """Return the result of subtracting x from y (i.e. y - x)."""
    difference = y - x
    return difference
def getTraceback(error=None):
    """Return the formatted current exception traceback.

    :param error: value to stringify as a fallback if the traceback
        itself cannot be formatted
    :return: the formatted traceback (up to 10 stack levels), or
        ``str(error)`` on failure
    """
    try:
        return traceback.format_exc(10)
    except Exception:
        # Bug fix: the original used the Python-2-only
        # `except Exception, err` syntax, a SyntaxError on Python 3.
        return str(error)
def test_registry_delete_key(cbcsdk_mock):
    """Test the response to the 'reg delete key' command."""
    # Register every request the Live Response session lifecycle makes.
    expected_requests = [
        ('POST', '/integrationServices/v3/cblr/session/2468', SESSION_INIT_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468', SESSION_POLL_RESP),
        ('GET', '/integrationServices/v3/device/2468', DEVICE_RESPONSE),
        ('POST', '/integrationServices/v3/cblr/session/1:2468/command', REG_DELETE_KEY_START_RESP),
        ('GET', '/integrationServices/v3/cblr/session/1:2468/command/64', REG_DELETE_KEY_END_RESP),
        ('PUT', '/integrationServices/v3/cblr/session', SESSION_CLOSE_RESP),
    ]
    for method, url, response in expected_requests:
        cbcsdk_mock.mock_request(method, url, response)
    manager = LiveResponseSessionManager(cbcsdk_mock.api)
    with manager.request_session(2468) as session:
        session.delete_registry_key('HKLM\\SYSTEM\\CurrentControlSet\\services\\ACPI\\Nonsense')
def db_cluster(fileutils, wlmutils, request):
    """
    Yield fixture for startup and teardown of a clustered orchestrator.
    This should only be used in on_wlm and full_wlm tests.
    """
    test_launcher = wlmutils.get_test_launcher()
    experiment_name = request.function.__name__
    experiment = Experiment(experiment_name, launcher=test_launcher)
    work_dir = fileutils.make_test_dir(
        caller_function=experiment_name, caller_fspath=request.fspath
    )
    orchestrator = wlmutils.get_orchestrator(nodes=3)
    orchestrator.set_path(work_dir)
    experiment.start(orchestrator)
    yield orchestrator
    # pass or fail, the teardown code below is ran after the
    # completion of a test case that uses this fixture
    experiment.stop(orchestrator)
def _copy_and_rename_file(file_path: str, dest_dir: str, new_file_name):
"""
Copies the specified file to the dest_dir (creating the directory if necessary) and renames it to the new_file_name
:param file_path: file path of the file to copy
:param dest_dir: directory to copy the file to
:param new_file_name: name the file should be changed to
:return: file path of the new file
"""
# Copy File
try:
# Creating new directory with year if does not exist
os.makedirs(dest_dir, exist_ok=True)
# Copying File
print("Copying file: {0}".format(file_path))
# new_file_copy = shutil.copyfile(file_path, dest_dir)
new_file_copy = shutil.copy(file_path, dest_dir)
print("Copied file to {0}".format(dest_dir))
# Renaming File
print("Renaming file: {0}".format(new_file_copy))
new_file_path = os.path.join(dest_dir, new_file_name)
os.rename(src=new_file_copy, dst=new_file_path)
print("File successfully renamed to " + new_file_path)
return new_file_path
except Exception as e:
print("Failed to copy or rename file.")
print(e) | 37,012 |
def greasePencilCtx(*args, **kwargs):
    """
    This is a tool context command for the grease pencil tool. In query mode, return type is based on queried
    flag.
    Flags:
    - autoCreateFrames : acf (bool) []
    - canDraw : cd (int) []
    - createOrEditFrame : cef (int) []
    - exists : ex (bool) []
    - exportArchive : eac (unicode, unicode) []
    - fileTextureSize : fts (int) []
    - greasePencilType : gpt (int) []
    - image1 : i1 (unicode) []
    - image2 : i2 (unicode) []
    - image3 : i3 (unicode) []
    - importArchive : iac (unicode) []
    - makeStroke : mst (int) []
    - removeFrame : rf (int) []
    - resetBrushes : rb (bool) []
    - rgbcolor : rgb (float, float, float) []
    - sequenceNodeName : snn (unicode) []
    Derived from mel command `maya.cmds.greasePencilCtx`
    """
    # Generated stub: the body is intentionally empty -- presumably the
    # real implementation is supplied at runtime by the wrapper factory
    # for maya.cmds.greasePencilCtx (see docstring); confirm against the
    # pymel code generator.
    pass
def confirm_email(token):
    """ Verify email confirmation token and activate the user account."""
    # Verify token
    user_manager = current_app.user_manager
    db_adapter = user_manager.db_adapter
    # verify_token() returns (validity, expiry, encoded object id).
    is_valid, has_expired, object_id = user_manager.verify_token(
        token,
        user_manager.confirm_email_expiration)
    # Expired or invalid tokens both bounce back to the login page with a
    # flashed (Portuguese) error message.
    if has_expired:
        flash(_('Seu token de confirmacao expirou.'), 'error')
        return redirect(url_for('user.login'))
    if not is_valid:
        flash(_('Token de confirmacao invalido.'), 'error')
        return redirect(url_for('user.login'))
    """ Confirm email by setting User.confirmed_at=utcnow() or
    UserEmail.confirmed_at=utcnow()"""
    user = None
    # With a separate UserEmail model, the token id refers to the email
    # record; otherwise it refers to the user record directly.
    if db_adapter.UserEmailClass:
        user_email = user_manager.get_user_email_by_id(object_id)
        if user_email:
            user_email.confirmed_at = datetime.utcnow()
            user = user_email.user
    else:
        user_email = None
        user = user_manager.get_user_by_id(object_id)
        if user:
            user.confirmed_at = datetime.utcnow()
    if user:
        # Activate the account and persist the confirmation timestamp.
        user.set_active(True)
        db_adapter.commit()
    else:  # pragma: no cover
        # Token decoded to an id that no longer exists.
        flash(_('Token de confirmacao invalido.'), 'error')
        return redirect(url_for('user.login'))
    # Send email_confirmed signal
    signals.user_confirmed_email.send(
        current_app._get_current_object(), user=user)
    # Prepare one-time system message
    flash(_('Seu email foi confirmado.'), 'success')
    # Auto-login after confirm or redirect to login page
    safe_next = _get_safe_next_param(
        'next', user_manager.after_confirm_endpoint)
    if user_manager.auto_login_after_confirm:
        return _do_login_user(user, safe_next)  # auto-login
    else:
        return redirect(
            url_for('user.login')+'?next='+quote(safe_next)
        )  # redirect to login page
def get_vector_paths_4_sample_set(set_name: str, base_path: str) -> List[PosixPath]:
    """
    Gets the feature-vector file for every sample in a sample-set directory.

    :param set_name: Str indicating the name of the directory for a given set of samples
    :param base_path: Str indicating the location directory of samples
    :return: one '*-ft_vecs.npy' path per sample directory that has vectors
    """
    paths = []
    set_dir = Path(base_path) / set_name
    # TODO: Validate existence of directory
    # Iterate over all the samples for a set
    for sample_directory in set_dir.iterdir():
        vector_files = list(sample_directory.rglob('*-ft_vecs.npy'))
        if not vector_files:
            # Bug fix: the original logged the undefined name `directory`,
            # raising NameError whenever a sample had no vector files.
            logging.warning(f"Could not load vectors for sample {str(sample_directory)}")
            continue
        paths.append(vector_files[0])
    return paths
def _GetInstanceField(instance, field):
    """Get the value of a single field of an instance.

    @type instance: string
    @param instance: Instance name
    @type field: string
    @param field: Name of the field
    @rtype: string
    """
    # Unpack the single-element result of the bulk query.
    (value,) = _GetInstanceFields(instance, [field])
    return value
def test_get_most_populous_class2():
    """The majority label within the masked region should be 7."""
    segment_mask = np.array([[1, 0, 0],
                             [1, 1, 0],
                             [0, 1, 0]])
    label_map = np.array([[9, 8, 8],
                          [7, 7, 8],
                          [8, 7, 8]])
    # Masked labels are {9, 7, 7, 7}: 7 dominates.
    assert get_most_populous_class(segment_mask, label_map) == 7
def infectious_rate_tweets(t,
                           p0=0.001,
                           r0=0.424,
                           phi0=0.125,
                           taum=2.,
                           t0=0,
                           tm=24,
                           bounds=None):
    """
    Alternative form of the infectious rate from the paper, converted to
    hours.  Supports bounds for r0 and taum, passed as an array of the
    form [(lower r0, lower taum), (upper r0, upper taum)].

    :param t: point to evaluate function at (in hours)
    :param p0: base rate
    :param r0: amplitude
    :param phi0: shift (in days)
    :param taum: decay/freshness (in days)
    :param t0: start time of observation (in hours)
    :param tm: cyclic property (after what time a full circle passed, in hours)
    :param bounds: bounds for r0 and taum
    :return: intensity for point t
    """
    if bounds is not None:
        (r0_lo, taum_lo), (r0_hi, taum_hi) = bounds
        # Clamp out-of-range parameters via a sigmoid-scaled upper bound.
        if not (r0_lo < r0 < r0_hi):
            r0 = max(r0_lo, r0_hi * sigmoid(taum / r0_hi))
        if not (taum_lo < taum < taum_hi):
            taum = max(taum_lo, taum_hi * sigmoid(taum / taum_hi))
    periodic = 1. - r0 * sin((48 / tm) * pi * ((t + t0) / 24 + phi0))
    decay = exp(-t / (24 * taum))
    return p0 * periodic * decay
def test_valid_registration(test_client, init_database):
    """
    GIVEN a Flask application
    WHEN the '/register' page is posted with valid data
    THEN check the response is valid and the user is logged in
    """
    form_data = dict(email='testuser@gmail.com',
                     password='password',
                     password_confirm='password')
    response = test_client.post('/users/register',
                                data=form_data,
                                follow_redirects=True)
    assert response.status_code == 200
    # Success message is Japanese: "A new account has been created."
    assert '新しくアカウントが作成されました。'.encode('utf-8') in response.data
    registered = User.query.filter_by(email='testuser@gmail.com').first()
    assert isinstance(registered, User)
    assert registered.grades == 'none'
def fetch_user(username):
    """
    Fetch the user instance with the given username.

    Delegates to the ORM's ``get()``, so -- per the usual Django QuerySet
    contract -- it raises ``DoesNotExist`` when no match is found and
    ``MultipleObjectsReturned`` when the username is not unique.
    """
    return User.objects.get(username=username)
def plot_segments(onsets,
                  offsets,
                  y=0.5,
                  ax=None,
                  line_kwargs=None):
    """plot segments on an axis.

    Creates a collection of horizontal lines
    with the specified `onsets` and `offsets`
    all at height `y` and places them on the axes `ax`.

    Parameters
    ----------
    onsets : numpy.ndarray
        onset times of segments
    offsets : numpy.ndarray
        offset times of segments
    y : float, int
        height on y-axis at which segments should be plotted.
        Default is 0.5.
    ax : matplotlib.axes.Axes
        axes on which to plot segment. Default is None,
        in which case a new Axes instance is created
    line_kwargs : dict
        keyword arguments passed to the `LineCollection`
        that represents the segments. Default is None.
    """
    if line_kwargs is None:
        line_kwargs = {}
    if ax is None:
        # Bug fix: the original was `plt.subplots` without parentheses,
        # which bound the function object to `ax` instead of creating axes.
        fig, ax = plt.subplots()
    # One ((x0, y), (x1, y)) line per onset/offset pair.
    segments = []
    for on, off in zip(onsets, offsets):
        segments.append(
            ((on, y), (off, y))
        )
    lc = LineCollection(segments, **line_kwargs)
    ax.add_collection(lc)
def print_advancement(file: str, curr: int, total: int):
    """Render a one-line progress bar for the predict_on_stocks loop.

    Rewrites the current terminal line with a spinner, a 20-cell bar and
    the name of the file being processed.
    """
    filled = round((curr / total) * 20)
    bar = '\033[0;37;44m \033[0;0;0m' * filled + '_' * (20 - filled)
    # Clear the line, then redraw spinner + bar + status in place.
    print('\r\x1b[2K', end='')
    print(f'[{LOADING[curr % LEN_LOADING]}]', end='')
    print(f'{bar}| ', end='')
    print(f'Predicting on file {file} ...', end='', flush=True)
def test_pyparser():
    """Test case: general parsing."""
    def _check_blocks(actual, expected):
        # Compare the parsed block list against the expected block specs:
        # each expected entry is either a block class, or a (class, type)
        # tuple when the block's `.type` attribute must match too.
        assert actual, "No parse results"
        for i in range(len(actual)):
            assert i < len(expected), "Unexpected block %d:\n%r" % (i, actual[i])
            valid = False
            if isinstance(expected[i], type):
                if isinstance(actual[i], expected[i]): valid = True
            elif isinstance(expected[i], tuple):
                if isinstance(actual[i], expected[i][0]) and actual[i].type == expected[i][1]: valid = True
            if not valid:
                assert False, "Invalid block: expected %r, got %r" % (expected[i], actual[i])
    def check_code(code, blocks=None, filename=None):
        # Round-trip a code fragment through the parser: parse, unparse,
        # and assert the unparsed text matches the input; optionally also
        # verify the recognised block structure.
        code = textwrap.dedent(code)
        if not code.endswith("\n"): code += "\n"
        if filename:
            print("Testing file %s..." % filename, end=" ")
        else:
            # Running counter stored on the function itself.
            check_code.index = getattr(check_code, "index", 0) + 1
            print("Testing code fragment %d..." % check_code.index, end=" ")
        preparsed = None
        parsed = None
        unparsed = None
        try:
            preparsed = pyparser.parse_text(code)
            parsed = preparsed.parse(2)
            try:
                unparsed = parsed.unparse()
            except ValueError as e:
                for i, tok in enumerate(parsed.tokens):
                    print("%3d %r" % (i, tok))
                raise AssertionError("Cannot unparse code: %s" % e)
            assert_same_code(code, unparsed)
            if blocks:
                _check_blocks(parsed.parsed, blocks)
            print("ok")
        except AssertionError as e:
            # Dump diagnostic context before re-raising the failure.
            print()
            print(u"Error: " + str(e))
            print(u"Original code fragment:\n" + code)
            if unparsed: print(u"Unparsed code:\n" + unparsed)
            if parsed:
                print(parsed)
                for i, tok in enumerate(parsed.tokens):
                    print("%3d %r" % (i, tok))
            raise
        except Exception as e:
            # Unexpected failure: show whatever stage succeeded.
            print()
            print(u"Error: " + str(e))
            if preparsed:
                print("Preparsed tokens:")
                for i, tok in enumerate(preparsed.tokens):
                    print("%4d %r" % (i, tok))
            else:
                print("Initial parsing has failed...")
            raise
    # NOTE(review): blank lines inside this sample fragment were lost in
    # the source dump; they are reconstructed here to match the expected
    # Ws (whitespace) blocks in the assertion list -- confirm against the
    # original test file.
    check_code("""
        # -*- encoding: utf-8 -*-
        # copyright: 2016 h2o.ai
        \"\"\"
        A code example.
        It's not supposed to be functional, or even functionable.
        \"\"\"
        from __future__ import braces, antigravity

        # Standard library imports
        import sys
        import time
        import this

        import h2o
        from h2o import H2OFrame, init

        from . import *

        # Do some initalization for legacy python versions
        if PY2:
            def unicode():
                raise RuntimeError  # disable this builtin function
                # because it doesn't exist in Py3

            handler = lambda: None  # noop
            # (will redefine later)

        ################################################################################
        # comment 1
        class Foo(object):
            #------ Public -------------------------------------------------------------
            def bar(self):
                pass

        # def foo():
        #     print(1)
        #
        # print(2)

        # comment 2
        @decorated(
            1, 2, (3))
        @dddd
        def bar():
            # be
            # happy
            print("bar!")
        # bye""", [Ws, Comment, Docstring, Import_future, Ws, Import_stdlib, Ws, Import_1stpty, Ws, Expression,
                   Ws, Expression, Ws, Comment_banner, Ws, Class, Ws, Comment_code, Ws, Function, Comment, Ws])
    # Finally, round-trip every .py file found under the project trees.
    for directory in [".", "../../h2o-py", "../../py"]:
        absdir = os.path.abspath(directory)
        for dir_name, subdirs, files in os.walk(absdir):
            for f in files:
                if f.endswith(".py"):
                    filename = os.path.join(dir_name, f)
                    with open(filename, "rt", encoding="utf-8") as fff:
                        check_code(fff.read(), filename=filename)
def write_tableau_old_format(n, Omega, ssestr):
    """
    Write tableau to stdout in the original (Arun) TableauCreator format,
    with angles in degrees, full matrix, number of SSEs on the first line
    and the SSE sequence (DSSP codes E,H) on the second line.

    n - order of tableau matrix (n by n)
    Omega - Numeric matrix for Omega matrix (angles in radians)
    ssestr - SSE string corresponding to the Omega matrix
    """
    import math

    sys.stdout.write(str(len(Omega)) + '\n')
    sys.stdout.write(ssestr + '\n')
    for k in range(n):
        for l in range(n):
            angle = math.degrees(Omega[k, l])
            # Diagonal entries and undefined angles are written as the
            # sentinel -999.0 (math.isnan replaces the hand-rolled isNaN).
            if math.isnan(angle) or k == l:
                angle = -999.0
            sys.stdout.write("% 7.1f " % angle)
        sys.stdout.write("\n")
def get_script_name(key_combo, key):
    """Build a script name, e.g. ('ctrl-shift', 'a') -> CtrlShiftA and ('key', 'a') -> A."""
    capitalized_key = key.capitalize()
    if key_combo == 'key':
        # Plain keys carry no combo prefix.
        return capitalized_key
    return get_capitalized_key_combo_pattern(key_combo) + capitalized_key
def _configure_legend(axs):
    """Attach the colour-coded legend to the given axes."""
    handles = [
        mpatches.Patch(color='black', label='segment'),
        mpatches.Patch(color='#fcba03', label='playing through'),
        mpatches.Patch(color='green', label='regular broadcast'),
        mpatches.Patch(color='blue', label='commercial'),
    ]
    axs.legend(handles=handles, prop={'size': 96}, loc="best",
               bbox_to_anchor=(1.20, 1.0, 0., 0.))
def layer(name, features):
    """Make a vector_tile.Tile.Layer from GeoJSON features.

    Python 2 code: relies on itertools.imap/ifilter and list-returning
    map(); zig-zag encodes coordinates per the Mapbox Vector Tile spec.
    """
    pbl = vector_tile_pb2.tile.layer()
    pbl.name = name
    pbl.version = 1
    # Shared key/value string tables for the layer; features reference
    # them by index via their tags.
    pb_keys = []
    pb_vals = []
    pb_features = []
    # NOTE(review): pb_features is accumulated but never attached to pbl
    # before returning -- confirm whether the caller adds them, or whether
    # an extend() call is missing here.
    for j, f in enumerate(
            chain.from_iterable(singles(ob) for ob in features)):
        pbf = vector_tile_pb2.tile.feature()
        pbf.id = j
        # Pack up the feature geometry.
        g = f.get('geometry')
        if g:
            gtype = g['type']
            coords = g['coordinates']
            if gtype == 'Point':
                # (1<<3)+1 is the MoveTo command header; coordinates are
                # zig-zag encoded: (n << 1) ^ (n >> 31).
                # NOTE(review): this branch builds `geometry` but no
                # extend() follows it, so Point geometry appears to be
                # discarded -- confirm against the original module.
                geometry = [(1<<3)+1] + [
                    (n << 1) ^ (n >> 31) for n in imap(int, coords)]
            elif gtype == 'LineString':
                # MoveTo for the first vertex, then LineTo deltas for the
                # remaining num-1 vertices.
                num = len(coords)
                geometry = [0]*(4 + 2*(num-1))
                geometry[0] = (1<<3)+1
                geometry[1:3] = (
                    (n << 1) ^ (n >> 31) for n in imap(int, coords[0]))
                geometry[3] = ((num-1)<<3)+2
                for i, (prev, pair) in enumerate(pairwise(coords), 1):
                    prev = map(int, prev)
                    pair = map(int, pair)
                    geometry[2*i+2:2*i+4] = (
                        (n << 1) ^ (n >> 31) for n in (
                            pair[0]-prev[0], pair[1]-prev[1]))
                pbf.geometry.extend(geometry)
            elif gtype == 'Polygon':
                # One MoveTo + LineTo run per ring, terminated by the
                # ClosePath command (1<<3)+7.
                rings = []
                for ring in coords:
                    num = len(ring)
                    geometry = [0]*(5 + 2*(num-1))
                    geometry[0] = (1<<3)+1
                    geometry[1:3] = (
                        (n << 1) ^ (n >> 31) for n in imap(int, ring[0]))
                    geometry[3] = ((num-1)<<3)+2
                    for i, (prev, pair) in enumerate(pairwise(ring), 1):
                        prev = map(int, prev)
                        pair = map(int, pair)
                        geometry[2*i+2:2*i+4] = (
                            (n << 1) ^ (n >> 31) for n in (
                                pair[0]-prev[0], pair[1]-prev[1]))
                    geometry[-1] = (1<<3)+7
                    pbf.geometry.extend(geometry)
            pbf.type = geom_type_map[gtype]
        # Pack up feature properties.
        props = f.get('properties', {})
        tags = [0]*(2*len(props))
        for i, (k, v) in enumerate(props.items()):
            # Deduplicate keys/values into the layer-level tables and
            # store (key index, value index) pairs in the feature tags.
            if k not in pb_keys:
                pb_keys.append(k)
            if v not in pb_vals:
                pb_vals.append(v)
            tags[i*2:i*2+2] = pb_keys.index(k), pb_vals.index(v)
        pbf.tags.extend(tags)
        pb_features.append(pbf)
    # Finish up the layer.
    pbl.keys.extend(map(str, pb_keys))
    pbl.values.extend(map(value, ifilter(None, pb_vals)))
    return pbl
def serialize(formula, threshold=None):
    """Provides a string representing the formula.

    :param formula: The target formula
    :type formula: FNode
    :param threshold: Specify the threshold
    :type threshold: Integer
    :returns: A string representing the formula
    :rtype: string
    """
    serializer = get_env().serializer
    return serializer.serialize(formula, threshold=threshold)
def shutdown_all_simulators(path=None):
    """Shutdown all simulator devices.

    Fix for DVTCoreSimulatorAdditionsErrorDomain error.

    Args:
      path: (str) A path with simulators
    """
    command = ['xcrun', 'simctl']
    if path:
        command += ['--set', path]
        LOGGER.info('Shutdown all simulators from folder %s.', path)
    else:
        LOGGER.info('Shutdown all simulators.')
    try:
        subprocess.check_call(command + ['shutdown', 'all'])
    except subprocess.CalledProcessError as e:
        # Logging error instead of throwing so we don't cause failures in
        # case this was indeed failing to clean up.
        # Bug fix: check_call never captures output, so e.output was
        # always None; log the exception itself instead.
        LOGGER.error('Failed to shutdown all simulators. Error: %s', e)
def find_single_network_cost(region, option, costs, global_parameters,
                             country_parameters, core_lut):
    """
    Calculates the annual total cost using capex and opex.

    Parameters
    ----------
    region : dict
        The region being assessed and all associated parameters.
    option : dict
        Contains the scenario and strategy. The strategy string controls
        the strategy variants being tested in the model and is defined based
        on the type of technology generation, core and backhaul, and the
        strategy for infrastructure sharing, the number of networks in each
        geotype, spectrum and taxation.
    costs : dict
        All equipment costs.
    global_parameters : dict
        All global model parameters.
    country_parameters : dict
        All country specific parameters.
    core_lut : dict
        Contains the number of existing and required, core and regional assets.

    Returns
    -------
    region : dict
        Contains all regional data (with per-asset costs and
        'network_cost' added).
    """
    strategy = option['strategy']
    generation = strategy.split('_')[0]
    core = strategy.split('_')[1]
    backhaul = strategy.split('_')[2]

    new_mno_sites = region['new_mno_sites']
    upgraded_mno_sites = region['upgraded_mno_sites']
    all_sites = new_mno_sites + upgraded_mno_sites
    new_backhaul = region['backhaul_new']

    def _cost_structure(site_index):
        """Select and build the cost structure for one site.

        Sites 1..upgraded_mno_sites are brownfield upgrades; the rest
        are greenfield builds.  Returns None when the (generation, core)
        combination is not modelled.  Collapses the six near-identical
        branches of the original implementation.
        """
        upgrade = site_index <= upgraded_mno_sites
        if generation == '4G':
            build = upgrade_to_4g if upgrade else greenfield_4g
        elif generation == '5G' and core == 'nsa':
            build = upgrade_to_5g_nsa if upgrade else greenfield_5g_nsa
        elif generation == '5G' and core == 'sa':
            build = upgrade_to_5g_sa if upgrade else greenfield_5g_sa
        else:
            return None
        return build(region, strategy, costs, global_parameters,
                     core_lut, country_parameters)

    regional_cost = []
    regional_asset_cost = []
    for i in range(1, int(all_sites) + 1):
        cost_structure = _cost_structure(i)
        if cost_structure is None:
            continue
        backhaul_quant = backhaul_quantity(i, new_backhaul)
        total_cost, cost_by_asset = calc_costs(
            region, cost_structure, backhaul, backhaul_quant,
            global_parameters, country_parameters)
        regional_cost.append(total_cost)
        regional_asset_cost.append(cost_by_asset)

    # Aggregate the per-site asset costs across the whole region.
    counter = collections.Counter()
    for d in regional_asset_cost:
        counter.update(d)

    network_cost = 0
    for asset, cost in dict(counter).items():
        region[asset] = cost
        network_cost += cost
    region['network_cost'] = network_cost

    return region
def get_headings(bulletin):
    """Return the upper-case heading lines from a job-bulletin text file.

    :param bulletin: index into the global ``bulletins`` list of filenames
    :return: list of lines that are entirely upper case (the headings)
    """
    path = "../input/cityofla/CityofLA/Job Bulletins/" + bulletins[bulletin]
    with open(path) as f:  # reading text files
        lines = f.read().replace('\t', '').split('\n')
    return [line for line in lines if line.isupper()]
def phi_text_page_parse(pageAnalyse_str, phi_main_url):
    """
    params: pageAnalyse_str, str.
    return: phi_page_dict, dict.

    It takes the precedent functions and maps their information to a
    dictionary.
    """
    # phi_text_id returns (id_no, url); the original called it twice for
    # the same page -- call it once and index the single result.
    text_id = phi_text_id(pageAnalyse_str, domain_url=phi_main_url)
    phi_page_dict = {
        "phi_text_id_no": text_id[0],
        "phi_text_region": phi_lemma_region(pageAnalyse_str),
        "phi_text_url": text_id[1],
        "phi_text_info": phi_lemma_text_info(pageAnalyse_str),
        "phi_text": phi_lemma_text(pageAnalyse_str),
    }
    return phi_page_dict
def fdfilt_lagr(tau, Lf, fs):
    """
    Lagrange fractional-delay filter coefficients.

    Parameters
    ----------
    tau : delay / s
    Lf : length of the filter / sample (must be integer-valued)
    fs : sampling rate / Hz

    Returns
    -------
    h : (Lf)
        nonzero filter coefficients
    ni : time index of the first element of h
    n0 : time index of the center of h

    Raises
    ------
    ValueError
        If Lf is not integer-valued (Lf % 2 is neither 0 nor 1).
    """
    d = tau * fs
    if Lf % 2 == 0:
        # Even length: center on the ceiling of the delay.
        n0 = np.ceil(d)
        Lh = int(Lf / 2)
        idx = np.arange(n0 - Lh, n0 + Lh).astype(int)
    elif Lf % 2 == 1:
        # Odd length: center on the rounded delay.
        n0 = np.round(d)
        Lh = int(np.floor(Lf / 2))
        idx = np.arange(n0 - Lh, n0 + Lh + 1).astype(int)
    else:
        # Bug fix: the original only printed a message here and then fell
        # through to the return, raising NameError on the undefined idx.
        raise ValueError('Invalid value of Lf. Must be an integer')
    return lagr_poly_barycentric2(idx, d), idx[0], n0
def main():
    """! Main program entry: parse CLI args for the "jh" source and fetch data."""
    # Parse command-line arguments for the "jh" data source
    # (presumably Johns Hopkins — confirm against gd.cli's source table).
    arg_dict = gd.cli("jh")
    # Forward the parsed arguments to the data retrieval routine.
    get_jh_data(**arg_dict)
def hit_n_run(A_mat, b_vec, n_samples=200, hr_timeout=ALG_TIMEOUT_MULT):
    """ Hit and Run Sampler:
    1. Sample current point x
    2. Generate a random direction r
    3. Define gamma_i = ( b - a_i'x ) / ( r'a_i )
    4. Calculate max(gamma < 0) gamma_i and min(gamma > 0) gamma_i
    5. Sample uniformly from [min_gamma, max_gamma]

    Samples points from the polytope {x : A_mat @ x <= b_vec}, attempting at
    most n_samples * hr_timeout steps and keeping only feasible points.
    Raises Exception if fewer than min(0.4 * n_samples, 500) points were
    collected before the step budget ran out.
    """
    m, n = A_mat.shape
    # Feasible starting point from the separate initialisation routine.
    curr_pt = hit_n_run_init(A_mat, b_vec)
    pts = [curr_pt]
    pts_len = 1
    bar = tqdm(total=n_samples)
    for _ in range(n_samples * hr_timeout):
        # Random direction, normalised to the unit sphere.
        direction = np.random.randn(n)
        direction = direction / np.linalg.norm(direction)
        # calculate gamma
        numer = b_vec - np.dot(A_mat, curr_pt)
        denom = np.dot(A_mat, direction)
        gamma = [ n / d for n, d in zip(numer, denom) ]
        # Include 0 so the min/max below are well defined even when all
        # gammas share one sign.
        gamma.append(0)
        gamma = np.array(gamma)
        # Step range along `direction` that stays inside the polytope:
        # largest negative gamma .. smallest positive gamma.
        if (gamma > 0).all():
            gamma_min = 0
        else:
            gamma_min = max(gamma[gamma < 0])
        if (gamma < 0).all():
            gamma_max = 0
        else:
            gamma_max = min(gamma[gamma > 0])
        magnitude = np.random.uniform(low=gamma_min, high=gamma_max)
        curr_pt = curr_pt + magnitude * direction
        # Keep the new point only if it is actually feasible (guards against
        # numerical drift at the boundary).
        if is_feasible(A_mat, b_vec, curr_pt):
            pts.append(curr_pt)
            bar.update(1)
            pts_len += 1
            if pts_len >= n_samples:
                break
        else:
            pass
    bar.close()
    if len(pts) < min(0.4 * n_samples, 500):
        raise Exception(
            'Sampled {} points instead of {}'.format(len(pts), 0.4 * n_samples)
        )
    return pts
def construct_simulation_hydra_paths(base_config_path: str) -> HydraConfigPaths:
    """
    Build the relative hydra config paths used for simulation, bundled into a
    HydraConfigPaths object (keeps the tutorial notebooks uncluttered).
    :param base_config_path: Base config path.
    :return Hydra config path.
    """
    config_root = join(base_config_path, 'config')
    return HydraConfigPaths(
        "file://" + join(config_root, 'common'),
        'default_simulation',
        join(config_root, 'simulation'),
        "file://" + join(base_config_path, 'experiments'),
    )
def delete_from_limits_by_id(id, connection, cursor):
    """
    Delete row with a certain ID from limits table
    :param id: ID to delete (integer or integer-like string)
    :param connection: connection instance (used to commit the delete)
    :param cursor: cursor instance
    :return: dict with 'status'/'message' on success, or 'failure' when the
        ID does not exist
    """
    check_for_existence = get_limit_by_id(id, cursor)
    if check_for_existence.get('failure') is None:
        # Coerce to int before interpolating into SQL: the original formatted
        # the raw value straight into the statement, which allowed SQL
        # injection through a crafted id string.
        delete_query = '''Delete from limits where id = {}'''
        cursor.execute(delete_query.format(int(id)))
        connection.commit()
        print(f'Record with id={id} deleted')
        return {'status': 'success', 'message': f'Record with id={id} deleted'}
    else:
        print(f'Failed to delete, ID={id} does not exist')
        return {'failure': f'Failed to delete, ID={id} does not exist'}
def _dummy_func(x):
    """Trivial function used to check whether Numba compilation actually works."""
    # The increment only rebinds the local; the body just needs to be
    # something Numba can compile.
    x += 1
def article_search(request):
    """Search for articles using a search vector over the title and body
    fields with weights 1 and 0.4 respectively. The search terms are
    stemmed, and the proximity of the searched words to each other is taken
    into account when ranking."""
    query = ''
    results = []
    # Only run the search when a query string was actually submitted.
    if 'query' in request.GET:
        results, query = search_results(request)
    return render(request, 'articles/post/search.html', {'query': query,
                                                         'results': results})
def get_campaign_data(api, campaign_id):
    """Return campaign metadata for the given campaign ID."""
    # Fetch the raw campaign record from GoPhish as a plain dict.
    raw: dict = api.campaigns.get(campaign_id).as_dict()
    campaign = {
        "id": raw["name"],
        "start_time": raw["launch_date"],
        "end_time": raw["completed_date"],
        "url": raw["url"],
        "subject": raw["template"]["subject"],
    }
    # Resolve the template ID into the GoPhish template name, keeping the
    # third dash-separated token as the template identifier.
    template_name = api.templates.get(raw["template"]["id"]).as_dict()["name"]
    campaign["template"] = template_name.split("-")[2]
    campaign["clicks"] = get_click_data(api, campaign_id)
    # E-mail send status reported by GoPhish.
    campaign["status"] = get_email_status(api, campaign_id)
    return campaign
def create_small_map(sharing_model):
    """
    Create small map and 2 BS
    :param sharing_model: sharing model forwarded to get_sharing_for_bs
    :returns: tuple (map, bs_list)
    """
    # Named `small_map` rather than `map` to avoid shadowing the builtin
    # map() inside this function.
    small_map = Map(width=150, height=100)
    bs1 = Basestation('A', Point(50, 50), get_sharing_for_bs(sharing_model, 0))
    bs2 = Basestation('B', Point(100, 50), get_sharing_for_bs(sharing_model, 1))
    bs_list = [bs1, bs2]
    return small_map, bs_list
def save_dataz(file_name, obj, **kwargs):
    """Save structured data to an ``.npz`` archive.

    Note: despite the name, this calls ``numpy.savez`` (an *uncompressed*
    zip archive, not ``numpy.save``); extra keyword arguments are forwarded
    to ``numpy.savez`` and ``obj`` is stored under the default name
    ``arr_0``.
    """
    return np.savez(file_name, obj, **kwargs)
def tfrecord_iterator(data_path: str,
                      index_path: typing.Optional[str] = None,
                      shard: typing.Optional[typing.Tuple[int, int]] = None
                      ) -> typing.Iterable[memoryview]:
    """Create an iterator over the tfrecord dataset.
    Since the tfrecords file stores each example as bytes, we can
    define an iterator over `datum_bytes_view`, which is a memoryview
    object referencing the bytes.
    Params:
    -------
    data_path: str
        TFRecord file path.
    index_path: str, optional, default=None
        Index file path. Can be set to None if no file is available.
    shard: tuple of ints, optional, default=None
        A tuple (index, count) representing worker_id and num_workers
        count. Necessary to evenly split/shard the dataset among many
        workers (i.e. >1).
    Yields:
    -------
    datum_bytes_view: memoryview
        Object referencing the specified `datum_bytes` contained in the
        file (for a single record).
    """
    file = io.open(data_path, "rb")
    # Reusable scratch buffers: 8-byte record length, 4-byte CRC, and a
    # growable payload buffer starting at 1 MiB.
    length_bytes = bytearray(8)
    crc_bytes = bytearray(4)
    datum_bytes = bytearray(1024 * 1024)
    def read_records(start_offset=None, end_offset=None):
        # On-disk layout of each TFRecord:
        #   uint64 length | uint32 length-CRC | payload | uint32 payload-CRC
        nonlocal length_bytes, crc_bytes, datum_bytes
        if start_offset is not None:
            file.seek(start_offset)
        if end_offset is None:
            end_offset = os.path.getsize(data_path)
        while file.tell() < end_offset:
            if file.readinto(length_bytes) != 8:
                raise RuntimeError("Failed to read the record size.")
            if file.readinto(crc_bytes) != 4:
                raise RuntimeError("Failed to read the start token.")
            length, = struct.unpack("<Q", length_bytes)
            if length > len(datum_bytes):
                # Grow the payload buffer by ~1.5x; zfill is just a cheap way
                # to obtain a longer bytearray (contents are overwritten by
                # the readinto below).
                datum_bytes = datum_bytes.zfill(int(length * 1.5))
            datum_bytes_view = memoryview(datum_bytes)[:length]
            if file.readinto(datum_bytes_view) != length:
                raise RuntimeError("Failed to read the record.")
            if file.readinto(crc_bytes) != 4:
                raise RuntimeError("Failed to read the end token.")
            # Note: the CRC values are read to advance the file but are not
            # verified.
            yield datum_bytes_view
    if index_path is None:
        yield from read_records()
    else:
        # First column of the index file holds each record's byte offset.
        index = np.loadtxt(index_path, dtype=np.int64)[:, 0]
        if shard is None:
            # Start at a random record and wrap around to the beginning so
            # successive epochs begin at different positions in the file.
            offset = np.random.choice(index)
            yield from read_records(offset)
            yield from read_records(0, offset)
        else:
            # Give worker `shard_idx` its contiguous slice of the records.
            num_records = len(index)
            shard_idx, shard_count = shard
            start_index = (num_records * shard_idx) // shard_count
            end_index = (num_records * (shard_idx + 1)) // shard_count
            start_byte = index[start_index]
            end_byte = index[end_index] if end_index < num_records else None
            yield from read_records(start_byte, end_byte)
    # NOTE(review): the file is only closed when the generator is fully
    # exhausted; early termination relies on GC to close the handle.
    file.close()
def test_wps_ext_kwa_proto_kwa_mismatch(dev, apdev):
    """WPS and KWA error: KWA mismatch"""
    # Drive a WPS exchange up to M3 and capture the keys/attributes needed
    # to craft a malformed M4.
    r_s1,keywrapkey,authkey,raw_m3_attrs,eap_id,bssid,attrs = wps_start_kwa(dev, apdev)
    data = build_wsc_attr(ATTR_R_SNONCE1, r_s1)
    # Encrypted Settings and KWA with incorrect value
    # NOTE(review): 8*'\x00' is a str while the IV below uses bytes —
    # presumably build_wsc_attr accepts both; confirm under Python 3.
    data += build_wsc_attr(ATTR_KEY_WRAP_AUTH, 8*'\x00')
    iv = 16*b'\x99'
    aes = AES.new(keywrapkey, AES.MODE_CBC, iv)
    # PKCS#7-style padding up to the AES block size.
    pad_len = 16 - len(data) % 16
    ps = pad_len * struct.pack('B', pad_len)
    data += ps
    wrapped = aes.encrypt(data)
    attrs += build_wsc_attr(ATTR_ENCR_SETTINGS, iv + wrapped)
    # Send the tampered M4 and verify the protocol run fails as expected.
    wps_stop_kwa(dev, bssid, attrs, authkey, raw_m3_attrs, eap_id)
def step(x):
    """Heaviside step function: 0 for x < 0, 0.5 at x == 0, 1 for x > 0."""
    # Nested where keeps the float dtype of the original ones_like result.
    return np.where(x < 0, 0.0, np.where(x == 0, 0.5, 1.0))
def createRaviartThomas0VectorSpace(context, grid, segment=None,
                                    putDofsOnBoundaries=False,
                                    requireEdgeOnSegment=True,
                                    requireElementOnSegment=False):
    """
    Create and return a space of lowest order Raviart-Thomas vector functions
    with normal components continuous on boundaries between elements.
    *Parameters:*
       - context (Context)
            A Context object determining the type used to represent the
            values of the basis functions of the newly constructed space.
       - grid (Grid)
            Grid on which the functions from the newly constructed space will
            be defined.
       - segment (GridSegment)
            (Optional) Segment of the grid on which the space should be
            defined. If set to None (default), the whole grid will be used.
       - putDofsOnBoundaries (bool)
            (Optional) If set to False (default), degrees of freedom will not
            be placed on edges lying on boundaries of the grid — usually the
            desired behaviour for simulations of open perfectly conducting
            surfaces (sheets). If set to True, degrees of freedom will be
            placed on all edges belonging to the chosen segment of the grid.
       - requireEdgeOnSegment (bool)
            (Optional) Restrict DOFs to edges belonging to the segment.
       - requireElementOnSegment (bool)
            (Optional) Restrict DOFs to elements belonging to the segment.
    *Returns* a newly constructed Space_BasisFunctionType object, with
    BasisFunctionType determined automatically from the context argument and
    equal to either float32, float64, complex64 or complex128.
    """
    # dofMode is a bitmask: bit 0 = require edge on segment,
    # bit 1 = require element on segment.
    dofMode = ((1 if requireEdgeOnSegment else 0)
               | (2 if requireElementOnSegment else 0))
    return _constructObjectTemplatedOnBasis(
        core, 'raviartThomas0VectorSpace', context.basisFunctionType(), grid,
        segment, putDofsOnBoundaries, dofMode)
def testimage():
    """
    PIL lets you create in-memory images with various pixel types:
    >>> from PIL import Image, ImageDraw, ImageFilter, ImageMath
    >>> im = Image.new("1", (128, 128)) # monochrome
    >>> _info(im)
    (None, '1', (128, 128))
    >>> _info(Image.new("L", (128, 128))) # grayscale (luminance)
    (None, 'L', (128, 128))
    >>> _info(Image.new("P", (128, 128))) # palette
    (None, 'P', (128, 128))
    >>> _info(Image.new("RGB", (128, 128))) # truecolor
    (None, 'RGB', (128, 128))
    >>> _info(Image.new("I", (128, 128))) # 32-bit integer
    (None, 'I', (128, 128))
    >>> _info(Image.new("F", (128, 128))) # 32-bit floating point
    (None, 'F', (128, 128))
    Or open existing files:
    >>> im = Image.open("Tests/images/hopper.gif")
    >>> _info(im)
    ('GIF', 'P', (128, 128))
    >>> _info(Image.open("Tests/images/hopper.ppm"))
    ('PPM', 'RGB', (128, 128))
    >>> try:
    ...  _info(Image.open("Tests/images/hopper.jpg"))
    ... except IOError as v:
    ...  print(v)
    ('JPEG', 'RGB', (128, 128))
    PIL doesn't actually load the image data until it's needed,
    or you call the "load" method:
    >>> im = Image.open("Tests/images/hopper.ppm")
    >>> print(im.im) # internal image attribute
    None
    >>> a = im.load()
    >>> type(im.im) # doctest: +ELLIPSIS
    <... '...ImagingCore'>
    You can apply many different operations on images. Most
    operations return a new image:
    >>> im = Image.open("Tests/images/hopper.ppm")
    >>> _info(im.convert("L"))
    (None, 'L', (128, 128))
    >>> _info(im.copy())
    (None, 'RGB', (128, 128))
    >>> _info(im.crop((32, 32, 96, 96)))
    (None, 'RGB', (64, 64))
    >>> _info(im.filter(ImageFilter.BLUR))
    (None, 'RGB', (128, 128))
    >>> im.getbands()
    ('R', 'G', 'B')
    >>> im.getbbox()
    (0, 0, 128, 128)
    >>> len(im.getdata())
    16384
    >>> im.getextrema()
    ((0, 255), (0, 255), (0, 255))
    >>> im.getpixel((0, 0))
    (20, 20, 70)
    >>> len(im.getprojection())
    2
    >>> len(im.histogram())
    768
    >>> _info(im.point(list(range(256))*3))
    (None, 'RGB', (128, 128))
    >>> _info(im.resize((64, 64)))
    (None, 'RGB', (64, 64))
    >>> _info(im.rotate(45))
    (None, 'RGB', (128, 128))
    >>> [_info(ch) for ch in im.split()]
    [(None, 'L', (128, 128)), (None, 'L', (128, 128)), (None, 'L', (128, 128))]
    >>> len(im.convert("1").tobitmap())
    10456
    >>> len(im.tobytes())
    49152
    >>> _info(im.transform((512, 512), Image.AFFINE, (1,0,0,0,1,0)))
    (None, 'RGB', (512, 512))
    >>> _info(im.transform((512, 512), Image.EXTENT, (32,32,96,96)))
    (None, 'RGB', (512, 512))
    The ImageDraw module lets you draw stuff in raster images:
    >>> im = Image.new("L", (128, 128), 64)
    >>> d = ImageDraw.ImageDraw(im)
    >>> d.line((0, 0, 128, 128), fill=128)
    >>> d.line((0, 128, 128, 0), fill=128)
    >>> im.getextrema()
    (64, 128)
    In 1.1.4, you can specify colors in a number of ways:
    >>> xy = 0, 0, 128, 128
    >>> im = Image.new("RGB", (128, 128), 0)
    >>> d = ImageDraw.ImageDraw(im)
    >>> d.rectangle(xy, "#f00")
    >>> im.getpixel((0, 0))
    (255, 0, 0)
    >>> d.rectangle(xy, "#ff0000")
    >>> im.getpixel((0, 0))
    (255, 0, 0)
    >>> d.rectangle(xy, "rgb(255,0,0)")
    >>> im.getpixel((0, 0))
    (255, 0, 0)
    >>> d.rectangle(xy, "rgb(100%,0%,0%)")
    >>> im.getpixel((0, 0))
    (255, 0, 0)
    >>> d.rectangle(xy, "hsl(0, 100%, 50%)")
    >>> im.getpixel((0, 0))
    (255, 0, 0)
    >>> d.rectangle(xy, "red")
    >>> im.getpixel((0, 0))
    (255, 0, 0)
    In 1.1.6, you can use the ImageMath module to do image
    calculations.
    >>> im = ImageMath.eval("float(im + 20)", im=im.convert("L"))
    >>> im.mode, im.size
    ('F', (128, 128))
    PIL can do many other things, but I'll leave that for another
    day. If you're curious, check the handbook, available from:
    http://www.pythonware.com
    Cheers /F
    """
    # NOTE: the docstring above is this function's entire body and doubles as
    # its doctest suite; the example outputs must stay byte-for-byte stable
    # for the doctest runner to keep passing.
def test_create_option_model():
    """Tests for create_option_model()."""
    # Basic cover environment should yield the generic oracle option model.
    utils.reset_config({
        "env": "cover",
        "approach": "nsrt_learning",
    })
    model = create_option_model("oracle")
    assert isinstance(model, _OracleOptionModel)
    # PyBullet blocks environment supports both the blocks-specific oracle
    # model and the BEHAVIOR option model.
    utils.reset_config({
        "env": "pybullet_blocks",
        "approach": "nsrt_learning",
    })
    model = create_option_model("oracle_blocks")
    assert isinstance(model, _OracleOptionModel)
    model = create_option_model("oracle_behavior")
    assert isinstance(model, _BehaviorOptionModel)
def query_by_band_gap(min_band_gap=None, max_band_gap=None):
    """
    Find all chemicals with a band gap within the specified range and print
    how many matches the service reports.

    Parameters
    ----------
    min_band_gap : float, optional
        Minimum allowed band gap. Default is ``None``.
    max_band_gap : float, optional
        Maximum allowed band gap. Default is ``None``.
    """
    band_gap_queries = create_band_gap_queries(min_band_gap=min_band_gap,
                                               max_band_gap=max_band_gap)
    # Assemble the request URL from the endpoint plus the combined filters.
    request_url = endpoint('chemicals') + combine_queries(band_gap_queries)
    response = requests.get(request_url)
    total_matches = get_total(response)
    print(f'Found {total_matches} items with a band gap between {min_band_gap} and {max_band_gap}.')
def edus_toks2ids(edu_toks_list, word2ids):
    """Convert tokenised EDU (argument) sentences into id sequences.

    Each token list is mapped through the ``word2ids`` vocabulary via
    ``get_line_ids``; the resulting id sequences are returned as a list.
    """
    return [get_line_ids(toks=line, word2ids=word2ids) for line in edu_toks_list]
def pd_log_with_neg(ser: pd.Series) -> pd.Series:
    """Log-transform a series that may contain negative values.

    Shifts the series by ``-min + 1`` so its smallest value becomes 1
    (whose log is 0), then applies the natural log.

    Note: the original *added* ``ser.min()`` instead of subtracting it,
    which produced NaNs (log of negative numbers) whenever the minimum was
    negative — the exact case the function exists to handle.
    """
    return np.log(ser - ser.min() + 1)
def filter_X_dilutions(df, concentration):
    """Select only one dilution ('high', 'low', or some number).

    :param df: DataFrame with a MultiIndex containing at least the levels
        'CID' and 'Dilution'.
    :param concentration: 'low' for the lowest dilution per CID, 'high' for
        the highest, or an int to select that exact dilution value.
    :return: DataFrame with one row per CID.
    """
    assert concentration in ['high','low'] or type(concentration) is int
    df = df.sort_index(level=['CID','Dilution'])
    df = df.fillna(999) # Pandas doesn't select correctly on NaNs
    if concentration == 'low':
        # After sorting, the first row per CID is the lowest dilution.
        df = df.groupby(level=['CID']).first()
    elif concentration == 'high':
        # After sorting, the last row per CID is the highest dilution.
        df = df.groupby(level=['CID']).last()
    else:
        # Keep rows whose Dilution index value matches exactly, then reduce
        # to one row per CID.
        df = df.loc[[x for x in df.index if x[1]==concentration]]
        df = df.groupby(level=['CID']).last()
    df = df.replace(999,float('NaN')) # Undo the fillna line above.
    return df
def test_delete_data_file(test_cloud_manager):
    """
    Test that deleting an object actually calls the google API with
    given bucket and object name
    """
    # Setup #
    # Make the mocked authed session report a successful deletion.
    test_cloud_manager._authed_session.delete.return_value = _fake_response(200)
    bucket = "some_bucket"
    object_name = "some_object"
    # Call #
    test_cloud_manager.delete_data_file(bucket, object_name)
    # Test #
    assert test_cloud_manager._authed_session.delete.called is True
    # Naive check to see if the object appears in the call to delete:
    # both names must show up somewhere in the positional or keyword args.
    args, kwargs = test_cloud_manager._authed_session.delete.call_args
    assert any(bucket in str(arg) for arg in args) or any(
        bucket in str(kwarg) for kwarg in kwargs.values()
    )
    assert any(object_name in str(arg) for arg in args) or any(
        object_name in str(kwarg) for kwarg in kwargs.values()
    )
def update_params(base_param: dict, additional: dict):
    """Overwrite entries of the base parameter dictionary.

    Note: ``base_param`` is mutated in place and also returned.

    Parameters
    ----------
    base_param : dict
        base param dictionary
    additional : dict
        additional param dictionary

    Returns
    -------
    dict
        updated parameter dictionary (the same object as ``base_param``)
    """
    # dict.update does exactly what the original manual key loop did.
    base_param.update(additional)
    return base_param
def test_GarbageTracker_get_tracker():
    """
    Verify that GarbageTracker.get_tracker() behaves as a singleton
    accessor: two calls must return the very same object.
    """
    assert GarbageTracker.get_tracker() is GarbageTracker.get_tracker()
def lanczos_generalized(
        operator,
        metric_operator=None,
        metric_inv_operator=None,
        num_eigenthings=10,
        which="LM",
        max_steps=20,
        tol=1e-6,
        num_lanczos_vectors=None,
        init_vec=None,
        use_gpu=False,
):
    """
    Use the scipy.sparse.linalg.eigsh hook to the ARPACK lanczos algorithm
    to find the top k eigenvalues/eigenvectors of a (generalized)
    eigenproblem.

    Parameters
    -------------
    operator: power_iter.Operator
        linear operator to solve.
    metric_operator: ndarray, scipy LinearOperator, Operator, or None
        metric M of the generalized problem A x = lambda M x; None solves
        the standard eigenproblem.
    metric_inv_operator: ndarray, scipy LinearOperator, Operator, or None
        inverse of the metric (passed to eigsh as Minv).
    num_eigenthings : int
        number of eigenvalue/eigenvector pairs to compute
    which : str ['LM', SM', 'LA', SA']
        L,S = largest, smallest. M, A = in magnitude, algebriac
        SM = smallest in magnitude. LA = largest algebraic.
    max_steps : int
        maximum number of arnoldi updates
    tol : float
        relative accuracy of eigenvalues / stopping criterion
    num_lanczos_vectors : int
        number of lanczos vectors to compute. if None, > 2*num_eigenthings
    init_vec: [torch.Tensor, torch.cuda.Tensor]
        if None, use random tensor. this is the init vec for arnoldi updates.
    use_gpu: bool
        if true, use cuda tensors.

    Returns
    ----------------
    eigenvalues : np.ndarray
        array containing `num_eigenthings` eigenvalues of the operator
    eigenvectors : np.ndarray
        array containing `num_eigenthings` eigenvectors of the operator
    """
    if isinstance(operator.size, int):
        size = operator.size
    else:
        size = operator.size[0]
    shape = (size, size)
    if num_lanczos_vectors is None:
        num_lanczos_vectors = min(2 * num_eigenthings, size - 1)
    if num_lanczos_vectors < 2 * num_eigenthings:
        warn(
            "[lanczos] number of lanczos vectors should usually be > 2*num_eigenthings"
        )

    def _wrap(op):
        """Expose a torch-based operator as a scipy LinearOperator."""
        def _apply(x):
            x = torch.from_numpy(x)
            if use_gpu:
                x = x.cuda()
            return op.apply(x.float()).cpu().numpy()
        return ScipyLinearOperator(shape, _apply)

    def _as_scipy(op):
        """Pass through None/ndarray/LinearOperator; wrap torch operators."""
        if op is None or isinstance(op, (np.ndarray, ScipyLinearOperator)):
            return op
        return _wrap(op)

    scipy_op = _wrap(operator)
    # Bug fix: the original wrapped a metric of None anyway, so eigsh crashed
    # with AttributeError (None.apply) on first use. None now means
    # "standard (non-generalized) eigenproblem" and is forwarded as-is.
    metric_op = _as_scipy(metric_operator)
    metric_inv_op = _as_scipy(metric_inv_operator)
    if init_vec is None:
        init_vec = np.random.rand(size)
    elif isinstance(init_vec, torch.Tensor):
        init_vec = init_vec.cpu().numpy()
    # NOTE(review): init_vec is prepared but never passed to eigsh (v0=);
    # behavior preserved from the original implementation.
    eigenvals, eigenvecs = eigsh(
        A=scipy_op,
        k=num_eigenthings,
        M=metric_op,
        Minv=metric_inv_op,
        which=which,
        maxiter=max_steps,
        tol=tol,
        ncv=num_lanczos_vectors,
        return_eigenvectors=True,
    )
    return eigenvals, eigenvecs.T
def get_articles_news(name):
    """
    Fetch the top headlines for a news source and return processed articles.

    Returns None when the response contains no articles.
    """
    get_news_url = 'https://newsapi.org/v2/top-headlines?sources={}&apiKey=988fb23113204cfcb2cf79eb7ad99b76'.format(name)
    # Fetch and decode the JSON payload in one pass.
    with urllib.request.urlopen(get_news_url) as url:
        get_news_response = json.loads(url.read())
    if not get_news_response['articles']:
        return None
    return process_articles(get_news_response['articles'])
def execute(
    name,
    params=None,
    constraints=None,
    data_folder=None,
    tag=None,
    time_to_expire_secs=None,
    suffix=None,
    app_metrics=None,
    app_params=None,
    metadata=None,
):
    """
    Create an instance of a workflow.

    Outside of an agent context (no ``CANOTIC_AGENT`` environment variable)
    this is a no-op returning None; otherwise the instance is scheduled.

    :param name: name of the workflow to create the instance
    :param params: parameters as dictionary to the workflow instance
    :param constraints: a set of execution constraints such as a list of emails, ids, or groups to send task to
    :param data_folder: a data folder to be uploaded and accessed from the instance through canotic.request.data()
    :param tag: workflow auxiliary tag
    :param time_to_expire_secs: an expiration time in secs
    :return: the scheduled workflow instance, or None when not under an agent
    """
    if "CANOTIC_AGENT" not in os.environ:
        return None
    return schedule_workflow(
        name,
        params,
        constraints,
        data_folder,
        tag,
        time_to_expire_secs,
        suffix,
        app_metrics,
        app_params,
        metadata,
    )
def coordConv(fromP, fromV, fromSys, fromDate, toSys, toDate, obsData=None, refCo=None):
    """Converts a position from one coordinate system to another.
    Inputs:
    - fromP(3)  cartesian position (au)
    - fromV(3)  cartesian velocity (au/year); ignored if fromSys
                is Geocentric, Topocentric or Observed
    - fromSys   coordinate system from which to convert;
                any of the entries in the table below; use opscore.RO.CoordSys constants.
    - fromDate  date*
    - toSys     coordinate system to which to convert (see fromSys)
    - toDate    date*
    - obsData   an opscore.RO.Astro.Cnv.ObserverData object; required if fromSys or toSys
                is Topocentric or Observed; ignored otherwise.
    - refCo(2)  refraction coefficients; required if fromSys or toSys is Observed;
                ignored otherwise.
    Returns:
    - toP(3)    converted cartesian position (au)
    - toV(3)    converted cartesian velocity (au/year)
    *the units of date depend on the associated coordinate system:
    coord sys       def date    date
    ICRS            2000.0      Julian epoch of observation
    FK5             2000.0      Julian epoch of equinox and observation
    FK4             1950.0      Besselian epoch of equinox and observation
    Galactic        now         Julian epoch of observation
    Geocentric      now         UT1 (MJD)
    Topocentric     now         UT1 (MJD)
    Observed        now         UT1 (MJD)
    **Setting fromV all zero means the object is fixed. This slightly affects
    conversion to or from FK4, which has fictitious proper motion.
    Error Conditions:
    - If obsData or refCo are absent and are required, raises ValueError.
    Details:
    The conversion is performed in two stages:
    - fromP/fromSys/fromDate -> ICRS
    - ICRS -> toP/toSys/toDate
    Each of these two stages is performed using the following graph:
    FK5 ------\
    FK4 ------ ICRS --- Geocentric -*- Topocentric -**- Observed
    Galactic--/
    * obsData required
    ** refCo required
    """
    # Delegate to the shared converter object, which implements the
    # two-stage conversion described above.
    return _TheCnvObj.coordConv(fromP, fromV, fromSys, fromDate, toSys, toDate, obsData, refCo)
def sample_recipe(user, **params):
    """Create and return a sample Recipe, overriding defaults with params."""
    recipe_fields = {
        'title': 'paneer tikka',
        'time_minute': 10,
        'price': 5.00,
    }
    # Any caller-supplied fields replace the defaults above.
    recipe_fields.update(params)
    return Recipe.objects.create(user=user, **recipe_fields)
def updateSections(thisconfig):
    """ Re-read the test config INI file. Load into the
    test environment config object.

    Not yet implemented: always raises NotImplementedError.

    :param thisconfig: the test environment config object to refresh
    """
    raise NotImplementedError
def briconToScaleOffset(brightness, contrast, drange):
    """Used by the :func:`briconToDisplayRange` and the :func:`applyBricon`
    functions.

    Calculates a scale and offset which can be used to transform a display
    range of the given size so that the given brightness/contrast settings
    are applied.

    :arg brightness: Brightness, between 0.0 and 1.0.
    :arg contrast:   Contrast, between 0.0 and 1.0.
    :arg drange:     Data range.
    """
    # Brightness maps linearly onto an offset: 0.5 -> 0,
    # 0.0 -> -drange, 1.0 -> +drange.
    offset = (2.0 * brightness - 1.0) * drange
    # Contrast up to 0.5 acts as a linear scale; above 0.5 it grows as a
    # quartic so values near 1.0 have a much stronger effect than values
    # just above 0.5.
    if contrast > 0.5:
        scale = 20 * contrast ** 4 - 0.25
    else:
        scale = contrast * 2
    return scale, offset
def parse_read_name_map_file(read_map, directories, recursive=False):
    """Parse either a seq summary file or a readdb file
    :param read_map: either a readdb file or sequencing summary file
    :param directories: check all the directories for the fast5 path
    :param recursive: boolean option to check the sub directories of input directories
    :yield: (read_name, absolute fast5 path) tuples for every two-column
        line whose path exists under one of the directories
    """
    # Column order differs between the two formats: readdb is
    # "name<TAB>path", sequencing summaries are "path<TAB>name".
    if read_map.endswith("readdb"):
        name_index = 0
        path_index = 1
    else:
        name_index = 1
        path_index = 0
    for dir_path in directories:
        assert os.path.isdir(dir_path), "Path provided does not exist or is not a directory: {}".format(dir_path)
    with open(read_map, 'r') as fh:
        for line in fh:
            split_line = line.split()
            # Skip malformed lines that don't have exactly two columns.
            if len(split_line) == 2:
                for dir_path in directories:
                    if recursive:
                        # Try every subdirectory of each input directory;
                        # the same read may be yielded once per match.
                        directories2 = get_all_sub_directories(dir_path)
                        for dir_path2 in directories2:
                            full_path = os.path.join(dir_path2, split_line[path_index])
                            if os.path.exists(full_path):
                                yield split_line[name_index], os.path.abspath(full_path)
                    else:
                        full_path = os.path.join(dir_path, split_line[path_index])
                        if os.path.exists(full_path):
                            yield split_line[name_index], os.path.abspath(full_path)
def HostNameRequestHeader(payload_size):
    """
    Construct a ``MessageHeader`` for a HostNameRequest command.

    Sends local host name to virtual circuit peer. This name will affect
    access rights. Sent over TCP.

    Parameters
    ----------
    payload_size : integer
        Length of host name string.
    """
    struct_args = (21, payload_size, 0, 0, 0, 0)
    # Fall back to the extended header when the payload size does not fit
    # in the standard header's 16-bit field.
    if payload_size > 0xffff:
        return ExtendedMessageHeader(*struct_args)
    return MessageHeader(*struct_args)
def california_quadtree_region(magnitudes=None, name="california-quadtree"):
    """
    Returns object of QuadtreeGrid2D representing quadtree grid for California RELM testing region.

    The grid is pre-generated at zoom-level 12 and loaded via the
    classmethod QuadtreeGrid2D.from_quadkeys; the zoom-12 cells were
    selected using the external boundary of the RELM California region.
    This grid can be used to create gridded datasets for earthquake
    forecasts.

    Args:
        magnitudes: Magnitude discretization
        name: string
    Returns:
        :class:`csep.core.spatial.QuadtreeGrid2D
    """
    # Quadkey file ships with the package under artifacts/Regions.
    package_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    quadkey_file = os.path.join(package_root, 'artifacts', 'Regions',
                                'california_qk_zoom=12.txt')
    quadkeys = numpy.genfromtxt(quadkey_file, delimiter=',', dtype='str')
    return QuadtreeGrid2D.from_quadkeys(quadkeys, magnitudes=magnitudes, name=name)
def cinema_trip(persons, day, premium_seating, treat):
    """
    The total cost of going to the cinema
    Parameters:
    ----------
    persons: int
        number of people who need a ticket
    day: int
        day of the week to book (1 = Monday, 7 = Sunday)
    preimum_seating: bool
        boolean True/False if premium seats are required
    treat: str
        string value representing a choice of refreshment
    Returns:
    -------
    float
    """
    # Total = group ticket cost plus the cost of the chosen refreshment.
    return tickets(persons, day, premium_seating) + refreshment(treat)
def check_path(path: pathlib.Path) -> bool:
    """Return True if *path* refers to an existing regular file.

    ``Path.is_file`` already returns False for non-existent paths, so the
    original's separate ``exists()`` check was redundant (and introduced a
    needless check-then-act race).
    """
    return path.is_file()
def omega2kwave(omega, depth, grav=9.81):
    """
    Solve the linear dispersion relation close to machine precision::
        omega**2 = kwave * grav * tanh(kwave*depth)
    Parameters
    ----------
    omega : float
        Wave oscillation frequency [rad/s]
    depth : float
        Constant water depth. [m] (<0 indicates infinite depth)
    grav : float, optional
        Acceleration of gravity [m/s^2]
    Returns
    -------
    float
        Wave number (kwave) [1/m]
    Raises
    ------
    None
    """
    if depth < 0.0:
        # Infinite depth: tanh -> 1, so the relation reduces to
        # kwave = omega^2 / g.
        return omega**2 / grav
    # Solve equivalent equation system: c == y * tanh(y), kwave = y / depth
    c = depth * omega**2 / grav
    # High accuracy fix point schemes; the scheme is chosen by regime so
    # that each iteration contracts strongly.
    if c > 2.5:
        def f(y0):
            # y == c/tanh(y)
            # tanh(y) = 1 - eps, Near y=y0 the RHS is almost c.
            # Solve y== c / tanh(y0)
            return c / np.tanh(y0)
    else:
        def f(y0):
            # y*(k0 + k1*(y-y0)) == c*(k0 + k1*(y-y0))/tanh(y0)
            # tanh(y0) = k0 + k1*(y-y0) + ...
            # Near y=y0 the RHS is almost c.
            # Solve y*(k0 + k1*(y-y0)) == c for y
            k0 = np.tanh(y0)
            k1 = 1.0 - k0**2
            b = k0 - k1 * y0
            return 0.5 * (-b + np.sqrt(b**2 + 4.0 * c * k1)) / k1
    # First initial guess (MIT lecture notes... 4 digits accuracy)
    if c > 2.4:
        # Deeper water...
        y = c * (1.0 + 3.0 * np.exp(-2 * c) - 12.0 * np.exp(-4 * c))
        # using fixed point iteration: y <= c + y - y * tanh(y)
    else:
        # Shallower water...
        y = np.sqrt(c) * (1.0 + 0.169 * c + 0.031 * c ** 2)
        # using fixed point iteration: y <= sqrt(c * y / tanh(y))
    y_prev = -1.0
    # Iterate until successive iterates agree to ~100 ulp.
    while abs(y - y_prev) > 100 * np.finfo(y).eps:
        y_prev = y
        y = f(y)
    kwave = y / depth
    return kwave
def is_true(a: Bool) -> bool:
    """Returns whether the provided bool can be simplified to true.

    :param a: wrapped boolean whose ``raw`` attribute holds the underlying
        z3 expression
    :return: True iff z3 reports the raw expression as the literal ``true``
    """
    return z3.is_true(a.raw)
def traverse_caves_recursive(cave: str, cave_system: dict, current_path: list[str]):
    """Recursively enumerate all paths through the cave system.

    Small (lowercase) caves may be visited at most once per path.  Returns
    a nested list mirroring the recursion tree, with complete paths (lists
    of cave names ending in "END") at the leaves; a dead end yields None.
    """
    if cave != "START":
        # Extend a copy so sibling branches never share the same path list.
        current_path = current_path + [cave]
    if cave == "END":
        return current_path
    # Lowercase caves already on the path may not be revisited.
    visited_small = {name for name in current_path if name.islower()}
    next_caves = [
        name for name in cave_system[cave] if name not in visited_small
    ]
    if not next_caves:
        return None
    return [
        traverse_caves_recursive(name, cave_system, current_path)
        for name in next_caves
    ]
def check_dummybots():
    """
    Checks the availablity of dummybots and set the global flag. Runs once per
    test session.
    """
    global DUMMYBOTS
    # The module-level DUMMYBOTS dict caches both "already tested" and
    # "available" so the network probe only ever runs once.
    if not DUMMYBOTS['tested']:
        DUMMYBOTS['tested'] = True
        # Load bot configuration
        fp = open(path.join(TEST_CONFIG_PATH, 'bots.json'), 'r')
        bot_connections = loads(fp.read())
        fp.close()
        # Check the connection to dummybots concurrently
        def worker(bot_url):
            # HEAD probe with a short timeout; any request failure or
            # non-2xx response marks the bot as unavailable.
            try:
                r = head('{}/askmeanything?q=test'.format(bot_url), timeout=5)
                assert r.ok
                return r
            except (RequestException, AssertionError):
                return None
        urls = []
        for connection in bot_connections:
            urls.append(connection['url'])
        pool = ThreadPool(processes=3)
        bot_available = pool.map(worker, urls)
        # Check the results of the connection tests and update flags:
        # a single unreachable bot marks the whole set unavailable.
        for i, available in enumerate(bot_available):
            if available is None:
                DUMMYBOTS['available'] = False
                return
        DUMMYBOTS['available'] = True
def main(tests=None, **kwargs):
    """Run the Python suite.

    :param tests: optional explicit list of test names; all tests when None
    :param kwargs: forwarded unchanged to :meth:`Regrtest.main`
    """
    Regrtest().main(tests=tests, **kwargs)
def letter_generator(start: str, end: str) -> typing.Generator:
    """Yield every character in the inclusive code-point range [start, end].

    :param start: first character of the range
    :param end: last character of the range
    :return: generator of single-character strings
    """
    yield from (chr(code) for code in range(ord(start), ord(end) + 1))
    print('end func generator')
def count(A, target):
    """Return the number of times target appears in list A.

    Uses a divide-and-conquer recursion over index ranges; fixes the
    original's infinite recursion (RecursionError) on an empty list by
    returning 0 up front.
    """
    if not A:
        return 0

    def rcount(lo, hi):
        """Count occurrences of target in A[lo:hi+1] by splitting at the middle."""
        if lo == hi:
            return 1 if A[lo] == target else 0
        mid = (lo + hi) // 2
        return rcount(lo, mid) + rcount(mid + 1, hi)

    return rcount(0, len(A) - 1)
def nothing(x):
    """
    Do nothing with the given value.

    This function does nothing. It exists only because
    cv.createTrackbar requires a callback as its fourth argument;
    the trackbar position ``x`` is intentionally ignored.
    """
    pass
def plot_images(images, class_names, labels, file_name, n_cols=10):
    """Plot a sample of images from the training set and save it as a PNG.

    :param images: the set of images
    :param class_names: mapping from class indices to names
    :param labels: the labels of the images (each label is a 1-element vector)
    :param file_name: base name of the figure file written under figures/
    :param n_cols: number of columns in the plot grid
    :return:
    """
    n_rows = len(images) // n_cols
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(20, 10))
    # zip stops at the number of axes, which never exceeds len(images).
    for ax, image, label in zip(axes.flat, images, labels):
        ax.imshow(image)
        ax.axis('off')
        ax.set_title(class_names[label[0]], fontsize=16)
    plt.savefig(f'figures/{file_name}.png')
    plt.close()
def asset_get_current_log(asset_id):
    """Return the most recent active log entry for an asset.

    :param asset_id: id of the asset whose log is looked up
    :return: Storage with datetime/person_id/cond/status/site_id of the
        latest non-cancelled, non-deleted log entry, or an empty Storage
        when no such entry exists
    """
    db = current.db
    s3db = current.s3db
    table = s3db.asset_log
    query = ( table.asset_id == asset_id ) & \
            ( table.cancel == False ) & \
            ( table.deleted == False )
    # Get the log with the maximum time
    asset_log = db(query).select(table.id,
                                 table.status,
                                 table.datetime,
                                 table.cond,
                                 table.person_id,
                                 table.site_id,
                                 #table.location_id,
                                 orderby = ~table.datetime,
                                 limitby=(0, 1)).first()
    if asset_log:
        # cond/status default to 0 when unset so callers get ints.
        return Storage(datetime = asset_log.datetime,
                       person_id = asset_log.person_id,
                       cond = int(asset_log.cond or 0),
                       status = int(asset_log.status or 0),
                       site_id = asset_log.site_id,
                       #location_id = asset_log.location_id
                       )
    else:
        return Storage()
def step_decay_lr_scheduler(optimizer, epoch, lr_decay=0.1, lr_decay_epoch=7):
    """Decay learning rate by a factor of lr_decay every lr_decay_epoch epochs.

    :param optimizer: optimizer whose ``param_groups`` learning rates are scaled
    :param epoch: current epoch number (0-based)
    :param lr_decay: multiplicative decay factor
    :param lr_decay_epoch: decay period in epochs
    """
    # Bug fix: the original condition also decayed at epoch 0
    # (0 % lr_decay_epoch == 0), shrinking the initial learning rate
    # before any training happened.
    if epoch == 0 or epoch % lr_decay_epoch != 0:
        return
    for param_group in optimizer.param_groups:
        param_group['lr'] *= lr_decay
def _filter_to_k_shot(dataset, num_classes, k):
    """Filters k-shot subset from a dataset.

    Keeps the first `k` examples of each of the `num_classes` classes, in
    the order the dataset yields them.

    !!! IMPORTANT: the dataset should *not* be shuffled. !!!
    Make sure that `shuffle_buffer_size=1` in the call to
    `dloader.get_tf_data`.
    """
    # Boolean mask over scanned examples: True = include in the k-shot set.
    selection_mask = []
    # Running tally of how many examples of each class were selected.
    per_class_kept = np.zeros([num_classes], dtype=np.int32)
    for _, label in dataset.as_numpy_iterator():
        # Select the example only while its class still needs more samples.
        take_it = per_class_kept[label] < k
        selection_mask.append(take_it)
        if take_it:
            per_class_kept[label] += 1
        # Stop scanning once every class has `k` selected examples.
        if (per_class_kept == k).all():
            break
    dataset = tf.data.Dataset.zip((
        tf.data.Dataset.from_tensor_slices(selection_mask),
        dataset
    )).filter(lambda keep, _: keep).map(lambda _, example: example).cache()
    return dataset
async def test_no_state_change_on_failure(v3_server):
    """Test that the system doesn't change state on an error.

    All mocked endpoints return 401, so arming must raise
    InvalidCredentialsError and the cached state must remain ``off``.
    """
    # The state-change endpoint itself rejects the request:
    v3_server.post(
        (
            f"https://api.simplisafe.com/v1/ss3/subscriptions/{TEST_SUBSCRIPTION_ID}"
            "/state/away"
        ),
        status=401,
        body="Unauthorized",
    )
    # Token endpoint is mocked to fail twice — presumably the client retries
    # a token refresh after the first 401 (TODO confirm against get_api).
    v3_server.post(
        "https://api.simplisafe.com/v1/api/token", status=401, body="Unauthorized"
    )
    v3_server.post(
        "https://api.simplisafe.com/v1/api/token", status=401, body="Unauthorized"
    )
    async with aiohttp.ClientSession() as session:
        simplisafe = await get_api(
            TEST_EMAIL, TEST_PASSWORD, session=session, client_id=TEST_CLIENT_ID
        )
        systems = await simplisafe.get_systems()
        system = systems[TEST_SYSTEM_ID]
        # Baseline: system starts disarmed.
        assert system.state == SystemStates.off
        with pytest.raises(InvalidCredentialsError):
            await system.set_away()
        # The failed request must not have mutated the cached state.
        assert system.state == SystemStates.off
def _create_tf_example(entry):
    """ Creates a tf.train.Example to be saved in the TFRecord file.
    Args:
        entry: string containing the path to a image and its label.
    Return:
        tf_example: tf.train.Example containing the info stored in feature
    """
    image_path, label = _get_image_and_label_from_entry(entry)
    # Convert the jpeg image to raw image.
    image = Image.open(image_path)
    image_np = np.array(image)
    # Bug fix: ndarray.tostring() was a deprecated alias and is removed in
    # NumPy 2.0; tobytes() returns the identical raw buffer.
    image_raw = image_np.tobytes()
    # Data which is going to be stored in the TFRecord file
    feature = {
        'image': tfrecord_utils.bytes_feature(image_raw),
        'image/height': tfrecord_utils.int64_feature(image_np.shape[0]),
        'image/width': tfrecord_utils.int64_feature(image_np.shape[1]),
        'label': tfrecord_utils.int64_feature(label),
    }
    tf_example = tf.train.Example(features=tf.train.Features(feature=feature))
    return tf_example
def _gen_parameters_section(names, parameters, allowed_periods=None):
    """Generate the "parameters" section of the indicator docstring.

    Parameters
    ----------
    names : Sequence[str]
        Names of the input parameters, in order. Usually `Ind._parameters`.
    parameters : Mapping[str, Any]
        Parameters dictionary. Usually `Ind.parameters`, As this is missing `ds`, it is added explicitly.
    allowed_periods : sequence, optional
        When given, frequency-string parameters mention this restriction.
    """
    entries = []
    for name in names:
        if name == "ds":
            # `ds` is not present in `parameters`, so describe it explicitly.
            desc = "Input dataset."
            default = "Default: None."
            units = ""
            annot = "Dataset, optional"
        else:
            param = parameters[name]
            desc = param["description"]
            if allowed_periods is not None and param["kind"] == InputKind.FREQ_STR:
                desc += (
                    f" Restricted to frequencies equivalent to one of {allowed_periods}"
                )
            if param["kind"] == InputKind.VARIABLE:
                default = f"Default : `ds.{param['default']}`. "
            elif param["kind"] == InputKind.OPTIONAL_VARIABLE:
                default = ""
            else:
                default = f"Default : {param['default']}. "
            # An explicit choice list overrides the generic kind annotation.
            if "choices" in param:
                annot = str(param["choices"])
            else:
                annot = KIND_ANNOTATION[param["kind"]]
            if param.get("units", False):
                units = f"[Required units : {param['units']}]"
            else:
                units = ""
        entries.append(f"{name} : {annot}\n  {desc}\n  {default}{units}\n")
    return "Parameters\n----------\n" + "".join(entries)
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up Guardian binary sensors based on a config entry."""
    @callback
    def add_new_paired_sensor(uid: str) -> None:
        """Add a new paired sensor."""
        # Look up the coordinator previously registered for this sensor UID.
        coordinator = hass.data[DOMAIN][entry.entry_id][DATA_COORDINATOR_PAIRED_SENSOR][
            uid
        ]
        # One entity per description for the newly paired sensor.
        async_add_entities(
            [
                PairedSensorBinarySensor(entry, coordinator, description)
                for description in PAIRED_SENSOR_DESCRIPTIONS
            ]
        )
    # Handle adding paired sensors after HASS startup:
    # the dispatcher subscription is cleaned up when the entry unloads.
    entry.async_on_unload(
        async_dispatcher_connect(
            hass,
            SIGNAL_PAIRED_SENSOR_COORDINATOR_ADDED.format(entry.data[CONF_UID]),
            add_new_paired_sensor,
        )
    )
    # Add all valve controller-specific binary sensors:
    sensors: list[PairedSensorBinarySensor | ValveControllerBinarySensor] = [
        ValveControllerBinarySensor(
            entry, hass.data[DOMAIN][entry.entry_id][DATA_COORDINATOR], description
        )
        for description in VALVE_CONTROLLER_DESCRIPTIONS
    ]
    # Add all paired sensor-specific binary sensors:
    sensors.extend(
        [
            PairedSensorBinarySensor(entry, coordinator, description)
            for coordinator in hass.data[DOMAIN][entry.entry_id][
                DATA_COORDINATOR_PAIRED_SENSOR
            ].values()
            for description in PAIRED_SENSOR_DESCRIPTIONS
        ]
    )
    async_add_entities(sensors)
def unload_agent():
    """Unload the launchd agent.

    Returns zero in case of success or if the plist does not exist;
    otherwise returns the non-zero `launchctl unload` exit status.
    """
    plist_path = installed_plist_path()
    ret = 0
    if os.path.exists(plist_path):
        ret = sync_task([LAUNCHCTL_PATH, "unload", "-w", "-S", "Aqua", plist_path])
    else:
        log_message("nothing to unload")
    if ret:
        log_message("unable to unload agent %s" % (plist_path))
    # Bug fix: previously this returned 0 unconditionally, so callers could
    # never detect a failed unload even though the docstring promised a
    # success-only zero.
    return ret
def readCSV(name):
    """Read the filters from a CSV file.

    Note that the only the Remark and ToDo fields are taken from the CSV file.
    The filter names and which toolkits have each filter comes from is not read
    from the file. Rather that information is loaded from the ITK and SimpleITK
    Python symbol tables.
    """
    try:
        # "rU" (universal-newlines mode) was removed in Python 3.11; the csv
        # module expects the file opened with newline="".
        with open(name, "r", newline="") as fp:
            reader = csv.DictReader(fp)
            for row in reader:
                filt = row[fieldnames[0]]
                # Check the file to see if the ITK/SITK flags match what we've pulled from Python.
                iflag = sflag = False
                if len(row[fieldnames[1]]):
                    iflag = row[fieldnames[1]].lower() == "true"
                if len(row[fieldnames[2]]):
                    sflag = row[fieldnames[2]].lower() == "true"
                initk = filt in fs.itk
                insitk = filt in fs.sitk
                if not initk and not insitk:
                    print( bcolors.FAIL, "Warning: ", bcolors.ENDC, "Filter ", filt, \
                        "not found in either ITK or SimpleITK" )
                if (iflag != initk) or (sflag != insitk):
                    print( bcolors.FAIL, "Warning: ", bcolors.ENDC, \
                        "mismatch between file and symbol table for filter ", filt )
                    print( "    ", row )
                # Get the remark field from the file.
                if row[fieldnames[3]] is not None:
                    if len(row[fieldnames[3]]):
                        fs.remarks[filt] = row[fieldnames[3]]
                # Get the ToDo flag
                if len(row[fieldnames[4]]):
                    if row[fieldnames[4]].lower() == "true":
                        fs.todo.add(filt)
    # Deliberately best-effort: any read/parse problem is reported and the
    # program proceeds without the file. (Was a bare `except:`, which also
    # swallowed KeyboardInterrupt/SystemExit.)
    except Exception:
        print( bcolors.FAIL, "Warning:", bcolors.ENDC, "Couldn't read input file", name, \
            ".  Proceeding without it.\n" )
    else:
        if not quietMode:
            # Bug fix: reported the unrelated global `remarkFile` instead of
            # the file actually read.
            print( "Read file", name, "\n" )
def bins(df):
    """Bucket final scores into 10-point intervals for chart building.

    Parameters
    ----------
    df : pandas.DataFrame
        Input frame with an 'ALUNO' column and a 'Pontuação final' score
        column (scores in [0, 100); right edges are open).

    Returns
    -------
    pandas.DataFrame
        Per-interval counts, percentages and their cumulative versions.
    """
    # Cut the scores into [0,10), [10,20), ..., [90,100) intervals.
    intervals = pd.cut(df['Pontuação final'].rename('Intervalos'),
                       np.arange(0, 101, 10), right=False)
    counts = df['ALUNO'].rename('Contagem').groupby(intervals).count()
    out = pd.DataFrame(counts)
    total = out['Contagem'].sum()
    out['Contagem /%'] = round(100 * out['Contagem'] / total, 2)
    out['Contagem cumulativa'] = out['Contagem'].cumsum()
    out['Contagem /% cumulativa'] = out['Contagem /%'].cumsum()
    return out
def test_percentage():
    """ Test percentage is calculated correctly """
    # 6 observations x 6 predictor columns for the Kruskals driver analysis.
    ndarr = np.array([
        [1, 2, 3, 4, 5, 6],
        [6, 5, 4, 3, 8, 1],
        [1, 1, 9, 1, 1, 1],
        [9, 2, 2, 2, 2, 2],
        [3, 3, 3, 9, 3, 3],
        [1, 2, 2, 9, 1, 4]
    ])
    # Dependent-variable vector matching the 6 observations above.
    arr = np.array([1, 2, 3, 4, 5, 6])
    # Expected per-driver scores expressed as percentages.
    exp_driver_score = np.array([ 5.90856, 17.81959,  9.62429, 25.08222, 28.85722, 12.70813])
    # Round to 5 decimals so the equality check is exact despite float noise.
    driver_score = np.round(Kruskals.Kruskals(ndarr, arr).percentage(), decimals=5)
    assert np.array_equal(driver_score, exp_driver_score)
def _GetBrowserSharedRelroConfig():
  """Returns a string corresponding to the Linker's configuration of shared
  RELRO sections in the browser process. This parses the Java linker source
  file to get the appropriate information.
  Return:
    None in case of error (e.g. could not locate the source file).
   'NEVER' if the browser process shall never use shared RELROs.
   'LOW_RAM_ONLY' if if uses it only on low-end devices.
   'ALWAYS' if it always uses a shared RELRO.
  """
  source_path = os.path.join(constants.DIR_SOURCE_ROOT, _LINKER_JAVA_SOURCE_PATH)
  if not os.path.exists(source_path):
    logging.error('Could not find linker source file: ' + source_path)
    return None
  # Scan the Java source for the browser RELRO configuration constant.
  with open(source_path) as f:
    matches = _RE_LINKER_BROWSER_CONFIG.findall(f.read())
  if not matches:
    logging.error(
        'Can\'t find browser shared RELRO configuration value in ' + \
        source_path)
    return None
  config = matches[0]
  # Only the three known configuration values are acceptable.
  if config not in ('NEVER', 'LOW_RAM_ONLY', 'ALWAYS'):
    logging.error('Unexpected browser config value: ' + config)
    return None
  logging.info('Found linker browser shared RELRO config: ' + config)
  return config
def relative_to_abs() -> None:
    """Updates paths in the dataset_config.yaml file from relative to absolute.

    Reads `dataset_config.yml`; when the first line contains no '/', the
    relative 'dataset-YOLO' path in it is replaced by an absolute path
    anchored at the current working directory and the file is rewritten.
    The (possibly updated) lines are echoed either way.

    Returns:
        None
    """
    with open('dataset_config.yml') as cfg:
        content = cfg.readlines()
    # A '/' in the first line means the path is already absolute.
    if '/' not in content[0]:
        content[0] = content[0].replace('dataset-YOLO',
                                        f'{Path().cwd()}/dataset-YOLO')
        with open('dataset_config.yml', 'w') as cfg:
            cfg.writelines(content)
    print(content)
def parse_pcap(pcap, pcap_path, file_name):
    """Summarize each TCP/UDP session in a pcap and dump it to a CSV.

    Args:
        pcap: dpkt pcap reader object (dpkt.pcap.Reader)
        pcap_path: path of the pcap file; the CSV is written alongside it
        file_name: pcap file name; its stem becomes the first CSV column
    """
    counter = 0
    pcap_dict = {}
    # For each packet in the pcap process the contents
    for ts, packet in pcap:
        # Unpack the Ethernet frame
        try:
            eth = dpkt.ethernet.Ethernet(packet)
        except dpkt.dpkt.NeedData:
            print("dpkt.dpkt.NeedData")
            # Bug fix: without this `continue`, a truncated frame fell
            # through with `eth` unbound (NameError) or stale from the
            # previous iteration.
            continue
        # Make sure the Ethernet data contains an IP packet
        if isinstance(eth.data, dpkt.ip.IP):
            ip = eth.data
        elif isinstance(eth.data, (bytes, str)):
            # Unparsed payloads are `bytes` on Python 3 (`str` kept for
            # legacy Python 2 pcaps); retry decoding the raw packet as IP.
            try:
                ip = dpkt.ip.IP(packet)
            except dpkt.UnpackError:
                continue
        else:
            continue
        # Now unpack the data within the Ethernet frame (the IP packet)
        # Pulling out src_ip, dst_ip, protocol (tcp/udp), dst/src port, length
        proto = ip.data
        if type(ip.data) in PROTO_DICT:
            session_tuple_key = (inet_to_str(ip.src), proto.sport, inet_to_str(ip.dst), proto.dport, PROTO_DICT[type(ip.data)])
            # One (first_ts, inter-arrival list, size list) record per session.
            pcap_dict.setdefault(session_tuple_key, (ts, [], []))
            d = pcap_dict[session_tuple_key]
            size = len(ip)  # ip.len
            d[1].append(round(ts - d[0], 6))
            d[2].append(size)
            counter += 1
    print("Total Number of Parsed Packets in " + pcap_path + ": " + str(counter))
    csv_file_path = os.path.splitext(pcap_path)[0] + ".csv"
    # Bug fix: csv.writer needs a text-mode file with newline='' on
    # Python 3; the old 'wb' mode raised TypeError.
    with open(csv_file_path, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        for key, value in pcap_dict.items():
            writer.writerow([file_name.split(".")[0]] + list(key) + [value[0], len(value[1])] + value[1] + [None] + value[2])
    # Bug fix: dict.iteritems() is Python 2 only (AttributeError on py3).
    for k, v in pcap_dict.items():
        if len(v[1]) > 2000:
            print(k, v[0], len(v[1]))
def insert_video(ID):
    """Fetch a YouTube video by ID and store it in MongoDB if not seen before.

    Checks the processed collection for *ID*; when absent, calls the
    YouTube API and inserts the result into the raw collection.

    :param ID: a valid YouTube video ID
    :return: True if a new document was inserted, False otherwise
    """
    client = MongoClient('localhost:27017')
    db = client['PreCog']
    collection = db['YoutubeRaw']
    check_collection = db['YoutubeProcessed']
    check = check_collection.find_one({"ID" : ID})
    # Idiom fix: compare against None with `is`, not `==`.
    if check is None:
        video = youtube_search(ID)
        if video is not None:
            result = collection.insert_one(video)
            print(result.inserted_id, datetime.datetime.now())
        return True
    else:
        print("Already in DataBase")
        return False
def get_neighbor_v6_by_search(search=None):
    """Return a list of NeighborV6's by dict.

    Wraps backend errors: field errors become ValidationAPIException,
    anything else becomes NetworkAPIException.
    """
    try:
        queryset = NeighborV6.objects.filter()
        # Fall back to an empty search dict when none (or falsy) is given.
        search_dict = search if search else dict()
        object_map = build_query_to_datatable_v3(queryset, search_dict)
    except FieldError as e:
        raise api_rest_exceptions.ValidationAPIException(str(e))
    except Exception as e:
        raise api_rest_exceptions.NetworkAPIException(str(e))
    return object_map
def validate_customer(fn):
    """
    Validates that credit cards are between 1 and 5 and that each is 16 chars long
    """
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):
        # Normalize first: drop spaces, then any remaining non-numeric chars.
        raw_cards = kwargs.get("credit_cards")
        cards = [remove_non_numeric(card.replace(' ', '')) for card in raw_cards]
        # Validate credit card length
        if not 1 <= len(cards) <= 5:
            return "Number of credit cards must be between 1 and 5, inclusive", 400
        # Validate credit card composition
        for card in cards:
            if len(card) != 16 or not card.isdigit():
                return f"Credit card {card} must be 16 digits long", 400
        # If passed, continue with registration
        kwargs["credit_cards"] = cards
        return fn(*args, **kwargs)
    return wrapped
def format_field(value: Any) -> str:
    """
    Function that formats a single field for output on a table or CSV output, in order to deal with nested arrays or
    objects in the JSON outputs of the API.
    :param value: the value to format
    :return: a string that is fit for console output
    """
    is_listlike = isinstance(value, Sequence) and not isinstance(value, (str, bytes))
    if is_listlike:
        # A flat list of scalars reads best comma-separated; anything nested
        # is serialized as JSON.
        if all(isinstance(item, (str, bytes, int, float)) for item in value):
            return ", ".join(str(item) for item in value)
        return dumps(value)
    if isinstance(value, Mapping):
        return dumps(value)
    # Scalars (and anything else) pass through unchanged.
    return value
def count_honeypot_events():
    """
    Get total number of honeypot events.

    When a "date" query parameter is present, counts only events whose
    `date` lies in the inclusive [date[0], date[1]] range; otherwise the
    collection's estimated total is returned.

    Returns:
        JSON/Dict number of honeypot events
    """
    # fix_date() presumably normalizes the raw request value into a
    # [start, end] pair -- TODO confirm against its definition.
    date = fix_date(
        get_value_from_request("date")
    )
    if date:
        try:
            return jsonify(
                {
                    "count_honeypot_events_by_date": connector.honeypot_events.count_documents(
                        {
                            "date": {
                                "$gte": date[0],
                                "$lte": date[1]
                            }
                        }
                    ),
                    "date": date
                }
            ), 200
        except Exception as _:
            # Best-effort endpoint: any backend error yields an empty response.
            return flask_null_array_response()
    else:
        try:
            return jsonify(
                {
                    # estimated_document_count() reads collection metadata:
                    # fast, but only approximate.
                    "count_honeypot_events": connector.honeypot_events.estimated_document_count()
                }
            ), 200
        except Exception as _:
            return flask_null_array_response()
def k_i_grid(gridsize, boxsize):
    """k_i_grid(gridsize, boxlen)

    Build the (k1, k2, k3) wave-vector component grids for an array of
    shape (gridsize, gridsize, gridsize//2 + 1) — presumably a real-FFT
    layout (TODO confirm against callers).
    """
    half_n = gridsize // 2
    # Normalize boxsize into a per-axis tuple.
    box = egp.utils.boxsize_tuple(boxsize)
    dk = 2 * np.pi / box
    kmax = gridsize * dk
    ax = np.newaxis
    k1, k2, k3 = dk[:, ax, ax, ax] * np.mgrid[0:gridsize, 0:gridsize, 0:half_n + 1]
    # Fold the upper half of the first two axes to negative frequencies.
    k1 -= kmax[0] * (k1 > dk[0] * (half_n - 1))
    k2 -= kmax[1] * (k2 > dk[1] * (half_n - 1))
    return np.array((k1, k2, k3))
def is_bool_type(typ):
    """
    Check if the given type is a bool.

    Unwraps typing.NewType wrappers (via __supertype__) before testing.
    """
    underlying = getattr(typ, '__supertype__', typ)
    return isinstance(underlying, type) and issubclass(underlying, bool)
def gaussians_entropy(covars, ns=nt.NumpyLinalg):
    """
    Calculates the total entropy of an array of Gaussian distributions.

    :param covars: [N*D*D] covariance matrices
    :param ns: linear-algebra namespace providing pi, log, det and sum
    :return: total entropy
    """
    N = covars.shape[0]
    D = covars.shape[-1]
    # Differential entropy of N(mu, S) is 0.5*D*(1 + log(2*pi)) + 0.5*log|S|.
    # Fixes: the body referenced an undefined `covar` (parameter is
    # `covars`), called an undefined bare `log`, and summed the raw
    # determinants where the log-determinants belong.
    return 0.5 * N * D * (1 + ns.log(2 * ns.pi)) + 0.5 * ns.sum(ns.log(ns.det(covars)))
def stop_client(signum, frame):
    """Stop the monitoring client (signal handler).

    Args:
        signum: signal number delivered by the OS.
        frame: current stack frame (unused, required by the signal API).
    """
    logger.debug('Received signal, shutting down [signal=%s]', signum)
    # Inform workers of shutdown
    SHUTDOWN.set()
    # Join all workers. A plain loop, not a throwaway list comprehension,
    # because join() is called only for its side effect.
    for worker in WORKERS:
        worker.join()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.