content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def change_service(**kwargs):
    """Makes a given change to a MRS service

    Args:
        **kwargs: Additional options

    Keyword Args:
        service_id (int): The id of the service
        change_type (int): Type of change
        url_context_root (str): The context root for this service
        url_host_name (str): The host name for this service
        value (str): The value to be set as string or a dict if all are set
        session (object): The database session to use
        interactive (bool): Indicates whether to execute in interactive mode
        raise_exceptions (bool): If set to true exceptions are raised

    Returns:
        The result message as string
    """
    import json

    service_id = kwargs.get("service_id")
    change_type = kwargs.get("change_type")
    url_context_root = kwargs.get("url_context_root")
    url_host_name = kwargs.get("url_host_name")
    value = kwargs.get("value")
    session = kwargs.get("session")
    interactive = kwargs.get("interactive", core.get_interactive_default())
    raise_exceptions = kwargs.get("raise_exceptions", not interactive)

    try:
        session = core.get_current_session(session)

        # Make sure the MRS metadata schema exists and has the right version
        core.ensure_rds_metadata_schema(session)

        # List of services to be changed, initialized with service_id if given
        service_ids = [service_id] if service_id else []

        # Get the right service_id(s) if service_id is not given
        if not url_context_root and not service_id:
            # Check if there already is at least one service
            res = session.run_sql("""
                SELECT COUNT(*) AS service_count, MAX(id) AS id
                FROM `mysql_rest_service_metadata`.`service`
                """)
            row = res.fetch_one()
            service_count = row.get_field("service_count") if row else 0

            # If there is no service to change, error out.
            if service_count == 0:
                # BUG FIX: the exception was previously constructed but never
                # raised, so this error condition was silently ignored.
                raise Exception("No service available.")

            # If there is exactly 1 service, use that one
            # elif service_count == 1:
            #     service_ids.append(row.get_field("id"))

            # If there are more services, let the user select one or all
            if interactive:
                # Enable/disable/delete may target several services at once;
                # all other change types target exactly one service.
                allow_multi_select = (
                    change_type == SERVICE_DISABLE or
                    change_type == SERVICE_ENABLE or
                    change_type == SERVICE_DELETE)
                if allow_multi_select:
                    caption = ("Please select a service index, type "
                               "'hostname/root_context' or type '*' "
                               "to select all: ")
                else:
                    caption = ("Please select a service index or type "
                               "'hostname/root_context'")
                services = get_services(session=session, interactive=False)
                selection = core.prompt_for_list_item(
                    item_list=services,
                    prompt_caption=caption,
                    item_name_property="host_ctx",
                    given_value=None,
                    print_list=True,
                    allow_multi_select=allow_multi_select)
                if not selection or selection == "":
                    raise ValueError("Operation cancelled.")

                if allow_multi_select:
                    service_ids = [item["id"] for item in selection]
                else:
                    service_ids.append(selection["id"])
        elif not service_id:
            # Lookup the service id via host name and context root
            res = session.run_sql(
                """
                SELECT se.id FROM `mysql_rest_service_metadata`.`service` se
                    LEFT JOIN `mysql_rest_service_metadata`.url_host h
                        ON se.url_host_id = h.id
                WHERE h.name = ? AND se.url_context_root = ?
                """,
                [url_host_name if url_host_name else "", url_context_root])
            row = res.fetch_one()
            if row:
                service_ids.append(row.get_field("id"))

        if len(service_ids) == 0:
            raise ValueError("The specified service was not found.")

        # Check the given value, prompting for it when interactive
        if interactive and not value:
            if change_type == SERVICE_SET_PROTOCOL:
                value = prompt_for_service_protocol()
            elif change_type == SERVICE_SET_COMMENTS:
                value = core.prompt_for_comments()
        if change_type == SERVICE_SET_PROTOCOL and not value:
            raise ValueError("No value given.")

        # Update all given services
        for service_id in service_ids:
            service = get_service(
                service_id=service_id, session=session, interactive=False,
                return_formatted=False)

            if change_type == SERVICE_SET_CONTEXT_ROOT:
                url_ctx_root = value
            elif change_type == SERVICE_SET_ALL:
                if type(value) == str:  # TODO: Check why dicts cannot be used
                    value = json.loads(value)
                url_ctx_root = value.get("url_context_root")

            if (change_type == SERVICE_SET_CONTEXT_ROOT or
                    change_type == SERVICE_SET_ALL):
                if interactive and not url_ctx_root:
                    url_ctx_root = prompt_for_url_context_root(
                        default=service.get('url_context_root'))

                # If the context root has changed, check if the new one is valid
                if service.get("url_context_root") != url_ctx_root:
                    if (not url_ctx_root or not url_ctx_root.startswith('/')):
                        raise ValueError(
                            "The url_context_root has to start with '/'.")
                    core.check_request_path(
                        url_ctx_root, session=session)

            params = [service_id]
            if change_type == SERVICE_DISABLE:
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET enabled = FALSE
                    WHERE id = ?
                    """
            elif change_type == SERVICE_ENABLE:
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET enabled = TRUE
                    WHERE id = ?
                    """
            elif change_type == SERVICE_DELETE:
                sql = """
                    DELETE FROM `mysql_rest_service_metadata`.`service`
                    WHERE id = ?
                    """
            elif change_type == SERVICE_SET_DEFAULT:
                # Only one service may be the default; clear the flag first.
                res = session.run_sql("""
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET is_default = FALSE
                    """)
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET is_default = TRUE
                    WHERE id = ?
                    """
            elif change_type == SERVICE_SET_CONTEXT_ROOT:
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET url_context_root = ?
                    WHERE id = ?
                    """
                params.insert(0, url_ctx_root)
            elif change_type == SERVICE_SET_PROTOCOL:
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET url_protocol = ?
                    WHERE id = ?
                    """
                params.insert(0, value)
            elif change_type == SERVICE_SET_COMMENTS:
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET comments = ?
                    WHERE id = ?
                    """
                params.insert(0, value)
            elif change_type == SERVICE_SET_ALL:
                sql = """
                    UPDATE `mysql_rest_service_metadata`.`service`
                    SET enabled = ?,
                        url_context_root = ?,
                        url_protocol = ?,
                        comments = ?,
                        is_default = ?
                    WHERE id = ?
                    """
                if str(value.get("is_default")).lower() == "true":
                    # Clear the previous default before making this one default.
                    res = session.run_sql("""
                        UPDATE `mysql_rest_service_metadata`.`service`
                        SET is_default = FALSE
                        """)
                params.insert(
                    0, (str(value.get("enabled")).lower() == "true" or
                        str(value.get("enabled")) == "1"))
                params.insert(1, url_ctx_root)
                params.insert(2, value.get("url_protocol", ""))
                params.insert(3, value.get("comments", ""))
                params.insert(
                    4, (str(value.get("is_default")).lower() == "true" or
                        str(value.get("is_default")) == "1"))
            else:
                raise Exception("Operation not supported")

            res = session.run_sql(sql, params)
            if res.get_affected_row_count() == 0:
                raise Exception(
                    f"The specified service with id {service_id} was not "
                    "found.")

        if change_type == SERVICE_SET_DEFAULT:
            return "The service has been made the default."

        if len(service_ids) == 1:
            msg = "The service has been "
        else:
            msg = "The services have been "
        if change_type == SERVICE_DISABLE:
            msg += "disabled."
        elif change_type == SERVICE_ENABLE:
            msg += "enabled."
        elif change_type == SERVICE_DELETE:
            msg += "deleted."
        else:
            msg += "updated."
        return msg
    except Exception as e:
        if raise_exceptions:
            raise
        else:
            print(f"Error: {str(e)}")
def text_3d(string, depth=0.5):
    """Build an extruded 3D mesh of the given text.

    The text is rendered as vector geometry, extruded along +Z by
    ``depth``, triangulated, and wrapped as a pyvista mesh.
    """
    source = _vtk.vtkVectorText()
    source.SetText(string)

    extruder = _vtk.vtkLinearExtrusionFilter()
    extruder.SetInputConnection(source.GetOutputPort())
    extruder.SetExtrusionTypeToNormalExtrusion()
    extruder.SetVector(0, 0, 1)
    extruder.SetScaleFactor(depth)

    triangulator = _vtk.vtkTriangleFilter()
    triangulator.SetInputConnection(extruder.GetOutputPort())
    triangulator.Update()

    return pyvista.wrap(triangulator.GetOutput())
def get_secret_version(project: Optional[str] = None,
                       secret: Optional[str] = None,
                       version: Optional[str] = None,
                       opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecretVersionResult:
    """
    Get a Secret Manager secret's version. For more information see the [official documentation](https://cloud.google.com/secret-manager/docs/) and [API](https://cloud.google.com/secret-manager/docs/reference/rest/v1/projects.secrets.versions).

    :param str project: The project to get the secret version for. If it
           is not provided, the provider project is used.
    :param str secret: The secret to get the secret version for.
    :param str version: The version of the secret to get. If it
           is not provided, the latest version is retrieved.
    """
    __args__ = {
        'project': project,
        'secret': secret,
        'version': version,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    invoke_result = pulumi.runtime.invoke(
        'gcp:secretmanager/getSecretVersion:getSecretVersion',
        __args__, opts=opts, typ=GetSecretVersionResult).value
    return AwaitableGetSecretVersionResult(
        create_time=invoke_result.create_time,
        destroy_time=invoke_result.destroy_time,
        enabled=invoke_result.enabled,
        id=invoke_result.id,
        name=invoke_result.name,
        project=invoke_result.project,
        secret=invoke_result.secret,
        secret_data=invoke_result.secret_data,
        version=invoke_result.version)
def logged_class(cls):
    """Class decorator attaching a ``logger`` attribute named after the
    class's module and name."""
    cls.logger = logging.getLogger(f"{cls.__module__}.{cls.__name__}")
    return cls
def set_beam_dimensions_from_yield_moment_and_strain(df, eps_yield, e_mod, f_crack=0.4, d_fact=2.1):
    """Back-calculate beam widths from yield moment and strain.

    d_fact=2.1 from Priestley book
    """
    import sfsimodels as sm
    assert isinstance(df, sm.FrameBuilding)
    for storey in range(df.n_storeys):
        for beam in df.get_beams_at_storey(storey):
            sections = beam.sections
            section_widths = []
            for section in sections:
                # Yield curvature estimate per Priestley
                phi_yield = d_fact * eps_yield / section.depth
                if hasattr(section, 'mom_cap_p'):
                    avg_cap = (section.mom_cap_p - section.mom_cap_n) / 2
                    i_cracked = avg_cap / phi_yield / e_mod
                    i_gross = i_cracked / f_crack
                    # Invert I = b * d^3 / 12 for the width b
                    section_widths.append(i_gross / section.depth ** 3 * 12)
            mean_width = np.mean(section_widths)
            all_sections = list(range(len(sections)))
            beam.set_section_prop('width', mean_width, sections=all_sections)
            beam.set_section_prop('e_mod', e_mod, sections=all_sections)
            beam.set_section_prop('f_crack', f_crack, sections=all_sections)
def display_grid(rows, cols, xs, y_true, y_pred=None,
                 y_true_color='b', y_pred_color='g', figsize=(14, 7)):
    """Display examples on a rows-by-cols grid with their bounding boxes.

    Ground-truth boxes are drawn in *y_true_color*; predicted boxes
    (when *y_pred* is given) are clipped to the image extent and drawn
    in *y_pred_color*.
    """
    fig, axes = plt.subplots(rows, cols, figsize=figsize)
    idx = 0
    for row in range(rows):
        for col in range(cols):
            panel = axes[row, col]
            img = xs[idx]
            panel.imshow(img, cmap='gray')
            panel.set_xticklabels([])
            panel.set_yticklabels([])

            x, y, w, h = y_true[idx]
            panel.add_patch(patches.Rectangle(
                (x, y), w, h, linewidth=1,
                edgecolor=y_true_color, facecolor='none'))

            if y_pred is not None:
                img_h, img_w = img.shape[:2]
                x, y, w, h = y_pred[idx]
                # Clip predicted boxes that spill past the image border.
                w = min(w, img_w - x)
                h = min(h, img_h - y)
                panel.add_patch(patches.Rectangle(
                    (x, y), w, h, linewidth=1,
                    edgecolor=y_pred_color, facecolor='none'))
            idx += 1
    fig.tight_layout()
    plt.show()
def get_available_plugin_screens():
    """
    Discover the screen classes available in this package for dynamic
    instantiation.

    Walks every plugin directory, imports each module in its ``screen``
    sub-directory, and collects all ``ScreenBase`` subclasses whose names
    do not end in "Base".
    """
    package_dir = os.path.dirname(__file__)
    ignore_list = ['__init__.py']
    discovered = []
    for plugin in os.listdir(package_dir):
        if not os.path.isdir(os.path.join(package_dir, plugin)):
            continue
        # Inside a plugin directory: scan its "screen" sub-directory.
        screen_dir = os.path.join(package_dir, plugin, "screen")
        for module_file in os.listdir(screen_dir):
            if module_file in ignore_list or module_file[-3:] != '.py':
                continue
            module_name = plugin + ".screen." + module_file[:-3]
            module = __import__(module_name, globals(), locals(),
                                [module_name.rsplit(".", 1)[-1]])
            # Collect classes inheriting ScreenBase, skipping abstract
            # "...Base" helpers.
            for name, obj in inspect.getmembers(module):
                if (inspect.isclass(obj)
                        and issubclass(obj, base.ScreenBase)
                        and not name.endswith("Base")):
                    discovered.append(obj)
    return discovered
def crop_point(image, height_rate, width_rate):
    """Crop a randomly positioned region of the image.

    Crop region area = height_rate * width_rate * image_height * image_width;
    the region's position is drawn uniformly at random.

    Args:
        image: a PIL Image instance.
        height_rate: float, in the interval (0, 1].
        width_rate: float, in the interval (0, 1].
    Returns:
        a PIL Image instance.
    Raises:
        ValueError: if height_rate or width_rate is not within (0, 1].
    """
    # Raise ValueError (as documented) instead of using assert, which is
    # stripped under `python -O`.
    if not (0 < height_rate <= 1 and 0 < width_rate <= 1):
        raise ValueError(
            'height_rate and width_rate should be in the interval (0, 1].')
    left = image.size[0] * np.random.uniform(0, 1 - width_rate)
    upper = image.size[1] * np.random.uniform(0, 1 - height_rate)
    right = left + image.size[0] * width_rate
    lower = upper + image.size[1] * height_rate
    return image.crop((left, upper, right, lower))
def test_name_rename():
    """
    Verify that a NodeTransformer can rewrite Name identifiers in place.
    """
    class SuffixRenamer(NodeTransformer):
        def visit_Name(self, node, meta):
            node.id += '_visited'
            return node

    tree = ast.parse("bob = frank")
    transform(tree, SuffixRenamer())
    assign = tree.body[0]
    assert assign.targets[0].id == "bob_visited"
    assert assign.value.id == "frank_visited"
def endiff(directory):
    """
    Calculate the energy difference for a transition.

    Thin CLI wrapper delegating to ``pybat.cli.commands.get.get_endiff``.
    """
    from pybat.cli.commands.get import get_endiff

    get_endiff(directory)
def fileobj_video(contents=None):
    """
    Create an "mp4" video file on storage and return a File model pointing to it.

    If *contents* is given it is used as the file body; otherwise a random
    20-character printable string is generated and used.
    """
    if contents:
        logging.warning("input = {}".format(contents))
        payload = contents
    else:
        payload = "".join(random.sample(string.printable, 20)).encode('utf-8')
    logging.warning("contents = {}".format(payload))
    studio_file = create_studio_file(payload, preset=format_presets.VIDEO_HIGH_RES, ext='mp4')
    return studio_file['db_file']
def delete_old_stock_items():
    """
    Remove StockItem rows previously marked "scheduled for deletion".

    Cascading bulk deletes are expensive and would block the UI, so UI
    actions only flag each StockItem; this background task performs the
    actual delete later.
    """
    try:
        from stock.models import StockItem
    except AppRegistryNotReady:
        logger.info("Could not delete scheduled StockItems - AppRegistryNotReady")
        return

    doomed = StockItem.objects.filter(scheduled_for_deletion=True)
    if doomed.count() > 0:
        logger.info(f"Removing {doomed.count()} StockItem objects scheduled for deletion")
        doomed.delete()
def test_smoke_parse_merpfile():
    """Smoke test: good and soft-error files parse; hard errors raise."""
    for parseable in good_mcfs + softerror_mcfs:
        merp2tbl.parse_merpfile(parseable)

    for bad in harderror_mcfs:
        with pytest.raises(NotImplementedError):
            merp2tbl.parse_merpfile(bad)
def _toIPv4AddrString(intIPv4AddrInteger):
"""Convert the IPv4 address integer to the IPv4 address string.
:param int intIPv4AddrInteger: IPv4 address integer.
:return: IPv4 address string.
:rtype: str
Example::
intIPv4AddrInteger Return
---------------------------------
3221225985 -> '192.0.2.1'
Test:
>>> _toIPv4AddrString(3221225985)
'192.0.2.1'
"""
return (
str((intIPv4AddrInteger >> 24) & 0xFF) + '.' +
str((intIPv4AddrInteger >> 16) & 0xFF) + '.' +
str((intIPv4AddrInteger >> 8) & 0xFF) + '.' +
str( intIPv4AddrInteger & 0xFF)) | 37,313 |
def expand_case_matching(s):
    """Expand a string into a case-insensitive globbable pattern."""
    pieces = []
    open_brackets = {"[", "{"}
    close_brackets = {"]", "}"}
    depth = 0
    drive_part = WINDOWS_DRIVE_MATCHER.match(s) if ON_WINDOWS else None
    if drive_part:
        # Keep the Windows drive prefix verbatim.
        drive_part = drive_part.group(0)
        pieces.append(drive_part)
        s = s[len(drive_part):]
    for ch in s:
        if ch in open_brackets:
            depth += 1
        elif ch in close_brackets:
            depth -= 1
        elif depth > 0:
            # Inside an existing bracket group: leave the char untouched.
            pass
        elif ch.isalpha():
            folded = ch.casefold()
            if len(folded) == 1:
                ch = "[{0}{1}]".format(ch.upper(), ch.lower())
            else:
                # casefold may expand one char to several; make all but the
                # last optional and include the original char in the last set.
                expanded = "".join(
                    "[{0}{1}]?".format(f.upper(), f.lower()) for f in folded[:-1]
                )
                expanded += "[{0}{1}{2}]".format(
                    folded[-1].upper(), folded[-1].lower(), ch)
                ch = expanded
        pieces.append(ch)
    return "".join(pieces)
def look_behind(s: str, end_idx: int) -> str:
    """
    Return the stripped span of text after the last semicolon inside
    ``s[:end_idx - 1]`` (the whole window when it has no semicolon).
    """
    window = s[: end_idx - 1]
    # Position just after the last ';' in the window, or 0 when absent.
    last_semi_end = 0
    for match in re.finditer(r"(?<=(;))", window):
        last_semi_end = match.end()
    return window[last_semi_end:end_idx].strip()
def sum_fspec(files, outname=None):
    """Take a bunch of (C)PDSs and sums them.

    Parameters
    ----------
    files : list of str
        Paths to the frequency-spectrum files to combine; all files must
        be of the same kind.
    outname : str, default None
        Output file name; defaults to ``"tot_" + ftype + HEN_FILE_EXTENSION``.

    Returns
    -------
    tot_contents
        The averaged periodogram that was saved to *outname*.
    """
    # Read first file to establish the spectrum kind and default out name
    ftype0, contents = get_file_type(files[0])
    pdstype = ftype0.replace("reb", "")
    outname = _assign_value_if_none(
        outname, "tot_" + ftype0 + HEN_FILE_EXTENSION
    )

    def check_and_distribute_files(files):
        for i, f in enumerate(files):
            # BUG FIX: previously called get_file_type(files[0]) here, which
            # re-read the first file each iteration and made the same-kind
            # assertion below a no-op.
            ftype, contents = get_file_type(f)
            if i == 0:
                ftype0 = ftype
            else:
                assert ftype == ftype0, "Files must all be of the same kind"
            contents.fftlen = contents.segment_size
            yield contents

    tot_contents = average_periodograms(check_and_distribute_files(files))

    log.info("Saving %s to %s" % (pdstype, outname))
    save_pds(tot_contents, outname)
    return tot_contents
def make_aware(dt, tz=None):
    """
    Convert naive datetime object to tz-aware.

    Naive datetimes are localized to *tz* (a tzinfo object or tz name;
    UTC when omitted). Aware datetimes keep their own tzinfo.
    """
    if tz:
        tz = pytz.timezone(tz) if isinstance(tz, six.string_types) else tz
    else:
        tz = pytz.utc
    if dt.tzinfo:
        # NOTE(review): converts to dt's *own* tzinfo (effectively a no-op)
        # rather than to ``tz`` -- presumably "already aware, leave as is";
        # confirm that is the intent.
        return dt.astimezone(dt.tzinfo)
    return tz.localize(dt)
def decrypt(ctxt, kx, spice, blocksize):
    """ Main decryption function

    Dispatches to the size-specific sub-cipher based on *blocksize*.

    Args:
        ctxt: ciphertext
        kx: key expansion table
        spice: spice
        blocksize: size of block
    Returns:
        Decrypted ciphertext
    """
    spice_arr = int_to_arr(spice, 512)
    ctxt_arr = int_to_arr(ctxt, blocksize)
    common = (ctxt_arr, kx, spice_arr, blocksize)
    # Mask for the partial trailing 64-bit word of the block.
    lmask = (1 << blocksize % 64) - 1
    if blocksize < 36:
        return tiny_decrypt(*common)
    if blocksize < 65:
        return short_decrypt(*common, lmask)
    if blocksize < 129:
        return medium_decrypt(*common, lmask)
    if blocksize < 513:
        return long_decrypt(*common, lmask)
    return extended_decrypt(*common, lmask)
def test_list(app):
    """ test list folder """
    LOGGER.debug("sample: %r, %r", SAMPLE_FOLDER, SAMPLE_FILE)
    listing = app.get_site().list_folder(f"{SAMPLE_FOLDER}/")
    first_entry = listing["files"][0]
    assert first_entry["path"] == SAMPLE_KEY
    assert first_entry["name"] == SAMPLE_FILE
def listCombination(lists) -> list:
    """
    Return every possible combination of elements across the given sub-lists.

    :param lists: a list of lists
    :return: list of tuples, one element taken from each sub-list, in
        ``itertools.product`` order (same order the manual loop produced)
    """
    from itertools import product

    # itertools.product yields exactly the combinations the previous manual
    # append loop built, at C speed.
    return list(product(*lists))
def DoChopTraj(trajf, chopf, startns, stopns, translate=False):
    """
    Chops a provided trajectory file based on a given
    start time and end time in nanoseconds. Assuming
    2 fs time step and writing results every 1000 steps.
    Helpful for seeing how PMF evolves over time.

    Parameters
    ----------
    trajf : str
        Input trajectory file; lines starting with '#' are skipped.
    chopf : str
        Output file receiving the chopped subset.
    startns : number
        Start time in nanoseconds (0 keeps the beginning of the file).
    stopns : number or None
        Stop time in nanoseconds; None keeps everything to the end.
    translate : bool
        Translate negative angles to positive ones,
        e.g., -90 translates to 270; otherwise values above 180
        are wrapped down by 360.

    Returns
    -------
    True if successful, False otherwise
    """
    # BUG FIX: '/' produced float slice indices (TypeError for any nonzero
    # startns/stopns); truncate to int explicitly.
    if startns != 0:
        time1 = int(1000 * startns / 2) + 1
    else:
        time1 = startns
    if stopns is not None:
        time2 = int(1000 * stopns / 2)

    # BUG FIX: check existence *before* opening; previously the open()
    # raised FileNotFoundError and the error branch was unreachable.
    if not os.path.exists(trajf):
        print("%s not found in %s" % (trajf, os.getcwd()))
        return False

    with open(trajf, 'r') as f:
        lines = f.readlines()
    # filter out lines to remove commented ones
    filtlines = [line for line in lines if not line.startswith('#')]

    if stopns is not None:
        subset = filtlines[time1:time2]
    else:
        subset = filtlines[time1:]

    # BUG FIX: use a context manager so the output file is closed even if a
    # malformed line raises while parsing.
    with open(chopf, 'w') as outf:
        for entry in subset:
            value = float(entry.split()[1])
            if translate and value < 0:
                value = value + 360  # e.g. -90 -> 270
            elif not translate and value > 180:
                value = value - 360
            outf.write("%s %.14e \n" % (entry.split()[0], value))
    return True
def test_single_link_is_dead(client):
    """Ensure a dead link is flagged as rotten status of 'yes'."""
    creation = client.post(
        helpers.authed_request("/", auth=helpers.VALID_AUTH),
        json=helpers.item_dead_url(),
    )
    item_id = helpers.from_json(creation.data)["id"]

    # Hit the linkrot endpoint repeatedly; only the last response matters.
    for _ in range(3):
        response = client.post(
            helpers.authed_request("/", "linkrot", item_id, auth=helpers.VALID_AUTH)
        )
    payload = helpers.from_json(response.get_data(as_text=True))

    assert response.status_code == 200
    assert payload["id"] == item_id
    assert payload["url"] == helpers.item_dead_url()["url"]
    result = payload["result"]
    assert result["times_failed"] == 0
    assert result["is_dead"] == True
    assert result["is_web_archive"] == False
def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):
    """DEPRECATED - use ``to_sql``.

    Write records stored in a DataFrame to a SQL database.

    Parameters
    ----------
    frame : DataFrame
    name : string
    con : DBAPI2 connection
    flavor : {'sqlite', 'mysql'}, default 'sqlite'
        The flavor of SQL to use.
    if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, do nothing.
        - replace: If table exists, drop it, recreate it, and insert data.
        - append: If table exists, insert data. Create if does not exist.
    index : boolean, default False
        Write DataFrame index as a column

    Notes
    -----
    Deprecated in favor of ``to_sql``, with two differences:

    - ``to_sql`` writes the index by default; pass ``index=False`` to keep
      this function's behaviour.
    - ``to_sql`` supports sqlalchemy engines to work with different sql
      flavors.

    See also
    --------
    pandas.DataFrame.to_sql
    """
    warnings.warn("write_frame is depreciated, use to_sql", FutureWarning)

    # Backwards compatibility: unlike to_sql, default to index=False.
    index = kwargs.pop('index', False)
    return to_sql(frame, name, con, flavor=flavor, if_exists=if_exists,
                  index=index, **kwargs)
def edge_slope(e):
    """Calculate the slope of an edge, 'inf' for vertical edges"""
    vec = edge_vector(e)
    run = round(vec.xy.length, 4)
    try:
        return vec.z / run
    except ZeroDivisionError:
        # Zero horizontal run means the edge is vertical.
        return float("inf")
def test_reg_user_cannot_view_users_org_dne(reg_user_headers):
    """ regular users cannot view users of an organization that doesn't exist """
    missing_org = str(uuid.uuid4())
    res = requests.get(
        f'{env.AWG_BASE_URL}{ORG_URL}/{missing_org}/users',
        headers=reg_user_headers,
    )
    assert res.status_code == 404
    response_contains_json(res, 'error', 'ORG_DNE_PARAM')
def modify_table():
    """Delete old table from README.md, and put new one."""
    readme = "docs/README.md"
    with open(readme, "r", encoding="utf-8") as old_file:
        lines = old_file.readlines()

    skipping = False
    with open(readme, "w", encoding="utf-8") as new_file:
        for line in lines:
            stripped = line.strip("\n")
            if "| Sync Service | " in stripped:
                # Last row of the old table: stop skipping and emit the
                # replacement table in its place.
                skipping = False
                new_file.write(get_content_of_new_table())
            elif skipping:
                continue
            elif "| Component | " in stripped:
                # Header row of the old table: start dropping lines.
                skipping = True
            else:
                new_file.write(line)
def pkt_addrs(addr_fragment: str) -> tuple[Address, Address, Address, Address, Address]:
    """Return the address fields from (e.g): '01:078710 --:------ 01:144246'.
    Will raise an InvalidAddrSetError is the address fields are not valid.
    """
    # for debug: print(pkt_addrs.cache_info())
    try:
        # Three fixed-width (9-char) address fields at offsets 0, 10 and 20.
        addrs = [id_to_address(addr_fragment[i : i + 9]) for i in range(0, 30, 10)]
    except ValueError as exc:
        raise InvalidAddrSetError(f"Invalid addr set: {addr_fragment}: {exc}")
    # Exactly one of the three legal layouts below must hold; when every
    # layout is ruled out, the whole set is rejected.
    if (
        not (
            # .I --- 01:145038 --:------ 01:145038 1F09 003 FF073F # valid
            # .I --- 04:108173 --:------ 01:155341 2309 003 0001F4 # valid
            # layout 1: src and dst populated, middle field is the filler
            addrs[0] not in (NON_DEV_ADDR, NUL_DEV_ADDR)
            and addrs[1] == NON_DEV_ADDR
            and addrs[2] != NON_DEV_ADDR
        )
        and not (
            # .I --- 32:206250 30:082155 --:------ 22F1 003 00020A # valid
            # .I --- 29:151550 29:237552 --:------ 22F3 007 00023C03040000 # valid
            # layout 2: first two fields are distinct devices, last is filler
            addrs[0] not in (NON_DEV_ADDR, NUL_DEV_ADDR)
            and addrs[1] not in (NON_DEV_ADDR, addrs[0])
            and addrs[2] == NON_DEV_ADDR
        )
        and not (
            # .I --- --:------ --:------ 10:105624 1FD4 003 00AAD4 # valid
            # layout 3: only the third field names a device
            addrs[2] not in (NON_DEV_ADDR, NUL_DEV_ADDR)
            and addrs[0] == NON_DEV_ADDR
            and addrs[1] == NON_DEV_ADDR
        )
    ):
        raise InvalidAddrSetError(f"Invalid addr set: {addr_fragment}")
    # Keep only real device addresses ("--" typed entries are filler).
    device_addrs = list(filter(lambda a: a.type != "--", addrs))  # dex
    src_addr = device_addrs[0]
    dst_addr = device_addrs[1] if len(device_addrs) > 1 else NON_DEV_ADDR
    # Same device on both sides: collapse src onto dst.
    if src_addr.id == dst_addr.id:  # incl. HGI_DEV_ADDR == HGI_DEV_ADDR
        src_addr = dst_addr
    return src_addr, dst_addr, *addrs
def token_downup(target_dict, source_dict):
    """Transform token features between different distribution.

    Aggregates source token features onto the target token layout via a
    (normalized) assignment matrix built from the per-initial-token index
    maps. Assumes both dicts describe the same B x N_init initial-token
    grid — TODO confirm against the token-clustering caller.

    Returns:
        x_out (Tensor[B, N, C]): token features.
    Args:
        target_dict (dict): dict for target token information
        source_dict (dict): dict for source token information.
    """
    x_s = source_dict['x']
    idx_token_s = source_dict['idx_token']
    idx_token_t = target_dict['idx_token']
    T = target_dict['token_num']
    B, S, C = x_s.shape
    N_init = idx_token_s.shape[1]

    # Per-initial-token aggregation weights; defaults to uniform weights.
    weight = target_dict['agg_weight'] if 'agg_weight' in target_dict.keys() else None
    if weight is None:
        weight = x_s.new_ones(B, N_init, 1)
    weight = weight.reshape(-1)

    # choose the way with fewer flops.
    if N_init < T * S:
        # use sparse matrix multiplication
        # Flops: B * N_init * (C+2)
        # Offset indices so all batches share one big (B*T) x (B*S) matrix.
        idx_token_t = idx_token_t + torch.arange(B, device=x_s.device)[:, None] * T
        idx_token_s = idx_token_s + torch.arange(B, device=x_s.device)[:, None] * S
        coor = torch.stack([idx_token_t, idx_token_s], dim=0).reshape(2, B * N_init)

        # torch.sparse.spmm does not support fp16
        with torch.cuda.amp.autocast(enabled=False):
            # torch.sparse does not support grad for sparse matrix
            weight = weight.float().detach()
            # build a matrix with shape [B*T, B*S]
            A = torch.sparse.FloatTensor(coor, weight, torch.Size([B * T, B * S]))
            # normalize the matrix
            all_weight = A.type(torch.float32) @ x_s.new_ones(B * S, 1).type(torch.float32) + 1e-6
            weight = weight / all_weight[(idx_token_t).reshape(-1), 0]
            A = torch.sparse.FloatTensor(coor, weight, torch.Size([B * T, B * S]))
            # sparse matmul
            x_out = A.type(torch.float32) @ x_s.reshape(B * S, C).type(torch.float32)
    else:
        # use dense matrix multiplication
        # Flops: B * T * S * (C+2)
        idx_batch = torch.arange(B, device=x_s.device)[:, None].expand(B, N_init)
        coor = torch.stack([idx_batch, idx_token_t, idx_token_s], dim=0).reshape(3, B * N_init)
        weight = weight.detach()  # detach to reduce training time
        # build a matrix with shape [B, T, S]
        A = torch.sparse.FloatTensor(coor, weight, torch.Size([B, T, S])).to_dense()
        # normalize the matrix
        A = A / (A.sum(dim=-1, keepdim=True) + 1e-6)
        # dense matmul
        x_out = A @ x_s
    x_out = x_out.reshape(B, T, C).type(x_s.dtype)
    return x_out
def get_score(train_data,train_labels,test_data,test_labels,problem_type):
    """
    Fit a 3-NN model on the training split and score it on the test split.

    Returns the accuracy from 3-NN classification when
    problem_type == 'classification', or the MSE from 3-NN regression
    when problem_type == 'regression'.
    """
    if (problem_type=="classification"):
        model = KNeighborsClassifier(n_neighbors=3)
    else:
        model = KNeighborsRegressor(n_neighbors=3)
    model.fit(train_data,train_labels)
    predictions = model.predict(test_data)
    if (problem_type=="regression"):
        return mean_squared_error(test_labels,predictions)
    return accuracy_score(test_labels,predictions)
def build_client_datasets_fn(train_dataset, train_clients_per_round):
  """Builds the function for generating client datasets at each round.

  Args:
    train_dataset: A `tff.simulation.ClientData` object.
    train_clients_per_round: The number of client participants in each round.

  Returns:
    A function which returns a list of `tff.simulation.ClientData` objects at a
    given round round_num.
  """

  def client_datasets(round_num):
    del round_num  # Unused.
    # Sample distinct clients uniformly at random for this round.
    chosen = np.random.choice(
        train_dataset.client_ids, size=train_clients_per_round, replace=False)
    return [train_dataset.create_tf_dataset_for_client(c) for c in chosen]

  return client_datasets
def get_simple_grid(xbounds, ybounds, shift_origin=None):
    """Build a Grid covering the integer pixel lattice in the given bounds.

    A single-valued bound is read as (0, bound); a pair as (min, max).
    """
    def _minmax(bounds):
        bounds = np.atleast_1d(bounds)
        if len(bounds) == 1:
            return 0, bounds[0]
        lo, hi = bounds
        return lo, hi

    xmin, xmax = _minmax(xbounds)
    ymin, ymax = _minmax(ybounds)

    pixel_grid = np.mgrid[xmin:xmax, ymin:ymax]
    flat_pixels = np.concatenate(pixel_grid.T, axis=0)
    if shift_origin is not None:
        # not +=: avoids an int/float dtype conflict on in-place add
        flat_pixels = flat_pixels + shift_origin
    return Grid(flat_pixels, UNIT_SQUARE)
def log_message(update, context):
    """Log the message that caused the update

    Appends a formatted record (user, chat and text info) to
    logs/messages.log; failures are logged and never propagate.

    Args:
        update (Update): update event
        context (CallbackContext): context passed by the handler
    """
    if update.message:
        try:
            with open(get_abs_path("logs", "messages.log"), "a", encoding="utf8") as log_file:
                user = update.message.from_user
                chat = update.message.chat
                message = f"\n___ID MESSAGE: {str(update.message.message_id)} ____\n"\
                    "___INFO USER___\n"\
                    f"user_id: {str(user.id)}\n"\
                    f"user_name: {str(user.username)}\n"\
                    f"user_first_lastname: {str(user.first_name)} {str(user.last_name)}\n"\
                    "___INFO CHAT___\n"\
                    f"chat_id: {str(chat.id)}\n"\
                    f"chat_type: {str(chat.type)}\n"\
                    f"chat_title: {str(chat.title)}\n"\
                    "___TESTO___\n"\
                    f"text: {str(update.message.text)}\n"\
                    f"date: {str(update.message.date)}"\
                    "\n_____________\n"
                log_file.write("\n" + message)
        except AttributeError as e:
            # Message lacked one of the attributes accessed above.
            logger.warning(e)
        except FileNotFoundError as e:
            # logs/ directory (or path) is missing.
            logger.error(e)
def query_hecate(session, ra, dec, _radius, _verbose: bool = True):
    """ Query the HECATE catalog

    Cone-search around (ra, dec) within _radius and collect, per matching
    galaxy, its position, offset from the candidate, B magnitude and
    distance. Returns (m, gal_ra, gal_dec, gal_offset, mag, filt, dist,
    dist_err, source) where m is 1 if any match was found, else 0.
    """
    # m flags whether the catalog produced any matches; per-galaxy results
    # accumulate in the parallel lists below (distflag is never populated).
    m=0
    gal_offset = []; mag = []; filt = []; dist = []; dist_err = []; gal_ra = []; gal_dec = []; distflag = []; source = []
    # set up query
    try:
        query = session.query(HecateQ3cRecord)
        query = hecate_q3c_orm_filters(query, {'cone': f'{ra},{dec},{_radius}'})
    except Exception as _e3:
        if _verbose:
            print(f"{_e3}")
            print(f"Failed to execute query for RA, Dec = ({ra},{dec})")
    # NOTE(review): if the try block failed, `query` may be unbound here and
    # this raises NameError — confirm intended error handling.
    if len(query.all()) > 0:
        m+=1
        for _x in HecateQ3cRecord.serialize_list(query.all()):
            # NaN != NaN, so this keeps only rows with a finite B magnitude.
            if _x['bt']== _x['bt']:
                mag.append(_x['bt'])
                filt.append('B')
                gal = SkyCoord(_x['ra']*u.deg, _x['dec']*u.deg)
                cand = SkyCoord(ra*u.deg, dec*u.deg)
                gal_offset.append(cand.separation(gal).arcsec)
                gal_ra.append(_x['ra'])
                gal_dec.append(_x['dec'])
                dist.append(_x['d'])  # Mpc
                dist_err.append(_x['e_d'])  # Mpc
                source.append('HECATE')
    return m, gal_ra, gal_dec, gal_offset, mag, filt, dist, dist_err, source
def all_main_characters(raw_data: AniListRawResponse) -> list[Character]:
    """Extract every main-character node from the raw AniList response."""
    nodes: list[Character] = anime_media(raw_data)["mainCharacters"]["nodes"]
    return nodes
def sequence_to_ngram(sequence: str, N: int) -> List[str]:
    """
    Chops a sequence into overlapping N-grams (substrings of length N)

    :param sequence: Sequence to convert to N-grams
    :type sequence: str
    :param N: Length of the N-grams
    :type N: int
    :return: List of overlapping N-grams (empty when len(sequence) < N)
    :rtype: List[str]
    """
    return [sequence[i : i + N] for i in range(len(sequence) - N + 1)]
def _convert_for_receive(profile):
    """Convert profile to be fed into the receive model.

    Args:
        profile (pandas.DataFrame): Profile to convert.

    Returns:
        tuple: (converted profile, rows whose ``age`` was missing).
    """
    has_age = ~profile.age.isna()
    without_profile = profile[~has_age].reset_index(drop=True)
    converted = profile[has_age].reset_index(drop=True)
    # Apply the transforms innermost-first, same order as the original chain.
    converted = _extract_age_bins(converted)
    converted = _explode_membership_date(converted)
    converted = _transform_gender(converted)
    converted = _transform_generation(converted)
    converted = _transform_age_group(converted)
    return converted, without_profile
def _format_path(path):
"""Format path to data for which an error was found.
:param path: Path as a list of keys/indexes used to get to a piece of data
:type path: collections.deque[str|int]
:returns: String representation of a given path
:rtype: str
"""
path_with_brackets = (
''.join('[{!r}]'.format(fragment) for fragment in path)
)
return '{}'.format(path_with_brackets) | 37,337 |
def common_mean_watson(Data1, Data2, NumSims=5000, print_result=True, plot='no', save=False, save_folder='.', fmt='svg'):
    """
    Conduct a Watson V test for a common mean on two directional data sets.

    This function calculates Watson's V statistic from input files through
    Monte Carlo simulation in order to test whether two populations of
    directional data could have been drawn from a common mean. The critical
    angle between the two sample mean directions and the corresponding
    McFadden and McElhinny (1990) classification is printed.

    Parameters
    ----------
    Data1 : a nested list of directional data [dec,inc] (a di_block)
    Data2 : a nested list of directional data [dec,inc] (a di_block)
    NumSims : number of Monte Carlo simulations (default is 5000)
    print_result : default is to print the test result (True)
    plot : the default is no plot ('no'). Putting 'yes' will plot the CDF
        from the Monte Carlo simulations.
    save : optional save of plots (default is False)
    save_folder : path to where plots will be saved (default is current)
    fmt : format of figures to be saved (default is 'svg')

    Returns
    -------
    result : 1 if the test passes, 0 if it fails (a tie V == Vcrit fails)
    angle : angle between the Fisher means of the two data sets
    critical_angle : critical angle for the test to pass

    Examples
    --------
    Develop two populations of directions using ``ipmag.fishrot``. Use the
    function to determine if they share a common mean.
    >>> directions_A = ipmag.fishrot(k=20, n=30, dec=40, inc=60)
    >>> directions_B = ipmag.fishrot(k=35, n=25, dec=42, inc=57)
    >>> ipmag.common_mean_watson(directions_A, directions_B)
    """
    pars_1 = pmag.fisher_mean(Data1)
    pars_2 = pmag.fisher_mean(Data2)
    cart_1 = pmag.dir2cart([pars_1["dec"], pars_1["inc"], pars_1["r"]])
    cart_2 = pmag.dir2cart([pars_2['dec'], pars_2['inc'], pars_2["r"]])
    # Watson's V from the kappa-weighted resultant vectors.
    Sw = pars_1['k'] * pars_1['r'] + pars_2['k'] * pars_2['r']  # k1*r1+k2*r2
    xhat_1 = pars_1['k'] * cart_1[0] + pars_2['k'] * cart_2[0]  # k1*x1+k2*x2
    xhat_2 = pars_1['k'] * cart_1[1] + pars_2['k'] * cart_2[1]  # k1*y1+k2*y2
    xhat_3 = pars_1['k'] * cart_1[2] + pars_2['k'] * cart_2[2]  # k1*z1+k2*z2
    Rw = np.sqrt(xhat_1**2 + xhat_2**2 + xhat_3**2)
    V = 2 * (Sw - Rw)
    # keep weighted sum for later when determining the "critical angle"
    # let's save it as Sr (notation of McFadden and McElhinny, 1990)
    Sr = Sw
    # do monte carlo simulation of datasets with same kappas as data,
    # but a common mean
    Vp = []  # set of Vs from simulations
    for k in range(NumSims):
        # get a set of N1 fisher distributed vectors with k1,
        # calculate fisher stats
        Dirp = []
        for i in range(pars_1["n"]):
            Dirp.append(pmag.fshdev(pars_1["k"]))
        pars_p1 = pmag.fisher_mean(Dirp)
        # get a set of N2 fisher distributed vectors with k2,
        # calculate fisher stats
        Dirp = []
        for i in range(pars_2["n"]):
            Dirp.append(pmag.fshdev(pars_2["k"]))
        pars_p2 = pmag.fisher_mean(Dirp)
        # get the V for these
        Vk = pmag.vfunc(pars_p1, pars_p2)
        Vp.append(Vk)
    # sort the Vs, get Vcrit (95th percentile one)
    Vp.sort()
    k = int(.95 * NumSims)
    Vcrit = Vp[k]
    # equation 18 of McFadden and McElhinny, 1990 calculates the critical
    # value of R (Rwc)
    Rwc = Sr - (Vcrit / 2)
    # following equation 19 of McFadden and McElhinny (1990) the critical
    # angle is calculated. If the observed angle (also calculated below)
    # between the data set means exceeds the critical angle the hypothesis
    # of a common mean direction may be rejected at the 95% confidence
    # level. The critical angle is simply a different way to present
    # Watson's V parameter so it makes sense to use the Watson V parameter
    # in comparison with the critical value of V for considering the test
    # results. What calculating the critical angle allows for is the
    # classification of McFadden and McElhinny (1990) to be made
    # for data sets that are consistent with sharing a common mean.
    k1 = pars_1['k']
    k2 = pars_2['k']
    R1 = pars_1['r']
    R2 = pars_2['r']
    critical_angle = np.degrees(np.arccos(((Rwc**2) - ((k1 * R1)**2)
                                           - ((k2 * R2)**2)) /
                                          (2 * k1 * R1 * k2 * R2)))
    D1 = (pars_1['dec'], pars_1['inc'])
    D2 = (pars_2['dec'], pars_2['inc'])
    angle = pmag.angle(D1, D2)
    if print_result:
        print("Results of Watson V test: ")
        print("")
        print("Watson's V: " '%.1f' % (V))
        print("Critical value of V: " '%.1f' % (Vcrit))
    if V < Vcrit:
        if print_result:
            print('"Pass": Since V is less than Vcrit, the null hypothesis')
            print('that the two populations are drawn from distributions')
            print('that share a common mean direction can not be rejected.')
        result = 1
    else:
        # BUG FIX: the original only assigned `result` for V < Vcrit or
        # V > Vcrit, so an exact tie V == Vcrit raised UnboundLocalError on
        # return. A tie is now treated as a fail.
        if print_result:
            print('"Fail": Since V is greater than Vcrit, the two means can')
            print('be distinguished at the 95% confidence level.')
        result = 0
    if print_result:
        print("")
        print("M&M1990 classification:")
        print("")
        print("Angle between data set means: " '%.1f' % (angle))
        print("Critical angle for M&M1990: " '%.1f' % (critical_angle))
        if V < Vcrit:
            if critical_angle < 5:
                print("The McFadden and McElhinny (1990) classification for")
                print("this test is: 'A'")
            elif critical_angle < 10:
                print("The McFadden and McElhinny (1990) classification for")
                print("this test is: 'B'")
            elif critical_angle < 20:
                print("The McFadden and McElhinny (1990) classification for")
                print("this test is: 'C'")
            else:
                # BUG FIX: the closing quote was mistyped as ';' originally.
                print("The McFadden and McElhinny (1990) classification for")
                print("this test is: 'INDETERMINATE'")
        else:
            print("")
    if plot == 'yes':
        CDF = {'cdf': 1}
        # pmagplotlib.plot_init(CDF['cdf'],5,5)
        plt.figure(figsize=(3.5, 2.5))
        p1 = pmagplotlib.plot_cdf(CDF['cdf'], Vp, "Watson's V", 'r', "")
        p2 = pmagplotlib.plot_vs(CDF['cdf'], [V], 'g', '-')
        p3 = pmagplotlib.plot_vs(CDF['cdf'], [Vp[k]], 'b', '--')
        # pmagplotlib.draw_figs(CDF)
        if save:
            plt.savefig(os.path.join(
                save_folder, 'common_mean_watson') + '.' + fmt)
        pmagplotlib.show_fig(CDF['cdf'])
    return result, angle[0], critical_angle
def empty_method(context):
    """
    Intentionally does nothing. The ``context`` argument must still be
    accepted: behave passes an instance of behave.runner.Context to every
    step implementation method.
    """
    return None
def GetLayouts():
    """Return the layout proxies on the active session.
    Layout proxies are used to place views in a grid."""
    pxm = servermanager.ProxyManager()
    return pxm.GetProxiesInGroup("layouts")
def p_constdef(p):
    """constdef : DEFINE IDENTIFIER EQ argument"""
    # Map the identifier (token 2) to its argument value (token 4).
    p[0] = {p[2]: p[4]}
def fail(message: str) -> NoReturn:
    """
    Shortcut for throwing a `DescriptiveError`. See its docs for details.
    """
    cleaned = dedent(message).strip()
    raise DescriptiveError(cleaned)
def register(event: str, handler: Callable, exchange=""):
    """
    Register an event handler for `event`.
    If the dispatcher has already started, the handler is additionally bound
    to a queue/channel immediately; otherwise binding happens at startup.
    :param event: name of the event to handle
    :param handler: callable invoked when the event fires
    :param exchange: optional exchange name used to namespace the event
    :return: None
    """
    global _registry
    # Events are namespaced per exchange as "<exchange>/<event>".
    event = f"{exchange}/{event}"
    # Reuse the existing handler set for this event, or start a new one.
    item = _registry.get(event, {"handlers": set()})
    item['handlers'].add(handler)
    _registry[event] = item
    if _started:
        # in this case, we need manually bind msg, handler with a queue/channel
        asyncio.create_task(_bind(event))
def test_one_node_net():
    """
    Build a single-junction network (external grid + sink + source) for each
    supported fluid and check that the node balance closes: the external grid
    supply plus the sink draw minus the source feed must sum to zero.
    """
    # The original repeated the same block verbatim for both fluids; a loop
    # keeps the two cases identical by construction.
    for fluid in ('water', 'lgas'):
        net = create_empty_network(fluid=fluid)
        j = create_junction(net, 1, 298.15)
        create_ext_grid(net, j, 1, 298.15)
        create_sink(net, j, 0.01)
        create_source(net, j, 0.02)
        pp.pipeflow(net)
        # Mass balance at the single node must close.
        assert np.isclose(net.res_ext_grid.values + net.res_sink.values - net.res_source.values, 0)
def GetRelativePath(starting_dir, dest):
    """Creates a relative path from the starting_dir to the dest.

    Both paths are canonicalized (symlinks resolved) before comparison. When
    the two paths share no common prefix, the absolute 'dest' is returned
    unchanged.
    """
    assert starting_dir
    assert dest
    # Canonicalize and drop trailing separators so prefix comparison works.
    starting_dir = os.path.realpath(starting_dir).rstrip(os.path.sep)
    dest = os.path.realpath(dest).rstrip(os.path.sep)
    # NOTE(review): GetCommonPath is defined elsewhere in this file; assumed
    # to return the shared leading path of the two arguments — confirm.
    common_prefix = GetCommonPath(starting_dir, dest)
    if not common_prefix:
        return dest
    starting_dir = starting_dir[len(common_prefix):]
    dest = dest[len(common_prefix):]
    if not starting_dir:
        if not dest:
            # Identical paths.
            return '.'
        return os.path.join(".", dest)
    # One ".." per remaining path component of starting_dir; splitdrive
    # excludes a Windows drive letter from the count.
    seps = os.path.splitdrive(starting_dir)[1].count(os.path.sep) + 1
    return "{}{}".format((("..{}".format(os.path.sep)) * seps), dest)
def _calculate_mean_cvss():
    """Calculate the mean CVSS base score across all known vulnerabilities.

    Returns:
        The average calculated CVSS base score from the aggregation, or
        None when the aggregation produced no usable result.
    """
    results = db.osvdb.aggregate([
        {"$unwind": "$cvss_metrics"},
        {"$group": {
            "_id": "null",
            "avgCVSS": {"$avg": "$cvss_metrics.calculated_cvss_base_score"}
        }}
    ])
    logger.info("There are {} entries in this aggregation.".format(
        len(results['result'])))
    logger.debug("The headers are: {}".format(results['result'][0].keys()))
    try:
        avgCVSS = results['result'][0]['avgCVSS']
    # BUG FIX: the original bare `except:` swallowed everything, including
    # KeyboardInterrupt/SystemExit. Only a missing/empty/odd-shaped result
    # should fall back to None.
    except (KeyError, IndexError, TypeError):
        avgCVSS = None
    return avgCVSS
def decomposition_super1(centroid, highway, coherence, coordinates, input, size=300):
    """
    Function to perform Experiment 2: Differential Decomposition with level-specific weight
    Args:
        centroid: Cluster centroids of super pixels, indexed as centroid[0][cluster]
        highway: Super pixels after Stage I Super pixeling (currently unused; kept
            for interface compatibility)
        coherence: Coherence value per super-pixel cluster
        coordinates: Coordinates of pixels in each highway cluster
        input: 4 channel input data (array-like with .min/.max over axes (0, 1))
        size: edge length of the square output grid (default 300, the original
            hard-coded value)
    Returns:
        decom_super_coh: size x size grid of coherence estimates passed from
        super pixel to pixel level (0 where no cluster pixel lands)
    """
    # Zero-filled size x size grid; replaces the original nested append loops.
    decom_super_coh = [[0] * size for _ in range(size)]
    # Normalize input and centroids channel-wise to [0, 1].
    input_min = input.min(axis=(0, 1), keepdims=True)
    input_max = input.max(axis=(0, 1), keepdims=True)
    input_norm = (input - input_min) / (input_max - input_min)
    c_min = centroid.min(axis=(0, 1), keepdims=True)
    c_max = centroid.max(axis=(0, 1), keepdims=True)
    c_norm = (centroid - c_min) / (c_max - c_min)
    # For each cluster, weight its coherence by how close each member pixel is
    # to the cluster centre in normalized feature space.
    for c, cluster in enumerate(coordinates):
        clusterCenter = c_norm[0][c]
        for point in cluster:
            x, y = point[0], point[1]
            superPixel = input_norm[x, y]
            distance = norm(clusterCenter - superPixel)
            decom_super_coh[x][y] = coherence[c] * (1 - distance)
    return decom_super_coh
async def webhook_ack():
    """
    Acknowledge a webhook delivery. Expected payload shape:
    {
        "application_code" : "<e.g. uuid4>"
    }
    """
    return None
def drop_duplicate_titles(df_bso):
    """Drop lines whose titles are not unique.
    The most common 'title' is "Introduction", appearing eg. in books and
    journal issues.
    The 'source_title' is the name of the journal or book series, which is
    often not very helpful for very general collections that cover more than
    one scientific fields (ex: "SpringerBriefs in Applied Sciences and
    Technology").
    A solution would be to use the 'booktitle' but Unpaywall does not provide
    it.
    Parameters
    ----------
    df_bso
    Returns
    -------
    None — NOTE(review): despite the name and the docstring, the current
    implementation only PRINTS the duplicated titles; the drop and the
    return are commented out below. Confirm whether this is intentional
    (exploratory/debug state) before relying on it.
    """
    # Count how many rows share each title.
    df_title_cnt = df_bso['title'].value_counts().to_frame()
    df_title_cnt = df_title_cnt.reset_index()
    df_title_cnt.columns = ['title', 'count']
    # Print every title that occurs more than once.
    print(df_title_cnt[df_title_cnt['count'] > 1]['title'].to_string())
    # return df_bso
def get_deck_xs(bridge: Bridge, ctx: BuildContext) -> List[float]:
    """X positions of nodes on the bridge deck.
    The required X positions are collected first: pier tops, bridge ends,
    load positions, material-property grid lines and explicitly requested
    positions. Then, between each adjacent pair of required positions,
    evenly spaced X positions are inserted so no gap exceeds
    'bridge.base_mesh_deck_max_x'.
    """
    required = set()
    # From piers.
    for pier in bridge.supports:
        required.update(round_m(x) for x in pier.x_min_max_top())
    # Bridge ends.
    required.add(round_m(bridge.x_min))
    required.add(round_m(bridge.x_max))
    # From loads.
    required.update(round_m(point.x) for point in ctx.add_loads)
    # From material propertes.
    required.update(round_m(x) for x in get_deck_section_grid(bridge)[0])
    # Additional nodes requested by the Bridge.
    required.update(round_m(x) for x in bridge.additional_xs)
    all_xs = sorted(required)
    print_i(f"Required node X positions on deck (from all sources) =\n {all_xs}")
    deck_xs = set()
    # Fill each gap with enough evenly spaced nodes to respect the mesh size.
    for lo, hi in zip(all_xs, all_xs[1:]):
        count = math.ceil((hi - lo) / bridge.base_mesh_deck_max_x) + 1
        deck_xs.update(round_m(x) for x in np.linspace(lo, hi, num=count))
    return sorted(deck_xs)
def helicsGetFederateByName(fed_name: str) -> HelicsFederate:
    """
    Get an existing `helics.HelicsFederate` from a core by name.
    The federate must have been created by one of the other functions and at least one of the objects referencing the created federate must still be active in the process.
    **Parameters**
    - **`fed_name`** - The name of the federate to retrieve.
    **Returns**: `helics.HelicsFederate`.
    """
    fn = loadSym("helicsGetFederateByName")
    err = helicsErrorInitialize()
    handle = fn(cstring(fed_name), err)
    # Success path first; any non-zero error code becomes an exception.
    if err.error_code == 0:
        return HelicsFederate(handle)
    raise HelicsException("[" + str(err.error_code) + "] " + ffi.string(err.message).decode())
def persistant_property(*key_args):
    """Utility decorator for Persistable-based objects. Adds any arguments as properties
    that automatically loads and stores the value in the persistence table in the database.
    These arguments are created as permanent persistent properties.

    Each name in key_args becomes a property on the decorated class backed by
    `_<key>` / `_<key>_loaded` attributes; reads call load_persistent_property
    and writes call save_persistent_property, with failures logged rather than
    raised."""
    def _decorator(cls):
        @functools.wraps(cls)
        def wrapper(*args, **kwargs):
            for key in key_args:
                # this _closure function is required since we're using a for loop and a closure
                # see http://www.discoversdk.com/blog/closures-in-python-3
                def _closure(key=key):
                    internal_key = f'_{key}' # internal value
                    internal_key_loaded = f'_{key}_loaded' # boolean set to True after it's loaded
                    def _getter(self):
                        try:
                            # Refresh the cached value from the database; on
                            # failure fall through and return the cached one.
                            self.load_persistent_property(key)
                        except Exception as e:
                            logging.error(f"unable to load persistence key {key}: {e}")
                        return getattr(self, internal_key)
                    def _setter(self, value):
                        try:
                            # NOTE(review): save_persistent_property is called
                            # BEFORE retry() receives its result — presumably
                            # retry should wrap the callable instead; confirm.
                            retry(self.save_persistent_property(key, value))
                        except Exception as e:
                            logging.error(f"unable to save persistence key {key}: {e}")
                        setattr(self, internal_key, value)
                    # Seed class-level defaults, then install the property.
                    setattr(cls, internal_key, None)
                    setattr(cls, internal_key_loaded, False)
                    setattr(cls, key, property(_getter, _setter))
                _closure(key)
            return cls(*args, **kwargs)
        return wrapper
    return _decorator
def _initialize_ort_devices():
    """
    Determine available ORT devices, and place info about them to os.environ,
    they will be available in spawned subprocesses.
    Using only python ctypes and default lib provided with NVIDIA drivers.
    """
    # Guard so the (expensive) probing runs only once per process tree.
    if int(os.environ.get('ORT_DEVICES_INITIALIZED', 0)) == 0:
        os.environ['ORT_DEVICES_INITIALIZED'] = '1'
        os.environ['ORT_DEVICES_COUNT'] = '0'
        os.environ['CUDA_CACHE_MAXSIZE'] = '2147483647'
        try:
            # Try the platform-specific CUDA driver library names in turn.
            libnames = ('libcuda.so', 'libcuda.dylib', 'nvcuda.dll')
            for libname in libnames:
                try:
                    cuda = ctypes.CDLL(libname)
                except:
                    continue
                else:
                    break
            else:
                # No CUDA driver library found: leave ORT_DEVICES_COUNT at '0'.
                return
            nGpus = ctypes.c_int()
            name = b' ' * 200
            cc_major = ctypes.c_int()
            cc_minor = ctypes.c_int()
            freeMem = ctypes.c_size_t()
            totalMem = ctypes.c_size_t()
            device = ctypes.c_int()
            context = ctypes.c_void_p()
            devices = []
            if cuda.cuInit(0) == 0 and \
                cuda.cuDeviceGetCount(ctypes.byref(nGpus)) == 0:
                for i in range(nGpus.value):
                    # Skip any device whose handle/name/capability query fails.
                    if cuda.cuDeviceGet(ctypes.byref(device), i) != 0 or \
                        cuda.cuDeviceGetName(ctypes.c_char_p(name), len(name), device) != 0 or \
                        cuda.cuDeviceComputeCapability(ctypes.byref(cc_major), ctypes.byref(cc_minor), device) != 0:
                        continue
                    # A context is needed to query memory; detached right after.
                    if cuda.cuCtxCreate_v2(ctypes.byref(context), 0, device) == 0:
                        if cuda.cuMemGetInfo_v2(ctypes.byref(freeMem), ctypes.byref(totalMem)) == 0:
                            cc = cc_major.value * 10 + cc_minor.value
                            devices.append ({'name'      : name.split(b'\0', 1)[0].decode(),
                                             'total_mem' : totalMem.value,
                                             'free_mem'  : freeMem.value,
                                             'cc'        : cc
                                             })
                        cuda.cuCtxDetach(context)
        except Exception as e:
            print(f'CUDA devices initialization error: {e}')
            devices = []
        # Publish one env-var group per device for subprocesses to read.
        os.environ['ORT_DEVICES_COUNT'] = str(len(devices))
        for i, device in enumerate(devices):
            os.environ[f'ORT_DEVICE_{i}_NAME'] = device['name']
            os.environ[f'ORT_DEVICE_{i}_TOTAL_MEM'] = str(device['total_mem'])
            os.environ[f'ORT_DEVICE_{i}_FREE_MEM'] = str(device['free_mem'])
            os.environ[f'ORT_DEVICE_{i}_CC'] = str(device['cc'])
def states():
    """
    Get a dictionary of Backpage city names mapped to their respective states.
    Returns:
        dictionary of Backpage city names mapped to their states
    """
    states = {}
    fname = pkg_resources.resource_filename(__name__, 'resources/City_State_Pairs.csv')
    # BUG FIX: the 'U' (universal newlines) open mode was deprecated and then
    # removed in Python 3.11, so open(fname, 'rU') raises ValueError there.
    # The csv docs say to open csv files with newline='' instead.
    with open(fname, newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        for row in reader:
            states[row[0]] = row[1]
    return states
def matrix_set_diag(input_x, diagonal, k=0, alignment="RIGHT_LEFT"):
    """
    Calculate a batched matrix tensor with new batched diagonal values.
    Args:
        input_x (Tensor): a :math:`(..., M, N)` matrix to be set diag.
        diagonal (Tensor): a :math`(..., max_diag_len)`, or `(..., num_diags, max_diag_len)` vector to be placed to
            output's diags.
        k (int or list/tuple of int): diagonal offset(s). A single int selects
            one diagonal; a pair (k[0], k[1]) gives the lower and upper diagonal
            indices of a band. Default 0 (the main diagonal).
        alignment (str): Some diagonals are shorter than `max_diag_len` and need to be padded.
            `align` is a string specifying how superdiagonals and subdiagonals should be aligned,
            respectively. There are four possible alignments: "RIGHT_LEFT" (default),
            "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals to
            the right (left-pads the row) and subdiagonals to the left (right-pads the row).
    Returns:
        - Tensor, :math:`(...,M, N)`. a batched matrix with the same shape and values as `input`,
          except for the specified diagonals of the innermost matrices.
    Raises:
        ValueError: if `k` is neither an int nor a list/tuple.
    Supported Platforms:
        ``CPU`` ``GPU``
    Examples:
        >>> import numpy as onp
        >>> from mindspore.common import Tensor
        >>> from mindspore.scipy.ops_wrapper import matrix_set_diag
        >>> input_x = Tensor(
        >>>     onp.array([[[7, 7, 7, 7],[7, 7, 7, 7], [7, 7, 7, 7]],
        >>>                [[7, 7, 7, 7],[7, 7, 7, 7],[7, 7, 7, 7]]])).astype(onp.int)
        >>> diagonal = Tensor(onp.array([[1, 2, 3],[4, 5, 6]])).astype(onp.int)
        >>> output = matrix_set_diag(input_x, diagonal)
        >>> print(output)
        >>> [[[1 7 7 7]
              [7 2 7 7]
              [7 7 3 7]]
             [[4 7 7 7]
              [7 5 7 7]
              [7 7 6 7]]
    """
    matrix_set_diag_net = MatrixSetDiag(alignment)
    # Normalize k into a 2-element int32 vector (lower, upper) expected by the op.
    k_vec = mnp.zeros((2,), dtype=mstype.int32)
    if isinstance(k, int):
        k_vec += k
    elif isinstance(k, (list, tuple)):
        k_vec = k
    else:
        _raise_value_error("input k to indicate diagonal region is invalid.")
    k_vec = _to_tensor(k_vec, dtype=mstype.int32)
    output = matrix_set_diag_net(input_x, diagonal, k_vec)
    return output
def execute_payment(pp_req):
    """Executes a payment authorized by the client.

    Looks up the PayPal payment by id and executes it with the payer id;
    returns True on success, False otherwise."""
    payment = paypalrestsdk.Payment.find(pp_req['paymentId'])
    executed = payment.execute({"payer_id": pp_req['PayerID']})
    return True if executed else False
def FillSegmentWithNops(x):
    """
    Overwrite the entire segment containing 'x' with NOPs: every byte is
    patched to 0x90 and re-marked as code.
    """
    for addr in segment.SegmentAddresses(x):
        ida.patch_byte(addr, 0x90)
        ida.MakeCode(addr)
def create_structural_eqs(X, Y, G, n_nodes_se=40, n_nodes_M=100, activation_se='relu'):
    """
    Method to create structural equations (F:U->X) and the original prediction model (M:X->Y). This also calculates and stores residuals.
    Parameters
    ----------
    X : pandas DataFrame
        input features of the dataset
    Y : pandas Series
        target to be predicted
    G : networkx.classes.digraph.DiGraph
        causal graph of the data
    n_nodes_se : int
        number of nodes for the neural network of the structural equations (SE)
    n_nodes_M: int
        number of nodes in the neural network of the original model (M)
    activation_se: str
        type of activation for the structural equations
    Returns
    ----------
    struct_eq: keras.engine.functional.Functional - keras Model
        structural equations (F:U->X)
    final : keras.engine.functional.Functional - keras Model
        model in the latent space. Final model that uses structural equations and original prediction model: M^:U->Y. M^(u)=M(F(u))
    Additionally:
    In the folder data, residuals are stored
    In the folder models, the original prediction model (M - stored as "nn_model"), the per-node models and structural equations are stored.
    Performance metrics are printed in the terminal
    """
    # split dataset
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1, random_state=42)
    # take all nodes except target >>> classification
    nodes = [n for n in list(G.nodes) if n != Y.name]
    # Standardise data
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train.loc[:, :] = scaler.transform(X_train)
    X_test.loc[:, :] = scaler.transform(X_test)
    # get root nodes (no parents in the causal graph)
    root_nodes = [n for n, d in G.in_degree() if d == 0]
    # define variables where residuals and residual inputs will be stored
    U_train = X_train[root_nodes].copy()
    U_test = X_test[root_nodes].copy()
    res_inputs = []
    # define tf inputs, one for each node
    node_inputs = {n: keras.Input(shape=(1,), name=n) for n in nodes}
    # define dic to store the final X = F(U) with U = (roots, residuals) for each node
    # fill the root nodes directly with input layers
    X_n = {r: node_inputs[r] for r in root_nodes}
    # auxiliary while-loop variables
    added_nodes = []
    root_nodes_tmp = root_nodes
    while set(root_nodes_tmp) != set(nodes):
        # loop until all nodes are either root or dealt with (root_nodes_tmp
        # contains root nodes and is updated with dealt with nodes)
        for n in nodes:
            parents = list(G.predecessors(n))
            # go on only when:
            # n has parents
            # parents are root_nodes or nodes already dealt with
            # n is not a root node and has not been dealt with yet
            if G.in_degree[n] != 0 and set(parents).issubset(set(root_nodes_tmp)) and not n in root_nodes_tmp:
                print("dealing with ", n, " with parents: ", parents)
                # build the model from parents to node n
                if len(parents) == 1:
                    parent = parents[0]
                    inputs = node_inputs[parent]
                    conc = tf.identity(inputs)
                    X_train_p = X_train[parent].values
                    X_test_p = X_test[parent].values
                else:
                    inputs = [node_inputs[p] for p in parents]
                    conc = layers.Concatenate()(inputs)
                    X_train_p = [X_train[p].values for p in parents]
                    X_test_p = [X_test[p].values for p in parents]
                # small MLP: parents -> node n
                x = layers.Dense(n_nodes_se, activation=activation_se)(conc)
                x = layers.Dense(n_nodes_se, activation=activation_se)(x)
                out = layers.Dense(1)(x)
                ff = keras.Model(inputs=inputs, outputs=out, name=n)
                ff.compile(loss=tf.losses.MeanSquaredError(), optimizer=tf.optimizers.Adam(learning_rate=0.0001))
                hist = ff.fit(X_train_p, X_train[n].values, batch_size=512,
                              epochs=200, verbose=0, validation_split=0.25, callbacks=[early_stopping])
                # plot history
                # plt.plot(hist.history['val_loss'])
                # plt.plot(hist.history['loss'])
                # plt.show()
                #
                # plt.figure()
                # pred_tmp=ff.predict(X_test_p)
                # plt.plot(X_test[n].values, pred_tmp.reshape(1,-1)[0], '.', alpha=0.2)
                score = ff.evaluate(X_train_p, X_train[n].values, verbose=0)
                print('The TRAIN score for model node ', n, ' is ', score)
                score = ff.evaluate(X_test_p, X_test[n].values, verbose=0)
                print('The TEST score for model node ', n, ' is ', score)
                # save picture of the model
                # dot_img_file = 'model_nn' + node_tmp +'.png'
                # keras.utils.plot_model(nn, to_file=dot_img_file, show_shapes=True)
                # plot model graph
                # keras.utils.plot_model(ff, show_shapes=True)
                # Calculate residuals as the value of the node - the prediction of the model for that node
                pred = ff.predict(X_train_p).reshape(X_train.shape[0],)
                U_train['r_' + n] = X_train[n].values - pred
                pred = ff.predict(X_test_p).reshape(X_test.shape[0],)
                U_test['r_' + n] = X_test[n].values - pred
                # build input for residual of node n
                res = keras.Input(shape=(1,), name="r_" + n)
                res_inputs.append(res)
                # create the reconstructed node as the built model ff + the residual
                X_n[n] = layers.Add(name=n + "_reconstructed")([ff([X_n[p] for p in parents]), res])
                # Save nn of the structural equation
                ff.save('models/'+str(n)+'.h5')
                added_nodes.append(n)
        # Add the node in the roots node, so the graph can be explored in the next dependence level
        root_nodes_tmp = root_nodes_tmp + added_nodes
        added_nodes = []
    # Define the structural equation model
    inputs = [X_n[r] for r in root_nodes] + res_inputs
    # Reorder the inputs so they match the order of the list "nodes"
    col_name_inputs = [i.name[:-2].split('r_')[-1] for i in inputs]
    inputs = list(np.array(inputs)[[col_name_inputs.index(col) for col in nodes]])
    # concatenate outputs to build a stacked tensor (actually a vector),
    # respecting the order of the original nodes (i.e. same order of X_in)
    X_out = tf.concat([X_n[x] for x in nodes], axis=1, name='X_out')
    struct_eq_tmp = keras.Model(inputs=inputs, outputs=X_out, name="struct_eq_tmp")
    dim_input_se = U_train.shape[1]
    inputs = keras.Input(shape=(dim_input_se,), name="U")
    # define the model struct_eq U->X (split the U matrix into per-node columns)
    x = keras.layers.Lambda(lambda x: tf.split(x, num_or_size_splits=dim_input_se, axis=1))(inputs)
    out_x = struct_eq_tmp(x)
    struct_eq = keras.Model(inputs=inputs, outputs=out_x, name="struct_eq")
    struct_eq.compile(loss=tf.losses.MeanSquaredError(), optimizer=tf.optimizers.Adam())
    struct_eq.save('models/nn_struct_eq.h5')
    # Save residual dataset, columns ordered to match "nodes"
    columns_dataset_u = [i.split('r_')[-1] for i in U_train.columns]
    columns_dataset_u = list(np.array(U_train.columns)[[columns_dataset_u.index(col) for col in nodes]])
    U_train[columns_dataset_u].to_csv('data/res_train.csv', index=False)
    U_test[columns_dataset_u].to_csv('data/res_test.csv', index=False)
    ### Build M, standard ML model
    # model going from features X to target Y
    # the inputs are precisely the node inputs
    # X matrix -> Y
    X_in = keras.Input(shape=(len(nodes)), name='X_in')
    x = layers.Dense(n_nodes_M, activation='relu')(X_in)
    x = layers.Dense(int(n_nodes_M/2), activation='relu')(x)
    out = layers.Dense(2, activation='softmax')(x)
    M = keras.Model(inputs=X_in, outputs=out, name="M")
    M.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.Adam(learning_rate=0.001))
    hist = M.fit(X_train, y_train, batch_size=512, epochs=200, verbose=0,
                 validation_split=0.25, callbacks=[early_stopping])
    # plt.plot(hist.history['val_loss'])
    # plt.plot(hist.history['loss'])
    # plt.show()
    M.save('models/nn_model.h5')
    ### Build a model from root_nodes + residuals to Y, i.e. Y^ = M(F(U))
    # matrix U -> Y
    inputs = keras.Input(shape=(U_train.shape[1],), name="U")
    out = M(struct_eq(inputs))
    final = keras.Model(inputs=inputs, outputs=out, name="final")
    final.compile(loss='sparse_categorical_crossentropy', optimizer=tf.optimizers.Adam())
    # final.summary()
    # dot_img_file = 'final.png'
    # keras.utils.plot_model(final, to_file=dot_img_file, show_shapes=True)
    # final.save('final.h5')
    ### make predictions
    # Load final model (the weights are already computed in model M and
    # structural equation F, no need to fit)
    pred = final.predict(U_test)[:, 1]
    # Print report
    print(classification_report(y_test, pred > 0.5))
    return struct_eq, final
def twitch_checkdspstatus(_double_check: bool) -> bool:
    """
    Uses current Selenium browser to determine if DSP is online on Twitch.

    On the first pass (_double_check=False) a successful detection triggers a
    page refresh and a second check, so "online" is only reported when the
    live indicator is found twice in a row.
    :param _double_check: Internally used to recursively call function again to double check if DSP is online
    :return: True if DSP is online. False is DSP is offline.
    """
    try:
        if _double_check:
            logging.debug("Double Checking Run. Refresh and Wait")
            # Navigate away and back to force a fresh page load before re-checking.
            trackerglobals.BROWSER.get("https://www.duckduckgo.com")
            sleep(2.0)
            trackerglobals.BROWSER.get(trackerglobals.URL)
            sleep(5.0)
            logging.debug("Refreshed. Checking the Second Time.")
        logging.debug("Checking if DSP is online.")
        # Check if the "Follow and get notified when darksydephil is live" text overlay exists.
        # NOTE(review): the CSS attribute selector below appears to be missing
        # its closing ']' and uses a 'status' attribute — confirm this selector
        # actually matches the live indicator element on current Twitch markup.
        _ = WebDriverWait(trackerglobals.BROWSER, 2).until(EC.presence_of_element_located(
            (By.CSS_SELECTOR, 'a[href="/darksydephil"][status="tw-channel-status-indicator--live"'))) # noqa
        if _double_check:
            logging.debug("DSP is online. Returning True.")
            return True
        # First hit: recurse once with _double_check=True to confirm.
        return twitch_checkdspstatus(True)
    except TimeoutException:
        # Element not found within the wait window: treat as offline.
        logging.debug("DSP is offline. Returning False.")
        return False
def test_cold_start(sdc_builder, sdc_executor, cluster, db, stored_as_avro, external_table, partitioned):
    """Validate Cold Start no table and no data. This test also tests different types of table and methods of creation.
    The pipeline looks like:
        dev_raw_data_source >> expression_evaluator >> hive_metadata
        hive_metadata >> hadoop_fs
        hive_metadata >> hive_metastore
    """
    # Environment guards: skip configurations the stages cannot run on.
    if getattr(cluster, 'kerberized_services', False) and 'hive' in cluster.kerberized_services:
        pytest.skip('Test runs only in non-kerberized environment till SDC-9324 is fixed.')
    # based on SDC-13915
    if (isinstance(cluster, AmbariCluster) and Version(cluster.version) == Version('3.1')
            and Version(sdc_builder.version) < Version('3.8.1')):
        pytest.skip('Hive stages not available on HDP 3.1.0.0 for SDC versions before 3.8.1')
    table_name = get_random_string(string.ascii_lowercase, 20)
    # External tables get an explicit path under /tmp; managed tables use the
    # default warehouse location (empty template).
    db_for_path = 'default' if not db else f'{db}.db' if db != 'default' else db
    database_location_for_table_path = (f'/tmp/sdc/hive/warehouse/{db_for_path}'
                                        if external_table else f'/user/hive/warehouse/{db_for_path}')
    table_path_template = f'{database_location_for_table_path}/{table_name}' if external_table else ''
    raw_data = [dict(id=1, name='abc'), dict(id=2, name='def'), dict(id=3, name='ghi')]
    dev_raw_data_source_data = ''.join(json.dumps(d) for d in raw_data)
    pipeline_builder = sdc_builder.get_pipeline_builder()
    dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
    dev_raw_data_source.set_attributes(data_format='JSON',
                                       raw_data=dev_raw_data_source_data,
                                       stop_after_first_batch=True)
    # Record headers carry the target db/table so Hive Metadata can route records.
    expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
    expression_evaluator.set_attributes(header_attribute_expressions=[{'attributeToSet': 'db',
                                                                       'headerAttributeExpression': db},
                                                                      {'attributeToSet': 'table_name',
                                                                       'headerAttributeExpression': table_name}])
    # Optional single partition column 'dt' with today's date.
    partition_configuration = [{'name': 'dt', 'valueType': 'STRING',
                                'valueEL': '${YYYY()}-${MM()}-${DD()}'}] if partitioned else []
    partition_path_template = 'dt=${YYYY()}-${MM()}-${DD()}' if partitioned else ''
    hive_metadata = pipeline_builder.add_stage('Hive Metadata')
    hive_metadata.set_attributes(data_format='AVRO',
                                 database_expression="${record:attribute('db')}",
                                 external_table=external_table,
                                 table_path_template=table_path_template,
                                 partition_configuration=partition_configuration,
                                 partition_path_template=partition_path_template,
                                 decimal_scale_expression='5',
                                 decimal_precision_expression='10',
                                 table_name="${record:attribute('table_name')}")
    hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
    hadoop_fs.set_attributes(avro_schema_location='HEADER',
                             data_format='AVRO',
                             directory_in_header=True,
                             use_roll_attribute=True)
    hive_metastore = pipeline_builder.add_stage('Hive Metastore', type='destination')
    hive_metastore.set_attributes(stored_as_avro=stored_as_avro)
    dev_raw_data_source >> expression_evaluator >> hive_metadata
    hive_metadata >> hadoop_fs
    hive_metadata >> hive_metastore
    pipeline = pipeline_builder.build(title='Hive drift test - Cold Start').configure_for_environment(cluster)
    sdc_executor.add_pipeline(pipeline)
    hive_cursor = cluster.hive.client.cursor()
    if db:
        hive_cursor.execute(f'CREATE DATABASE IF NOT EXISTS`{db}`')
    try:
        sdc_executor.start_pipeline(pipeline).wait_for_finished()
        # RELOAD makes the freshly created table visible to the cursor.
        hive_cursor.execute('RELOAD {0}'.format(_get_qualified_table_name(db, table_name)))
        hive_cursor.execute('SELECT * from {0}'.format(_get_qualified_table_name(db, table_name)))
        hive_values = [list(row) for row in hive_cursor.fetchall()]
        raw_values = [list(row.values()) for row in raw_data]
        if partitioned:
            # Partitioned reads append the 'dt' partition value to every row.
            for i in range(len(raw_values)):
                raw_values[i] = raw_values[i] + [datetime.now().strftime('%Y-%m-%d')]
        assert sorted(hive_values) == sorted(raw_values)
    finally:
        # Always clean up the table/database/HDFS artifacts created above.
        logger.info('Dropping table %s in Hive...', _get_qualified_table_name(db, table_name))
        hive_cursor.execute('DROP TABLE {0}'.format(_get_qualified_table_name(db, table_name)))
        if db and db != 'default':
            logger.info('Dropping Database %s in Hive...', db)
            hive_cursor.execute('DROP DATABASE IF EXISTS`{0}`'.format(db))
        if external_table:
            logger.info('Deleting Hadoop FS directory %s ...', database_location_for_table_path)
            cluster.hdfs.client.delete(database_location_for_table_path, recursive=True)
def random_walk(humans, dt, energy, temperature):
    """
    Calculates location, speed and acceleration by adding random values to the speed.

    Args:
        humans (list): list of all humans
        dt (float): time step in which the movement is calculated
        energy (float): total amount of movement energy shared by all humans
        temperature (float): scales the magnitude of the random velocity kick

    Returns:
        humans (list): list of all humans (mutated in place via ``h.update``)
    """
    # Fixes: removed the unused `old_humans` alias and documented the
    # previously-undocumented `temperature` parameter.
    new_energy = 0
    for i, h in enumerate(humans):
        infection(humans, h, i)
        new_location = h.location + dt * h.velocity
        # Random kick per axis, scaled by temperature.
        velocity_gen_x = random.gauss(0, 1)
        velocity_gen_y = random.gauss(0, 1)
        velocity_random = [
            velocity_gen_x * float(temperature)/15, velocity_gen_y * float(temperature)/15]
        # NOTE(review): assumes h.velocity is a numpy array so '+' is
        # element-wise (a plain list would concatenate) -- confirm upstream.
        new_velocity = h.velocity + velocity_random
        # Rescale so the running total of squared speeds matches `energy`.
        # NOTE(review): raises ZeroDivisionError if all velocities and
        # temperature are zero (new_energy == 0) -- confirm callers prevent this.
        new_energy += np.linalg.norm(new_velocity)**2
        factor = math.sqrt(energy / new_energy)
        new_velocity = new_velocity*factor
        # Clamp individual speed when its share of the total energy is too large.
        abs_speed = np.linalg.norm(new_velocity)**2
        factor_v = math.sqrt(abs_speed / energy)
        if factor_v > 3*(1/len(humans)):
            scaling = 0.03/factor_v
            new_velocity = new_velocity*scaling
        h.update(new_location, new_velocity)
    return humans
def format_name(name_format: str, state: State):
    """Format a checkpoint filename according to ``name_format`` and the training :class:`~.State`.

    Available format variables:

    * distributed info from :mod:`~.dist`: ``{rank}``, ``{local_rank}``,
      ``{world_size}``, ``{local_world_size}``, ``{node_rank}``
    * timer counters from :meth:`~composer.core.time.Timer`: ``{epoch}``,
      ``{batch}``, ``{batch_in_epoch}``, ``{sample}``, ``{sample_in_epoch}``,
      ``{token}``, ``{token_in_epoch}``

    For example, with an epoch count of ``1`` and a batch count of ``42``,
    ``"ep{epoch}-ba{batch}"`` formats to ``'ep1-ba42'``.

    .. note::
        If using DeepSpeed and ``name_format`` does not end with a tarfile archive
        extension (``'.tar'``, ``'.tgz'``, ``'.tar.gz'``, ``'.tar.bz2'``, or
        ``'.tar.lzma'``), then ``'.tar'`` is appended, as DeepSpeed saves model and
        optimizer states in separate files within a tarball. With DeepSpeed, every
        rank calls this function, so ``'{rank}'`` should appear in ``name_format``
        so that each process writes its own file (e.g.
        ``"ep{epoch}-ba{batch}-rank{rank}"`` -> ``'ep1-ba42-rank0.tar'``).
    """
    timer = state.timer
    format_variables = {
        'rank': dist.get_global_rank(),
        'local_rank': dist.get_local_rank(),
        'world_size': dist.get_world_size(),
        'local_world_size': dist.get_local_world_size(),
        'node_rank': dist.get_node_rank(),
        'epoch': int(timer.epoch),
        'batch': int(timer.batch),
        'batch_in_epoch': int(timer.batch_in_epoch),
        'sample': int(timer.sample),
        'sample_in_epoch': int(timer.sample_in_epoch),
        'token': int(timer.token),
        'token_in_epoch': int(timer.token_in_epoch),
    }
    checkpoint_name = name_format.format(**format_variables)
    if state.is_model_deepspeed and not _is_archive(checkpoint_name):
        # DeepSpeed requires tarballs; append `.tar`.
        checkpoint_name += ".tar"
    return checkpoint_name
def build_arglist(builder, nb):
    """
    arglist: (argument ',')* ( '*' test [',' '**' test] |
                               '**' test |
                               argument |
                               [argument ','] )
    """
    atoms = get_atoms(builder, nb)
    arguments, stararg, dstararg = parse_argument(atoms, builder)
    # No atoms -> no source position to attach; use the -1 sentinel.
    lineno = atoms[0].lineno if atoms else -1
    builder.push(ArglistObject(arguments, stararg, dstararg, lineno))
def allsync(local_values, comm=None, op=None):
    """Perform an MPI allreduce when a communicator is provided.

    Without a communicator the values are returned unchanged. Without an
    explicit reduction operation, ``MPI.MAX`` is used.
    """
    if comm is not None:
        if op is None:
            # Imported lazily so mpi4py stays an optional dependency.
            from mpi4py import MPI
            op = MPI.MAX
        local_values = comm.allreduce(local_values, op=op)
    return local_values
def output_variant_tsv(interpretation_request, force_update=False):
    """Output a variant TSV to match Alamut Batch format for annotation.

    If a variant TSV for the given interpretation_request (version, and genome
    build) already exists nothing is written unless force_update is True. When
    writing, each tiered variant is output with the zygosity of the proband,
    mother and father (where known).

    Args:
        interpretation_request: JSON representation of an
            interpretation_request (output of get_interpretation_request_json).
        force_update: Boolean switch to enforce output file overwriting.
    """
    # Build the output path for the existence check.
    ir_id, ir_version = (interpretation_request['interpretation_request_id']
                         .split('-'))
    variant_tsv = '{}_{}_{}_{}_tiered_variants.tsv'.format(
        interpretation_request['family_id'], ir_id, ir_version,
        interpretation_request['assembly'])
    variant_tsv_path = os.path.join(os.getcwd(), 'output', variant_tsv)
    # Skip entirely when the file already exists and no overwrite is forced.
    if os.path.isfile(variant_tsv_path) and force_update is not True:
        return
    print('Writing variants to {}'.format(variant_tsv_path))
    pedigree = interpretation_request['simple_pedigree']
    variants = (interpretation_request['interpretation_request_data']
                ['interpretation_request_data']['json_request']
                ['TieredVariants'])
    with open(variant_tsv_path, 'w') as fout:
        # Header row for human readability.
        fout.write('#id\tchr\tposition\tref\talt\tTier\tproband_zygosity\t'
                   'mother_zygosity\tfather_zygosity\n')
        for variant in variants:
            # One tab-separated row per tiered variant.
            row = [
                variant['dbSNPid'],
                variant['chromosome'],
                str(variant['position']),
                variant['reference'],
                variant['alternate'],
                str(get_variant_tier(variant)),
                get_call_zygosity(variant, pedigree, 'Proband'),
                get_call_zygosity(variant, pedigree, 'Mother'),
                get_call_zygosity(variant, pedigree, 'Father'),
            ]
            fout.write('\t'.join(row) + '\n')
def vec_bin_array(arr, m):
    """
    Arguments:
      arr: Numpy array of positive integers
      m: Number of bits of each integer to retain

    Returns a copy of arr with every element replaced with a bit vector.
    Bits encoded as int8's.
    """
    # Render each integer as a zero-padded binary string of at least m chars.
    to_bits = np.vectorize(lambda value: np.binary_repr(value).zfill(m))
    bit_strings = to_bits(arr)
    out = np.zeros(list(arr.shape) + [m], dtype=np.int8)
    for pos in range(m):
        # Bind `pos` as a default so each vectorized lambda sees its own index.
        pick = np.vectorize(lambda s, p=pos: s[p] == '1')
        out[..., pos] = pick(bit_strings).astype("int8")
    return out
def make_api_links(file_path, file_type):
    """Build links to automodapi documentation.

    Rewrites every ``~gammapy.<module>`` API reference found in the file into
    an HTML anchor (HTML files) or a Markdown link (``ipynb`` notebooks),
    pointing at the generated API documentation, and writes the file back.

    Parameters
    ----------
    file_path : pathlib.Path
        File whose text is read, transformed, and written back in place.
    file_type : str
        ``"ipynb"`` for notebooks (Markdown output, absolute doc URLs);
        anything else is treated as HTML with relative links.
    """
    # Defaults assume an HTML file with links relative to the docs root.
    start_link = "../"
    re_api = re.compile(r'<span class="pre">~gammapy\.(.*?)</span>')
    if file_type == "ipynb":
        # Notebooks use backtick syntax and link to the absolute online docs.
        start_link = URL_DOCS
        re_api = re.compile(r"`~gammapy\.(.*?)`")
    txt = file_path.read_text(encoding="utf-8")
    for module in re_api.findall(txt):
        # Candidate end urls for this reference, tried in order below.
        alt_links = []
        submodules = module.split(".")
        if len(submodules) == 1:
            target = submodules[0]
            alt_links.append(f"{target}/index.html")
        elif len(submodules) == 2:
            target = f"{submodules[0]}.{submodules[1]}"
            alt_links.append(f"api/gammapy.{target}.html#gammapy.{target}")
            alt_links.append(f"{submodules[0]}/index.html#module-gammapy.{target}")
            # NOTE(review): no '/' between '{submodules[1]}' and 'index.html'
            # below -- looks like a typo; confirm against the built docs tree.
            alt_links.append(
                f"{submodules[0]}/{submodules[1]}index.html#module-gammapy.{target}"
            )
        elif len(submodules) == 3:
            target = f"{submodules[0]}.{submodules[1]}"
            alt_links.append(
                f"api/gammapy.{target}.html#gammapy.{target}.{submodules[2]}"
            )
            alt_links.append(
                f"api/gammapy.{target}.{submodules[2]}.html#gammapy.{target}.{submodules[2]}"
            )
        elif len(submodules) == 4:
            target = f"{submodules[0]}.{submodules[1]}.{submodules[2]}"
            alt_links.append(
                f"api/gammapy.{target}.html#gammapy.{target}.{submodules[3]}"
            )
        else:
            # Deeper nesting is not handled; leave the reference untouched.
            continue
        # Pick the first candidate whose target file exists on disk.
        broken = True
        for link in alt_links:
            search_file = re.sub(r"(#.*)$", "", link)
            search_path = PATH_DOC / search_file
            if search_path.is_file():
                link_api = f"{start_link}{link}"
                link_api = link_api.replace("()", "")
                broken = False
                break
        if broken:
            # NOTE: `search_path` here is the LAST candidate that was tried.
            if file_type == "ipynb":
                log.warning(f"{str(search_path)} does not exist in {file_path}.")
            continue
        # Replace the raw reference syntax with a real link.
        str_api = f'<span class="pre">~gammapy.{module}</span>'
        label_api = str_api.replace('<span class="pre">', "")
        label_api = label_api.replace("</span>", "")
        label_api = label_api.replace("~", "")
        replace_api = f"<a href='{link_api}'>{label_api}</a>"
        if file_type == "ipynb":
            str_api = f"`~gammapy.{module}`"
            label_api = str_api.replace("`", "")
            label_api = label_api.replace("~", "")
            replace_api = f"[[{label_api}]({link_api})]"
        txt = txt.replace(str_api, replace_api)
    # Rewrite absolute links to rst/html doc files.
    if file_type == "ipynb":
        # Notebooks: pin the dev URL to the released docs version.
        url_docs_release = URL_DOCS.replace("dev", release_number_docs)
        txt = txt.replace(URL_DOCS, url_docs_release)
    else:
        # HTML: turn absolute doc URLs into relative ones.
        repl = r"..\/\1html\2"
        txt = re.sub(
            pattern=URL_DOCS + r"(.*?)html(\)|#)",
            repl=repl,
            string=txt,
            flags=re.M | re.I,
        )
    file_path.write_text(txt, encoding="utf-8")
def parse_bafs(stream: Iterator[str]) -> List[BAF]:
    """Parses allelic counts output from GATK ModelSegments, which is a SAM-style
    header comprising lines starting with @ followed by single line with column
    names (CONTIG, POSITION, REF_COUNT, ALT_COUNT, REF_NUCLEOTIDE, ALT_NUCLEOTIDE).

    :param stream: iterator of lines from the allelic-counts file
    :return: one BAF record per data line
    """
    skip_header(stream)
    bafs: List[BAF] = []
    for line in stream:
        # BUG FIX: strip the line terminator before splitting; previously the
        # final field (alt_nucleotide) kept a trailing '\n' (or '\r\n').
        chromosome, position, ref_count, alt_count, ref_nucleotide, alt_nucleotide = \
            line.rstrip('\r\n').split('\t')
        baf = BAF(chromosome=chromosome,
                  position=int(position),
                  ref_count=int(ref_count),
                  alt_count=int(alt_count),
                  ref_nucleotide=ref_nucleotide,
                  alt_nucleotide=alt_nucleotide)
        bafs.append(baf)
    return bafs
def logpdf(x, chi, c):
    """
    Logarithm of the PDF of the ARGUS probability distribution.
    """
    if c <= 0:
        raise ValueError('c must be positive')
    if chi <= 0:
        raise ValueError('chi must be positive')
    if x < 0 or x > c:
        # Outside the support [0, c]: density is 0, so log-density is -inf.
        return mpmath.mp.ninf
    with mpmath.extradps(5):
        x, chi, c = (mpmath.mpf(v) for v in (x, chi, c))
        z = x/c
        # log of the normalization constant chi^3 / (sqrt(2*pi) * Psi(chi))
        log_norm = (3*mpmath.log(chi)
                    - mpmath.log(2*mpmath.pi)/2
                    - mpmath.log(_psi(chi)))
        log_scaled = mpmath.log(z) - mpmath.log(c)
        log_root = mpmath.log1p(-z**2)/2
        exponent = -chi**2/2*(1 - z**2)
        return log_norm + log_scaled + log_root + exponent
def sample(colors: list, max_colors: int = 8, sensitivity: int = 75) -> list:
    """
    Sample most common colors from a list of RGB tuples.

    :param colors: list of RGB color tuples eg. [(0, 0, 0), (255, 255, 255)]
    :param max_colors: maximum number of colors to return
    :param sensitivity: how perceptively different (Euclidean Distance) a color
        must be from others to be included in the sampled palette.
    :returns: list of most common colors in RGB tuples (255, 255, 255)
    """
    palette = []
    for candidate in colors:
        # Stop once the palette has reached the requested size.
        if len(palette) == max_colors:
            break
        # Clean up any slight color differences in PIL sampling.
        candidate = normalize_rgb_values(candidate)
        # The most common color (first seen) is always kept.
        if palette == []:
            palette.append(candidate)
            continue
        # Keep only colors sufficiently distant from everything already kept.
        distinct = all(
            color_distance(candidate, kept) > sensitivity for kept in palette
        )
        if distinct:
            palette.append(candidate)
    return palette
def tf_example_to_feature_description(example,
                                      num_timesteps=DEFAULT_NUM_TIMESTEPS):
    """Takes a string tensor encoding an tf example and returns its features."""
    if not tf.executing_eagerly():
        raise AssertionError(
            'tf_example_to_reverb_sample() only works under eager mode.')
    example = tf.train.Example.FromString(example.numpy())
    features = {}
    for name, feature in example.features.feature.items():
        length = len(feature.float_list.value)
        # Every feature must split evenly into num_timesteps rows.
        if length % num_timesteps:
            raise ValueError('Unexpected feature length %d. It should be divisible '
                             'by num_timesteps: %d' % (length, num_timesteps))
        features[name] = tf.io.FixedLenFeature(
            [num_timesteps, length // num_timesteps], tf.float32)
    return features
def setupmethod(f: F) -> F:
    """Wraps a method so that it performs a check in debug mode if the
    first request was already handled.
    """
    def wrapper_func(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
        # Setup still allowed: delegate straight to the wrapped method.
        if not self._is_setup_finished():
            return f(self, *args, **kwargs)
        raise AssertionError(
            "A setup function was called after the first request "
            "was handled. This usually indicates a bug in the"
            " application where a module was not imported and"
            " decorators or other functionality was called too"
            " late.\nTo fix this make sure to import all your view"
            " modules, database models, and everything related at a"
            " central place before the application starts serving"
            " requests."
        )
    return t.cast(F, update_wrapper(wrapper_func, f))
def generateStructuredGridPoints(nx, ny, v0, v1, v2, v3):
    """
    Generate structured grid points
    :param nx: number of x cells
    :param ny: number of y cells
    :param v0: south west corner
    :param v1: south east corner
    :param v2: north east corner
    :param v3: north west corner
    :returns array of size (nx, ny, 3)
    """
    # Parametric coordinates on the unit square (node counts = cells + 1).
    xi = numpy.linspace(0., 1., nx + 1)
    eta = numpy.linspace(0., 1., ny + 1)
    xx, yy = numpy.meshgrid(xi, eta, indexing='ij')
    # Bilinear shape functions pairing each corner with its weight field.
    corner_weights = (
        ((1.0 - xx) * (1.0 - yy), v0),
        (xx * (1.0 - yy), v1),
        (xx * yy, v2),
        ((1.0 - xx) * yy, v3),
    )
    # Blend the four corners component by component.
    spts = numpy.zeros(list(xx.shape) + [3], numpy.float64)
    for j in range(3):
        spts[..., j] = sum(weight * corner[j] for weight, corner in corner_weights)
    return spts
def Keywords(lang_id=0):
    """Return the specified keywords list.

    @param lang_id: used to select specific subset of keywords
    """
    # The same keyword set is currently returned for every lang_id.
    return [PY_KW, PY_BIN]
def cli() -> None:
    """Command line interface for the TESMART RT-05 device.

    When no options are supplied, configuration is taken from config.toml.
    """
def clear(keyword):
    """``clear`` property validation."""
    # Accepted CSS values for the 'clear' property.
    valid_values = ('left', 'right', 'both', 'none')
    return keyword in valid_values
def PermissionsListOfUser(perm_list: List[str]) -> List[str]:
    """
    Takes a list of items and asserts that all of them are in the permissions list of
    a user.

    :param perm_list: A list of permissions encoded as ``str``
    :return: The input perm_list
    :raises Invalid: If the user does not have a permission in the list
    """
    # Short-circuits at the first missing permission, like the original loop.
    if isinstance(perm_list, list) and all(
            flask.g.user.has_permission(perm) for perm in perm_list):
        return perm_list
    raise Invalid('permissions must be in the user\'s permissions list')
def PrepareForBuild(input_proto, output_proto, _config):
  """Prepare to build toolchain artifacts.

  The handlers (from _TOOLCHAIN_ARTIFACT_HANDLERS above) are called with:
      artifact_name (str): name of the artifact type.
      chroot (chroot_lib.Chroot): chroot.  Will be None if the chroot has not
          yet been created.
      sysroot_path (str): sysroot path inside the chroot (e.g., /build/atlas).
          Will be an empty string if the sysroot has not yet been created.
      build_target_name (str): name of the build target (e.g., atlas).  Will be
          an empty string if the sysroot has not yet been created.
      input_artifacts ({(str) name:[str gs_locations]}): locations for possible
          input artifacts.  The handler is expected to know which keys it should
          be using, and ignore any keys that it does not understand.
      profile_info ({(str) name: (str) value}) Dictionary containing profile
          information.

  They locate and modify any ebuilds and/or source required for the artifact
  being created, then return a value from toolchain_util.PrepareForBuildReturn.

  This function sets output_proto.build_relevance to the result.

  Args:
    input_proto (PrepareForToolchainBuildRequest): The input proto
    output_proto (PrepareForToolchainBuildResponse): The output proto
    _config (api_config.ApiConfig): The API call config.
  """
  # The chroot may not exist yet; handlers accept None in that case.
  if input_proto.chroot.path:
    chroot = controller_util.ParseChroot(input_proto.chroot)
  else:
    chroot = None

  # Group input-artifact GS locations by handler name so each handler can
  # pick out the keys it understands.
  input_artifacts = collections.defaultdict(list)
  for art in input_proto.input_artifacts:
    item = _TOOLCHAIN_ARTIFACT_HANDLERS.get(art.input_artifact_type)
    if item:
      input_artifacts[item.name].extend(
          ['gs://%s' % str(x) for x in art.input_artifact_gs_locations])

  profile_info = _GetProfileInfoDict(input_proto.profile_info)

  # Collect the PrepareForBuildReturn values from every requested handler.
  results = set()
  sysroot_path = input_proto.sysroot.path
  build_target = input_proto.sysroot.build_target.name
  for artifact_type in input_proto.artifact_types:
    # Unknown artifact_types are an error.
    handler = _TOOLCHAIN_ARTIFACT_HANDLERS[artifact_type]
    if handler.prepare:
      results.add(handler.prepare(
          handler.name, chroot, sysroot_path, build_target, input_artifacts,
          profile_info))

  # Translate the returns from the handlers we called.
  #   If any NEEDED => NEEDED
  #   elif any UNKNOWN => UNKNOWN
  #   elif any POINTLESS => POINTLESS
  #   else UNKNOWN.
  # (The final else also covers the no-handlers-called case.)
  if toolchain_util.PrepareForBuildReturn.NEEDED in results:
    output_proto.build_relevance = PrepareForBuildResponse.NEEDED
  elif toolchain_util.PrepareForBuildReturn.UNKNOWN in results:
    output_proto.build_relevance = PrepareForBuildResponse.UNKNOWN
  elif toolchain_util.PrepareForBuildReturn.POINTLESS in results:
    output_proto.build_relevance = PrepareForBuildResponse.POINTLESS
  else:
    output_proto.build_relevance = PrepareForBuildResponse.UNKNOWN
  return controller.RETURN_CODE_SUCCESS
def for_properties(path: Path = Path('config.json')):
    """
    Simple externalized configuration loader. Properties are loaded from a file containing a JSON object.

    :param path: Path to the file.
    :return: Simple namespace with the key/value pairs matching the loaded json object.
    """
    if not path or not path.exists():
        raise ValueError(f"Configuration file [{path}] doesn't exist")

    def as_namespace(mapping):
        # Turn every decoded JSON object into attribute-style access.
        return SimpleNamespace(**mapping)

    return json.loads(path.read_text(), object_hook=as_namespace)
async def discordHandleGameChange(cls:"PhaazebotDiscord", event_list:List["StatusEntry"]) -> None:
	"""
	With a list status entry's from twitch,
	we format and send all gamechange announcements to all discord channels.

	Looks up every discord_twitch_alert row matching one of the event's
	twitch channel ids, builds an embed per (event, alert) pair and sends it
	to the configured discord channel.
	"""
	if not cls: return # Discord Client not ready or off

	# Build a comma-separated id list for the SQL IN clause; "0" keeps the
	# query syntactically valid when the event list is empty.
	event_channel_list:str = ",".join(Event.channel_id for Event in event_list)
	if not event_channel_list: event_channel_list = "0"

	res:List[dict] = cls.BASE.PhaazeDB.selectQuery(f"""
		SELECT
			`discord_twitch_alert`.`twitch_channel_id`,
			`discord_twitch_alert`.`discord_channel_id`,
			`discord_twitch_alert`.`suppress_gamechange`
		FROM `discord_twitch_alert`
		WHERE `discord_twitch_alert`.`twitch_channel_id` IN ({event_channel_list})"""
	)

	for db_entry in res:
		Event:"StatusEntry" = getStreamFromDBResult(event_list, db_entry["twitch_channel_id"])
		if Event is None: continue # should never happen

		# ignore gamechange events for alert
		if db_entry.get("suppress_gamechange", 0): continue

		# we only care about live alerts,
		# there should be no other types, but we go save here
		if Event.Stream.stream_type != "live": continue

		# try to catch invalid twitch api results
		if not Event.User:
			cls.BASE.Logger.warning(f"Can't find Twitch User ID:{Event.channel_id}")
			continue

		if not Event.Game:
			cls.BASE.Logger.warning(f"Can't find Twitch Game ID:{Event.game_id}")
			continue

		# Compose the announcement embed for this stream.
		stream_status:str = Event.Stream.title or "[N/A]"
		stream_url:str = TWITCH_STREAM_URL + (Event.User.login or "")
		stream_description:str = f":game_die: Now Playing: **{Event.Game.name}**"

		Emb:discord.Embed = discord.Embed(
			title=stream_status,
			url=stream_url,
			description=stream_description,
			color=TWITCH_COLOR
		)

		Emb.set_author(
			name=Event.User.display_name,
			url=stream_url,
			icon_url=Event.User.profile_image_url
		)

		Emb.set_footer(
			text="Provided by twitch.tv",
			icon_url=cls.BASE.Vars.logo_twitch
		)

		Emb.set_thumbnail(url=Event.User.profile_image_url)

		# Sending may fail for deleted channels / missing permissions; one
		# failing channel must not abort the remaining alerts.
		discord_chan_id:str = db_entry.get("discord_channel_id", "-1")
		try:
			Chan:discord.TextChannel = cls.get_channel(int(discord_chan_id))
			if not Chan: continue
			await Chan.send(embed=Emb)
		except:
			cls.BASE.Logger.warning(f"Can't send Twitch Alert to Discord Channel ID: {discord_chan_id}")
def guess_components(paths, stop_words=None, n_clusters=8):
    """Guess components from an iterable of paths.

    Args:
        paths: list of string containing file paths in the project.
        stop_words: stop words. Passed to TfidfVectorizer.
        n_clusters: number of clusters. Passed to MiniBatchKMeans.

    Returns:
        pandas.DataFrame

    See Also:
        sklearn.feature_extraction.text.TfidfVectorizer
        sklearn.cluster.MiniBatchKMeans
    """
    # Cluster directories (not file names) after normalizing path separators.
    directories = [os.path.dirname(path.replace("\\", "/")) for path in paths]
    vectorizer = sklearn.feature_extraction.text.TfidfVectorizer(stop_words=stop_words)
    tfidf_matrix = vectorizer.fit_transform(directories)
    clustering = sklearn.cluster.MiniBatchKMeans(
        compute_labels=True, n_clusters=n_clusters)
    clustering.fit(tfidf_matrix)

    def name_for_center(center, threshold):
        # Rank vocabulary terms by their weight in this cluster center and
        # join the strong ones into a dotted component name.
        df = pd.DataFrame(
            data={"feature": vectorizer.get_feature_names(), "weight": center}
        )
        df.sort_values(by=["weight", "feature"], ascending=False, inplace=True)
        if (df["weight"] <= threshold).all():
            return ""
        strong = df[df["weight"] > threshold]
        return ".".join(strong["feature"].tolist())

    cluster_names = [
        name_for_center(center, 0.4) for center in clustering.cluster_centers_
    ]
    components = [cluster_names[label] for label in clustering.labels_]
    result = pd.DataFrame(data={"path": paths, "component": components})
    result.sort_values(by="component", inplace=True)
    return result
def get_instance_tags(ec2_client: boto3.Session.client, instance_id: str):
    """Get instance tags to parse through for selective hardening"""
    # Ask EC2 for all tags attached to this instance id.
    response = ec2_client.describe_tags(
        Filters=[
            {
                "Name": "resource-id",
                "Values": [
                    instance_id,
                ],
            },
        ],
    )
    # Only the tag values are relevant for the hardening checks.
    return [tag["Value"] for tag in response["Tags"]]
def create_app(config_name=None) -> Flask:
    """Create and configure a Flask application instance.

    Args:
        config_name: Key into the ``config`` mapping selecting which
            configuration object to load.

    Returns:
        The configured :class:`~flask.Flask` application.
    """
    # BUG FIX: pass the module-name variable, not the literal string
    # "__name__" -- Flask uses import_name to locate resources (templates,
    # static files) relative to the defining module.
    app = Flask(__name__)
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    # Imported here to avoid a circular import at module load time.
    from scorer.controller import prediction_app
    app.register_blueprint(prediction_app)
    _logger.debug("Application instance created")
    return app
def getmasterxpub(client: HardwareWalletClient, addrtype: AddressType = AddressType.WIT, account: int = 0) -> Dict[str, str]:
    """
    Get the master extended public key from a client.

    :param client: The client to interact with
    :param addrtype: The address type selecting the derivation scheme
        (defaults to segwit)
    :param account: The account number used in the derivation path
    :return: A dictionary containing the extended public key,
        returned as ``{"xpub": <xpub string>}``.

    .. note:: NOTE(review): the derivation path depends on ``addrtype`` and
        ``account`` (delegated to ``client.get_master_xpub``); the previously
        documented fixed path ``m/44'/0'/0'`` only matches the legacy address
        type -- confirm against the client implementation.
    """
    return {"xpub": client.get_master_xpub(addrtype, account).to_string()}
def test_FilterAnalyzer():
    """Testing the FilterAnalyzer """
    # Build one fast (50 rad/s, DC offset +10) and one slow (10 rad/s,
    # DC offset -20) sinusoid sampled at rate pi.
    t = np.arange(np.pi/100,10*np.pi,np.pi/100)
    fast = np.sin(50*t)+10
    slow = np.sin(10*t)-20

    fast_mean = np.mean(fast)
    slow_mean = np.mean(slow)

    fast_ts = ts.TimeSeries(data=fast,sampling_rate=np.pi)
    slow_ts = ts.TimeSeries(data=slow,sampling_rate=np.pi)

    #Make sure that the DC is preserved
    # Low-pass the slow series, high-pass the fast one, and check that each
    # filter output keeps the original DC offset (to ~2 decimals).
    f_slow = nta.FilterAnalyzer(slow_ts,ub=0.6)
    f_fast = nta.FilterAnalyzer(fast_ts,lb=0.6)

    npt.assert_almost_equal(f_slow.filtered_fourier.data.mean(),slow_mean,
                            decimal=2)
    npt.assert_almost_equal(f_slow.filtered_boxcar.data.mean(),slow_mean,
                            decimal=2)
    npt.assert_almost_equal(f_slow.fir.data.mean(),slow_mean)

    npt.assert_almost_equal(f_fast.filtered_fourier.data.mean(),10)
    npt.assert_almost_equal(f_fast.filtered_boxcar.data.mean(),10,decimal=2)
    npt.assert_almost_equal(f_fast.fir.data.mean(),10)

    #Check that things work with a two-channel time-series:
    T2 = ts.TimeSeries(np.vstack([fast,slow]),sampling_rate=np.pi)
    f_both = nta.FilterAnalyzer(T2,ub=1.0,lb=0.1)

    #These are rather basic tests:
    # All filter outputs must preserve the input's (channels, samples) shape.
    npt.assert_equal(f_both.fir.shape,T2.shape)
    npt.assert_equal(f_both.iir.shape,T2.shape)
    npt.assert_equal(f_both.filtered_boxcar.shape,T2.shape)
    npt.assert_equal(f_both.filtered_fourier.shape,T2.shape)
def serialize(
        obj: Any,
        annotation: Any,
        config: SerializerConfig
) -> str:
    """Convert the object to JSON

    Args:
        obj (Any): The object to convert
        annotation (Annotation): The type annotation
        config (SerializerConfig): The serializer configuration

    Returns:
        str: The serialized object
    """
    # Fall back to the untyped serializer when no usable annotation exists.
    if not _is_typed(annotation):
        return untyped_serialize(obj, config)
    return typed_serialize(obj, annotation, config)
def validate_dvprel(prop_type, pname_fid, validate):
    """
    Valdiates the DVPREL1/2

    Checks that ``pname_fid`` (a property name string or field id integer) is
    a legal design-variable target for the given property card, and normalizes
    a few integer field ids to their string names (e.g. PELAS field 3 -> 'K1').

    Parameters
    ----------
    prop_type : str
        Property card name (e.g. 'PSHELL', 'PBAR', 'PCOMP').
    pname_fid : str / int
        Property name or field id being optimized.
    validate : bool
        When False, ``pname_fid`` is returned unchanged without any checks.

    Returns
    -------
    pname_fid : str / int
        The (possibly normalized) property name / field id.

    .. note:: words that start with integers (e.g., 12I/T**3) doesn't
              support strings
    """
    if validate:
        # NOTE(review): message says 'DVPREL1' even when this validates a
        # DVPREL2 -- confirm whether the caller distinguishes the two.
        msg = 'DVPREL1: prop_type=%r pname_fid=%r is invalid' % (prop_type, pname_fid)
        #if prop_type == 'CELAS2':
            #assert pname_fid in ['K', 'GE', 'S'], msg
        #elif prop_type == 'CELAS4':
            #assert pname_fid in ['K'], msg
        if prop_type == 'PELAS':
            # Normalize field ids to names for PELAS.
            if pname_fid in ['K1', 3]:
                pname_fid = 'K1'
            elif pname_fid in ['GE1', 4]:
                pname_fid = 'GE1'
            else:
                raise NotImplementedError('PELAST pname_fid=%r is invalid' % pname_fid)
            #assert pname_fid in [3, 4, 'K1', 'GE1'], msg
        elif prop_type == 'PELAST':
            if pname_fid in ['TKID', 3]:
                pname_fid = 'TKID'
            else:
                raise NotImplementedError('PELAST pname_fid=%r is invalid' % pname_fid)
            assert pname_fid in [3, 4, 'TKID'], msg
        elif prop_type == 'PROD':
            if pname_fid in ['A', 4]:
                pname_fid = 'A'
            elif pname_fid in ['J', 5]:
                pname_fid = 'J'
            #elif pname_fid in ['C', 6]:
                #pname_fid = 'C'
            else:
                raise NotImplementedError('PROD pname_fid=%r is invalid' % pname_fid)
            assert pname_fid in [4, 'A', 5, 'J'], msg
        elif prop_type == 'PTUBE':
            assert pname_fid in [4, 5], msg

        #elif prop_type == 'CBAR':
            #assert pname_fid in ['X1', 'X2'], msg
        elif prop_type == 'PBAR':
            assert pname_fid in [4, 5, 6, 7, 12, 13, 14, 15, 16, 17, 18, 19, 'A', 'I1', 'J'], msg
        elif prop_type == 'PBARL':
            assert pname_fid in [12, 13, 14, 15, 16, 17, 'DIM1', 'DIM2', 'DIM3', 'DIM4'], msg

        #elif prop_type == 'CBEAM':
            #assert pname_fid in ['X1', 'X2', 'X3', 'W1A', 'W2A', 'W3A', 'W1B', 'W2B', 'W3B'], msg
        elif prop_type == 'PBEAM':
            # Negative strings are end-B field offsets.
            assert pname_fid in ['I1', 'I2', 'A', 'J',
                                 'I1(B)', 'I2(B)',
                                 '-8', '-9', '-10', '-14'], msg # -8
        elif prop_type == 'PBEAML':
            assert pname_fid in ['DIM1', 'DIM2', 'DIM3', 'DIM4', 'DIM5', 'DIM6',
                                 'DIM1(A)',
                                 'DIM1(B)', 'DIM2(B)', 'I1(B)', 'I2(B)',
                                 'NSM'], msg # 'DIM(B)'

        #elif prop_type == 'CQUAD4':
            #assert pname_fid in ['T1', 'T2', 'T3', 'T4'], msg
        elif prop_type == 'PSHELL':
            if pname_fid in ['T', 4]:
                pname_fid = 'T'
            elif pname_fid in [6]:  # 12I/T**3 doesn't support strings
                pass
            else:
                raise NotImplementedError('PSHELL pname_fid=%r is invalid' % pname_fid)
            #if cp_name in '12I/T**3':
                #cp_name =
            #assert pname_fid in ['T', 4, 6], msg
        elif prop_type == 'PCOMP':
            if isinstance(pname_fid, str):
                # Ply-wise names are a word + trailing ply number (T1, THETA12, ...).
                word, num = break_word_by_trailing_integer(pname_fid)
                if word not in ['T', 'THETA']:
                    raise RuntimeError(msg)
            else:
                assert pname_fid in [3, #3-z0
                                     # 13-t1, 14-theta1, 17-t2, 18-theta2
                                     13, 14, 17, 18,
                                     23, 24, 27, 28,
                                     33, 34, 37, 38,
                                     43, 44, 47, 48], msg
        elif prop_type == 'PCOMPG':
            #if pname_fid in ['T', 4]:
                #pname_fid = 'T'
            #elif pname_fid in [6]: # 12I/T**3 doesn't support strings
                #pass
            #else:
                #raise NotImplementedError('PSHELL pname_fid=%r is invalid' % pname_fid)
            #if cp_name in '12I/T**3':
            assert pname_fid in ['Z0', 'SB',
                                 15, 25, 75, 85], msg

        #elif prop_type == 'CBUSH':
            #assert pname_fid in ['X1', 'X2', 'X3', 'S', 'S1'], msg
        elif prop_type == 'PBUSH':
            assert pname_fid in [18,
                                 'K1', 'K2', 'K3', 'K4', 'K5', 'K6',
                                 'B2',
                                 'GE1', 'GE3', 'GE4', 'GE5', 'GE6',
                                 '-13'], msg
        elif prop_type == 'PBUSH1D':
            assert pname_fid in ['K', 'C'], msg
        elif prop_type == 'PBUSHT':
            assert pname_fid in ['TBID1', 'TGEID1', 'TGEID2'], msg

        # CGAP
        elif prop_type == 'PGAP':
            assert pname_fid in [5], msg
        elif prop_type == 'PVISC':
            assert pname_fid in ['CE1'], msg

        #elif prop_type == 'CDAMP2':
            #assert pname_fid in ['B'], msg
        elif prop_type == 'PDAMP':
            assert pname_fid in [3, 'B1'], msg

        #elif prop_type == 'CMASS2':
            #assert pname_fid in ['M'], msg
        #elif prop_type == 'CMASS4':
            #assert pname_fid in ['M'], msg
        elif prop_type == 'PMASS':
            assert pname_fid in [3], msg

        #elif prop_type == 'CONM2':
            #assert pname_fid in ['M', 'X1', 'X2', 'I11', 'I22'], msg
        elif prop_type == 'PSHEAR':
            if pname_fid in ['T', 4]:
                pname_fid = 'T'
            else:
                raise NotImplementedError('PSHEAR pname_fid=%r is invalid' % pname_fid)
        elif prop_type == 'PWELD':
            assert pname_fid in ['D'], msg

        elif prop_type == 'PBEND':
            raise RuntimeError('Nastran does not support the PBEND')
        else:
            raise NotImplementedError(msg)
    return pname_fid
def generate_thrift():
    """Generates the thrift metric definitions file used by Impala."""
    metrics = load_metrics(options.input_schema_path)
    metrics_json = json.dumps(metrics, sort_keys=True, indent=2)
    # dumps writes the TMetricKind and TUnit enum values as quoted strings,
    # which the thrift compiler does not interpret correctly; strip the quotes.
    for enum_pattern in (r'"(Metrics.TMetricKind.\S+)"', r'"(Metrics.TUnit.\S+)"'):
        metrics_json = re.sub(enum_pattern, r'\1', metrics_json)
    target_file = options.output_thrift_path
    with open(target_file, "w") as fid:
        fid.write(THRIFT_PREAMBLE)
        fid.write("const map<string,TMetricDef> TMetricDefs =\n")
        fid.write(metrics_json)
    print("%s created." % target_file)
def is_ligature(archar):
    """Check whether the given char is an Arabic ligature like LamAlef
    (LAM_ALEF, LAM_ALEF_HAMZA_ABOVE, LAM_ALEF_HAMZA_BELOW, LAM_ALEF_MADDA_ABOVE).

    @param archar: arabic unicode char
    @type archar: unicode
    @return: True when archar is one of the known ligatures
    @rtype: Boolean
    """
    # Membership test against the module-level ligature set.
    return archar in LIGUATURES
def write_config_file(config: ClientConfig, path: str) -> None:
    """
    Writes a config object to a config file

    :param config: the config to write
    :param path: the path to write the file
    :return: None
    """
    # Round-trip through jsonpickle to capture the object graph, then
    # pretty-print it deterministically (sorted keys, 4-space indent).
    encoded = jsonpickle.encode(config)
    pretty = json.dumps(json.loads(encoded), indent=4, sort_keys=True)
    with io.open(path, 'w', encoding='utf-8') as f:
        f.write(pretty)
def _get_embl_key(line):
"""Return first part of a string as a embl key (ie 'AC M14399;' -> 'AC')"""
# embl keys have a fixed size of 2 chars
return line[:2] | 37,391 |
def poke(event=None):
    """Poke event, checks if the user is still there... currently just bugs the user.

    :param event: optional GUI event object (unused); present so the function
        can be bound directly as an event callback.
    """
    # Nudge the user in the chat log widget.
    msg_list.insert(END, "Bot -> Are you still there?")
    # Record that this user is (presumed) present in the global context map.
    CONTEXT[USERNAME] = 'here'
def size_too_big(path):
    """Return True if the file at *path* exceeds the 5MB size limit."""
    limit_bytes = 5 * 1024 * 1024  # 5MB == 5242880 bytes
    return os.path.getsize(path) > limit_bytes
def store_h2o_frame(data, directory, filename, force=False, parts=1):
    """
    Save an H2OFrame to a path on the machine this python session is
    currently connected to.

    :param data: the Frame to save to disk.
    :param directory: the directory of the save point on disk.
    :param filename: the name to save the frame to.
    :param force: if True, overwrite any preexisting file with the same path.
    :param parts: enables export to multiple 'part' files instead of just a
        single file. Convenient for large datasets that take too long to store
        in a single file. Use parts=-1 to instruct H2O to determine the optimal
        number of part files or specify your desired maximum number of part
        files. Path needs to be a directory when exporting to multiple files,
        also that directory must be empty. Default is ``parts = 1``, which is
        to export to a single file.
    :return string filepath: the path to which the file was stored.
    """
    if not os.path.isdir(directory):
        os.makedirs(directory)
    filepath = _make_local_path(os.path.join(directory, filename))
    from h2o.job import H2OJob
    from h2o.utils.typechecks import assert_is_type
    from h2o.frame import H2OFrame
    from h2o import api
    # Validate argument types before launching the export job.
    for value, expected_type in ((data, H2OFrame), (filepath, str),
                                 (force, bool), (parts, int)):
        assert_is_type(value, expected_type)
    payload = {"path": filepath, "num_parts": parts, "force": force}
    export_job = H2OJob(api("POST /3/Frames/%s/export" % (data.frame_id), data=payload),
                        "Export File")
    export_job.poll()
    return filepath
def get_region_data(region, lastday=-1, printrows=0, correct_anomalies=True,
                    correct_dow='r7'):
    """Get case counts and population for one municipality.

    It uses the global DFS['mun'], DFS['cases'] dataframe.

    Parameters:
    - region: region name (see below)
    - lastday: last day to include.
    - printrows: print this many of the most recent rows
    - correct_anomalies: correct known anomalies (hiccups in reporting)
      by reassigning cases to earlier dates.
    - correct_dow: None, 'r7' (only for extrapolated rolling-7 average)

    Special municipalities:
    - 'Nederland': all
    - 'HR:Zuid', 'HR:Noord', 'HR:Midden', 'HR:Midden+Zuid', 'HR:Midden+Noord':
      holiday regions.
    - 'MS:xx-yy': municipalities with population xx <= pop/1000 < yy
    - 'P:xx': province

    Use data up to lastday.

    Return:
    - df: dataframe with added columns:
        - Delta: daily increase in case count (per capita).
        - Delta_dowc: daily increase, day-of-week correction applied
          based on national pattern in most recent 7 weeks.
        - Delta7r: daily increase as 7-day rolling average
          (last 3 days are estimated).
        - DeltaSG: daily increase, smoothed with (15, 2) Savitzky-Golay filter.
    - pop: population.
    """
    df1, npop = nl_regions.select_cases_region(DFS['cases'], region)
    # df1 will have index 'Date_of_report', columns:
    # 'Total_reported', 'Hospital_admission', 'Deceased'
    assert correct_dow in [None, 'r7']
    # lastday == -1 means "use everything"; any other value truncates the frame.
    if lastday < -1 or lastday > 0:
        df1 = df1.iloc[:lastday+1]
    if len(df1) == 0:
        raise ValueError(f'No data for region={region!r}.')
    # nc: number of cases (daily increments from the cumulative counts)
    nc = df1['Total_reported'].diff()
    if printrows > 0:
        print(nc[-printrows:])
    # First diff is NaN; treat the first day as zero new cases.
    nc.iat[0] = 0
    df1['Delta'] = nc/npop
    if correct_anomalies:
        _correct_delta_anomalies(df1)
        # Re-derive nc from the corrected per-capita values.
        nc = df1['Delta'] * npop
    # Centered 7-day rolling average of daily cases.
    nc7 = nc.rolling(7, center=True).mean()
    nc7[np.abs(nc7) < 1e-10] = 0.0  # otherwise +/-1e-15 issues.
    nc7a = nc7.to_numpy()
    # last 3 elements are NaN, use mean of last 4 raw (dow-corrected) to
    # get an estimated trend and use exponential growth or decay
    # for filling the data.
    if correct_dow == 'r7':
        # mean number at t=-1.5 days
        dow_correction = get_dow_correction((lastday-49, lastday))  # (7,) array
        df1['Delta_dowc'] = df1['Delta'] * dow_correction[df1.index.dayofweek]
        nc1 = np.mean(nc.iloc[-4:] * dow_correction[nc.index[-4:].dayofweek])
    else:
        nc1 = nc.iloc[-4:].mean()  # mean number at t=-1.5 days
    # Exponential extrapolation for the trailing NaNs of the rolling average.
    log_slope = (np.log(nc1) - np.log(nc7a[-4]))/1.5
    nc7.iloc[-3:] = nc7a[-4] * np.exp(np.arange(1, 4)*log_slope)
    # 1st 3 elements are NaN; fill with a linear ramp up to the first value.
    nc7.iloc[:3] = np.linspace(0, nc7.iloc[3], 3, endpoint=False)
    df1['Delta7r'] = nc7/npop
    df1['DeltaSG'] = scipy.signal.savgol_filter(
        nc/npop, 15, 2, mode='interp')
    return df1, npop
def write(path: Union[str, Path], entity_key: str, data: Any):
    """Write ``data`` under ``entity_key`` in the HDF file at ``path``.

    Parameters
    ----------
    path
        The path to the HDF file to write to.
    entity_key
        A string representation of the internal HDF path where we want to
        write the data. The key must be formatted as ``"type.name.measure"``
        or ``"type.measure"``.
    data
        The value to store. :mod:`pandas` objects are written with the pandas
        HDF machinery (`pandas.HDFStore
        <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#hdf5-pytables>`_
        or :meth:`pandas.DataFrame.to_hdf`); any other python object is first
        encoded as json with :func:`json.dumps` and written to the key.

    Raises
    ------
    ValueError
        If the path or entity_key are improperly formatted.
    """
    hdf_path = _get_valid_hdf_path(path)
    key = EntityKey(entity_key)
    # Dispatch on the payload type: pandas objects get native HDF storage,
    # everything else is stored as a json blob.
    writer = _write_pandas_data if isinstance(data, PandasObj) else _write_json_blob
    writer(hdf_path, key, data)
def MONTH(*args) -> Function:
    """
    Returns the month of the year a specific date falls in, in numeric format.

    Learn more: https://support.google.com/docs/answer/3093052
    """
    return Function("MONTH", args)
def local_tmp_dir():
    """Return the path of the local ./tmp directory used by tests.

    The directory is created on first use if it does not already exist.
    """
    path = "./tmp"
    if not os.path.isdir(path):
        os.mkdir(path)
    return path
def Install(browser):
    """Installs |browser|, if necessary. It is not possible to install
    an older version of the already installed browser currently.

    Args:
        browser: specific browser to install (dict with 'name' and 'family').
    Returns:
        whether browser is installed.
    """
    # Only dynamic installation of browsers for Windows now.
    if not util.IsWindows():
        return True
    logging.info('Wants to install ' + browser['name'])
    version = GetVersionNumber(browser['family'])
    if version is None:
        logging.info('No version of %s is installed' % browser['family'])
    else:
        logging.info('Version %s of %s is installed already'
                     % (version, browser['family']))
    if not IsBrowserInstalled(browser):
        install_cmd = None
        # Download browser.
        logging.info('Downloading ' + browser['name'])
        if browser['family'] == 'ie':
            if browser['name'] == 'ie7':
                install_cmd = util.Download(_IE_7_URLS[util.GetOSPrefix()],
                                            SOFTWARE_PATH)
            elif browser['name'] == 'ie8':
                install_cmd = util.Download(_IE_8_URLS[util.GetOSPrefix()],
                                            SOFTWARE_PATH)
            # Run the IE installer silently; guard against unknown IE names
            # (install_cmd would still be None and '+=' would raise).
            if install_cmd is not None:
                install_cmd += ' /passive /no-default'
        elif browser['family'] == 'firefox':
            if util.IsWindows():
                install = util.Download(_FIREFOX_VERSIONS[browser['name']],
                                        SOFTWARE_PATH)
                install_cmd = install + ' -ms'
        elif browser['family'] == 'chrome':
            if util.IsWindows():
                install_cmd = util.Download(_CHROME_VERSIONS[browser['name']],
                                            SOFTWARE_PATH)
        else:
            logging.error('Browser %s is not currently supported' % browser['name'])
        # Run installation.
        if install_cmd is not None:
            logging.info('Installing browser: ' + install_cmd)
        if install_cmd is None or util.RunStr(install_cmd) != 0:
            logging.error('Could not install %s' % browser['name'])
            return False
        # Do post installation things.
        if browser['family'] == 'chrome':
            # Create the 'First Run' sentinel so Chrome skips its first-run UI.
            # open() replaces the Python-2-only file() builtin.
            first_run = open(HOME_PATH + '\\Local Settings\\'
                             'Application Data\\Google\\Chrome\\Application\\'
                             'First Run', 'w')
            first_run.close()
            # Wait for Chrome to install. Reboot to get rid of that first run UI.
            time.sleep(90)
            util.Reboot()
            logging.error('Could not reboot. Needed for Chrome installation.')
            return False
    else:
        logging.info(browser['name'] + ' already installed')
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.