# Dataset export header (extraction residue): "content" column (string lengths 22-815k) | "id" column (int64, 0-4.91M).
def convert_mg_l_to_mymol_kg(o2, rho_0=1025):
    """Convert oxygen concentrations from mg/l to micromol/kg.

    (Docstring previously said "ml/l"; the factor 1/32000 converts
    mg to mol via the 32 g/mol molar mass of O2, so the input is mg/l.)

    Args:
        o2: Oxygen concentration in mg/l; an object with an ``attrs``
            dict (e.g. xarray DataArray or pandas Series).
        rho_0: Reference seawater density in kg/m^3 (default 1025).

    Returns:
        Concentration in micromol/kg with a ``units`` attribute set.
    """
    # 1/32000: mg -> mol of O2; 1e6: mol -> micromol.
    # NOTE(review): converting per-litre to per-kg by *multiplying* with
    # rho_0/1000 looks inverted (dividing by density would be * 1000/rho_0);
    # kept as-is — confirm intended direction against the original reference.
    converted = o2 * 1/32000 * rho_0/1000 * 1e6
    # Raw string avoids the invalid "\m" escape warning; value is unchanged.
    converted.attrs["units"] = r"$\mu mol/kg$"
    return converted
def parse_defines(root: xml.etree.ElementTree.Element,
                  component_id: str) -> List[str]:
    """Collect the pre-processor definitions declared for one component.

    Schema:
        <defines>
          <define name="EXAMPLE" value="1"/>
          <define name="OTHER"/>
        </defines>

    Args:
        root: root of element tree.
        component_id: id of component to return.

    Returns:
        list of str NAME=VALUE or NAME for the component.
    """
    define_path = f'./components/component[@id="{component_id}"]/defines/define'
    return [_parse_define(node) for node in root.findall(define_path)]
def wheel(string = None):
    """
    Spinning ascii wheel keep alive, user string is optional

    Args:
        string (str) : Optional string to display before the spinning wheel

    Returns:
        none
    """
    # Delegate to the module-private implementation.
    _wheel(string)
def make_f_beta(beta):
    """Build an f-beta scoring function.

    Parameters
    ----------
    beta : float
        The beta to use where a beta of 1 is the f1-score or F-measure

    Returns
    -------
    function
        A function to compute the f_beta score
    """
    beta_squared = beta ** 2
    scale = 1 + beta_squared

    def f(global_, local_, node):
        """Compute the f-measure

        Parameters
        ----------
        global_ : np.array
            All of the scores for a given query
        local_ : np.array
            The scores for the query at the current node
        node : skbio.TreeNode
            The current node being evaluated
        """
        precision = len(global_) / len(local_)
        recall = len(local_) / node.ntips
        return scale * (precision * recall) / ((beta_squared * precision) + recall)

    return f
def _autohint_code(f, script):
    """Return 'not-hinted' if we don't hint this, else return the ttfautohint
    code, which might be None if ttfautohint doesn't support the script.

    Note that LGC and MONO return None."""
    if script == 'no-script':
        return script
    if not script:
        # Fall back to the font's own primary script when none was given.
        font_script = _get_font_info(f).script
        script = noto_fonts.script_key_to_primary_script(font_script)
    return noto_data.HINTED_SCRIPTS.get(script, 'not-hinted')
def infostring(message=""):
    """Info log-string.

    I normally use this at the end of tasks.

    Args:
        message(str): A custom message to add.

    Returns:
        (str): Timestamped single-line info string, newline-terminated.
    """
    # Strip trailing whitespace and flatten embedded newlines.
    # (Bug fix: the original computed this but discarded the result,
    # since str methods return a new string.)
    message = message.rstrip().replace("\n", " ")
    return tstamp() + "\t## INFO ## " + message + "\n"
def handler(event, _):
    """
    Cognito pre-sign-up Lambda handler.

    Sets the response so the user is never auto-confirmed, forwards
    sign-up events to EventBridge, and returns the event as Cognito
    triggers require.
    """
    # Input event:
    # https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-identity-pools-working-with-aws-lambda-triggers.html#cognito-user-pools-lambda-trigger-event-parameter-shared
    logger.debug({
        "message": "Input event",
        "event": event
    })
    # Never confirm users
    event["response"] = {
        "autoConfirmUser": False,
        "autoVerifyPhone": False,
        "autoVerifyEmail": False
    }
    # Only care about the ConfirmSignUp action
    # At the moment, the only other PostConfirmation event is 'PostConfirmation_ConfirmForgotPassword'
    if event["triggerSource"] not in ["PreSignUp_SignUp", "PreSignUp_AdminCreateUser"]:
        logger.warning({
            "message": "invalid triggerSource",
            "triggerSource": event["triggerSource"]
        })
        return event
    # Prepare the event
    eb_event = process_request(event)
    # Send the event to EventBridge
    send_event(eb_event)
    # Always return the event at the end
    return event
def geo_point_n(arg, n):
    """Return the Nth point in a single linestring in the geometry.

    Negative values are counted backwards from the end of the LineString,
    so that -1 is the last point. Returns NULL if there is no linestring in
    the geometry

    Parameters
    ----------
    arg : geometry
    n : integer

    Returns
    -------
    PointN : geometry scalar
    """
    # Build the GeoPointN operation node and convert it to an expression
    # (presumably ibis-style ops — confirm against the surrounding API).
    op = ops.GeoPointN(arg, n)
    return op.to_expr()
def replace_pasture_scrubland_with_shrubland(df, start_col, end_col):
    """Merge pasture and scrubland state transitions into 'shrubland'.

    1. Remove transitions /between/ scrubland and pasture and vice versa.
    2. Check there are no duplicate transitions which would be caused by an
       identical set of conditions leading from or to both pasture and
       scrubland being merged.
    3. Rename all instances of either 'scrubland' or 'pasture' to 'shrubland'
    4. Check for duplicates again.

    Args:
        df: transitions table (pandas DataFrame).
        start_col: column holding the source land-cover type.
        end_col: column holding the destination land-cover type.

    Returns:
        The DataFrame with pasture/scrubland merged into shrubland.
    """
    df = remove_transitions_bw_pasture_and_scrubland(df, start_col, end_col)
    # Pre-merge sanity checks: merging must not create duplicate transitions.
    duplicates_start = duplicates_start_with_pasture_or_scrubland(df,
                                                                  start_col, end_col)
    assert len(duplicates_start.index) == 0, "No duplicates expected."
    duplicates_end = duplicates_end_with_pasture_or_scrubland(df,
                                                              start_col, end_col)
    assert len(duplicates_end.index) == 0, "No duplicates expected."
    # Rename both aliases to shrubland in both transition columns.
    for col in [start_col, end_col]:
        for lct in [MLct.SCRUBLAND.alias, MLct.PASTURE.alias]:
            df.loc[:,col] = df[col].replace(lct, AsLct.SHRUBLAND.alias)
    # Post-merge check: rows must stay unique over all condition columns.
    cond_cols = ["succession", "aspect", "pine", "oak", "deciduous", "water"]
    cond_cols += [start_col, end_col]
    assert len(df[df.duplicated(cond_cols)].index) == 0, "There should be "\
        + "no duplicated rows."
    return df
def init_random_seed(seed: int) -> None:
    """Seed every random number generator used by the project.

    Seeds Python's ``random`` module, NumPy's legacy global generator,
    and PyTorch.

    Args:
        seed: Seed value.
    """
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
def get_storage_backend_descriptions() -> List[dict]:
    """Return the metadata of every supported storage backend.

    Returns:
        One metadata dict per backend in ``SUPPORTED_STORAGE_BACKENDS``.
    """
    return [
        get_storage_backend(backend_name).metadata
        for backend_name in SUPPORTED_STORAGE_BACKENDS
    ]
def main():
    """
    Command-line entry point: generate a SPEXone L1A product simulating
    a science orbit, optionally filling it with measurement data.
    """
    # parse command-line parameters
    parser = argparse.ArgumentParser(
        description='generate SPEXone L1A product simulating a science orbit')
    parser.add_argument('--navigation_data', default=None,
                        help=('navigation data used to initialize the L1A file,'
                              ' provide path to an existing L1A product'))
    parser.add_argument('--binTableID', default=3, type=int,
                        help=('provide binning table ID, default=3'))
    parser.add_argument('--repeats', default=1, type=int,
                        help=('specify number of repeats of each measurement,'
                              ' default=1'))
    parser.add_argument('--measurement_data', default=None, nargs='*',
                        help=('provide path one or more L1A products with,'
                              ' measurement data used to fill the L1A product'))
    parser.add_argument('--verbose', action='store_true', default=False)
    args = parser.parse_args()
    if args.verbose:
        print(args)
    # Initialize the product from navigation data, then optionally add
    # the measurements from the given L1A products.
    if args.navigation_data is not None:
        duration = initialize_l1a_product(args.navigation_data, args.binTableID)
        print(f'Coverage of the navigation data is {duration} seconds')
    if args.measurement_data is not None:
        add_measurements(args.measurement_data, args.repeats, sampling=3)
def get_business_with_most_location() -> Tuple:
    """
    Fetches LA API and returns the business with most locations
    from first page

    :return Tuple: business name and number of locations
    :raises ServiceUnavailable: when the API does not answer with HTTP 200
    """
    response = _fetch_businesses_from_la_api()
    if response.status_code != 200:
        raise ServiceUnavailable()
    # Count how many records (locations) each business name has.
    occurrences = dict()
    for active_business in response.json():
        name = active_business["business_name"]
        occurrences[name] = occurrences.get(name, 0) + 1
    (
        business_name_from_max,
        number_of_locations,
    ) = _get_max_business_occurrence(occurrences)
    return business_name_from_max, number_of_locations
def degrees_to_polynomial(degrees: AbstractSet[int]) -> Poly:
    """
    Build the polynomial whose terms are exactly the given degrees,
    each with coefficient 1 (all other coefficients zero), e.g.:
        {0, 2, 5} -> x**5 + x**2 + 1
    """
    return Poly.from_dict({degree: 1 for degree in degrees}, x)
def validate_set_member_filter(filter_vals, vals_type, valid_vals=None):
    """
    Validate filter values that must be of a certain type or
    found among a set of known values.

    Args:
        filter_vals (obj or Set[obj]): Value or values to filter records by.
        vals_type (type or Tuple[type]): Type(s) of which all ``filter_vals``
            must be instances.
        valid_vals (Set[obj]): Set of valid values in which all ``filter_vals``
            must be found.

    Return:
        Set[obj]: Validated and standardized filter values.

    Raises:
        TypeError
        ValueError
    """
    filter_vals = to_collection(filter_vals, vals_type, set)
    if valid_vals is not None:
        # Anything left after removing the known values is invalid.
        invalid_vals = filter_vals.difference(valid_vals)
        if invalid_vals:
            raise ValueError(
                "not all values in filter are valid: {}".format(invalid_vals)
            )
    return filter_vals
def nsi_delete2(ctx, name, force, wait):
    """deletes a Network Slice Instance (NSI)

    NAME: name or ID of the Network Slice instance to be deleted
    """
    logger.debug("")
    # Delegate to the shared delete implementation.
    nsi_delete(ctx, name, force, wait=wait)
def reorder_widgets(dashboard_id, widget_data):
    """
    Reorders Widgets given the relative order desired,
    reorders widgets in the next possible set of numbers
    i.e if order of widgets is 1, 2, 3
    the reordered widgets will have order 4, 5, 6

    Args:
        dashboard_id: primary key of the dashboard whose widgets to reorder.
        widget_data: list of dicts with at least 'id' and 'order' keys.
    """
    dashboard_widgets = Widget.objects.filter(
        dashboard_id=dashboard_id,
    )
    dashboard_widgets = list(remove_widgets(dashboard_widgets, widget_data))
    # dashboard_widgets and widget_data should now have the same widgets
    widget_data.sort(key=lambda x: x['order'])
    # Start numbering after the current maximum so orders never collide.
    next_order = get_next_dashboard_order(dashboard_id)
    for index, data in enumerate(widget_data):
        for widget in dashboard_widgets:
            if widget.id == data['id']:
                widget.order = next_order + index
                widget.save()
                break
def run(weavrs_instance, weavr):
    """Do the prosthetic activity for a given weavr.

    Args:
        weavrs_instance: the weavrs service instance to talk to.
        weavr: the weavr entity to act for (its key name is logged).
    """
    logging.info(u"Hello, weavr %s" % weavr.key().name())
    client = weavrsclient.WeavrsClient(weavrs_instance, weavr)
    config = client.get_weavr_configuration()
    logging.info(u"Got configuration: %s" % config)
def ultimate_1_post():
    """Single-player ultimate tic-tac-toe POST handler (state machine P/M/E)."""
    check_user_id()
    # Look up the current player's game.
    user_id = int(bottle.request.get_cookie(COOKIE, secret=SECRET))
    ultimate_1 = user_tracker.users[user_id][2]
    if ultimate_1.state == "P":
        # Set the initial parameters; the bot makes a possible first move.
        player_mark = bottle.request.forms.getunicode('player_mark')
        player_turn = bool(bottle.request.forms.getunicode('player_turn'))
        ultimate_1.choose_parameters(player_mark, player_turn)
        if not player_turn:
            ultimate_1.inp_cell = ultimate_1.master_cell.random_free()
            ultimate_1.inp_space = ultimate_1.cell_list[ultimate_1.inp_cell].random_free()
            current_cell = ultimate_1.cell_list[ultimate_1.inp_cell]
            ultimate_1.move_in_big_cell = False
            ultimate_1.move_in_small_cell(current_cell)
        ultimate_1.state = "M"
        bottle.redirect("/igre/ultimate_1/")
    elif ultimate_1.state == "M":
        # Make a move.
        # Player's move.
        while ultimate_1.player_turn:
            current_cell = ultimate_1.cell_list[ultimate_1.inp_cell]
            if not ultimate_1.move_in_big_cell:
                inp_space_kand = int(bottle.request.forms.getunicode('inp_space'))
                if ultimate_1.cell_list[ultimate_1.inp_cell].spaces[inp_space_kand] == ".":
                    ultimate_1.inp_space = inp_space_kand
                    ultimate_1.move_in_small_cell(current_cell)
                else:
                    bottle.redirect("/igre/ultimate_1/")
            else:
                inp_cell_kand = int(bottle.request.forms.getunicode('inp_cell'))
                if ultimate_1.master_cell.spaces[inp_cell_kand] == ".":
                    ultimate_1.inp_cell = inp_cell_kand
                    ultimate_1.move_in_big_cell = False
                    # So that the bot's last move is not displayed.
                    ultimate_1.last_inp_cell = 0
                    bottle.redirect("/igre/ultimate_1/")
                else:
                    bottle.redirect("/igre/ultimate_1/")
        if ultimate_1.master_cell.spaces[ultimate_1.inp_cell] != ".":
            ultimate_1.move_in_big_cell = True
        if not ultimate_1.master_cell.check_win() and ultimate_1.num_master_turns < 9:
            # Bot's move.
            while not ultimate_1.player_turn:
                current_cell = ultimate_1.cell_list[ultimate_1.inp_cell]
                if not ultimate_1.move_in_big_cell:
                    ultimate_1.inp_space = ultimate_1.master_bot.ultimate_incell_move(ultimate_1.cell_list, ultimate_1.inp_cell)
                    ultimate_1.move_in_small_cell(current_cell)
                else:
                    ultimate_1.inp_cell = ultimate_1.master_cell.random_free()
                    ultimate_1.move_in_big_cell = False
            if ultimate_1.master_cell.spaces[ultimate_1.inp_cell] != ".":
                ultimate_1.move_in_big_cell = True
            if not ultimate_1.master_cell.check_win() and ultimate_1.num_master_turns < 9:
                None
            else:
                ultimate_1.state = "E"
        else:
            ultimate_1.state = "E"
        bottle.redirect("/igre/ultimate_1/")
    elif ultimate_1.state == "E":
        # Update the statistics and reset the game.
        data_manager.data["ended_U1"] += 1
        if ultimate_1.master_cell.check_win() and not ultimate_1.player_turn:
            data_manager.data["player_beat_bot"] += 1
            if ultimate_1.player_mark == "X":
                data_manager.data["player_win_X"] += 1
            elif ultimate_1.player_mark == "O":
                data_manager.data["player_win_O"] += 1
        elif not ultimate_1.master_cell.check_win():
            data_manager.data["ended_draw"] += 1
        data_manager.dump_data_to_file()
        ultimate_1.reset()
        bottle.redirect("/igre/")
def start_replication(cur, slot_name=DEFAULT_SLOT_NAME, observable_nodes=None):
    """Start replication, create slot if not already there.

    Args:
        cur: psycopg2 replication cursor.
        slot_name: logical replication slot name.
        observable_nodes: optional nested mapping whose leaves carry a
            'table_name'; when given, only those tables are streamed.
    """
    replication_parms = {'slot_name': slot_name, 'decode': True}
    # # notifications only for configured tables, see https://github.com/eulerto/wal2json#parameters
    if observable_nodes:
        table_names = []
        for k in observable_nodes.keys():
            for k2 in observable_nodes[k].keys():
                table_names.append(observable_nodes[k][k2]['table_name'])
        # add schema name
        replication_parms['options'] = {'add-tables': ','.join(['*.{}'.format(tn) for tn in table_names])}
    print('Started replication {}'.format(replication_parms), file=sys.stderr)
    # EAFP: if the slot does not exist yet, create it and retry once.
    try:
        cur.start_replication(**replication_parms)
    except psycopg2.ProgrammingError:
        cur.create_replication_slot(slot_name, output_plugin='wal2json')
        cur.start_replication(**replication_parms)
def update_many(token, checkids, fields, customerid=None):
    """ Updates a field(s) in multiple existing NodePing checks

    Accepts a token, a dict of checkids mapped to their check type, and the
    fields to be updated. Applies the same field updates to every listed
    check.

    :type token: string
    :param token: Your NodePing API token
    :type checkids: dict
    :param checkids: CheckIDs with their check type to update
    :type fields: dict
    :param fields: Fields in check that will be updated
    :type customerid: string
    :param customerid: subaccount ID
    :rtype: list
    :return: Per-check responses from the NodePing API
    """
    updated_checks = []
    for checkid, checktype in checkids.items():
        url = "{0}/{1}".format(API_URL, checkid)
        url = _utils.create_url(token, url, customerid)
        # Copy so the caller's dict is not mutated by the added "type" key.
        send_fields = fields.copy()
        send_fields.update({"type": checktype.upper()})
        updated_checks.append(_query_nodeping_api.put(url, send_fields))
    return updated_checks
def main(start, end, csv_name, verbose):
    """Run script conditioned on user-input.

    Args:
        start: lower bound passed through to the collector.
        end: upper bound passed through to the collector.
        csv_name: output CSV filename.
        verbose: enable verbose output in the collector.

    Returns:
        Whatever ``get_pomological_data`` returns.
    """
    # Fixed user-facing typo: "throught" -> "through".
    print("Collecting Pomological Watercolors {s} through {e}".format(s=start, e=end))
    return get_pomological_data(start=start, end=end, csv_name=csv_name, verbose=verbose)
def publish(message: Message) -> None:
    """
    Publishes a message on Taskhawk queue

    Routes by deployment mode: Lambda apps publish to an SNS topic,
    everything else to an SQS queue chosen by message priority.
    """
    message_body = message.as_dict()
    payload = _convert_to_json(message_body)
    if settings.IS_LAMBDA_APP:
        # Lambda apps are fed via SNS.
        topic = _get_sns_topic(message.priority)
        _publish_over_sns(topic, payload, message.headers)
    else:
        queue_name = get_queue_name(message.priority)
        queue = get_queue(queue_name)
        _publish_over_sqs(queue, payload, message.headers)
    _log_published_message(message_body)
def _sub_fetch_file(url, md5sum=None):
    """
    Sub-routine of _fetch_file: download ``url``, optionally verifying its md5.

    :param url: URL to download.
    :param md5sum: expected hex md5 digest, or None to skip verification.
    :returns: the raw response body.
    :raises: :exc:`DownloadFailed` on network error or checksum mismatch.
    """
    try:
        # Context manager closes the connection even if read() raises
        # (the original leaked the handle).
        with urlopen(url) as fh:
            contents = fh.read()
    except URLError as ex:
        raise DownloadFailed(str(ex))
    if md5sum is not None:
        filehash = hashlib.md5(contents).hexdigest()
        # NOTE: an empty-string md5sum deliberately skips the mismatch check,
        # matching the original behavior.
        if md5sum and filehash != md5sum:
            raise DownloadFailed("md5sum didn't match for %s. Expected %s got %s" % (url, md5sum, filehash))
    return contents
def write_dict_to_hdf5(
    data_dict: dict, entry_point, group_overwrite_level: int = np.inf
):
    """
    Recursively write a (possibly nested) dictionary to an HDF5 file.

    Args:
        data_dict (dict): dictionary to write to hdf5 file
        entry_point (hdf5 group.file) : location in the nested hdf5 structure
            where to write to.
        group_overwrite_level (int): how many nesting levels down existing
            groups may be deleted and recreated; below that level existing
            groups are reused as-is.
    """
    for key, item in data_dict.items():
        # Basic types
        if isinstance(
            item, (str, float, int, bool, np.number, np.float_, np.int_, np.bool_)
        ):
            try:
                entry_point.attrs[key] = item
            except Exception as e:
                print(
                    "Exception occurred while writing"
                    " {}:{} of type {} at entry point {}".format(
                        key, item, type(item), entry_point
                    )
                )
                log.warning(e)
        elif isinstance(item, np.ndarray):
            entry_point.create_dataset(key, data=item)
        elif item is None:
            # as h5py does not support saving None as attribute
            # I create special string, note that this can create
            # unexpected behaviour if someone saves a string with this name
            entry_point.attrs[key] = "NoneType:__None__"
        elif isinstance(item, dict):
            # converting key to string is to make int dictionary keys work
            str_key = str(key)
            if str_key not in entry_point.keys():
                entry_point.create_group(str_key)
            elif group_overwrite_level < 1:
                log.debug("Overwriting hdf5 group: {}".format(str_key))
                del entry_point[str_key]
                entry_point.create_group(str_key)
            write_dict_to_hdf5(
                data_dict=item,
                entry_point=entry_point[str_key],
                group_overwrite_level=group_overwrite_level - 1,
            )
        elif isinstance(item, UFloat):
            # Uncertainties values are stored as a small group of two attrs.
            str_key = str(key)
            if str_key not in entry_point.keys():
                entry_point.create_group(str_key)
            elif group_overwrite_level < 1:
                log.debug("Overwriting hdf5 group: {}".format(str_key))
                del entry_point[str_key]
                entry_point.create_group(str_key)
            new_item = {"nominal_value": item.nominal_value, "std_dev": item.std_dev}
            write_dict_to_hdf5(
                data_dict=new_item,
                entry_point=entry_point[str_key],
                group_overwrite_level=group_overwrite_level - 1,
            )
        elif isinstance(item, (list, tuple)):
            if len(item) > 0:
                elt_type = type(item[0])
                # Lists of a single type, are stored as an hdf5 dset
                if (
                    all(isinstance(x, elt_type) for x in item)
                    and not isinstance(item[0], dict)
                    and not isinstance(item, tuple)
                ):
                    if isinstance(item[0], (int, float, np.int32, np.int64)):
                        entry_point.create_dataset(key, data=np.array(item))
                        entry_point[key].attrs["list_type"] = "array"
                    # strings are saved as a special dtype hdf5 dataset
                    elif isinstance(item[0], str):
                        dt = h5py.special_dtype(vlen=str)
                        data = np.array(item)
                        data = data.reshape((-1, 1))
                        ds = entry_point.create_dataset(key, (len(data), 1), dtype=dt)
                        ds.attrs["list_type"] = "str"
                        ds[:] = data
                    else:
                        # For nested list we don't throw warning, it will be
                        # recovered in case of a snapshot
                        warn_msg = (
                            'List of type "{}" for "{}":"{}" not '
                            "supported, storing as string".format(elt_type, key, item)
                        )
                        if elt_type is list:
                            log.debug(warn_msg)
                        else:
                            log.warning(warn_msg)
                        entry_point.attrs[key] = str(item)
                # Storing of generic lists/tuples
                else:
                    if key not in entry_point.keys():
                        entry_point.create_group(key)
                    elif group_overwrite_level < 1:
                        log.debug("Overwriting hdf5 group: {}".format(key))
                        del entry_point[key]
                        entry_point.create_group(key)
                    # N.B. item is of type list
                    list_dct = {
                        "list_idx_{}".format(idx): entry
                        for idx, entry in enumerate(item)
                    }
                    group_attrs = entry_point[key].attrs
                    if isinstance(item, tuple):
                        group_attrs["list_type"] = "generic_tuple"
                    else:
                        group_attrs["list_type"] = "generic_list"
                    group_attrs["list_length"] = len(item)
                    write_dict_to_hdf5(
                        data_dict=list_dct,
                        entry_point=entry_point[key],
                        group_overwrite_level=group_overwrite_level - 1,
                    )
            else:
                # as h5py does not support saving None as attribute
                entry_point.attrs[key] = "NoneType:__emptylist__"
        else:
            # Fallback: anything unrecognized is stringified (lossy).
            log.warning(
                'Type "{}" for "{}" (key): "{}" (item) at location {} '
                "not supported, "
                "storing as string".format(type(item), key, item, entry_point)
            )
            entry_point.attrs[key] = str(item)
def get_path_to_config(config_name: str) -> str:
    """Return the path to the named config inside the run-configs dir."""
    return join(get_run_configs_dir(), config_name)
def get_orig_rawimage(raw_file, debug=False):
    """
    Read a raw, original LRIS data frame.

    Ported from LOWREDUX long_oscan.pro lris_oscan()

    Parameters
    ----------
    raw_file : :obj:`str`
        Filename
    debug : :obj:`bool`, optional
        Run in debug mode (doesn't do anything)

    Returns
    -------
    raw_img : `numpy.ndarray`_
        Raw image for this detector.
    hdu : `astropy.io.fits.HDUList`_
        Opened fits file
    exptime : :obj:`float`
        Exposure time read from the file header
    rawdatasec_img : `numpy.ndarray`_
        Data (Science) section of the detector as provided by setting the
        (1-indexed) number of the amplifier used to read each detector pixel.
        Pixels unassociated with any amplifier are set to 0.
    oscansec_img : `numpy.ndarray`_
        Overscan section of the detector as provided by setting the
        (1-indexed) number of the amplifier used to read each detector pixel.
        Pixels unassociated with any amplifier are set to 0.
    """
    # Open
    hdul = io.fits_open(raw_file)
    head0 = hdul[0].header
    # TODO -- Check date here and error/warn if not after the upgrade
    image = hdul[0].data.astype(float)
    # Get post, pre-pix values
    postpix = head0['POSTPIX']
    prepix = head0['PREPIX']
    post_buffer1 = 4
    post_buffer2 = 8
    namps = head0['NUMAMPS']
    # get the x and y binning factors...
    binning = head0['BINNING']
    xbin, ybin = [int(ibin) for ibin in binning.split(',')]
    rawdatasec_img = np.zeros_like(image, dtype=int)
    oscansec_img = np.zeros_like(image, dtype=int)
    # Column offsets: each amp reads a 1024-pixel (unbinned) data region.
    datacol = namps * (prepix // xbin) + np.arange(namps) * 1024 // xbin
    postcol = datacol[namps - 1] + (1024 + post_buffer1) // xbin
    for iamp in range(namps):  #= 0, namps - 1L
        # Mark overscan and science columns with the 1-indexed amp number.
        biascols = np.arange((postpix - post_buffer2) // xbin) + (
            iamp * postpix) // xbin + postcol
        oscansec_img[:, biascols] = iamp+1
        imagecols = np.arange(1024 // xbin) + iamp * 1024 // xbin
        rawdatasec_img[:,imagecols + namps*(prepix // xbin)] = iamp+1
    return image, hdul, float(head0['ELAPTIME']), \
        rawdatasec_img, oscansec_img
def scan_db_and_save_table_info(data_source_id, db_connection, schema, table):
    """Scan the database for table info and persist it as metadata.

    Saves the freshly scanned info first, then deletes the previous
    record (if any), so a metadata row always exists.

    Args:
        data_source_id: id of the data source being scanned.
        db_connection: live DB connection used for the scan.
        schema: schema name of the table.
        table: table name.

    Returns:
        The scanned table info.
    """
    table_info = get_table_info(
        {}, schema, table, from_db_conn=True, db_conn=db_connection
    )
    old_table_info = fetch_table_info(data_source_id, schema, table, as_obj=True)
    data_source_metadata = DataSourceMetadata(
        data_source_id=data_source_id,
        metadata_type="table_info",
        metadata_param=get_metadata_param_str([schema, table]),
        metadata_info=table_info,
    )
    data_source_metadata.save(commit=True)
    # Remove the superseded record only after the new one is committed.
    if old_table_info:
        old_table_info.delete(commit=True)
    return table_info
def _get_non_heavy_neighbor_residues(df0, df1, cutoff):
"""Get neighboring residues for non-heavy atom-based distance."""
non_heavy0 = df0[df0['element'] != 'H']
non_heavy1 = df1[df1['element'] != 'H']
dist = spa.distance.cdist(non_heavy0[['x', 'y', 'z']], non_heavy1[['x', 'y', 'z']])
pairs = np.array(np.where(dist < cutoff)).T
if len(pairs) == 0:
return [], []
# Use the found pairs to find unique pairings of residues.
res0 = non_heavy0.iloc[pairs[:, 0]][['pdb_name', 'model', 'chain', 'residue']]
res1 = non_heavy1.iloc[pairs[:, 1]][['pdb_name', 'model', 'chain', 'residue']]
res0 = res0.reset_index(drop=True)
res1 = res1.reset_index(drop=True)
# We concatenate so that we can find unique _pairs_.
res = pd.concat((res0, res1), axis=1)
res = res.drop_duplicates()
# # Split back out now that we have found duplicates.
res0 = res.iloc[:, range(4)]
res1 = res.iloc[:, range(4, 8)]
res0 = res0.reset_index(drop=True)
res1 = res1.reset_index(drop=True)
return res0, res1 | 36,728 |
def j0(ctx, x):
    """Computes the Bessel function `J_0(x)`. See :func:`besselj`."""
    # Thin convenience wrapper: order-0 case of the general Bessel J.
    return ctx.besselj(0, x)
def mysql2df(host, user, password, db_name, tb_name):
    """
    Return mysql table data as pandas DataFrame.

    :param host: host name
    :param user: user name
    :param password: password
    :param db_name: name of the pydb from where data will be exported
    :param tb_name: name of the table from where data will be exported
    :return: DataFrame with the table's rows, or None if the query failed
        (errors are printed, not raised).
    """
    # Create a connection object
    # dialect+driver://username:password@host:port/pydb
    connect_string = "mysql+pymysql://{}:{}@{}/{}".format(user, password, host, db_name)
    engine = db.create_engine(connect_string, encoding='latin1', echo=True, pool_pre_ping=True)
    connection = engine.connect()
    # NOTE(review): this session is never used for queries below — it is only
    # closed in the finally block; confirm whether it can be removed.
    session = sessionmaker(bind=engine)()
    metadata = db.MetaData()
    try:
        # print the table column names
        tb = db.Table(tb_name, metadata, autoload=True, autoload_with=engine)
        print(tb.columns.keys())
        # Retrieve table data: 'SELECT * FROM table'
        sql_query = 'SELECT * FROM {}'.format(tb_name)
        df = pd.read_sql(sql_query, connection)
        return df
    except Exception as e:
        print('Error: {}'.format(str(e)))
    finally:
        engine.dispose()
        session.close()
def is_int(number):
    """ Check if a variable can be cast as an int.

    @param number: The value to check
    @return: True if ``int(number)`` succeeds, False otherwise
    """
    # Catch only conversion failures; the original bare `except` would also
    # swallow unrelated errors such as KeyboardInterrupt.
    try:
        int(number)
        return True
    except (TypeError, ValueError):
        return False
def load_pretrained_net_weights(net, ckpt_path):
    """
    A function loading parameters (weights and biases) from a previous training to a net RNN instance

    :param net: An instance of RNN
    :param ckpt_path: path to .ckpt file storing weights and biases
    :return: No return. Modifies net in place.
    """
    print("Loading Model: ", ckpt_path)
    print('')
    # expect_partial() silences warnings about checkpoint values that are
    # not used by this model (e.g. optimizer slots).
    net.load_weights(ckpt_path).expect_partial()
def get_versions(api_type=DEFAULT_TYPE):
    """Search for API object module files of api_type.

    Args:
        api_type (:obj:`str`, optional):
            Type of object module to load, must be one of :data:`API_TYPES`.

            Defaults to: :data:`DEFAULT_TYPE`.

    Raises:
        :exc:`exceptions.NoVersionFoundError`:
            If no API module files matching :data:`PATTERN` are found.

    Returns:
        :obj:`list` of :obj:`dict`
    """
    # Search next to this module for files matching the version pattern.
    path = pathlib.Path(__file__).absolute().parent
    pattern = PATTERN.format(api_type=api_type)
    matches = [p for p in path.glob(pattern)]
    if not matches:
        error = "Unable to find any object modules matching pattern {r!r} in {p!r}"
        error = error.format(p=format(path), r=pattern)
        raise exceptions.NoVersionFoundError(error)
    versions = []
    for match in matches:
        # File stems look like "<type>_<v>_<v>_..."; split type from version.
        name = match.stem
        vparts = name.split("_")
        vtype = vparts.pop(0)
        vparts = utils.versions.split_ver(vparts)
        vstr = utils.versions.join_ver(vparts)
        versions.append(
            {
                "ver_str": vstr,
                "ver_parts": vparts,
                "api_type": vtype,
                "module_file": name,
                "module_path": match,
            }
        )
    # Newest version first.
    versions = sorted(versions, key=lambda x: x["ver_parts"], reverse=True)
    return versions
def convert_dict_time_format(data: dict, keys: list):
    """
    Convert dictionary data values time format, in place.

    Args:
        data (dict): Data.
        keys (list): Keys list to convert
    """
    for key in keys:
        if data.get(key):
            # Replace the last two characters with 'Z' before parsing
            # (presumably trimming a sub-second/offset suffix — confirm
            # against the API's DATE_FORMAT).
            str_time = data.get(key)[:-2] + 'Z'  # type: ignore
            iso_time = FormatIso8601(datetime.strptime(str_time, DATE_FORMAT))
            data[key] = iso_time
def auto_update_library(sync_with_mylist, silent):
    """
    Perform an auto update of the exported items to Kodi library,
    so check if there is new seasons/episodes.

    If sync_with_mylist is enabled the Kodi library will be also synchronized
    with the Netflix "My List".

    :param sync_with_mylist: True to enable sync with My List
    :param silent: don't display user interface while performing an operation
    :return: None
    """
    # Guard: only one auto-update may run at a time.
    if _is_auto_update_library_running():
        return
    execute_lib_tasks_method = execute_library_tasks_silently if silent else execute_library_tasks
    common.info(
        'Starting auto update library - check updates for tv shows (sync with My List is {})',
        'ENABLED' if sync_with_mylist else 'DISABLED')
    g.SHARED_DB.set_value('library_auto_update_is_running', True)
    g.SHARED_DB.set_value('library_auto_update_start_time', datetime.now())
    try:
        videoids_to_update = []
        # Get the list of the exported items to Kodi library
        exported_tvshows_videoids_values = g.SHARED_DB.get_tvshows_id_list()
        exported_movies_videoids_values = g.SHARED_DB.get_movies_id_list()
        if sync_with_mylist:
            # Get My List videoids of the chosen profile
            # Use make_http_call instead make_http because call AddonSignals on same instance makes problems
            mylist_video_id_list, mylist_video_id_list_type = common.make_http_call(
                'get_mylist_videoids_profile_switch', None)
            # Check if tv shows have been removed from the My List
            for videoid_value in exported_tvshows_videoids_values:
                if unicode(videoid_value) in mylist_video_id_list:
                    continue
                # The tv show no more exist in My List so remove it from library
                videoid = common.VideoId.from_path([common.VideoId.SHOW, videoid_value])
                execute_lib_tasks_method(videoid, [remove_item])
            # Check if movies have been removed from the My List
            for videoid_value in exported_movies_videoids_values:
                if unicode(videoid_value) in mylist_video_id_list:
                    continue
                # The movie no more exist in My List so remove it from library
                videoid = common.VideoId.from_path([common.VideoId.MOVIE, videoid_value])
                execute_lib_tasks_method(videoid, [remove_item])
            # Add missing tv shows / movies of My List to library
            for index, video_id in enumerate(mylist_video_id_list):
                if (int(video_id) not in exported_tvshows_videoids_values and
                        int(video_id) not in exported_movies_videoids_values):
                    videoids_to_update.append(
                        common.VideoId(
                            **{('movieid' if (mylist_video_id_list_type[index] == 'movie') else 'tvshowid'): video_id}))
        # Add the exported tv shows to be updated to the list..
        tvshows_videoids_to_upd = [
            common.VideoId.from_path([common.VideoId.SHOW, videoid_value]) for
            videoid_value in g.SHARED_DB.get_tvshows_id_list(VidLibProp['exclude_update'], False)
        ]
        # ..and avoids any duplication caused by possible unexpected errors
        videoids_to_update.extend(list(set(tvshows_videoids_to_upd) - set(videoids_to_update)))
        # Add missing tv shows/movies or update existing tv shows
        _update_library(videoids_to_update, exported_tvshows_videoids_values, silent)
        common.debug('Auto update of the library completed')
        g.SHARED_DB.set_value('library_auto_update_is_running', False)
        if not g.ADDON.getSettingBool('lib_auto_upd_disable_notification'):
            ui.show_notification(common.get_local_string(30220), time=5000)
        common.debug('Notify service to communicate to Kodi of update the library')
        common.send_signal(common.Signals.LIBRARY_UPDATE_REQUESTED)
    except Exception:  # pylint: disable=broad-except
        # Best-effort: log the failure and clear the running flag so the
        # next scheduled update is not blocked.
        import traceback
        common.error('An error has occurred in the library auto update')
        common.error(g.py2_decode(traceback.format_exc(), 'latin-1'))
        g.SHARED_DB.set_value('library_auto_update_is_running', False)
def test_short_equals_forms():
    """Check parsed arguments with one character options with =value.

    Exercises -s/-u/-d/--skip in their '=value' spelling and verifies
    they land in the expected parsed fields.
    """
    line = 'doc/*.md --skip="Floats" -s=Cherries -u=MyTEXT -d=CIDER --setup-doctest --fail-nocode'
    args = parse_collect_line(parser, line)
    assert args == {
        "file_glob": "doc/*.md",
        "skips": ["Floats", "Cherries"],
        "fail_nocode": True,
        "setup": "MyTEXT",
        "teardown": "CIDER",
        "setup_doctest": True,
    }
def model_flux(parameters_dict, xfibre, yfibre, wavelength, model_name):
    """Return n_fibre X n_wavelength array of model flux values.

    Converts the parameter dict to the array layout expected by
    ``moffat_flux`` and evaluates the Moffat profile at each fibre position.
    """
    parameters_array = parameters_dict_to_array(parameters_dict, wavelength,
                                                model_name)
    return moffat_flux(parameters_array, xfibre, yfibre)
def new_rnn_layer(cfg, num_layer):
    """Creates new RNN layer for each parameter depending on whether it is bidirectional LSTM or not.

    Uses the fast LSTM implementation backed by CuDNN if a GPU is available.

    Note: The normal LSTMs utilize sigmoid recurrent activations so as to retain compatibility CuDNNLSTM:
    see the following github issue for more details: https://github.com/keras-team/keras/issues/8860

    :param cfg: configuration of CharGen instance
    :param num_layer: ordinal number of the rnn layer being built
    :return: 3D tensor if return sequence is True
    """
    # NOTE(review): relies on the private Keras API
    # K.tensorflow_backend._get_available_gpus() — breaks on newer TF/Keras;
    # confirm the pinned framework version.
    gpu_no = len(K.tensorflow_backend._get_available_gpus())
    if gpu_no > 0:
        print('GPU is available...')
        if cfg['bidirectional']:
            return Bidirectional(CuDNNLSTM(cfg['rnn_size'],
                                           return_sequences=True),
                                 name='rnn_{}'.format(num_layer))

        return CuDNNLSTM(cfg['rnn_size'],
                         return_sequences=True,
                         name='rnn_{}'.format(num_layer))
    else:
        print('No GPU available...')
        if cfg['bidirectional']:
            return Bidirectional(LSTM(cfg['rnn_size'],
                                      return_sequences=True,
                                      recurrent_activation='sigmoid'),
                                 name='rnn_{}'.format(num_layer))

        return LSTM(cfg['rnn_size'],
                    return_sequences=True,
                    recurrent_activation='sigmoid',
                    name='rnn_{}'.format(num_layer))
def firfreqz(h, omegas):
    """Evaluate frequency response of an FIR filter at discrete frequencies.

    Parameters
    h: array_like
        FIR filter coefficient array for numerator polynomial.
        e.g. H(z) = 1 + a*z^-1 + b*z^-2
             h = [1, a, b]
    omegas: np.ndarray
        Angular frequencies (radians/sample); the response has this shape.
    """
    # H(e^{jw}) = sum_k h[k] * e^{-jwk}, accumulated term by term.
    response = np.zeros(omegas.shape, dtype='complex128')
    for power, coeff in enumerate(h):
        response += coeff * np.exp(-1j * omegas * power)
    return response
def write_np2pickle(output_fp: str, array, timestamps: list) -> bool:
    """
    Serialize a Heimann HTPA NumPy sequence and its timestamps to a pickle file.

    Parameters
    ----------
    output_fp : str
        Filepath to destination file, including the file name.
    array : np.array
        Temperature distribution sequence, shaped [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.
    """
    ensure_parent_exists(output_fp)
    payload = (array, timestamps)
    with open(output_fp, "wb") as handle:
        pickle.dump(payload, handle)
    return True
def run(
    client_id_: str, client_secret_: str, server_class=HTTPServer, handler_class=S, port=8080
) -> str:
    """
    Generates a Mapillary OAuth url and prints to screen as well as opens it automatically in a browser. Declares some
    global variables to pull data from the HTTP server through the GET endpoint.

    :param client_id_: Mapillary OAuth client id.
    :param client_secret_: Mapillary OAuth client secret.
    :param server_class: HTTP server class used to receive the OAuth redirect.
    :param handler_class: request handler class for the callback endpoint.
    :param port: local port to listen on for the redirect.
    :return: the ``access_token`` global as populated during the flow.
    """
    # These global variables are defined so that we can pass data to / get data from the GET endpoint
    global client_id
    global client_secret
    global access_token
    client_id = client_id_
    client_secret = client_secret_
    server_address = ("localhost", port)
    httpd = server_class(server_address, handler_class)
    logging.info("Starting httpd and opening Mapillary to authenticate...")
    try:
        # Print the OAuth link to console and also tries to open it directly in the browser
        auth_url = AUTH_URL.format(client_id)
        logging.info(
            "Please authenticate (if browser didn't automatically open): {}".format(auth_url)
        )
        webbrowser.open_new_tab(auth_url)
        # NOTE(review): serve_forever() only returns via an exception or a
        # shutdown triggered elsewhere; presumably the handler (S) sets the
        # access_token global and stops the server -- confirm in its code.
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
    logging.info("Stopping httpd...")
    return access_token
def sort_by_rank_change(val):
    """
    Sort key: magnitude of a node's rank change.

    :param val: node mapping containing a "rank_change" entry
    :return: the absolute rank-change value as a float
    """
    change = val["rank_change"]
    return abs(float(change))
def ping():
    """Health-check endpoint: always responds with HTTP 200."""
    return flask.Response(response='\n', status=200, mimetype='application/json')
def lambda_handler(event=None, context=None):
    """Entry point for AWS Lambda.

    Logs the incoming event, delegates to ``actions`` and returns its
    response.  Any exception is logged with its traceback and re-raised so
    Lambda records the invocation as failed.

    :param event: the Lambda event payload (JSON-serializable).
    :param context: the Lambda context object (unused).
    :return: whatever ``actions(event)`` returns.
    """
    print(f"EVENT: {json.dumps(event)}")
    try:
        return actions(event)
    except Exception:
        # logging.exception records the full traceback at ERROR level; the
        # original logging.debug hid the failure unless debug was enabled.
        logging.exception("Unhandled exception while processing event")
        raise
def add_gtid_ranges_to_executed_set(existing_set, *new_ranges):
    """Merge GTID ranges into an executed-set dict.

    ``existing_set`` is a dict like {"uuid1": [[1, 4], [7, 12]], "uuid2": [[1, 100]]}
    (as returned by e.g. parse_gtid_range_string) and each positional argument is a
    list like [{"server_uuid": "uuid", "start": 1, "end": 3}, ...].  Returns a new
    dict containing the minimal representation of both the old and new ranges.
    """
    # Flatten the dict form into the same record shape as the new ranges.
    combined = [
        {"end": rng[1], "server_uuid": server_uuid, "start": rng[0]}
        for server_uuid, ranges in existing_set.items()
        for rng in ranges
    ]
    for extra in new_ranges:
        combined.extend(extra)
    return partition_sort_and_combine_gtid_ranges(combined)
def fetch_rgb(img):
    """Display an image and collect the RGB values of clicked pixels.

    Left-clicking a pixel prints its R, G, B values to the terminal and
    records them; press any key to close the window and return.

    :param img: input image (BGR channel order, as loaded by cv2)
    :type img: cv2 image
    :return: list of [red, green, blue] values, one entry per click
    :rtype: list
    """
    rgb_list = []

    def click_event(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            # OpenCV stores pixels as BGR: index 2 is red, 0 is blue.
            red = img[y, x, 2]
            green = img[y, x, 1]
            blue = img[y, x, 0]
            print(red, green, blue)  # prints to command line
            rgb_list.append([red, green, blue])
            cv2.imshow('original', img)

    cv2.imshow('original', img)
    cv2.setMouseCallback("original", click_event)
    cv2.waitKey(0)
    # Bug fix: the original referenced cv2.destroyAllWindows without calling
    # it, so the display window was never closed.
    cv2.destroyAllWindows()
    return rgb_list
def load_checkpoint(model, pth_file, check=False):
    """Load network weights from a checkpoint file into ``model``.

    Accepts checkpoints that store weights under either the 'model' or the
    'state_dict' key.  Keys absent from the model and all fully-connected
    ('fc') layer weights are skipped.

    :param model: torch.nn.Module that receives the weights.
    :param pth_file: path to the checkpoint (.pth) file.
    :param check: if True, print key counts and any model keys missing
        from the checkpoint.
    """
    # Bug fix: the original map_location always called storage.cuda(),
    # which crashes on CPU-only machines; fall back to CPU when CUDA is
    # unavailable.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    checkpoint = torch.load(pth_file, map_location=device)
    if 'model' in checkpoint.keys():
        pretrained_dict = checkpoint['model']
    else:
        pretrained_dict = checkpoint['state_dict']
    model_dict = model.state_dict()
    if check:
        print(len(model_dict.keys()), len(pretrained_dict.keys()))
        missed = [name for name in model_dict.keys() if name not in pretrained_dict.keys()]
        print(missed)
    # Drop keys the model doesn't have and all fully-connected layers.
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict and 'fc' not in k}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    print('Pre-trained model weight loaded')
def index(context):
    """Create indexes for the database."""
    log.info("Running scout index")
    # The adapter stored on the click context owns index creation.
    context.obj['adapter'].load_indexes()
def _get_date_filter_consumer(field):
    """Build a consumer for ``<field>.{lt, lte, gt, gte}=<ISO DATE>`` params."""
    date_filter = make_date_filter(
        functools.partial(django_date_filter, field_name=field))

    def _date_consumer(key, value):
        # Only handle keys of the form "<field>.<qualifier>".
        if '.' not in key or key.split(".")[0] != field:
            return {}
        _, qualifier = key.split(".", maxsplit=1)
        try:
            return date_filter(qualifier, value)
        except ValueError as e:
            raise InvalidFilterError(str(e))

    return _date_consumer
def plot_matches(ax, image1, image2, keypoints1, keypoints2, matches, title='',
                 keypoints_color='b', matches_color=None, only_matches=False):
    """Plot matched features.
    Parameters
    ----------
    ax : matplotlib.axes.Axes
        Matches and image are drawn in this ax.
    image1 : (N, M [, 3]) array
        First grayscale or color image.
    image2 : (N, M [, 3]) array
        Second grayscale or color image.
    keypoints1 : (K1, 2) array
        First keypoint coordinates as ``(row, col)``.
    keypoints2 : (K2, 2) array
        Second keypoint coordinates as ``(row, col)``.
    matches : (Q, 2) array
        Indices of corresponding matches in first and second set of
        descriptors, where ``matches[:, 0]`` denote the indices in the first
        and ``matches[:, 1]`` the indices in the second set of descriptors.
    title : str, optional
        Title for the axes.
    keypoints_color : matplotlib color, optional
        Color for keypoint locations.
    matches_color : matplotlib color, optional
        Color for lines which connect keypoint matches. By default the
        color is chosen randomly.
    only_matches : bool, optional
        Whether to only plot matches and not plot the keypoint locations.
    """
    image1 = img_as_float(image1)
    image2 = img_as_float(image2)
    # Pad both images to a common height/width so they can be shown
    # side by side.
    new_shape1 = list(image1.shape)
    new_shape2 = list(image2.shape)
    if image1.shape[0] < image2.shape[0]:
        new_shape1[0] = image2.shape[0]
    elif image1.shape[0] > image2.shape[0]:
        new_shape2[0] = image1.shape[0]
    if image1.shape[1] < image2.shape[1]:
        new_shape1[1] = image2.shape[1]
    elif image1.shape[1] > image2.shape[1]:
        new_shape2[1] = image1.shape[1]
    # Bug fix: the original compared a list to a tuple (always unequal), so
    # both images were needlessly copied even when no padding was required.
    if tuple(new_shape1) != image1.shape:
        new_image1 = np.zeros(new_shape1, dtype=image1.dtype)
        new_image1[:image1.shape[0], :image1.shape[1]] = image1
        image1 = new_image1
    if tuple(new_shape2) != image2.shape:
        new_image2 = np.zeros(new_shape2, dtype=image2.dtype)
        new_image2[:image2.shape[0], :image2.shape[1]] = image2
        image2 = new_image2
    image = np.concatenate([image1, image2], axis=1)
    offset = image1.shape
    if not only_matches:
        # NOTE(review): keypoints are only drawn when BOTH sets are
        # non-empty; this differs from upstream skimage, which draws each
        # set independently -- confirm whether that is intentional.
        if keypoints1.shape[0]:
            if keypoints2.shape[0]:
                ax.scatter(keypoints1[:, 1], keypoints1[:, 0],
                           facecolors='none', edgecolors=keypoints_color)
                ax.scatter(keypoints2[:, 1] + offset[1], keypoints2[:, 0],
                           facecolors='none', edgecolors=keypoints_color)
    ax.imshow(image, interpolation='nearest', cmap='gray')
    ax.axis((0, 2 * offset[1], offset[0], 0))
    ax.set_title(title)
    for i in range(matches.shape[0]):
        idx1 = matches[i, 0]
        idx2 = matches[i, 1]
        if matches_color is None:
            # Bug fix: matplotlib expects an RGB color as a 1-D sequence of
            # length 3; np.random.rand(3, 1) has shape (3, 1) and is rejected
            # by recent matplotlib versions.
            color = np.random.rand(3)
        else:
            color = matches_color
        if keypoints1.shape[0] and keypoints2.shape[0]:
            ax.plot((keypoints1[idx1, 1], keypoints2[idx2, 1] + offset[1]),
                    (keypoints1[idx1, 0], keypoints2[idx2, 0]),
                    '-', color=color)
def list_devices(AccessToken=None, Limit=None, PaginationToken=None):
    """
    Lists the devices.
    See also: AWS API Documentation

    :example: response = client.list_devices(
        AccessToken='string',
        Limit=123,
        PaginationToken='string'
    )

    :type AccessToken: string
    :param AccessToken: [REQUIRED]
        The access tokens for the request to list devices.

    :type Limit: integer
    :param Limit: The limit of the device request.

    :type PaginationToken: string
    :param PaginationToken: The pagination token for the list request.

    :rtype: dict
    :return: {
        'Devices': [
            {
                'DeviceKey': 'string',
                'DeviceAttributes': [
                    {
                        'Name': 'string',
                        'Value': 'string'
                    },
                ],
                'DeviceCreateDate': datetime(2015, 1, 1),
                'DeviceLastModifiedDate': datetime(2015, 1, 1),
                'DeviceLastAuthenticatedDate': datetime(2015, 1, 1)
            },
        ],
        'PaginationToken': 'string'
    }
    """
    # NOTE(review): documentation-only stub (body is ``pass``); the real
    # implementation is presumably generated at runtime by the AWS client.
    pass
def data_path(fname):
    """
    Resolve ``fname`` against this module's directory.

    This ensures that relative filenames to data files can be used from
    all modules, e.g. model.json -> .../src/data/model.json
    """
    base_dir = dirname(realpath(__file__))
    return join(base_dir, fname)
def _parse_special_functions(sym: sp.Expr, toplevel: bool = True) -> sp.Expr:
    """
    Recursively checks the symbolic expression for functions which have to
    be parsed in a special way, such as piecewise functions
    :param sym:
        symbolic expressions
    :param toplevel:
        as this is called recursively, are we in the top level expression?
    """
    # Recurse into the arguments first.  A 'piecewise' argument of a
    # 'piecewise' node is deliberately left untouched here: nested SBML
    # piecewise expressions are denested as a group further below.
    args = tuple(arg if arg.__class__.__name__ == 'piecewise'
                 and sym.__class__.__name__ == 'piecewise'
                 else _parse_special_functions(arg, False)
                 for arg in sym.args)
    # SBML/MathML function names mapped onto their sympy equivalents.
    fun_mappings = {
        'times': sp.Mul,
        'xor': sp.Xor,
        'abs': sp.Abs,
        'min': sp.Min,
        'max': sp.Max,
        'ceil': sp.functions.ceiling,
        'floor': sp.functions.floor,
        'factorial': sp.functions.factorial,
        'arcsin': sp.functions.asin,
        'arccos': sp.functions.acos,
        'arctan': sp.functions.atan,
        'arccot': sp.functions.acot,
        'arcsec': sp.functions.asec,
        'arccsc': sp.functions.acsc,
        'arcsinh': sp.functions.asinh,
        'arccosh': sp.functions.acosh,
        'arctanh': sp.functions.atanh,
        'arccoth': sp.functions.acoth,
        'arcsech': sp.functions.asech,
        'arccsch': sp.functions.acsch,
    }
    if sym.__class__.__name__ in fun_mappings:
        return fun_mappings[sym.__class__.__name__](*args)
    elif sym.__class__.__name__ == 'piecewise' \
            or isinstance(sym, sp.Piecewise):
        if isinstance(sym, sp.Piecewise):
            # this is sympy piecewise, can't be nested
            denested_args = args
        else:
            # this is sbml piecewise, can be nested
            denested_args = _denest_piecewise(args)
        return _parse_piecewise_to_heaviside(denested_args)
    # An empty MathML <plus/> evaluates to zero.
    if sym.__class__.__name__ == 'plus' and not sym.args:
        return sp.Float(0.0)
    if isinstance(sym, (sp.Function, sp.Mul, sp.Add, sp.Pow)):
        # NOTE(review): writes the private ``_args`` attribute directly to
        # splice the converted arguments back in -- relies on sympy internals.
        sym._args = args
    elif toplevel and isinstance(sym, BooleanAtom):
        # Replace boolean constants by numbers so they can be differentiated
        # must not replace in Piecewise function. Therefore, we only replace
        # it the complete expression consists only of a Boolean value.
        sym = sp.Float(int(bool(sym)))
    return sym
def write_model_inputs(scenario_directory, scenario_id, subscenarios, subproblem, stage, conn):
    """
    Get inputs from database and write out the model input
    rps_targets.tab file.

    :param scenario_directory: string, the scenario directory
    :param scenario_id: the scenario ID used to query the database
    :param subscenarios: SubScenarios object with all subscenario info
    :param subproblem:
    :param stage:
    :param conn: database connection
    :return:
    """
    rps_targets, lz_mapping = get_inputs_from_database(
        scenario_id, subscenarios, subproblem, stage, conn)

    with open(os.path.join(scenario_directory, str(subproblem), str(stage),
                           "inputs", "rps_targets.tab"),
              "w", newline="") as rps_targets_tab_file:
        writer = csv.writer(rps_targets_tab_file,
                            delimiter="\t", lineterminator="\n")
        # Write header
        writer.writerow(
            ["rps_zone", "period", "rps_target_mwh", "rps_target_percentage"]
        )
        for row in rps_targets:
            # It's OK if targets are not specified; they default to 0
            replace_nulls = ["." if i is None else i for i in row]
            writer.writerow(replace_nulls)

    # Write the RPS zone to load zone map file for the RPS percent target
    # only if there are any mappings
    rps_lz_map_list = list(lz_mapping)
    if rps_lz_map_list:
        with open(os.path.join(scenario_directory, str(subproblem), str(stage),
                               "inputs", "rps_target_load_zone_map.tab"),
                  "w", newline="") as rps_lz_map_tab_file:
            writer = csv.writer(rps_lz_map_tab_file,
                                delimiter="\t", lineterminator="\n")
            # Write header
            writer.writerow(["rps_zone", "load_zone"])
            for row in rps_lz_map_list:
                writer.writerow(row)
def load_oxfordiiitpets(breed=True) -> core.SceneCollection:
    """Load the Oxford-IIIT pets dataset. It is not divided into
    train, validation, and test because it appeared some files were missing
    from the trainval and test set documents
    (e.g., english_cocker_spaniel_164).
    Args:
        breed: Whether to use the breeds as the class labels. If False, the
            class labels are limited to dog or cat.
    Returns:
        A scene collection containing the dataset
    """
    # Download/cache the image archive; extract_check_fn guards against a
    # partially extracted cache by counting the expected 7390 JPEGs.
    image_dir = utils.get_file(
        origin="http://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz",
        file_hash="67195c5e1c01f1ab5f9b6a5d22b8c27a580d896ece458917e61d459337fa318d",
        cache_subdir=path.join("datasets", "oxfordiiitpets"),
        hash_algorithm="sha256",
        extract=True,
        archive_format="tar",
        extract_check_fn=lambda directory: len(
            glob(path.join(directory, "images", "*.jpg"))
        )
        == 7390,
    )
    # Download/cache the VOC-style XML annotations; the check expects 3686
    # XML files, so only that subset of images carries annotations.
    annotations_dir = utils.get_file(
        origin="http://www.robots.ox.ac.uk/~vgg/data/pets/data/annotations.tar.gz",
        file_hash="52425fb6de5c424942b7626b428656fcbd798db970a937df61750c0f1d358e91",
        cache_subdir=path.join("datasets", "oxfordiiitpets"),
        hash_algorithm="sha256",
        extract=True,
        archive_format="tar",
        extract_check_fn=lambda directory: len(
            glob(path.join(directory, "annotations", "xmls", "*.xml"))
        )
        == 3686,
    )
    filepaths = glob(path.join(annotations_dir, "annotations", "xmls", "*.xml"))
    image_dir = path.join(image_dir, "images")
    # First load everything with the coarse dog/cat annotation config.
    collection = load_voc(
        filepaths=filepaths,
        annotation_config=core.AnnotationConfiguration(["dog", "cat"]),
        image_dir=image_dir,
    )
    if not breed:
        return collection
    assert all(
        len(s.annotations) in [1, 2] for s in collection.scenes
    ), "An error occurred handling pets dataset"
    # The breed is encoded in the annotation filename, e.g.
    # "english_cocker_spaniel_164.xml" -> "english_cocker_spaniel"
    # (everything before the trailing numeric index).
    labels = [
        "_".join(path.splitext(path.split(f)[1])[0].split("_")[:-1]) for f in filepaths
    ]
    annotation_config = core.AnnotationConfiguration(sorted(set(labels)))
    # Re-label every annotation in each scene with its breed category.
    return core.SceneCollection(
        scenes=[
            scene.assign(
                annotations=[
                    a.assign(category=annotation_config[label])
                    for a in scene.annotations
                ],
                annotation_config=annotation_config,
            )
            for scene, label in zip(collection.scenes, labels)
        ],
        annotation_config=annotation_config,
    )
def _do_process_purpose(action):
    """Does all the 'hard work' in processing the purpose.

    Parses the free-text ``purpose`` field for dividends ('div'),
    bonuses ('bon') and splits ('spl') and returns a list of CorpAction
    records, each of the form:
    symbol, ex_date(yyyy-mm-dd), purpose(d/b/s), ratio(for b/s), value(for d)
    """
    symbol = action.sym.upper()
    purpose = action.purpose.lower()
    ex_date = action.ex_date
    fv = float(action.fv)
    actions = []
    if purpose.find('div') >= 0:
        for x in _div_regex.finditer(purpose):
            for _, v in x.groupdict().items():
                # Strip any 'Rs.'-style currency prefix before parsing.
                v = re.sub(_rsr_regex, '', v)
                for y in _num_per_r.finditer(v):
                    z = y.group()
                    if z.find('%') > 0:
                        # A percentage dividend is relative to face value.
                        div = float(z.replace('%', '')) * (fv/100)
                    else:
                        div = float(z)
                    actions.append(CorpAction(symbol, ex_date, 'D', 1.0, div))
    if purpose.find('bon') >= 0:
        y = _bonus_regex.search(purpose)
        if y:
            n, d = float(y.group(1)), float(y.group(2))
            ratio = n / (n+d)
            action = CorpAction(symbol, ex_date, 'B', ratio, 0.0)
            actions.append(action)
            # Bug fix: the original logged str(CorpAction) -- the class
            # itself -- instead of the parsed action instance.
            module_logger.debug("CorpAction: %s", str(action))
    if purpose.find('spl') >= 0:
        y = _split_regex.search(purpose)
        if y:
            d, n = float(y.group(1)), float(y.group(2))
            ratio = n / d
            action = CorpAction(symbol, ex_date, 'S', ratio, 0.0)
            actions.append(action)
            # Bug fix: log the instance, not the class (see above).
            module_logger.debug("CorpAction: %s", str(action))
    return actions
def test_multiple_input(input_gridsize=128, output_gridsize=64):
    """
    Tests that passing an input grid where every input_gridsize/output_gridsize
    cell is filled will a value of (input_gridsize/output_gridsize)^3 produces
    a grid that is homogenously filled with values of 1.0.
    The test will fail if the output grid is not homogenously filled with
    values of 1.0.
    We also run a suite of unit tests (see `unit_tests` function) that will
    return a RuntimeError if they're not passed.
    Parameters
    ----------
    input_gridsize, output_gridsize : int, optional
        1D size of the input/output grids. Default : 128 and 64.
        ..note::
            `output_gridsize` must be an integer multiple (and smaller) than
            `input_gridsize`. If not, a `RuntimeError` will be raised by
            `downsample.downsample_grid`.
    Returns
    ----------
    None.
    Errors
    ----------
    RuntimeError
        Raised if the output grid contains values that are not close to 1
        (within tolerance defined by the global variable `tol`).
    """
    # Ratio in grid size.
    conversion = int(input_gridsize / output_gridsize)
    input_grid = np.zeros((input_gridsize, input_gridsize, input_gridsize))
    # We fill every conversion-th cell with a value of conversion cubed.
    for (i, j, k) in itertools.product(range(output_gridsize),
                                       range(output_gridsize),
                                       range(output_gridsize)):
        input_grid[i*conversion, j*conversion, k*conversion] = conversion**3
    # Run the downsampler.
    output_grid = downsample_grid(input_grid, output_gridsize)
    # Find any instances where the output grid is not 1.
    # Bug fix: the original used `&`, requiring a value to be BOTH
    # <= 1-tol AND >= 1+tol -- impossible, so the check could never fail.
    # A cell is wrong when it lies OUTSIDE the tolerance band.
    w = np.where((output_grid <= 1.0-tol) | (output_grid >= 1.0+tol))[0]
    if len(w) > 0:
        print("We tested an input grid with every {0} cell containing a value "
              "of {1}. We expected the output grid to contain values of 1.0 "
              "as well.".format(conversion, conversion**3))
        print("However cells {0} had values {1}".format(w, output_grid[w]))
        raise RuntimeError
    # Now run some unit tests that check some properties.
    unit_tests(output_grid, output_gridsize)
def get_airflow_config(version, timestamp, major, minor, patch, date, rc):
    """Return a dict of the configuration for the Pipeline."""
    config = dict(AIRFLOW_CONFIG)
    if version is None:
        config['VERSION'] = config['VERSION'].format(
            major=major, minor=minor, patch=patch, date=date, rc=rc)
    else:
        config['VERSION'] = version
    config['MFEST_COMMIT'] = config['MFEST_COMMIT'].format(timestamp=timestamp)
    # This works because str.format ignores keyword args that aren't present.
    for key, template in config.items():
        if key in ('VERSION', 'MFEST_COMMIT'):
            continue
        config[key] = template.format(VERSION=config['VERSION'])
    return config
def hash(object: Any) -> _int:
    """
    Returns the hash value of the object (if it has one).

    NOTE(review): typeshed-style stub signature with no body; it shadows
    the built-in ``hash``, presumably by design in this stub module.
    """
def joint(absolute: bool = False,
          angleX: float = 1.0,
          angleY: float = 1.0,
          angleZ: float = 1.0,
          assumePreferredAngles: bool = False,
          automaticLimits: bool = False,
          children: bool = False,
          component: bool = False,
          degreeOfFreedom: str = "",
          exists: str = "",
          limitSwitchX: bool = False,
          limitSwitchY: bool = False,
          limitSwitchZ: bool = False,
          limitX: Tuple[float, float] = (1.0, 1.0),
          limitY: Tuple[float, float] = (1.0, 1.0),
          limitZ: Tuple[float, float] = (1.0, 1.0),
          name: str = "",
          orientJoint: str = "",
          orientation: Tuple[float, float, float] = (1.0, 1.0, 1.0),
          position: Tuple[float, float, float] = (1.0, 1.0, 1.0),
          radius: float = 1.0,
          relative: bool = False,
          rotationOrder: str = "",
          scale: Tuple[float, float, float] = (1.0, 1.0, 1.0),
          scaleCompensate: bool = False,
          scaleOrientation: Tuple[float, float, float] = (1.0, 1.0, 1.0),
          secondaryAxisOrient: str = "",
          setPreferredAngles: bool = False,
          stiffnessX: float = 1.0,
          stiffnessY: float = 1.0,
          stiffnessZ: float = 1.0,
          symmetry: bool = False,
          symmetryAxis: str = "",
          zeroScaleOrient: bool = False) -> None:
    """Create, edit, or query joints in Maya (``maya.cmds.joint`` stub).

    With no object specified, the currently selected DAG objects are used.
    Edit and query use the standard -e and -q flags of the Maya command.

    Bug fix: the original defaults used ``tuple(1.0, 1.0)`` /
    ``tuple(1.0, 1.0, 1.0)``, which raises ``TypeError`` at import time
    because ``tuple()`` accepts at most one iterable argument; plain tuple
    literals are used instead (same values, importable).

    Selected flags (see the Maya command reference for the full list):
        absolute / relative: interpret ``position`` in world or parent space.
        angleX / angleY / angleZ: rotation angle about each axis.
        degreeOfFreedom: axes usable by IK, e.g. "x", "yz", "xyz".
        exists: query whether the named joint exists.
        limitX / limitY / limitZ: (lower, upper) rotation limits per axis;
            setting them also enables the limits.
        limitSwitchX / Y / Z: enable rotation limits per axis.
        name: name of the joint.
        orientJoint / secondaryAxisOrient / orientation: control the joint
            orientation relative to the child joint and the scene axes.
        position: joint centre (parent-relative or world, see above).
        radius: display radius of the joint.
        rotationOrder: one of xyz, yzx, zxy, zyx, yxz, xzy.
        scale / scaleOrientation / scaleCompensate: scaling behaviour.
        stiffnessX / Y / Z: IK stiffness per axis (0-100).
        symmetry / symmetryAxis: create a mirrored joint about the given axes.
        zeroScaleOrient: zero the scale orientation, compensating via joint
            orient / translation edits.

    Returns:
        None.  (In query mode the underlying Maya command's return type
        depends on the queried flag; this stub always returns None.)
    """
    pass
def find_duplicates(treeroot, tbl=None):
    """
    Find duplicate files in a directory.

    Walks ``treeroot`` letting ``file_walker`` populate ``tbl`` (a mapping
    of file key -> list of paths), then returns only the entries that have
    more than one path.
    """
    if tbl is None:
        tbl = {}
    # Bug fix: os.path.walk was removed in Python 3.  Emulate its callback
    # contract -- file_walker(arg, dirname, names) with names listing the
    # directory's entries -- using os.walk.
    for dirpath, dirnames, filenames in os.walk(treeroot):
        file_walker(tbl, dirpath, dirnames + filenames)
    return {k: v for k, v in tbl.items() if len(v) > 1}
def attach_capping(mol1, mol2):
    """Connect every N-terminal of mol1 with the desired capping mol2.

    N-terminals are identified by the atom-map label ``:2`` on nitrogen
    atoms and C-terminals by the label ``:1`` on carbons (set upstream
    in the SMARTS/SMILES that produced the molecules).

    Arguments:
        mol1 {rdKit mol object} -- first molecule to be connected
        mol2 {rdKit mol object} -- second molecule to be connected - chosen N-capping

    Returns:
        rdKit mol object -- mol1 updated (connected with mol2, one or more)
    """
    count = 0

    # detect all the N-terminals in mol1 (map label :2 on N)
    for atom in mol1.GetAtoms():
        atom.SetProp('Cterm', 'False')
        if atom.GetSmarts() == '[N:2]' or atom.GetSmarts() == '[NH2:2]' or atom.GetSmarts() == '[NH:2]':
            count += 1
            atom.SetProp('Nterm', 'True')
        else:
            atom.SetProp('Nterm', 'False')

    # detect all the C-terminals in mol2 (map label :1 on C; it should be one)
    for atom in mol2.GetAtoms():
        atom.SetProp('Nterm', 'False')
        if atom.GetSmarts() == '[C:1]' or atom.GetSmarts() == '[CH:1]':
            atom.SetProp('Cterm', 'True')
        else:
            atom.SetProp('Cterm', 'False')

    # mol2 is added once per N-terminal of mol1; each pass re-combines the
    # (growing) mol1 with a fresh copy of mol2 and bonds one N/C pair
    for i in range(count):
        combo = rdmolops.CombineMols(mol1, mol2)
        Nterm = []
        Cterm = []

        # save in two different lists the indices of the atoms to connect
        for atom in combo.GetAtoms():
            if atom.GetProp('Nterm') == 'True':
                Nterm.append(atom.GetIdx())
            if atom.GetProp('Cterm') == 'True':
                Cterm.append(atom.GetIdx())

        # create the amide bond
        edcombo = rdchem.EditableMol(combo)
        edcombo.AddBond(Nterm[0], Cterm[0], order=Chem.rdchem.BondType.SINGLE)
        clippedMol = edcombo.GetMol()

        # remove tags and labels from the atoms which reacted so they are
        # not matched again on the next pass
        clippedMol.GetAtomWithIdx(Nterm[0]).SetProp('Nterm', 'False')
        clippedMol.GetAtomWithIdx(Cterm[0]).SetProp('Cterm', 'False')
        clippedMol.GetAtomWithIdx(Nterm[0]).SetAtomMapNum(0)
        clippedMol.GetAtomWithIdx(Cterm[0]).SetAtomMapNum(0)

        # update the 'core' molecule
        mol1 = clippedMol
    return mol1
def up():
    """Starts Human Lambdas.

    Unpacks the bundled frontend next to this module and launches gunicorn
    serving the Human Lambdas WSGI app on port 8000 (blocks until the
    server exits).
    """
    html = Path(__file__).parent / "html"
    html.mkdir(exist_ok=True)
    html_tgz = Path(__file__).parent / "frontend.tgz"
    shutil.unpack_archive(html_tgz, extract_dir=html)
    click.echo("Human Lambdas web running on http://localhost:8000/")
    gunicorn = Path(sys.executable).parent / "gunicorn"
    cmd = f"{gunicorn.as_posix()} human_lambdas.hl_rest_api.wsgi -b 0.0.0.0:8000 -w 1 -t 1 --timeout 0 --preload"
    click.echo(f"Running {cmd}")
    # Bug fix: the original piped stdin/stdout without ever reading them;
    # once gunicorn filled the stdout pipe buffer the server would block.
    # Inherit the parent's streams instead so logs stay visible.
    subprocess.run(
        cmd,
        shell=True,
        check=True,
    )
def setPSF3Dconstraints(psfConstraints, params, bounds):
    """
    Decipher psf3Dconstraints=[constraint] option and set initial
    guess params and/or bounds accordingly.  Each constraint is a
    string 'n:val' (strict constraint) or 'n:val1,val2' (loose
    constraint), for n=0 (delta), 1 (theta), 2,3 (position), 4 (xy),
    5...6+ellDeg (ellipticity polynomial coefficients) and 7+ellDeg
    ... 8+ellDeg+alphaDeg (alpha polynomial coefficients).

    ``params`` and ``bounds`` are mutated in place.
    """
    for psfConstraint in psfConstraints:
        try:
            n, constraintStr = psfConstraint.split(':')
            n = int(n)
            # Bug fix: map() returns an iterator in Python 3, so the
            # original len(vals) raised TypeError; materialize the list.
            vals = [float(v) for v in constraintStr.split(',')]
            assert len(vals) in (1, 2)
        except (ValueError, AssertionError):
            # Bug fix: report the offending constraint, not the whole list
            # (the original printed psfConstraints).
            print("WARNING: Cannot decipher constraint '%s', discarded" %
                  psfConstraint)
            continue
        else:
            if len(vals) == 1:   # Strict constraint: param = val
                val = vals[0]
                params[n] = val
                bounds[n] = [val, val]
                print("WARNING: Forcing PSF param[%d] to %f" % (n, val))
            else:                # Loose constraint: vmin <= param <= vmax
                vmin, vmax = sorted(vals)
                # Clip the initial guess into the allowed interval.
                params[n] = min(max(params[n], vmin), vmax)
                bounds[n] = [vmin, vmax]
                print("WARNING: Constraining PSF param[%d] in %f,%f" %
                      (n, vmin, vmax))
def gather_point(input, index):
    """
    **Gather Point Layer**

    Output is obtained by gathering entries of X indexed by ``index``
    and concatenating them together, i.e. Out = X[Index].

    .. code-block:: text

        Given:
            X = [[1, 2, 3],
                 [3, 4, 5],
                 [5, 6, 7]]
            Index = [[1, 2]]
        Then:
            Out = [[3, 4, 5],
                   [5, 6, 7]]

    Args:
        input (Variable): The source input with rank>=1; a 3-D tensor
            with shape of [B, N, 3].
        index (Variable): The index input with shape of [B, M].

    Returns:
        output (Variable): The output is a tensor with shape of [B, M].

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 5, 3], dtype='float32')
            index = fluid.data(name='index', shape=[None, 1], dtype='int32')
            output = fluid.layers.gather_point(x, index)
    """
    helper = LayerHelper('gather_point', **locals())
    out = helper.create_variable_for_type_inference(helper.input_dtype())
    helper.append_op(
        type="gather_point",
        inputs={"X": input, "Index": index},
        outputs={"Output": out})
    return out
def _apply_size_dependent_ordering(input_feature, feature_level, block_level,
                                   expansion_size, use_explicit_padding,
                                   use_native_resize_op):
  """Applies Size-Dependent-Ordering when resizing feature maps.

  See https://arxiv.org/abs/1912.01106

  Args:
    input_feature: input feature map to be resized.
    feature_level: the level of the input feature.
    block_level: the desired output level for the block.
    expansion_size: the expansion size for the block.
    use_explicit_padding: Whether to use explicit padding.
    use_native_resize_op: Whether to use native resize op.

  Returns:
    A transformed feature at the desired resolution and expansion size.
  """
  padding = 'VALID' if use_explicit_padding else 'SAME'
  if feature_level < block_level:
    # Feature is larger than the target: downsample first, then project.
    stride = 2**(block_level - feature_level)
    pooled = slim.max_pool2d(
        _maybe_pad(input_feature, use_explicit_padding), [3, 3],
        stride=[stride, stride],
        padding=padding,
        scope='Downsample')
    return slim.conv2d(
        pooled,
        expansion_size, [1, 1],
        activation_fn=None,
        normalizer_fn=slim.batch_norm,
        padding=padding,
        scope='Conv1x1')
  # Feature is at or below the target resolution: project with 1x1 first.
  projected = slim.conv2d(
      input_feature,
      expansion_size, [1, 1],
      activation_fn=None,
      normalizer_fn=slim.batch_norm,
      padding=padding,
      scope='Conv1x1')
  if feature_level == block_level:
    return projected
  # Upsample to reach the block level.
  scale = 2**(feature_level - block_level)
  if use_native_resize_op:
    input_shape = shape_utils.combined_static_and_dynamic_shape(projected)
    return tf.image.resize_nearest_neighbor(
        projected, [input_shape[1] * scale, input_shape[2] * scale])
  return ops.nearest_neighbor_upsampling(projected, scale=scale)
def delete_provisioning_artifact(AcceptLanguage=None, ProductId=None, ProvisioningArtifactId=None):
    """
    Deletes the specified provisioning artifact. This operation will not work on a provisioning artifact associated with a product that has been shared with you, or on the last provisioning artifact associated with a product (a product must have at least one provisioning artifact).
    See also: AWS API Documentation

    :example: response = client.delete_provisioning_artifact(
        AcceptLanguage='string',
        ProductId='string',
        ProvisioningArtifactId='string'
    )

    :type AcceptLanguage: string
    :param AcceptLanguage: The language code to use for this operation. Supported language codes are as follows:
        'en' (English)
        'jp' (Japanese)
        'zh' (Chinese)
        If no code is specified, 'en' is used as the default.

    :type ProductId: string
    :param ProductId: [REQUIRED]
        The product identifier.

    :type ProvisioningArtifactId: string
    :param ProvisioningArtifactId: [REQUIRED]
        The identifier of the provisioning artifact for the delete request.

    :rtype: dict
    :return: {}

    :returns:
        (dict) --
    """
    # NOTE(review): documentation-only stub (body is ``pass``); the real
    # implementation is presumably generated at runtime by the AWS client.
    pass
def distance(s1, s2):
    """Return the Levenshtein (edit) distance between strings *s1* and *s2*.

    Classic two-row dynamic-programming formulation: O(len(s1)*len(s2)) time,
    O(min(len(s1), len(s2))) extra space.
    """
    if len(s1) < len(s2):
        # Keep s2 as the shorter string so the rows stay as small as possible.
        return distance(s2, s1)
    # len(s1) >= len(s2)
    if len(s2) == 0:
        return len(s1)
    # Python 3 fix: xrange no longer exists; range is the lazy sequence now.
    previous_row = range(len(s2) + 1)
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            # previous_row and current_row are one entry longer than s2,
            # hence the j+1 offset for the insertion cost.
            insertions = previous_row[j + 1] + 1
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row
    return previous_row[-1]
def check_file_exists(file_name: str) -> None:
    """Raise FileNotFoundError unless *file_name* is an existing regular file.

    :param file_name: name of the file, including its path
    :raises FileNotFoundError: when nothing readable exists at that path
    """
    if isfile(file_name):
        return
    raise FileNotFoundError(f"Cannot find {file_name}. Check if path is correct!")
def check_diversity(group, L):
    """Return True when *group* satisfies l-diversity.

    Collects the distinct sensitive-attribute strings (built from the last
    field of each referenced gl_data record) and compares their count to L.
    """
    distinct_sa = {list_to_str(gl_data[idx][-1], cmp) for idx in group}
    return len(distinct_sa) >= L
def getEntries(person):
    """ Fetch a Advogato member's diary and return a dictionary in the form
        { date : entry, ... }

    Args:
        person: Advogato username whose diary XML should be downloaded.

    Returns:
        dict mapping each parsed diary date to its entry text; dates or
        entries without a counterpart are paired with None.
    """
    # Local imports: modernised from the Python 2 urllib API.
    from itertools import zip_longest
    from urllib.parse import quote
    from urllib.request import urlopen

    parser = DiaryParser()
    f = urlopen("http://www.advogato.org/person/%s/diary.xml" % quote(person))
    try:
        # Stream in 8 KiB chunks so large diaries are not read twice into memory.
        s = f.read(8192)
        while s:
            parser.feed(s)
            s = f.read(8192)
    finally:
        f.close()
    parser.close()
    # zip_longest replaces the Python 2 idiom map(None, ...): it pairs dates
    # with entries, padding the shorter sequence with None.
    result = {}
    for d, e in zip_longest(parser.dates, parser.entries):
        result[d] = e
    return result
def sum_values(p, K):
    """
    sum the values in ``p``

    ``p`` is a mapping whose values are dense polynomials (coefficient
    lists) over the domain ``K``; the values are accumulated pairwise with
    ``dup_add`` and the resulting coefficient list is reversed in place
    before being returned.
    NOTE(review): ``itervalues`` and ``dup_add`` come from the surrounding
    module (SymPy-style polynomial helpers, presumably) - confirm their
    coefficient-ordering conventions there.
    """
    nv = []
    for v in itervalues(p):
        nv = dup_add(nv, v, K)
    # Flip coefficient order for the caller's expected convention.
    nv.reverse()
    return nv
def _run_doxy(doxy_out_dir, doxyINPUT, is_doxy_recursive):
    """This runs the command "doxygen" on an input and writes the ouput XML files to a temporary directory.

    Args:
        doxy_out_dir: directory that receives Doxygen's generated output.
        doxyINPUT: value for the Doxyfile INPUT setting (files/dirs to scan).
        is_doxy_recursive: value for the Doxyfile RECURSIVE setting.
    """
    doxy_file_patterns = " ".join("*.%s" % fil_ext for fil_ext in file_extensions_dox)
    # TODO: Create a tmp dir just for this purpose.
    fil_co = _my_doxyfile % (doxy_out_dir, doxyINPUT, doxy_file_patterns, is_doxy_recursive)
    # This is the input file to Doxygen.
    tmp_doxyfile_obj = lib_util.TmpFile("survol_doxygen_config")
    doxynam = tmp_doxyfile_obj.Name
    # "with" guarantees the Doxyfile is flushed and closed before doxygen
    # reads it (the original leaked the handle if write() raised).
    with open(doxynam, "w") as doxyfi:
        doxyfi.write(fil_co)
    # https://www.stack.nl/~dimitri/doxygen/manual/customize.html
    doxygen_command = ["doxygen", doxynam]
    # TODO: Use lib_common.SubProcPOpen
    lib_common.SubProcCall(doxygen_command)
    logging.debug("doxy_out_dir=%s", doxy_out_dir)
def create_owner_team_and_permissions(sender, instance, created, **kwargs):
    """
    Signal handler that creates the Owner team and assigns group and user
    permissions.

    Signature matches Django's post_save signal; ``created`` is True only on
    the first save, so the Owner team is created exactly once per profile.
    ``instance`` appears to be an organization profile exposing ``user``,
    ``creator`` and ``created_by`` - TODO confirm against the signal wiring.
    """
    if created:
        team = Team.objects.create(
            name=Team.OWNER_TEAM_NAME, organization=instance.user,
            created_by=instance.created_by)
        # Attach the "is_org_owner" permission to the new team, creating the
        # Permission row on first use.
        content_type = ContentType.objects.get(
            app_label='api', model='organizationprofile')
        permission, created = Permission.objects.get_or_create(
            codename="is_org_owner", name="Organization Owner",
            content_type=content_type)
        team.permissions.add(permission)
        # The team is added as a group, so Team presumably extends Group.
        instance.creator.groups.add(team)
    # Grant every model-level permission on this instance to the owning user,
    # the creator, and (when different) whoever created the record.
    for perm in get_perms_for_model(instance.__class__):
        assign_perm(perm.codename, instance.user, instance)
        if instance.creator:
            assign_perm(perm.codename, instance.creator, instance)
        if instance.created_by and instance.created_by != instance.creator:
            assign_perm(perm.codename, instance.created_by, instance)
def define_permit_price_targeting_constraints(m):
    """Constraints used to get the absolute difference between the permit price and some target

    Standard absolute-value linearisation: each dummy variable is bounded
    below by one side of (target - permit price), so minimising their sum in
    the objective recovers |permit price - target|.

    Args:
        m: Pyomo model exposing V_DUMMY_PERMIT_PRICE_TARGET_X_1/2,
           V_DUAL_PERMIT_MARKET and P_POLICY_PERMIT_PRICE_TARGET.

    Returns:
        The same model with the two constraints attached.
    """
    # Constraints to minimise difference between permit price and target
    m.C_PERMIT_PRICE_TARGET_CONSTRAINT_1 = pyo.Constraint(
        expr=m.V_DUMMY_PERMIT_PRICE_TARGET_X_1 >= m.P_POLICY_PERMIT_PRICE_TARGET - m.V_DUAL_PERMIT_MARKET)
    m.C_PERMIT_PRICE_TARGET_CONSTRAINT_2 = pyo.Constraint(
        expr=m.V_DUMMY_PERMIT_PRICE_TARGET_X_2 >= m.V_DUAL_PERMIT_MARKET - m.P_POLICY_PERMIT_PRICE_TARGET)
    return m
def _startsession (ARG_targetfile):
"""Writes a blank line and a session header to the targetfile.
Parameters
----------
ARG_targetfile : any type that open() accepts as a filename
Path to the file that should recieve the new session header.
"""
if os.path.isfile(ARG_targetfile) is False:
fileheader = _logfile_readme()
with open(ARG_targetfile, "w") as f:
f.write (fileheader)
with open(ARG_targetfile, "a") as f:
t_start = datetime.datetime.now()
f.write(f"\n---------- BEGIN SESSION: {t_start} ----------\n") | 36,776 |
def predict4():
    """Use Xception to label image.

    Classifies the hard-coded demo image with the module-level `model`
    (Xception presumably, given the 299x299 input size - TODO confirm),
    cleans up the top-1 class name and returns it title-cased.
    """
    path = 'static/Images/boxer.jpeg'
    img = image.load_img(path,target_size=(299,299))
    x = image.img_to_array(img)
    # Add the batch dimension expected by model.predict.
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    # decode_predictions yields (class_id, class_name, score) tuples;
    # only the top-1 class name is kept below.
    pclass = decode_predictions(preds, top=5)
    result = str(pclass[0][0][1])
    # Replace punctuation/separator characters with spaces for display.
    bad_chars=[';',':','_','!','*']
    for i in bad_chars:
        result = result.replace(i, ' ')
    result = result.title()
    print(result)
    return result
def augment_data(image, label, seg_label, perform_random_flip_and_rotate,
                 num_channels, has_seg_labels):
    """
    Image augmentation for training. Applies the following operations:
      - Horizontally flip the image with probability 0.5
      - Vertically flip the image with probability 0.5
      - Apply random rotation (multiples of 90 degrees only)

    Args:
        image: image tensor, assumed HxWxC - TODO confirm layout.
        label: classification label; passed through unchanged.
        seg_label: HxW segmentation mask aligned with `image`; only used
            when has_seg_labels is True.
        perform_random_flip_and_rotate: when False, inputs pass through.
        num_channels: number of image channels kept after augmentation.
        has_seg_labels: whether seg_label receives the same transforms.

    Returns:
        (image, label, seg_label) with identical geometric augmentation
        applied to image and seg_label.
    """
    if perform_random_flip_and_rotate:
        if has_seg_labels:
            # Stack the mask on as an extra channel so every random flip and
            # the rotation hit image and mask identically.
            image = tf.concat([image, tf.expand_dims(seg_label, -1)], 2)
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_flip_up_down(image)
        # Pick one of the four right-angle rotations uniformly at random.
        rotate_angle = tf.random_shuffle([0.0, 90.0, 180.0, 270.0])[0]
        image = tf.contrib.image.rotate(
            image, rotate_angle * np.pi / 180.0, interpolation='BILINEAR')
        if has_seg_labels:
            # Split the transformed mask back off the channel stack.
            seg_label = image[:, :, -1]
        image = image[:,:,:num_channels]
    return image, label, seg_label
def protobuf_open_channel(channel_name, media_type):
    """Build and serialize an OpenChannelRequest protobuf message.

    Args:
        channel_name: name assigned to the channel being opened.
        media_type: content type requested for the channel.

    Returns:
        bytes: the wire-format serialization of the request.
    """
    request = pb.OpenChannelRequest()
    request.channel_name = channel_name
    request.content_type = media_type
    return request.SerializeToString()
def cleanup_rollback_complete(self, dryrun, wait):
    """Predeploy hook: remove a stack left in ROLLBACK_COMPLETE by a
    previous failed deployment, unless this is a dry run."""
    if self.status != "ROLLBACK_COMPLETE":
        return
    logger.info("Deleting stack in ROLLBACK_COMPLETE state.")
    if not dryrun:
        self.delete(dryrun, wait)
async def update_code_example(q: Q):
    """
    Update code example.

    Copies the submitted form values from q.args into the per-client state,
    re-renders the code-examples card for the selected function and theme,
    and saves the page so the browser reflects the change.
    """
    logging.info('Updating code snippet')
    # Persist the latest form submission onto the client state.
    copy_expando(q.args, q.client)
    q.page['code_examples'] = cards.code_examples(
        code_function=q.client.code_function,
        theme_dark=q.client.theme_dark
    )
    await q.page.save()
def action(update, context):
    """A fun command to send bot actions (typing, record audio, upload photo, etc). Action appears at top of main chat.
    Done using the /action command.

    Admin-only: non-admin callers receive the admin-only reply instead.
    The action is picked uniformly at random and sent to the chat
    configured under config["GROUPS"]["crab_wiv_a_plan"].
    """
    bot = context.bot
    user_id = update.message.from_user.id
    username = update.message.from_user.name
    # _admin() gatekeeps the command; everyone else is politely refused.
    admin = _admin(user_id)
    if not admin:
        return _for_admin_only_message(bot, user_id, username)
    available_actions = ['RECORD_AUDIO', 'RECORD_VIDEO_NOTE', 'TYPING', 'UPLOAD_AUDIO',
                         'UPLOAD_DOCUMENT', 'UPLOAD_PHOTO', 'UPLOAD_VIDEO', 'UPLOAD_VIDEO_NOTE']
    send_action = choice(available_actions)
    bot.send_chat_action(chat_id=config["GROUPS"]["crab_wiv_a_plan"], action=send_action)
def findPartsLists(path):
    """gets a list of files/folders present in a path

    Scans the top level of *path* for .xlsx workbooks whose first sheet name
    contains "parts" and returns (full_path, display_name) tuples, sorted in
    reverse lexicographic order. Unreadable workbooks are skipped silently.
    NOTE(review): fle[-4:] == 'xlsx' matches the extension without the dot,
    but fle[:-4] leaves the trailing '.' in the returned name (and '.XLSX'
    is missed); endswith('.xlsx') / fle[:-5] were probably intended - confirm
    callers before changing.
    """
    walkr = os.walk(path)
    dirlist = [a for a in walkr]
    #print dirlist
    expts = []
    # dirlist[0][2] is the list of file names directly inside `path`.
    for fle in dirlist[0][2]:
        #print fle
        if(fle[-4:]=='xlsx'):
            try:
                # Parse all sheets; only the sheet names are inspected below.
                xl_file = pd.read_excel(os.path.join(path,fle),None)
                dfs = {sheet_name: xl_file.parse(sheet_name)
                       for sheet_name in xl_file.sheet_names}
                #print(dfs.keys()
                if("parts" in list(dfs.keys())[0]):
                    expts+=[(os.path.join(path,fle),fle[:-4])]
            except IOError:
                pass
    return sorted(expts)[::-1]
def plot_grid(
        basis: "numpy.ndarray" = None,
        supercell_matrix: "numpy.ndarray" = None,
        Nmax: int = None,
        **kwargs
):
    """Draw the lattice lines of a unit cell on the current matplotlib axes.

    Uses the first two 2-D basis vectors of *basis* and draws the family of
    lines through integer multiples from -Nmax to +Nmax; **kwargs is passed
    straight to Axes.axline.
    """
    ax = plt.gca()
    basis = basis[:2, :2].copy()
    a1 = basis[0, :].copy()
    a2 = basis[1, :].copy()
    # Lines through the origin along each basis vector.
    ax.axline(0 * a1, 1 * a1, **kwargs)
    ax.axline(0 * a2, 1 * a2, **kwargs)
    # Parallel lines offset by integer multiples of the other basis vector.
    for n in range(-Nmax, Nmax + 1):
        if n == 0:
            continue
        ax.axline(n * a1 + 0 * a2, n * a1 + n * a2, **kwargs)
        ax.axline(0 * a1 + n * a2, n * a1 + n * a2, **kwargs)
def borehole_vec(x, theta):
    """Vectorized borehole response.

    Args:
        x: array whose leading columns (all but the last) split evenly into
            the rw and Hl inputs, one row per evaluation point.
        theta: array with one column per parameter (Hu, Ld_Kw, Treff,
            powparam), one row per evaluation point.

    Returns:
        1-D array of response values, one per row.
    """
    Hu, Ld_Kw, Treff, powparam = np.split(theta, theta.shape[1], axis=1)
    rw, Hl = np.split(x[:, :-1], 2, axis=1)
    head_drop = 2 * np.pi * (Hu - Hl)
    resistance = 2 * Ld_Kw / rw ** 2 + Treff
    flow = (head_drop / resistance) * np.exp(powparam * rw)
    return flow.reshape(-1)
def read_datastore(resource_id):
    """
    Retrieves data when the resource is part of the CKAN DataStore.
    Parameters
    ----------
    resource_id: str
        Id for resource
    Returns
    ----------
    pd.DataFrame:
        Data records in table format
    """
    # First request fetches a single record just to learn the total count.
    r = requests.get(
        DATASTORE_SEARCH_URL, params={"resource_id": resource_id, "limit": 1}
    )
    n_records = json.loads(r.content)["result"]["total"]
    # Second request pulls every record in one call, using that total as the
    # page limit.
    r = requests.get(
        DATASTORE_SEARCH_URL, params={"resource_id": resource_id, "limit": n_records}
    )
    r.encoding = "utf-8"
    data_json = json.loads(r.content)["result"]["records"]
    # Missing fields become empty strings rather than NaN.
    data_df = pd.DataFrame.from_records(data_json).fillna("")
    return data_df
def new_server_session(keys, pin):
    """Create SRP server session.

    Derives the user triplet (username, verifier, salt) from a 'Pair-Setup'
    SRP context built around *pin*, then opens a server-side SRP session
    whose private value is the hex encoding of ``keys.auth``.

    Args:
        keys: credential object; only its ``auth`` bytes are used here.
        pin: pairing PIN; used as the SRP password.

    Returns:
        Tuple (SRPServerSession, salt).
    """
    context = SRPContext(
        "Pair-Setup",
        str(pin),
        prime=constants.PRIME_3072,
        generator=constants.PRIME_3072_GEN,
        hash_func=hashlib.sha512,
        bits_salt=128,
        bits_random=512,
    )
    username, verifier, salt = context.get_user_data_triplet()
    # Second context carries only the username; the verifier goes straight
    # into the session constructor below.
    context_server = SRPContext(
        username,
        prime=constants.PRIME_3072,
        generator=constants.PRIME_3072_GEN,
        hash_func=hashlib.sha512,
        bits_salt=128,
        bits_random=512,
    )
    session = SRPServerSession(
        context_server, verifier, binascii.hexlify(keys.auth).decode()
    )
    return session, salt
def lenzi(df):
    """Return True when *df* (a pandas Series or DataFrame) has an empty index."""
    return not len(df.index)
def move_pdfs(directory: str = None) -> None:
    """Move every *.pdf in *directory* into its "PDFs" subdirectory.

    Args:
        directory: Directory to scan; the current working directory when
            None. The "PDFs" subdirectory is assumed to already exist.

    Returns:
        None. A file of the same name already in PDFs/ is overwritten.
    """
    direc = directory + os.sep if directory is not None else ''
    for pdf in glob(direc + '*.pdf'):
        new = direc + 'PDFs' + os.sep + os.path.split(pdf)[-1]
        # os.replace overwrites atomically, fixing the remove()+rename()
        # dance (which raced between the two calls; the remove existed only
        # because os.rename refuses to clobber an existing target on Windows).
        os.replace(pdf, new)
def check_exists(path):
    """Check if a directory or a path is at the received path.
    Arguments:
        path: The path to check.
    Returns:
        Nothing.
    Raises:
        RuntimeError: Raised if nothing exists at the received path, or if
            path is None.
    """
    if path is None:
        raise RuntimeError("Got None instead of a valid path.")
    # gfile is presumably TensorFlow's tf.io.gfile, which also handles
    # GCS/HDFS-style paths - TODO confirm which gfile the module imports.
    if not gfile.Exists(path):
        raise RuntimeError(f"File path `{path}` doesn't exist.")
def flatten_in(iterable, pred=None):
    """Like flatten, but recurse also into tuples/lists not matching pred.

    This makes also those items get the same flattening applied inside them.

    Args:
        iterable: items to flatten.
        pred: optional predicate on a nested list/tuple. A container matching
            it is flattened into the output stream; one not matching it is
            rebuilt (same type) with flattening applied to its contents.
            Defaults to "always flatten".

    Example::

        is_nested = lambda e: all(isinstance(x, (list, tuple)) for x in e)
        data = (((1, 2), ((3, 4), (5, 6)), 7), ((8, 9), (10, 11)))
        assert tuple(flatten_in(data, is_nested)) == \\
            (((1, 2), (3, 4), (5, 6), 7), (8, 9), (10, 11))
    """
    pred = pred or (lambda x: True)
    for e in iterable:
        if isinstance(e, (list, tuple)):
            if pred(e):
                # `yield from` replaces the manual inner loop of the original.
                yield from flatten_in(e, pred)
            else:
                # Preserve the container type, but flatten inside it.
                yield type(e)(flatten_in(e, pred))
        else:
            yield e
def m_step(counts, item_classes, psuedo_count):
    """
    Get estimates for the prior class probabilities (p_j) and the error
    rates (pi_jkl) using MLE with current estimates of true item classes
    See equations 2.3 and 2.4 in Dawid-Skene (1979)
    Input:
        counts: Array of how many times each rating was given by each rater
            for each item, shape [nItems, nRaters, nClasses]
        item_classes: Matrix of current assignments of items to classes,
            shape [nItems, nClasses]
        psuedo_count: A psuedo count used to smooth the error rates. For each
        rater k
            and for each class i and class j, we pretend rater k has rated
            psuedo_count examples with class i when class j was the true class.
    Returns:
        p_j: class marginals [classes]
        pi_kjl: error rates - the probability of rater k giving
            response l for an item in class j [observers, classes, classes]
    """
    [nItems, nRaters, nClasses] = np.shape(counts)
    # compute class marginals
    class_marginals = np.sum(item_classes, axis=0) / float(nItems)
    # compute error rates for each rater, each predicted class
    # and each true class
    # counts.T is [nClasses, nRaters, nItems]; batched matmul with
    # [nItems, nClasses] gives [nClasses, nRaters, nClasses].
    error_rates = np.matmul(counts.T, item_classes) + psuedo_count
    # reorder axes so its of size [nRaters x nClasses x nClasses]
    error_rates = np.einsum('abc->bca', error_rates)
    # divide each row by the sum of the error rates over all observation classes
    sum_over_responses = np.sum(error_rates, axis=2)[:, :, None]
    # for cases where an annotator has never used a label, set their sum over
    # responses for that label to 1 to avoid nan when we divide. The result will
    # be error_rate[k, i, j] is 0 if annotator k never used label i.
    sum_over_responses[sum_over_responses == 0] = 1
    error_rates = np.divide(error_rates, sum_over_responses)
    return (class_marginals, error_rates)
def make_parser(fn: Callable[[], Parser]) -> Parser:
    """Wrap *fn* with generate() to build a typed parser.

    The explicit wrapper exists so mypy sees a concrete Parser return type.
    """
    parser = generate(fn)
    return parser
def ts2date(ctx: 'click.Context', ts: int) -> None:
    """Print a unix timestamp as a UTC and a local struct_time.

    Args:
        ctx: click's Context (unused by the body).
        ts: seconds since the epoch.
    """
    click.echo(f'UTC: {time.gmtime(ts)}')
    click.echo(f'Local: {time.localtime(ts)}')
def mfcc_htk(y, sr, hop_length=2**10, window_length=22050, nmfcc=13, n_mels=26, fmax=8000, lifterexp=22):
    """
    Get MFCCs 'the HTK way' with the help of Essentia
    https://github.com/MTG/essentia/blob/master/src/examples/tutorial/example_mfcc_the_htk_way.py
    Using all of the default parameters from there except the hop length (which shouldn't matter),
    and a much longer window length (which has been found to work better for covers)
    Parameters
    ----------
    y: ndarray
        Audio samples to analyse.
    sr: int
        Sample rate (unused by the body; kept for API symmetry).
    hop_length: int
        Hop between successive analysis frames, in samples.
    window_length: int
        Length of the window to use for the STFT
    nmfcc: int
        Number of MFCC coefficients to compute
    n_mels: int
        Number of frequency bands to use
    fmax: int
        Maximum frequency
    lifterexp: int
        HTK-style liftering coefficient (CEPLIFTER)
    Returns
    -------
    ndarray(nmfcc, nframes)
        An array of all of the MFCC frames
    """
    # Round the FFT size up to the next power of two; the window is zero-padded
    # to fill it.
    fftlen = int(2**(np.ceil(np.log(window_length)/np.log(2))))
    spectrumSize = fftlen//2+1
    zeroPadding = fftlen - window_length
    w = estd.Windowing(type = 'hamming', # corresponds to htk default  USEHAMMING = T
                       size = window_length,
                       zeroPadding = zeroPadding,
                       normalized = False,
                       zeroPhase = False)
    spectrum = estd.Spectrum(size=fftlen)
    # Renamed from `mfcc_htk`: the original local shadowed this function's own name.
    mfcc_algo = estd.MFCC(inputSize = spectrumSize,
                          type = 'magnitude', # htk uses mel filterbank magniude
                          warpingFormula = 'htkMel', # htk's mel warping formula
                          weighting = 'linear', # computation of filter weights done in Hz domain
                          highFrequencyBound = fmax, # 8000 is htk default
                          lowFrequencyBound = 0, # corresponds to htk default
                          numberBands = n_mels, # corresponds to htk default  NUMCHANS = 26
                          numberCoefficients = nmfcc,
                          normalize = 'unit_max', # htk filter normaliation to have constant height = 1
                          dctType = 3, # htk uses DCT type III
                          logType = 'log',
                          liftering = lifterexp) # corresponds to htk default CEPLIFTER = 22
    mfccs = []
    # BUG FIX: the original iterated `audio.y` - an undefined global - instead
    # of the `y` samples passed to this function.
    # startFromZero = True, validFrameThresholdRatio = 1 : the way htk computes windows
    for frame in estd.FrameGenerator(y, frameSize = window_length, hopSize = hop_length , startFromZero = True, validFrameThresholdRatio = 1):
        spect = spectrum(w(frame))
        mel_bands, mfcc_coeffs = mfcc_algo(spect)
        mfccs.append(mfcc_coeffs)
    return np.array(mfccs, dtype=np.float32).T
def num_to_int(num):
    """
    Checks that a numerical value (e.g. returned by robot) is an integer and
    not a float.
    Parameters
    ----------
    num : number to check
    Returns
    -------
    integer : num cast to an integer
    Raises
    ------
    ValueError : if num is not a whole number
    """
    fractional_part = num % 1
    if fractional_part != 0:
        raise ValueError('Expecting integer. Got: "{0}" ({1})'
                         .format(num, type(num)))
    return int(num)
def shuffle_blocks(wmx_orig, pop_size=800):
    """
    Shuffles pop_size*pop_size blocks within the matrix.
    :param wmx_orig: original weight matrix (nPCs x nPCs; nPCs is module-level)
    :param pop_size: size of the blocks kept together
    :return: wmx_modified: modified weight matrix
    """
    assert nPCs % pop_size == 0
    np.random.seed(12345)  # fixed seed keeps the shuffle reproducible
    # BUG FIX: floor division - plain `/` yields a float on Python 3, which
    # made the range() calls below raise TypeError.
    n_pops = nPCs // pop_size
    # cut the matrix into pop_size x pop_size blocks
    blocks = {}
    for i in range(n_pops):
        for j in range(n_pops):
            blocks[i, j] = wmx_orig[i*pop_size:(i+1)*pop_size, j*pop_size:(j+1)*pop_size]
    # generate shuffled row/column block orders; integer indices replace the
    # float linspace values (shuffle consumes the same RNG stream either way,
    # so the resulting permutation is unchanged)
    x = np.arange(n_pops)
    y = np.arange(n_pops)
    np.random.shuffle(x)
    np.random.shuffle(y)
    # create block shuffled weight matrix
    wmx_modified = np.zeros((nPCs, nPCs))
    for i, id_i in enumerate(x):
        for j, id_j in enumerate(y):
            wmx_modified[i*pop_size:(i+1)*pop_size, j*pop_size:(j+1)*pop_size] = blocks[id_i, id_j]
    return wmx_modified
def test_generate_f_file_queries_contracts(database, monkeypatch):
    """ generate_f_file_queries should provide queries representing halves of F file data related to a submission
        This will cover contracts records.

    Exercises two award shapes: a plain award ('AWD') and an IDV ('IDV'),
    each with one FSRS procurement and one subcontract, then asserts the
    populated Subaward rows match via compare_contract_results.
    """
    sess = database.session
    # Start from a clean Subaward table so result ordering is deterministic.
    sess.query(Subaward).delete(synchronize_session=False)
    sess.commit()
    parent_duns, duns, dom_country, int_country = reference_data(sess)
    # Setup - create awards, procurements, subcontract
    sub = SubmissionFactory(submission_id=1)
    d1_awd = DetachedAwardProcurementFactory(
        submission_id=sub.submission_id,
        idv_type=None,
        unique_award_key='AWD'
    )
    # FSRS procurement linked to the plain award via piid/parent_award_id.
    contract_awd = FSRSProcurementFactory(
        contract_number=d1_awd.piid,
        idv_reference_number=d1_awd.parent_award_id,
        contracting_office_aid=d1_awd.awarding_sub_tier_agency_c,
        company_address_country=dom_country.country_code,
        principle_place_country=int_country.country_code,
        duns=duns.awardee_or_recipient_uniqu,
        date_signed=datetime.now(),
        date_submitted=datetime(2019, 5, 30, 16, 25, 12, 34)
    )
    sub_contract_awd = FSRSSubcontractFactory(
        parent=contract_awd,
        company_address_country=int_country.country_code,
        principle_place_country=dom_country.country_code,
        subcontract_date=datetime.now()
    )
    d1_idv = DetachedAwardProcurementFactory(
        submission_id=sub.submission_id,
        idv_type='C',
        unique_award_key='IDV'
    )
    contract_idv = FSRSProcurementFactory(
        contract_number=d1_idv.piid,
        idv_reference_number=d1_idv.parent_award_id,
        contracting_office_aid=d1_idv.awarding_sub_tier_agency_c,
        company_address_country=dom_country.country_code,
        principle_place_country=int_country.country_code,
        duns=duns.awardee_or_recipient_uniqu,
        date_signed=datetime.now(),
        date_submitted=datetime(2019, 5, 30, 16, 25, 12, 34)
    )
    sub_contract_idv = FSRSSubcontractFactory(
        parent=contract_idv,
        company_address_country=int_country.country_code,
        principle_place_country=dom_country.country_code,
        subcontract_date=datetime.now()
    )
    sess.add_all([sub, d1_awd, contract_awd, sub_contract_awd, d1_idv, contract_idv, sub_contract_idv])
    sess.commit()
    # Gather the sql
    populate_subaward_table(sess, 'procurement_service', ids=[contract_awd.id, contract_idv.id])
    # Get the records, ordered AWD before IDV by unique_award_key.
    contracts_results = sess.query(Subaward).order_by(Subaward.unique_award_key).all()
    created_at = updated_at = contracts_results[0].created_at
    # Expected Results
    assert compare_contract_results(contracts_results[0], d1_awd, contract_awd, sub_contract_awd, parent_duns, duns,
                                    dom_country, int_country, created_at, updated_at) is True
    assert compare_contract_results(contracts_results[1], d1_idv, contract_idv, sub_contract_idv, parent_duns, duns,
                                    dom_country, int_country, created_at, updated_at) is True
def change():
    """
    Change language

    Flask view: reads the ``lang`` query parameter, forwards it together
    with the current user's id (when present on ``g.my``) to
    core.languages.change, and returns the result as JSON.
    """
    lang = request.args.get("lang", None)
    my_id = None
    # g.my is presumably set by auth middleware for logged-in users - TODO confirm.
    if hasattr(g, 'my') and g.my:
        my_id = g.my['_id']
    data = core.languages.change(lang=lang, my_id=my_id)
    return jsonify(data)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.