content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
async def get_category(category):
    """
    Retrieves the data for the provided category. The data is cached for 1 hour.

    NOTE(review): no caching is visible in this function body; the 1-hour cache
    is presumably applied by a decorator or at the HTTP layer -- confirm.

    :param category: time-series category name (case-insensitive),
        interpolated into the upstream CSV file name.
    :returns: The data for category.
    :rtype: dict
    """
    # Adhere to category naming standard.
    category = category.lower()
    # URL to request data from.
    url = BASE_URL + "time_series_covid19_%s_global.csv" % category
    # Request the data
    async with httputils.CLIENT_SESSION.get(url) as response:
        text = await response.text()
    # Parse the CSV.
    data = list(csv.DictReader(text.splitlines()))
    # The normalized locations.
    locations = []
    for item in data:
        # Filter out all the dates (columns whose header parses as a date).
        dates = dict(filter(lambda element: date_util.is_date(element[0]), item.items()))
        # Make location history from dates; empty cells count as 0.
        history = {date: int(amount or 0) for date, amount in dates.items()}
        # Country for this location.
        country = item["Country/Region"]
        # Latest data insert value (relies on CSV columns being in date order).
        latest = list(history.values())[-1]
        # Normalize the item and append to locations.
        locations.append(
            {
                # General info.
                "country": country,
                "country_code": countries.country_code(country),
                "province": item["Province/State"],
                # Coordinates.
                "coordinates": {"lat": item["Lat"], "long": item["Long"],},
                # History.
                "history": history,
                # Latest statistic.
                "latest": int(latest or 0),
            }
        )
    # Latest total across all locations.
    latest = sum(map(lambda location: location["latest"], locations))
    # Return the final data.
    return {
        "locations": locations,
        "latest": latest,
        "last_updated": datetime.utcnow().isoformat() + "Z",
        "source": "https://github.com/ExpDev07/coronavirus-tracker-api",
    }
def __round(x):
    """
    Round ``x`` to the nearest 0.25.

    :param x: <float> number.
    :return: <float> rounded number.
    """
    # BUG FIX: the function printed the result instead of returning it,
    # contradicting its documented ":return:" contract.
    # Scale to quarters, round to the nearest integer, then scale back.
    return round(x * 4) / 4
def func_simple_sca_batch_scanner(root_directory):
    """
    Scan every sub-directory under ``root_directory`` with Fortify SCA, one
    repository at a time (e.g. repos/app1, repos/app2, repos/app3).

    :param root_directory: directory containing one repository per sub-folder
    :return: None
    """
    root_directory = Path(root_directory)
    repos = [
        entry
        for entry in os.listdir(root_directory)
        if os.path.isdir(os.path.join(root_directory, entry))
    ]
    for repo in repos:
        logging.info("repo {} is being analysed by Fortify SCA".format(repo))
        scan_args = ["-d", root_directory, "-r", str(repo)]
        started = time.perf_counter()
        func_simple_sca_cli(scan_args)
        elapsed = time.perf_counter() - started
        logging.info("repo {} scanning finished in {}".format(
            repo, time.strftime("%H:%M:%S", time.gmtime(elapsed))))
def plot_data():
    """Plot the currently selected y-series against the x-series, one window per plot."""
    x_key = X_SELECT.get()
    y_key = Y_SELECT.get()
    # Nothing to plot until both axes have been chosen.
    if x_key == "" or y_key == "":
        return
    plot_name = y_key + ' vs ' + x_key
    plt.figure(num=plot_name)  # put each graph on a new window
    plt.plot(DATA_DICT[x_key], DATA_DICT[y_key], 'o')
    plt.title(plot_name)
    plt.xlabel(x_key)
    plt.ylabel(y_key)
    plt.show()
def discover_modules(mnodule_name):
    """
    Recursively import a module and every sub-module beneath it.

    :param mnodule_name: dotted module name to start from
    :return: generator yielding the imported module objects, parent first
    """
    root = import_module(mnodule_name)
    yield root
    # Only packages expose __path__; plain modules have no children to walk.
    search_path = getattr(root, '__path__', None)
    if search_path:
        for _, child_name, _ in pkgutil.iter_modules(search_path, f'{mnodule_name}.'):
            yield from discover_modules(child_name)
def plot_ellipses_area(
    params, depth="None", imin=0, imax=398, jmin=0, jmax=898, figsize=(10, 10)
):
    """Plot ellipses on a map in the Salish Sea.
    :arg params: a array containing the parameters (possibly at different
        depths and or locations).
    :type param: np.array
    :arg depth: The depth at which you want to see the ellipse. If the param
        array has no depth dimensions put 'None'. Default 'None'.
    :type depth: int or the string 'None'
    :arg imin: Minimum horizontal index that will be plotted.
    :type imin: int
    :arg imax: Maximum horizontal index that will be plotted.
    :type imax: int
    :arg jmin: Minimum vertical index that will be plotted.
    :type jmin: int
    :arg jmax: Maximum vertical index that will be plotted.
    :type jmax: int
    """
    # Rotation angle (degrees) applied to the model grid; 0 means no rotation.
    phi = 0
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    # k/m hold the rotated grid coordinates for the full 898x398 model grid.
    k = np.zeros((898, 398))
    m = np.zeros((898, 398))
    # Visual scale factor applied to ellipse axes so they are visible on the map.
    scale = 10
    for q in np.arange(jmin, jmax):
        for l in np.arange(imin, imax):
            k[q, l] = q * np.cos(phi * np.pi / 180.0) + l * np.sin(phi * np.pi / 180.0)
            m[q, l] = -q * np.sin(phi * np.pi / 180.0) + l * np.cos(phi * np.pi / 180.0)
    # NOTE: depth is compared against the *string* 'None' by design (see docstring).
    if depth == "None":
        for x in np.arange(imin, imax):
            for y in np.arange(jmin, jmax):
                # Blue = counter-clockwise (positive minor axis), red = clockwise.
                if params[y, x, 1] > 0:
                    thec = "b"
                else:
                    thec = "r"
                ellsc = Ellipse(
                    xy=(m[y, x], k[y, x]),
                    width=scale * params[y, x, 0],
                    height=scale * params[y, x, 1],
                    angle=params[y, x, 2] - 29,
                    color=thec,
                )
                ax.add_artist(ellsc)
    else:
        # With a depth axis, the parameter slots shift by one index.
        for x in np.arange(imin, imax):
            for y in np.arange(jmin, jmax):
                if params[y, x, depth, 2] > 0:
                    thec = "b"
                else:
                    thec = "r"
                ellsc = Ellipse(
                    xy=(m[y, x], k[y, x]),
                    width=scale * params[y, x, depth, 1],
                    height=scale * params[y, x, depth, 2],
                    angle=params[y, x, depth, 3] - 29,
                    color=thec,
                )
                ax.add_artist(ellsc)
    # NOTE(review): hard-coded absolute bathymetry path -- only works on the
    # original analysis machine; consider parameterizing.
    grid_B = nc.Dataset(
        "/data/dlatorne/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc"
    )
    bathy = grid_B.variables["Bathymetry"][:, :]
    # Fill the land mask (bathymetry ~ 0) in black.
    contour_interval = [-0.01, 0.01]
    ax.contourf(
        m[jmin:jmax, imin:imax],
        k[jmin:jmax, imin:imax],
        bathy.data[jmin:jmax, imin:imax],
        contour_interval,
        colors="black",
    )
    # Outline the 5 m depth contour.
    ax.contour(
        m[jmin:jmax, imin:imax],
        k[jmin:jmax, imin:imax],
        bathy.data[jmin:jmax, imin:imax],
        [5],
        colors="black",
    )
    ax.set_title("Tidal ellipse", fontsize=20)
    ax.set_xlabel("x index", fontsize=16)
    ax.set_ylabel("y index", fontsize=16)
    print("red is clockwise")
    return fig
def get_repo_owner_from_url(url: str) -> str:
    """Get git repository owner from git remote url.

    Args:
        url: (str) git remote url. Can be "git@" or "https://".
    Returns:
        str: git repository owner (user or organization).
    """
    owner_end = url.rfind('/')
    # HTTPS form: https://github.com/shalb/cluster.dev.git
    # -> owner starts after the first '/' following the scheme and host.
    owner_start = url.find('/', 8)
    if owner_start == owner_end:
        # SSH form: git@github.com:shalb/cluster.dev.git -> owner follows ':'.
        owner_start = url.find(':')
    if owner_end < 0 or owner_start < 0:
        sys.exit(f'ERROR: Check `git remote -v`. Badly formatted origin: {url}')
    return url[owner_start + 1:owner_end]
def radec2lb(ra, dec, radian=False, FK5=False):
    """
    Convert (ra, dec) into Galactic coordinates (l, b).

    Parameters
    ----------
    ra : float or list or array
        RA Coordinates in degree
    dec : float or list or array
        DEC Coordinates in degree
    radian : bool, optional
        If True, return (l, b) in radians instead of degrees.
    FK5 : bool, optional
        If True, interpret the input as FK5 coordinates instead of ICRS.

    Returns
    -------
    l : float or list or array
    b : float or list or array
    """
    frame = 'fk5' if FK5 else 'icrs'
    # See if the input is a scalar or an array.
    if not (isiterable(ra) or isiterable(dec)):
        returnScalar = True
        raDec = [SkyCoord(ra, dec, frame=frame, unit='deg')]
    else:
        returnScalar = False
        # BUG FIX: the comprehension previously built SkyCoord(ra, dec) with
        # the *whole* input arrays on every iteration instead of the
        # per-element values (rrr, ddd).
        raDec = [SkyCoord(rrr, ddd, frame=frame, unit='deg')
                 for rrr, ddd in zip(ra, dec)]
    # Convert to galactic coordinates.
    # Currently, coordinates do not support arrays; have to loop.
    # NOTE: np.float was removed from NumPy (1.24+); use the builtin float.
    l = np.empty(len(raDec), dtype=float)
    b = np.empty(len(raDec), dtype=float)
    for ii, cc in enumerate(raDec):
        gg = cc.galactic
        if radian:
            l[ii] = gg.l.radian
            b[ii] = gg.b.radian
        else:
            l[ii] = gg.l.degree
            b[ii] = gg.b.degree
    if returnScalar:
        return l[0], b[0]
    return l, b
def populate_workflow_request_body(manifest_data: Dict):
    """
    Populate a workflow request body with the passed data according to the
    API specification.

    :param manifest_data: item data from manifest files
    :return: populated request
    :rtype: dict
    """
    # Access control and legal sections come from the test configuration.
    acl = {
        "owners": [config.get("REQUEST", "acl_owner")],
        "viewers": [config.get("REQUEST", "acl_viewer")],
    }
    legal = {
        "legaltags": [config.get("REQUEST", "legal_tag")],
        "otherRelevantDataCountries": [
            config.get("REQUEST", "other_relevant_data_countries")
        ],
        "compliant": "compliant",
    }
    return {
        "runId": generate_id(),
        "executionContext": {
            "acl": acl,
            "legal": legal,
            "Payload": {
                "AppKey": "test-app",
                "data-partition-id": "opendes"
            },
            "manifest": manifest_data,
        },
    }
def openImage(filename, videoFrameTime=None, isMask=False, preserveSnapshot=False):
    """
    Open and return an image from the file. If the file is a video, find the first non-uniform frame.
    videoFrameTime, integer time in milliseconds, is provided, then find the frame after that point in time
    preserveSnapshot, False by default, informs the function to save the frame image after extraction for videos

    Missing or unreadable files resolve to the bundled RedX placeholder icon;
    audio files resolve to the audio icon.
    """
    import os
    from scipy import ndimage  # NOTE(review): ndimage appears unused in this function -- confirm before removing.
    snapshotFileName = filename
    if not os.path.exists(filename):
        logging.getLogger('maskgen').warning(filename + ' is missing.')
        # Guard against infinite recursion if the RedX placeholder itself is missing.
        if not filename.endswith('icons/RedX.png'):
            return openImage(get_icon('RedX.png'))
        return None
    if filename[filename.rfind('.') + 1:].lower() in ['avi', 'mp4', 'mov', 'flv', 'qt', 'wmv', 'm4p', 'mpeg', 'mpv',
                                                      'm4v', 'mts', 'mpg'] or fileType(filename) == 'video':
        # Negative-index slice: equivalent to filename[:filename.rfind('.')],
        # i.e. swap the video extension for '.png' to name the snapshot.
        snapshotFileName = filename[0:filename.rfind('.') - len(filename)] + '.png'
    if fileType(filename) == 'audio':
        return openImage(get_icon('audio.png'))
    # (Re)extract a frame when an explicit time is requested, or when the
    # snapshot is missing or older than the source video.
    if videoFrameTime is not None or \
            (snapshotFileName != filename and \
                     (not os.path.exists(snapshotFileName) or \
                              os.stat(snapshotFileName).st_mtime < os.stat(filename).st_mtime)):
        if not ('video' in getFileMeta(filename)):
            return openImage(get_icon('audio.png'))
        videoFrameImg = readImageFromVideo(filename,videoFrameTime=videoFrameTime,isMask=isMask,
                                           snapshotFileName=snapshotFileName if preserveSnapshot else None)
        if videoFrameImg is None:
            logging.getLogger('maskgen').warning( 'invalid or corrupted file ' + filename)
            return openImage(get_icon('RedX.png'))
        return videoFrameImg
    else:
        try:
            img = openImageFile(snapshotFileName, isMask=isMask)
            return img if img is not None else openImage('./icons/RedX.png')
        except Exception as e:
            logging.getLogger('maskgen').warning('Failed to load ' + filename + ': ' + str(e))
            return openImage(get_icon('RedX.png'))
def is_leap_year(year):
    """
    Is the given year a leap year?

    A year is a leap year when it is divisible by 4, except century years,
    which must also be divisible by 400 (Gregorian rule).

    Args:
        year (int): The year you wish to check.
    Returns:
        bool: Whether the year is a leap year (True) or not (False).
    """
    # Return the boolean expression directly instead of the
    # `if ...: return True / return False` pattern; also fixes the docstring,
    # which documented a nonexistent parameter name `y`.
    return year % 4 == 0 and (year % 100 > 0 or year % 400 == 0)
def delta2bbox(src_bbox, delta):
    """
    Apply regression deltas to source boxes and return the resulting boxes.

    src_bbox: (N_bbox, 4)
    delta: (N_bbox, 4)
    """
    #---------- debug
    assert src_bbox.shape == delta.shape
    assert isinstance(src_bbox, np.ndarray)
    assert isinstance(delta, np.ndarray)
    #----------
    # Decompose the source boxes into center/size form.
    heights = src_bbox[:, 2] - src_bbox[:, 0]
    widths = src_bbox[:, 3] - src_bbox[:, 1]
    ctr_x = src_bbox[:, 0] + heights / 2
    ctr_y = src_bbox[:, 1] + widths / 2
    # Shift the centers and rescale the sizes by the predicted deltas.
    new_x = ctr_x + heights * delta[:, 0]
    new_y = ctr_y + widths * delta[:, 1]
    new_h = heights * np.exp(delta[:, 2])
    new_w = widths * np.exp(delta[:, 3])
    # Recompose into corner form, one column per coordinate.
    corners = [
        (new_x - new_h / 2).reshape([-1, 1]),
        (new_y - new_w / 2).reshape([-1, 1]),
        (new_x + new_h / 2).reshape([-1, 1]),
        (new_y + new_w / 2).reshape([-1, 1]),
    ]
    return np.concatenate(corners, axis=1)  # (N_dst_bbox, 4)
def blank_dog():
    """Set up (16, 3) array of dog with initial joint positions"""
    length = 0.5
    width = 0.2
    ankle_length = 0.1
    ankle_to_knee = 0.2
    knee_to_shoulder = 0.05
    origin = Vector(0, 0, 0)
    joints = []
    # One leg per corner of the body: front/back x left/right.
    for fore_aft in (-1, +1):
        for side in (+1, -1):
            foot = origin + length * Vector(fore_aft / 2, 0, 0) + width * Vector(0, side / 2, 0)
            ankle = foot + ankle_length * Vector(-0.3, 0, 1).unit()
            knee = ankle + ankle_to_knee * Vector(-0.1, 0, 1).unit()
            shoulder = knee + knee_to_shoulder * Vector(0.05, 0, 1).unit()
            if n_joints == 16:
                joints.extend([foot, ankle, knee, shoulder])
            elif n_joints == 8:
                joints.extend([foot, shoulder])
    return np.array(joints)
def pandoc_command(event, verbose=True):
    #@+<< pandoc command docstring >>
    #@+node:ekr.20191006153547.1: *4* << pandoc command docstring >>
    """
    The pandoc command writes all @pandoc nodes in the selected tree to the
    files given in each @pandoc node. If no @pandoc nodes are found, the
    command looks up the tree.
    Each @pandoc node should have the form: `@pandoc x.adoc`. Relative file names
    are relative to the base directory. See below.
    By default, the pandoc command creates AsciiDoctor headings from Leo
    headlines. However, the following kinds of nodes are treated differently:
    - @ignore-tree: Ignore the node and its descendants.
    - @ignore-node: Ignore the node.
    - @no-head: Ignore the headline. Do not generate a heading.
    After running the pandoc command, use the pandoc tool to convert the x.adoc
    files to x.html.
    Settings
    --------
    @string pandoc-base-directory specifies the base for relative file names.
    The default is c.frame.openDirectory
    Scripting interface
    -------------------
    Scripts may invoke the adoc command as follows::
        event = g.Bunch(base_dicrectory=my_directory, p=some_node)
        c.markupCommands.pandoc_command(event=event)
    This @button node runs the adoc command and coverts all results to .html::
        import os
        paths = c.markupCommands.pandoc_command(event=g.Bunch(p=p))
        paths = [z.replace('/', os.path.sep) for z in paths]
        input_paths = ' '.join(paths)
        g.execute_shell_commands(['asciidoctor %s' % input_paths])
    """
    #@-<< pandoc command docstring >>
    # Without a commander ('c') in the event there is nothing to act on.
    c = event and event.get('c')
    if not c:
        return None
    # Delegate the real work to the commander's markup-commands helper.
    return c.markupCommands.pandoc_command(event, verbose=verbose)
def _update_testcase_list(options: dict):
    """
    Use provided config options to find the final list of test cases to use
    for running the workflows. The following implementation assumes options
    `--testcases` and `--testcase-file` are mutually exclusive.
    """
    # An explicit list of test cases wins outright.
    if options.get("testcases"):
        return
    # Otherwise read one test case per line from a file, skipping blanks
    # and comment lines.
    if "testcase-file" in options:
        with open(options["testcase-file"], "rt") as file:
            lines = file.read().splitlines()
        options["testcases"] = [x for x in lines if x and not x.startswith("#")]
        return
    # In offline mode, list the cases already present on disk.
    if options.get("offline"):
        version_dir = Path(
            options.get("output-directory"),
            options.get("suite"),
            options.get("version"),
        )
        if not version_dir.exists():
            raise _ToucaError(_ToucaErrorCode.NoCaseMissingRemote)
        found = [x.name for x in version_dir.glob("*")]
        if not found:
            raise _ToucaError(_ToucaErrorCode.NoCaseMissingRemote)
        options["testcases"] = found
        return
    # Fall back to asking the remote server, which requires API credentials.
    if any(k not in options for k in ["api-key", "api-url"]):
        raise _ToucaError(_ToucaErrorCode.NoCaseMissingRemote)
    options["testcases"] = Client.instance().get_testcases()
    if not options.get("testcases"):
        raise _ToucaError(_ToucaErrorCode.NoCaseEmptyRemote)
def stop_worker(config, *, worker_ids=None):
    """ Stop a worker process.

    Args:
        config (Config): Reference to the configuration object from which the
            settings for the worker are retrieved.
        worker_ids (list): An optional list of ids for the worker that should be stopped.
    """
    # Accept a single id by wrapping it into a one-element list.
    if worker_ids is not None and not isinstance(worker_ids, list):
        worker_ids = [worker_ids]
    # None means "broadcast the shutdown to all workers".
    create_app(config).control.shutdown(destination=worker_ids)
def random_majority_link_clf():
    """
    For link classification labels are not drawn from a fixed distribution;
    instead each segment links to one of the possible segments in its sample.
    This baseline links every segment to the segment directly before it
    (the first segment links to itself).
    """
    def clf(labels, k: int):
        # Link segment i to its predecessor, clamped at the first segment.
        links = []
        for i in range(k):
            links.append(max(0, i - 1))
        return links
    return clf
def configure_logger(logger_args):
    """Configures the global Python logger object."""
    logger_val = ArgumentsValidator(logger_args, "Logger arguments")
    with logger_val:
        show_debug_logs = logger_val.get("show_debug_logs", ATYPE_BOOL, False, default=False)
        show_date = logger_val.get("show_date", ATYPE_BOOL, False, default=True)
        syslog_path = logger_val.get("syslog_path", [ATYPE_NONE, ATYPE_STRING], False)
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG if show_debug_logs else logging.INFO)
    # Log to syslog when a socket path is given, otherwise to stdout.
    if syslog_path is None:
        handler = logging.StreamHandler(stream=sys.stdout)
    else:
        handler = logging.handlers.SysLogHandler(address=syslog_path)
    date_str = "%Y-%m-%d %H:%M:%S" if show_date else "%H:%M:%S"
    handler.setFormatter(
        logging.Formatter("%(asctime)s [%(levelname)s] %(message)s", date_str))
    root_logger.addHandler(handler)
def do_snapshot_force_delete(cs, args):
    """Attempt force-delete of snapshot, regardless of state."""
    # Resolve the snapshot by name or id, then force-delete it in one step.
    _find_share_snapshot(cs, args.snapshot).force_delete()
def db_to_df(db_table):
    """Reads in a table from the board games database as pandas DataFrame"""
    # NOTE: the table name is interpolated directly into the SQL statement;
    # only call this with trusted, internally-defined table names.
    return pd.read_sql(f"SELECT * FROM {db_table};", DB)
def server(ip, port, channel, token, single, config, tmux):
    """Runs the discord bot server.

    In single mode, exactly one channel/token pair is used directly; otherwise
    a configuration file drives one or more server instances, optionally in tmux.
    """
    if single:
        # NOTE(review): `assert` is stripped under `python -O`; this validation
        # would silently disappear -- consider raising click.UsageError instead.
        validate_single = {"channel": channel, "token": token}
        for key, arg in validate_single.items():
            assert (
                len(arg[0]) > 0
            ), f"{key} is empty. Please add the --{key} flag with the approprate information."
        # Only log prefixes of channel/token to avoid leaking credentials.
        logger.info(
            "Running server... ", ip=ip, port=port, channel=channel[0][:5], token=token[0][:5]
        )
        click.echo("Running server...")
        server = Server(ip=ip, port=port, channel_id=channel[0], bot_token=token[0])
        server.run()
    elif len(config) > 0:
        click.echo("running config")
        if tmux:
            click.echo("running in tmux")
            server_instances_from_configuration_file_in_tmux(config)
        else:
            server_instances_from_configuration_file(config)
    else:
        click.echo("Please choose a config file or run in single mode.")
def kill_master_if_running(identifier="lunchrc", directory=None):
    """
    Given a lunch master identifier and a PID file directory, kills the master.

    Sends SIGINT first, polls every 0.2 s, and escalates to SIGKILL if the
    master is still alive after 20 seconds.

    :param identifier: lunch master identifier used to locate its PID file.
    :param directory: directory containing the PID file.
    :return: a Deferred fired once the master is gone (or was never running).
    """
    pid_file = gen_pid_file_path(identifier, directory)
    deferred = defer.Deferred()
    send_sigkill_at = time.time() + 20.0  # wait 20 seconds before to use kill -9
    is_first_time_called = True
    def _kill(is_first_time_called=False):
        # We check if running several times before sending SIGKILL.
        if os.path.exists(pid_file):
            log.info("PID file for master %s found!" % (pid_file))
            pid = is_lunch_master_running(pid_file)
            if pid is not None:
                if is_first_time_called:
                    log.warning("Sending SIGINT to the lunch master %s." % (identifier))
                    os.kill(pid, signal.SIGINT)
                    reactor.callLater(0.2, _kill)
                else:
                    if time.time() > send_sigkill_at:
                        log.warning("Sending SIGKILL to the lunch master %s." % (identifier))
                        # BUG FIX: os.kill requires (pid, signal); the pid
                        # argument was previously missing, raising TypeError.
                        os.kill(pid, signal.SIGKILL)
                        deferred.callback(None)
                    else:
                        log.debug("The lunch master %s is not dead yet." % (identifier))
                        reactor.callLater(0.2, _kill)
            else:
                if is_first_time_called:
                    log.warning("The lunch master %s was not running." % (identifier))
                    deferred.callback(None)
        else:
            if is_first_time_called:
                log.info("Could not find a PID file for master %s." % (identifier))
                deferred.callback(None)
    reactor.callLater(0.01, _kill, True)
    return deferred
def player_input_choice() -> int:
    """Prompt the player until a valid, free board position (1-9) is entered."""
    marker_position = 0
    # Keep asking while the choice is out of range or the cell is taken;
    # the cell check only runs once the value is in range.
    while not (1 <= marker_position <= 9 and tic_tac_toe.check_the_cell(marker_position)):
        marker_position = int(input("Choose the position for your marker from 1 to 9: "))
    return marker_position
def write_all_summaries(ID, stats_pdframe, output_folder):
    """
    Write a user's summary statistics to ``<output_folder>/<ID>.csv``.

    Args:
        ID: str, user identifier used as the output file name
        stats_pdframe: pd.DataFrame of summary stats
        output_folder: folder path where the output is saved (created,
            including parents, if missing)
    Return: writes out a csv file named by user ID
    """
    # exist_ok avoids the check-then-create race of os.path.exists + os.mkdir,
    # and makedirs also creates missing parent directories.
    os.makedirs(output_folder, exist_ok=True)
    stats_pdframe.to_csv(os.path.join(output_folder, str(ID) + ".csv"), index=False)
def increment_datetime_by_string(mydate, increment, mult=1):
    """Return a new datetime object incremented with the provided
    relative dates specified as string.

    The increment string may contain comma separated values of type
    seconds, minutes, hours, days, weeks, months and years. An optional
    multiplier is applied to the increment before adding it to the
    provided datetime object.

    Usage:

    .. code-block:: python

        >>> dt = datetime(2001, 9, 1, 0, 0, 0)
        >>> string = "60 seconds, 4 minutes, 12 hours, 10 days, 1 weeks, 5 months, 1 years"
        >>> increment_datetime_by_string(dt, string)
        datetime.datetime(2003, 2, 18, 12, 5)

        >>> dt = datetime(2001, 11, 1, 0, 0, 0)
        >>> increment_datetime_by_string(dt, "13 months")
        datetime.datetime(2002, 12, 1, 0, 0)

        >>> dt = datetime(2001, 1, 1, 0, 0, 0)
        >>> increment_datetime_by_string(dt, "30 days")
        datetime.datetime(2001, 1, 31, 0, 0)

    :param mydate: A datetime object to incremented
    :param increment: A string providing increment information:
        The string may include comma separated values of type
        seconds, minutes, hours, days, weeks, months and years
    :param mult: A multiplier, default is 1
    :return: The new datetime object or none in case of an error
    """
    # Delegate to the shared helper with a positive sign (increment).
    return modify_datetime_by_string(mydate, increment, mult, sign=1)
def get_regularization_loss(scope=None, name="total_regularization_loss"):
    """Gets the total regularization loss.

    Args:
        scope: An optional scope name for filtering the losses to return.
        name: The name of the returned tensor.
    Returns:
        A scalar regularization loss.
    """
    losses = get_regularization_losses(scope)
    # With no registered losses, return a constant zero rather than add_n([]).
    if not losses:
        return constant_op.constant(0.0)
    return math_ops.add_n(losses, name=name)
def Make_Sequential(sents, **kwargs):
    """
    Make sequential parses (each word simply linked to the next one),
    to use as baseline
    """
    output_path = kwargs.get("output_path", os.environ["PWD"])
    sequential_parses = []
    for sent in sents:
        # Start every parse with the left-wall linked to the first word.
        parse = [["0", "###LEFT-WALL###", "1", sent[0]]]
        parse.extend(
            [str(idx), sent[idx - 1], str(idx + 1), sent[idx]]
            for idx in range(1, len(sent))
        )
        sequential_parses.append(parse)
    Print_parses(sents, sequential_parses, f"{output_path}/sequential_parses.ull")
    return sequential_parses
def test_handle_set_handle_set_characteristics_unencrypted(driver):
    """Verify an unencrypted set_characteristics is rejected with HTTP 401."""
    # Build a minimal accessory with one service and register it with the driver.
    acc = Accessory(driver, "TestAcc", aid=1)
    assert acc.aid == 1
    service = acc.driver.loader.get_service("GarageDoorOpener")
    acc.add_service(service)
    driver.add_accessory(acc)
    # Simulate a request arriving on a connection that has NOT completed
    # pairing/encryption.
    handler = hap_handler.HAPServerHandler(driver, "peername")
    handler.is_encrypted = False
    response = hap_handler.HAPResponse()
    handler.response = response
    handler.request_body = b'{"characteristics":[{"aid":1,"iid":9,"ev":true}]}'
    handler.handle_set_characteristics()
    # Unencrypted writes must be refused as unauthorized.
    assert response.status_code == 401
def test_keypoint_nn(model):
    """Wrapper for KeypointModelTest: build the test for ``model`` and invoke it."""
    KeypointModelTest(model)()
def storage_backend_get_all(context, inactive=False, filters=None):
    """Get all storage backends.

    Thin delegate to the configured database implementation (IMPL);
    argument semantics are defined by IMPL.storage_backend_get_all.

    :param context: request context passed through to the implementation.
    :param inactive: presumably includes inactive backends when True -- see IMPL.
    :param filters: optional filters forwarded to the implementation.
    """
    return IMPL.storage_backend_get_all(context, inactive, filters)
def get_date_opm_status_response(intent, session):
    """ Gets the current status of opm for the day

    Builds an Alexa speechlet response for the OPM (Office of Personnel
    Management) operating status on the date found in the intent slots.
    Falls back to a re-prompt when the date is missing, unparsable, or the
    API returns no usable status.
    """
    card_title = "OPM Status Result"
    session_attributes = {}
    # Defaults used when no "date" slot is present in the intent.
    speech_output = "I'm not sure which o. p. m. status you requested. " \
                    "Please try again."
    reprompt_text = "I'm not sure which o. p. m. status you requested. " \
                    "Try asking if the government is open today."
    should_end_session = True
    if "date" in intent["slots"]:
        dt_value = intent["slots"]["date"]["value"]
        try:
            # Alexa delivers ISO dates; the OPM API expects MM/DD/YYYY.
            fmt_dt_value = datetime.datetime.strptime(dt_value, "%Y-%m-%d").strftime("%m/%d/%Y")
            # call the operating status endpoint and convert the response to json
            r = requests.get(API_BASE + "?date=" + fmt_dt_value)
            if r.status_code == 200:
                data = r.json()
                status = data['StatusType'].lower()
                if status != 'undefined':
                    speech_output = "Federal agencies in the Washington, DC, area were " \
                                    + status + " on " + dt_value + "."
                    reprompt_text = ""
                else:
                    # API answered but without a real status; keep session open.
                    speech_output = "I seem to be having trouble answering your question. " \
                                    "Please ask me for the o. p. m. status by saying, " \
                                    "Is the government open today?"
                    reprompt_text = "Please ask me for bus times by saying, " \
                                    "Is the government open today?"
                    should_end_session = False
        except ValueError:
            # strptime failed: the slot value was not a valid ISO date.
            speech_output = "Sorry, I did not understand that date. Please ask your question " \
                            "again with a valid date."
            reprompt_text = "Sorry, I did not understand that date. Please ask your question " \
                            "again with a valid date."
            should_end_session = False
    return build_response(session_attributes, build_speechlet_response(
        card_title, speech_output, reprompt_text, should_end_session))
def oauth_api_request(method, url, **kwargs):
    """
    Perform an OAuth API request; on network error the client falls back
    to the rss proxy.
    """
    proxy_options = _proxy_helper.get_proxy_options()
    rss_client = RSSProxyClient(**proxy_options, proxy_strategy=_proxy_strategy)
    return rss_client.request(method, url, **kwargs)
def validate_cached(cached_calcs):
    """
    Check that the calculations with created with caching are indeed cached.

    Returns False (after printing diagnostics and process reports) if any
    calculation failed, has an invalid/missing cache hash, or -- for calc
    jobs -- has raw input files that differ from its cache source.
    """
    valid = True
    for calc in cached_calcs:
        # 1) The cached calculation itself must have completed successfully.
        if not calc.is_finished_ok:
            print('Cached calculation<{}> not finished ok: process_state<{}> exit_status<{}>'
                  .format(calc.pk, calc.process_state, calc.exit_status))
            print_report(calc.pk)
            valid = False
        # 2) It must record its cache source and its stored hash must match
        #    a freshly computed one.
        if '_aiida_cached_from' not in calc.extras or calc.get_hash() != calc.get_extra('_aiida_hash'):
            print('Cached calculation<{}> has invalid hash'.format(calc.pk))
            print_report(calc.pk)
            valid = False
        # 3) For calc jobs, the raw input files of the cached node must mirror
        #    those of the original node it was cached from.
        if isinstance(calc, CalcJobNode):
            original_calc = load_node(calc.get_extra('_aiida_cached_from'))
            files_original = original_calc.list_object_names()
            files_cached = calc.list_object_names()
            if not files_cached:
                print("Cached calculation <{}> does not have any raw inputs files".format(calc.pk))
                print_report(calc.pk)
                valid = False
            if not files_original:
                print("Original calculation <{}> does not have any raw inputs files after being cached from."
                      .format(original_calc.pk))
                valid = False
            if set(files_original) != set(files_cached):
                print("different raw input files [{}] vs [{}] for original<{}> and cached<{}> calculation".format(
                    set(files_original), set(files_cached), original_calc.pk, calc.pk))
                valid = False
    return valid
def SplitRecursively(x, num_splits, axis=-1):
    """Splits Tensors in 'x' recursively.

    Args:
        x: a Tensor, or a list or NestMap containing Tensors to split.
        num_splits: number of splits per Tensor.
        axis: the split axis.
    Returns:
        A list of split values of length 'num_splits'.
        - If 'x' is a Tensor, a list of split Tensors.
        - If 'x' is a list, a list of lists, where each sublist has the same length
          as 'x' and the k'th element in each sublist corresponds to a split of the
          k'th element from 'x'.
        - If 'x' is a `.NestedMap`, a list of `.NestedMap`, where each field
          corresponds to a split from the same field of 'x'.
    """
    if isinstance(x, tf.Tensor):
        return tf.split(x, num_splits, axis=axis)
    if isinstance(x, list):
        # Split each element, then transpose: element-major -> split-major.
        per_element = [SplitRecursively(elem, num_splits, axis) for elem in x]
        return [list(group) for group in zip(*per_element)]
    if isinstance(x, NestedMap):
        out_maps = [NestedMap() for _ in range(num_splits)]
        for key, val in x.items():
            for i, piece in enumerate(SplitRecursively(val, num_splits, axis)):
                out_maps[i][key] = piece
        return out_maps
    raise TypeError('Unexpected type for SplitRecursively: %s' % type(x))
def calc_global_rwr(events, tslsi_anom, tas_anom, weights, regrid_model,
                    ndays_dry=_NDAYS_DRY, ndays_ante=_NDAYS_ANTE,
                    rwr_beg=_RWR_BEG, rwr_end=_RWR_END,
                    modis_out=False,
                    file_out_rwr=None, file_out_modis=None):
    """
    Calculate RWR from composite weighted means over a set of events for
    coarse (1 degree) grid boxes globally. Resulting (lon, lat) fields
    are written to a netCDF file.

    Parameters
    ----------
    events : list of lists of <dry_spell_rwr.event.Event> instances
        Global dry spell event information. Outer list is some
        grouping, e.g., season, inner list is land point.
    tslsi_anom : MaskedArray, shape(time, land)
        Land surface temperature anomaly (K).
    tas_anom : MaskedArray, shape(time, land)
        Near-surface air temperature anomaly (K).
    weights : MaskedArray, shape(time, land)
        Weights used in calculating the composite means. This is
        typically the number of 1 km MODIS LST data per 0.5deg gridbox.
    regrid_model : function
        Returns info for mapping from the input grid to a coarser output
        grid.
    ndays_dry : int, optional
        Number of dry spell days to composite over.
    ndays_ante : int, optional
        Number of days before dry spell started to composite over.
    rwr_beg : int, optional
        The first dry spell day to use for the RWR regression. This is
        zero-based, so rwr_beg=1 starts the regression on the second day
        of the dry spell.
    rwr_end : int, optional
        The last dry spell day to use for the RWR regression.
    modis_out : bool, optional
        If true, indicates that the input tslsi_anom is MODIS data, so
        file_out_modis is used as the output file name.
    file_out_rwr : str, optional
        Output netCDF file name. This is ignored if modis_out=T.
    file_out_modis : str, optional
        Output netCDF file name. Only used if modis_out=T.
    """
    # Pick the output name depending on whether the input is MODIS data.
    file_out = file_out_modis if modis_out else file_out_rwr
    # Use the annual (first) events grouping.
    annual_events = events[0]
    # Map model grid boxes onto larger 1-degree boxes for compositing.
    coarse_grid, land = regrid_model(1)
    composites = cc.get_rwr(annual_events, tas_anom, tslsi_anom, weights, land,
                            ndays_dry=ndays_dry, ndays_ante=ndays_ante,
                            rwr_beg=rwr_beg, rwr_end=rwr_end)
    fio.nc_rwr_write(coarse_grid, composites, file_out)
    return
def generate_entry(request, properties, data, mtime=None):
    """
    Build a generic entry from a properties dict and a data string.

    :param request: the Request object
    :param properties: the dict of properties for the entry
    :param data: the data content for the entry
    :param mtime: the mtime tuple (as given by ``time.localtime()``);
        pass None to use the current local time.
    """
    entry = EntryBase(request)
    entry.update(properties)
    entry.set_data(data)
    # Fall back to "now" when no modification time was supplied.
    entry.set_time(mtime if mtime else time.localtime())
    return entry
def info():
    """Refresh the client session using the refresh token."""
    # Rebind the module-level client to the refreshed session object so all
    # subsequent calls use the new credentials.
    global client
    client = client.refresh_session(app_id, app_secret)
    return "Refreshed"
def weight_point_in_circle(
    point: tuple,
    center: tuple,
    radius: int,
    corner_threshold: float = 1.5
):
    """Classify a grid coordinate as an empty, full or half tile.

    Arguments:
        point (tuple): x, y of the point to be tested
        center (tuple): x, y of the origin (center) point
        radius (int): radius of certainly empty tiles, does not include half tiles
        corner_threshold (float): threshold deciding when a tile on the rim
            counts as a half tile instead of empty
    Returns:
        int: 0 for an empty tile, 1 for a full tile, 2 for a half tile
    """
    dx = abs(center[0] - point[0])
    dy = abs(center[1] - point[1])
    # Cheap bounding-box rejection before the distance test.
    if dx > radius or dy > radius:
        return 0
    # Squared distance (rounded once, compared against both thresholds).
    dist_sq = round(dx * dx + dy * dy)
    radius_sq = radius * radius
    if dist_sq < radius_sq:
        return 1  # strictly inside the circle: full tile
    if dist_sq < radius_sq * corner_threshold and dx < radius:
        return 2  # on the rim: half tile
    return 0
def encipher_shift(plaintext, plain_vocab, shift):
  """Encrypt plain text with a single shift layer.

  Args:
    plaintext (list of list of Strings): a list of plain text to encrypt.
    plain_vocab (list of Integer): unique vocabularies being used.
    shift (Integer): number of shift, shift to the right if shift is positive.
  Returns:
    ciphertext (list of Strings): encrypted plain text.
  """
  cipher = ShiftEncryptionLayer(plain_vocab, shift)
  # Encrypt every character of every sentence independently.
  return [
      [cipher.encrypt_character(character) for character in sentence]
      for sentence in plaintext
  ]
def esmf_interp_points(ds_in, locs_lon, locs_lat, lon_field_name='lon',
                       lat_field_name='lat'):
    """Use ESMF toolbox to interpolate grid at points.

    Args:
        ds_in: xarray Dataset on a rectilinear lat/lon grid.
        locs_lon, locs_lat: DataArrays holding target point coordinates.
        lon_field_name, lat_field_name: coordinate names in ``ds_in``.

    Returns:
        xarray Dataset with every lat/lon variable bilinearly interpolated
        onto the point locations; variables without both spatial dims are
        skipped.
    """
    # generate grid object
    # NOTE: the deprecated alias np.float was removed in NumPy 1.24; the
    # builtin float is equivalent (np.float64 arrays result either way).
    grid = esmf_create_grid(ds_in[lon_field_name].values.astype(float),
                            ds_in[lat_field_name].values.astype(float))
    # generate location stream object
    locstream = esmf_create_locstream_spherical(locs_lon.values.astype(float),
                                                locs_lat.values.astype(float))
    # generate regridding object
    srcfield = ESMF.Field(grid, name='srcfield')
    dstfield = ESMF.Field(locstream, name='dstfield')
    regrid = ESMF.Regrid(srcfield, dstfield,
                         regrid_method=ESMF.RegridMethod.BILINEAR,
                         unmapped_action=ESMF.UnmappedAction.ERROR)
    # construct output dataset
    coords = {c: locs_lon[c] for c in locs_lon.coords}
    dims_loc = locs_lon.dims
    nlocs = len(locs_lon)
    ds_out = xr.Dataset(coords=coords, attrs=ds_in.attrs)
    for name, da_in in ds_in.data_vars.items():
        # get the dimensions of the input dataset; check if it's spatial
        dims_in = da_in.dims
        if lon_field_name not in dims_in or lat_field_name not in dims_in:
            continue
        # get the dimension/shape of output (lat/lon assumed to be the
        # trailing two dims of each spatial variable)
        non_lateral_dims = dims_in[:-2]
        dims_out = non_lateral_dims + dims_loc
        shape_out = da_in.shape[:-2] + (nlocs,)
        # create output dataset pre-filled with NaN
        da_out = xr.DataArray((np.ones(shape_out)*np.nan).astype(da_in.dtype),
                              name=name,
                              dims=dims_out,
                              attrs=da_in.attrs,
                              coords={c: da_in.coords[c] for c in da_in.coords
                                      if c in non_lateral_dims})
        dstfield.data[...] = np.nan
        if len(non_lateral_dims) > 0:
            # Collapse all leading dims into one so each 2-D slab can be
            # regridded in turn, then unstack back to the original layout.
            da_in_stack = da_in.stack(non_lateral_dims=non_lateral_dims)
            da_out_stack = xr.full_like(da_out, fill_value=np.nan).stack(non_lateral_dims=non_lateral_dims)
            for i in range(da_in_stack.shape[-1]):
                srcfield.data[...] = da_in_stack.data[:, :, i].T
                dstfield = regrid(srcfield, dstfield, zero_region=ESMF.Region.SELECT)
                da_out_stack.data[:, i] = dstfield.data
            da_out.data = da_out_stack.unstack('non_lateral_dims').transpose(*dims_out).data
        else:
            srcfield.data[...] = da_in.data[:, :].T
            dstfield = regrid(srcfield, dstfield, zero_region=ESMF.Region.SELECT)
            da_out.data = dstfield.data
        ds_out[name] = da_out
    return ds_out
def update_loan_record(id_: int, loan_record: LoanRecord) -> bool:
    """Update a loan record in the database.

    Args:
        id_: loan record id which wants to be modified.
        loan_record: new field values; entries that are ``None`` are ignored.

    Returns:
        True when at least one row was updated.
    """
    # Drop unset fields so only explicit values overwrite the row.
    changes = {field: value for field, value in loan_record.items() if value is not None}  # type: ignore
    with session_scope() as session:
        updated_rows = session.query(LoanRequest).filter(LoanRequest.id == id_).update(changes)
        session.commit()
        return updated_rows != 0
def check_results(results):
    """Combine individual check results into one overall scaling decision.

    Any SCALE_UP vote wins; otherwise scale down only when every check
    agrees; anything else means "leave it alone".
    """
    if CheckResults.SCALE_UP in results:
        return CheckResults.SCALE_UP
    unanimous_down = all(r == CheckResults.SCALE_DOWN for r in results)
    return CheckResults.SCALE_DOWN if unanimous_down else CheckResults.DONT_SCALE
def disk_usage(path):
    """Recursively report and return the disk usage of *path* in bytes."""
    total = os.path.getsize(path)
    if os.path.isdir(path):
        # Each recursive call prints its own subtree before we print ours.
        total += sum(
            disk_usage(os.path.join(path, child))
            for child in os.listdir(path)
        )
    print(f"{total:<10} {path}")
    return total
def mark_sent(email_id, db):
    """Mark an email as sent in the database.

    Args:
        email_id: primary key of the row in the ``emails`` table.
        db: DB-API cursor/connection-like object exposing ``execute``.
    """
    # Bind the id as a query parameter instead of string-formatting it in,
    # so a non-numeric/untrusted id cannot inject SQL.
    db.execute("UPDATE emails SET sent=NOW() WHERE id = %s;", (email_id,))
def edit_route(link_id):
    """Render the edit form for a link and persist submitted changes."""
    stored = dynamo.tables[TABLE_NAME].get_item(Key={'id': link_id})['Item']
    form = LinkForm(link=stored['link'], tags=','.join(stored['tags']))
    # Unsubmitted/invalid form: just show the edit page again.
    if not form.validate_on_submit():
        return render_template('addedit.html', form=form)
    link, tags = form.parsed_data()
    dynamo.tables[TABLE_NAME].update_item(
        Key={'id': link_id},
        UpdateExpression='set link = :link, tags = :tags',
        ExpressionAttributeValues={':link': link, ':tags': tags},
        ReturnValues='UPDATED_NEW')
    return redirect(url_for('app.index_route'))
def build_empty_pq():
    """Return a fresh, empty priority queue."""
    empty_queue = PriorityQ()
    return empty_queue
def add_flags(flags):
    """Return a test hook that appends the given KZC *flags* to a test."""
    def append_flags(test, way):
        # ``way`` is part of the hook signature but unused here.
        test.args += flags
    return append_flags
def extract_stackstring(f, bb):
    """ check basic block for stackstring indicators """
    # Yield nothing for blocks without stackstring evidence.
    if not _bb_has_stackstring(f, bb):
        return
    yield Characteristic("stack string"), bb.va
async def goto(ctx, channel: discord.TextChannel):
    """Tell Amy to go to another channel"""
    # Switch the bot's active channel, then narrate the move.
    change_channel(channel)
    await ctx.send(f'*Walks to {channel.mention}.*')
def RunProcess(cmd, stdinput=None, env=None, cwd=None, sudo=False,
               sudo_password=None):
  """Executes cmd using subprocess.

  Args:
    cmd: An array of strings as the command to run
    stdinput: An optional string as stdin
    env: An optional dictionary of extra environment variables; these are
        overlaid on a *copy* of the current process environment.
    cwd: An optional string as the current working directory
    sudo: An optional boolean on whether to do the command via sudo
    sudo_password: An optional string of the password to use for sudo
  Returns:
    A tuple of two strings and an integer: (stdout, stderr, returncode).
  Raises:
    DSException: if both stdinput and sudo_password are specified
  """
  if sudo:
    sudo_cmd = ['sudo']
    if sudo_password and not stdinput:
      # Set sudo to get password from stdin
      sudo_cmd = sudo_cmd + ['-S']
      stdinput = sudo_password + '\n'
    elif sudo_password and stdinput:
      raise DSException('stdinput and sudo_password '
                        'are mutually exclusive')
    else:
      sudo_cmd = sudo_cmd + ['-p',
                             "%u's password is required for admin access: "]
    cmd = sudo_cmd + cmd
  # Copy the environment: updating os.environ directly would permanently
  # mutate this process's environment, and update(None) crashed when the
  # optional env argument was omitted.
  environment = dict(os.environ)
  if env:
    environment.update(env)
  task = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                          stdin=subprocess.PIPE, env=environment, cwd=cwd)
  (stdout, stderr) = task.communicate(input=stdinput)
  return (stdout, stderr, task.returncode)
def general_output(string):
    """Print *string* on the console after normalising special signs."""
    cleaned = ownfunctions.replace_signs(string)
    print(cleaned)
def public_jsonp_service(view):
    """Wrap *view* as a public JSONP service.

    More explicitly named to call attention to the extra little p.
    """
    wrapped = _json_service_wrapper(JSONPResponse, view)
    return wrapped
def make_submission(predictions,
                    index2playlist,
                    index2trackid,
                    outfile=None,
                    topk=500):
    """ Writes the predictions as submission file to disk

    :param predictions: score matrix, one row per playlist, one column per track.
    :param index2playlist: mapping from row index to playlist id.
    :param index2trackid: mapping from column index to track id.
    :param outfile: path of the CSV file to write; must not be None.
    :param topk: number of highest-scoring tracks kept per playlist.
    """
    print("Sorting top {} items for each playlist".format(topk))
    __, topk_iy = argtopk(predictions, topk)
    print("Writing rows to", outfile)
    # NOTE(review): the file is opened in append mode yet the header is
    # written unconditionally — appending to an existing file duplicates
    # the header row; confirm this is intended.
    with open(outfile, 'a') as csvfile:
        csv_writer = csv.writer(csvfile, delimiter=',')
        csv_writer.writerow(SUBMISSION_HEADER)
        # Line format
        # playlist_id, trackid1, trackid2, trackid500
        for row_ix, item_ixs in enumerate(topk_iy):
            playlist = index2playlist[row_ix]
            items = [index2trackid[ix] for ix in item_ixs]
            csv_writer.writerow([playlist] + items)
def test_convert_wrong_path(mock_app):
    """Test that the convert command fails cleanly on a missing panel file."""
    runner = mock_app.test_cli_runner()
    # Provide a non-valid path to a panel file
    result = runner.invoke(cli, ["convert", "wrong/path/to/file"])
    assert result.exit_code != 0
    assert "Could not open file: wrong/path/to/file:" in result.output
    # (Removed a dead trailing `runner = mock_app.test_cli_runner()` that
    # created a second runner and discarded it.)
def chunks(iterable, size):
    """Generator yielding successive tuples of at most *size* items."""
    it = iter(iterable)
    # iter(callable, sentinel) stops once islice yields an empty tuple.
    yield from iter(lambda: tuple(islice(it, size)), ())
def load_network():
    """
    Load the network. After calling this function, facebook.network points to a networkx object for the facebook data.
    """
    # Order matters: features first, then nodes, then edges.
    for loader in (load_features, load_nodes, load_edges):
        loader()
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the FiveM sensor platform."""
    coordinator = hass.data[DOMAIN][entry.entry_id]
    # One sensor entity per description; register them all at once.
    entities = [
        FiveMSensorEntity(coordinator, description) for description in SENSORS
    ]
    async_add_entities(entities)
def manage_data(xls_file: str) -> list:
    """
    Convert manually annotated xls data into the downstream processing format.

    Consecutive rows sharing the same ``text`` are merged into a single
    record whose ``spo_list`` collects the remaining columns of every row.

    :param xls_file: path of the target file
    :return: list of converted dicts, each ``{'text': ..., 'spo_list': [...]}``
    """
    # NOTE(review): ``index=False`` is not a documented pd.read_excel
    # parameter — confirm the pandas version in use accepts it.
    f = pd.read_excel(xls_file, index=False)
    cnt = 0
    result = []
    # Walk consecutive rows; runs of identical ``text`` are merged.
    # NOTE(review): the loop stops at len(f)-1, so a final row whose text
    # differs from its predecessor is never emitted — confirm intended.
    while cnt < len(f) - 1:
        if f.text[cnt] == f.text[cnt + 1]:
            temp_dic = {'text': f.text[cnt], 'spo_list': []}
            while cnt < len(f) - 1 and f.text[cnt] == f.text[cnt + 1]:
                temp_dic['spo_list'].append(f.iloc[cnt, 1:].to_dict())
                cnt += 1
            # Include the last row of the run as well.
            temp_dic['spo_list'].append(f.iloc[cnt, 1:].to_dict())
            cnt += 1
            result.append(temp_dic)
        else:
            temp_dic = {'text': f.text[cnt],
                        'spo_list': [f.iloc[cnt, 1:].to_dict()]}
            result.append(temp_dic)
            cnt += 1
    return result
def get_arrival_times(inter_times):
    """Convert inter-event times to absolute arrival times.

    The i-th arrival time is the running sum of the first i+1
    inter-event times.
    """
    arrival_times = inter_times.cumsum()
    return arrival_times
def generate_custom_background(size, background_color, nb_blobs=3000,
                               kernel_boundaries=(50, 100)):
    """ Generate a customized background to fill the shapes
    Parameters:
      size: (height, width) of the generated image
      background_color: average color of the background image
      nb_blobs: number of circles to draw
      kernel_boundaries: interval of the possible sizes of the kernel
    Returns:
      uint8 image of the requested size
    """
    # Start from a flat image at a colour near background_color.
    # NOTE(review): cv, random_state and get_random_color are module-level
    # globals defined elsewhere in this file.
    img = np.zeros(size, dtype=np.uint8)
    img = img + get_random_color(background_color)
    # Random blob centres: column 0 is x (within width), column 1 is y.
    blobs = np.concatenate([random_state.randint(0, size[1], size=(nb_blobs, 1)),
                            random_state.randint(0, size[0], size=(nb_blobs, 1))],
                           axis=1)
    for i in range(nb_blobs):
        col = get_random_color(background_color)
        # Filled circle (-1) with a random radius in [0, 20).
        cv.circle(img, (blobs[i][0], blobs[i][1]),
                  random_state.randint(20), col, -1)
    kernel_size = random_state.randint(kernel_boundaries[0], kernel_boundaries[1])
    # In-place box blur smooths the blobs into a mottled texture.
    cv.blur(img, (kernel_size, kernel_size), img)
    return img
def rotate_z(domain, nrot=4):
    """take BoxCollection and return equivalent CylinderCollection by
    rotating about the second axis. thus, transform coordinates of
    points like (x, z) --> (x, 0, z).

    ``d=1`` selects the (zero-based) axis inserted by the rotation and
    ``nrot`` is forwarded as the number of rotation steps — see ``rotate``
    for the exact semantics.
    """
    return rotate(domain, d=1, nrot=nrot)
def test_unsaved_filing_lock(session):
    """Assert that an unsaved filing, even with an invoice, is not locked."""
    # Build a filing in memory only; attaching a payment token alone
    # must not lock it.
    unsaved = Filing()
    unsaved.payment_token = 'payment_token'
    assert not unsaved.locked
def fetch_rib(config):
    """
    Fetch the Internet routing table, as observed by BGP collectors.
    """
    # Clean stale artifacts, then download and convert the RIB dumps.
    for step in (cleanup_files, download_ribs, convert_ribs):
        step(config)
def filter_roi(roi_data, nb_nonzero_thr):
    """Filter slices from dataset using ROI data.

    A slice is filtered out when the number of non-zero voxels within the
    ROI (e.g. centerline, SC segmentation) is inferior or equal to a given
    threshold.

    Args:
        roi_data (nd.array): ROI slice.
        nb_nonzero_thr (int): Threshold.
    Returns:
        bool: True if the slice needs to be filtered, False otherwise.
    """
    nonzero_voxels = np.count_nonzero(roi_data)
    # All-zero slices are always filtered; otherwise compare to threshold.
    return nonzero_voxels == 0 or nonzero_voxels <= nb_nonzero_thr
def mdadm_status():
    """
    Check the status of mdadm managed disks and print a health table.

    Parses ``/proc/mdstat`` line by line; indented continuation lines carry
    the device counts for the array introduced by the preceding header line.
    Example Output:
    Personalities : [raid1] [linear] [multipath] [raid0] [raid6] [raid5] [raid4] [raid10]
    md0 : active raid1 sdc1[1] sdb1[0]
    9766302720 blocks super 1.2 [2/2] [UU]
    bitmap: 0/73 pages [0KB], 65536KB chunk
    md1 : active raid1 sda1[0] sde1[1]
    1000072192 blocks super 1.2 [2/2] [UU]
    bitmap: 4/8 pages [16KB], 65536KB chunk
    unused devices: <none>
    """
    # NOTE(review): new_md appears unused below.
    new_md = True
    md_arrays = []
    new_array = {"health": "HEALTHY"}
    for line in execute("cat /proc/mdstat", capture=True).split("\n"):
        if line:
            # Skip the "Personalities" preamble and "unused devices" footer.
            if line.split()[0] not in ["Personalities", "unused"]:
                if line.startswith(" "):
                    # Continuation line: "... blocks ... [total/active] [UU]".
                    if "blocks" in line:
                        drives = line.split(" [", 1)[1]
                        total, active = drives.split("] [")[0].split("/")
                        total = int(total)
                        active = int(active)
                        status = drives.split("] [")[1].replace("]", "")
                        new_array["total_devices"] = total
                        new_array["active_devices"] = active
                        new_array["failed_devices"] = max(total - active, 0)
                        # Degraded while active, failed otherwise.
                        if new_array["failed_devices"]:
                            if new_array["state"] == "active":
                                new_array["health"] = "ERROR: DEGRADED!!!"
                            else:
                                new_array["health"] = "ERROR: FAILED!!!"
                else:
                    # Header line: "mdN : <state> <raid level> <members...>".
                    array = line.replace(":", "").split()
                    new_array["name"] = array[0]
                    new_array["state"] = array[1]
                    new_array["raid_level"] = array[2]
        else:
            # Blank line ends the current array's record.
            if "name" in new_array:
                md_arrays.append(new_array)
                new_array = {"health": "HEALTHY"}
    if md_arrays:
        print("mdadm Managed Devices:")
        cols = ["Device", "State", "Level", "Total", "Active", "Failed",
                "Health"]
        print((
            f"  {cols[0]:<6}\t{cols[1]:>6}\t{cols[2]:>7}\t{cols[3]:>5}\t"
            f"{cols[4]:>5}\t{cols[5]:>6}\tHealth"
        ))
        for array in md_arrays:
            # Green background for healthy arrays, red otherwise
            # (Back/RESET_ALL come from colorama at module level).
            BG_COLOR = Back.RED
            if array["health"] == "HEALTHY":
                BG_COLOR = Back.GREEN
            print((
                f'  {array["name"]:<6}\t{array["state"]:>6}\t'
                f'{array["raid_level"]:>7}\t{array["total_devices"]:>5}\t'
                f'{array["active_devices"]:>6}\t{array["failed_devices"]:>6}\t'
                f'{BG_COLOR}{array["health"]}{RESET_ALL}'
            ))
        print()
def postprocess_relative_symlinks(logger, artifact_dir, filename):
    """
    Turns absolute symlinks that point inside the artifact_dir into relative
    symlinks, and gives an error on symlinks that point out of the artifact.

    :param logger: logger used for debug/error reporting
    :param artifact_dir: root directory the symlink target must stay within
    :param filename: path to inspect; non-symlinks are left untouched
    :raises ValueError: if an absolute symlink targets a path outside
        ``artifact_dir``
    """
    if os.path.islink(filename):
        target = os.readlink(filename)
        if os.path.isabs(target):
            # It is the link *target* that must stay inside the artifact;
            # the previous code mistakenly tested the link path itself,
            # silently rewriting out-of-artifact links.
            # NOTE(review): plain prefix check — '/art' also matches
            # '/artful'; consider os.path.commonpath for strictness.
            if not target.startswith(artifact_dir):
                msg = 'Absolute symlink %s points to %s outside of %s' % (filename, target, artifact_dir)
                logger.error(msg)
                raise ValueError(msg)
            else:
                new_target = os.path.relpath(target, os.path.dirname(filename))
                logger.debug('Rewriting symlink "%s" from "%s" to "%s"' % (filename, target, new_target))
                os.unlink(filename)
                os.symlink(new_target, filename)
def from_numpy(shape, dt):
    """
    Upcast a (shape, dtype) tuple if possible.

    >>> from_numpy((5,5), dtype('int32'))
    dshape('5, 5, int32')
    """
    dtype = np.dtype(dt)

    if dtype.kind == 'S':
        measure = String(dtype.itemsize, 'A')
    elif dtype.kind == 'U':
        # NumPy unicode dtypes store 4 bytes per code point; use integer
        # division so the length stays an int on Python 3.
        measure = String(dtype.itemsize // 4, 'U8')
    elif dtype.fields:
        rec = [(a, CType.from_dtype(b[0])) for a, b in dtype.fields.items()]
        measure = Record(rec)
    else:
        measure = CType.from_dtype(dtype)

    if shape == ():
        return measure
    else:
        # list(...) is required on Python 3, where map() returns an
        # iterator that cannot be concatenated to a list with ``+``.
        return DataShape(parameters=(list(map(Fixed, shape)) + [measure]))
def ParseArgs():
  """Parse the command line options.

  Returns:
    An optparse options object with subject/message placeholders resolved;
    exits via option_parser.error() when a required flag is missing.
  """
  option_parser = optparse.OptionParser()
  option_parser.add_option(
      '--from', dest='sender', metavar='EMAIL',
      help='The sender\'s email address')
  option_parser.add_option(
      '--to', action='append', metavar='EMAIL', dest='recipients', default=[],
      help='The recipient\'s address (repeatable)')
  option_parser.add_option(
      '--subject', metavar='TEXT|@FILE', help='The subject of the email')
  option_parser.add_option(
      '--message', metavar='TEXT|@FILE', help='The body of the message')
  option_parser.add_option(
      '--attach', metavar='FILE', action='append', dest='attachments',
      default=[], help='The path of a file to attach')
  option_parser.add_option(
      '--ignore-missing', action='store_true', default=False,
      help='No errors on attempts to attach non-existing files')
  option_parser.add_option('--server', help='The SMTP server to use')
  option_parser.add_option('--password', help='The password to use')
  options, _args = option_parser.parse_args()
  # Enforce the required flags up front with consistent messages.
  if not options.sender:
    option_parser.error('--from is required')
  if not options.recipients:
    option_parser.error('At least one --to is required')
  if not options.subject:
    option_parser.error('--subject is required')
  if not options.message:
    option_parser.error('--message is required')
  if not options.server:
    option_parser.error('--server is required')
  # @FILE arguments are replaced with the file's contents.
  options.subject = ResolveParameter(options.subject)
  options.message = ResolveParameter(options.message)
  return options
def set_conf(key, value, conf=None):
    """
    Set a dotted configuration key, creating intermediate dicts as needed,
    then persist the configuration via its ``save()`` method.
    """
    target = global_config if conf is None else conf
    *parents, leaf = key.split('.')
    node = target
    for part in parents:
        node = node.setdefault(part, {})
    node[leaf] = value
    target.save()
def train_perception(base_dir, save_path, batch_size=32, random_state=0, lr=0.01, optim='Adam', epochs=100):
    """Train the Perception network.

    Args:
        base_dir ([str]): Directory which stores the training images
        save_path ([str]): Path to store saved model checkpoints to
        batch_size (int, optional): Training batch size. Defaults to 32.
        random_state (int, optional): Random state to initialize params with. Defaults to 0.
        lr (float, optional): Training Learning Rate. Defaults to 0.01.
        optim (str, optional): Optimizer to use for training. Defaults to 'Adam'.
        epochs (int, optional): Number of epochs to train for. Defaults to 100.
    """
    # Input pipeline: resize every image and convert it to a tensor.
    preprocess = T.Compose([
        T.Resize((224, 224)),
        T.ToTensor()
    ])
    datasets = {
        mode: PerceptionDataset(base_dir, seed=random_state, mode=mode,
                                transform=preprocess)
        for mode in ('train', 'val')
    }
    net = PerceptionNet()
    # MSE drives the optimisation; MAE is reported for evaluation.
    trainer = Trainer(
        datasets['train'], datasets['val'], net, get_loss('mse'),
        eval_loss=get_loss('mae'),
        random_state=random_state,
        batch_size=batch_size,
        lr=lr, optim=optim, num_epochs=epochs,
        optimizer_kwargs={'momentum': 0.9}
    )
    trainer.train(save_path)
def define_components(mod):
    """
    Adds components to a Pyomo abstract model object to force discrete
    builds for generation technologies that have g_unit_size specified.
    Unless otherwise stated, all power capacity is specified in units of
    MW and all sets and parameters are mandatory.
    NEW_PROJ_BUILDYEARS_DISCRETE is a subset of NEW_PROJ_BUILDYEARS that
    only includes projects that have g_unit_size defined for their
    technology.
    BuildUnits[(proj, bld_yr) in NEW_PROJ_BUILDYEARS_DISCRETE] is an
    integer decision variable of how many units to build.
    Build_Units_Consistency[(proj, bld_yr) in NEW_PROJ_BUILDYEARS_DISCRETE]
    is a constraint that forces the continous decision variable
    BuildProj to be equal to BuildUnits * g_unit_size.
    """
    # Restrict to (project, build year) pairs whose technology has a
    # declared unit size.
    mod.NEW_PROJ_BUILDYEARS_DISCRETE = Set(
        initialize=mod.NEW_PROJ_BUILDYEARS,
        filter=lambda m, proj, bld_yr: (
            m.proj_gen_tech[proj] in m.GEN_TECH_WITH_UNIT_SIZES))
    # Integer number of units built per project/year.
    mod.BuildUnits = Var(
        mod.NEW_PROJ_BUILDYEARS_DISCRETE,
        within=NonNegativeIntegers)
    # Tie the continuous MW build variable to whole units of g_unit_size.
    mod.Build_Units_Consistency = Constraint(
        mod.NEW_PROJ_BUILDYEARS_DISCRETE,
        rule=lambda m, proj, bld_yr: (
            m.BuildProj[proj, bld_yr] ==
            m.BuildUnits[proj, bld_yr] * m.g_unit_size[m.proj_gen_tech[proj]]))
def pyramidnet110_a84_cifar100(classes=100, **kwargs):
    """
    PyramidNet-110 (a=84) model for CIFAR-100 from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
        Location for keeping the model parameters.
    """
    # Fixed architecture hyper-parameters for this named variant.
    return get_pyramidnet_cifar(
        blocks=110,
        alpha=84,
        bottleneck=False,
        classes=classes,
        model_name="pyramidnet110_a84_cifar100",
        **kwargs)
def contains_vendored_imports(python_path):
    """
    Returns True if ``python_path`` seems to contain vendored imports from botocore.
    """
    # Rough heuristic: look for byte strings that resemble a vendored
    # import. Dynamic import tricks won't be caught, but:
    #
    # 1. A foolproof check would be incredibly complicated.
    # 2. Deliberately obfuscated Lambda code has bigger problems than
    #    vendor deprecations.
    #
    # Imports usually sit near the top of a file, so scanning line by line
    # lets us stop as soon as a match is found.
    needles = (
        b"import botocore.vendored",
        b"from botocore.vendored import ",
    )
    with open(python_path, "rb") as python_src:
        return any(
            any(needle in line for needle in needles)
            for line in python_src
        )
def filter_uniq(item):
    """Web app, feed template: build a unique, colon-delimited item id."""
    detail = item['item']
    fields = (item['code'], item['path'], str(detail['from']), str(detail['to']))
    return ':'.join(fields)
def load_pickle(filename, verbose=2, use_joblib=False):
    """
    Deserialize and return the object stored in ``filename``.

    Args:
        filename: path of the pickle (or joblib) file to load.
        verbose: at >= 2, log progress and load timing.
        use_joblib: load via joblib instead of pickle.
            Note: joblib can be potentially VERY slow.

    Returns:
        The unpickled object.

    Raises:
        EOFError: if the pickle file is truncated or empty.
    """
    with open(filename, 'rb') as file:
        if verbose >= 2:
            start = time.time()
            # Interpolate the actual path: the old f-strings contained the
            # literal text "(unknown)" instead of a placeholder.
            logging.info(f'Loading PICKLE from {filename}...')
        if use_joblib:
            warnings.warn('Joblib is slower in newer versions of Python.')
            import joblib
            obj = joblib.load(file)
        else:
            try:
                obj = pickle.load(file)
            except EOFError as e:
                logging.error(f'Load FAILED for {filename}.')
                raise e
        if verbose >= 2:
            logging.info(f'Load done in {np.round(time.time()-start, 4)} seconds.')
    return obj
def test_cards(fabdb):
    """ Test the cards function """
    payload = fabdb.cards()
    # The API must answer with a dict carrying a non-empty 'data' list.
    assert isinstance(payload, dict)
    assert len(payload['data']) > 0
async def make_match(*args, register=False, **kwargs) -> Match:
    """Create a Match object. There should be no need to call this directly; use matchutil.make_match instead,
    since this needs to interact with the database.
    Parameters
    ----------
    racer_1_id: int
        The DB user ID of the first racer.
    racer_2_id: int
        The DB user ID of the second racer.
    max_races: int
        The maximum number of races this match can be. (If is_best_of is True, then the match is a best of
        max_races; otherwise, the match is just repeating max_races.)
    match_id: int
        The DB unique ID of this match.
    suggested_time: datetime.datetime
        The time the match is suggested for. If no tzinfo, UTC is assumed.
    r1_confirmed: bool
        Whether the first racer has confirmed the match time.
    r2_confirmed: bool
        Whether the second racer has confirmed the match time.
    r1_unconfirmed: bool
        Whether the first racer wishes to unconfirm the match time.
    r2_unconfirmed: bool
        Whether the second racer wishes to unconfirm the match time.
    match_info: MatchInfo
        The types of races to be run in this match.
    cawmentator_id: int
        The DB unique ID of the cawmentator for this match.
    sheet_id: int
        The sheetID of the worksheet the match was created from, if any.
    register: bool
        Whether to register the match in the database.
    Returns
    ---------
    Match
        The created match.
    """
    # Return the cached Match when this ID was already constructed.
    if 'match_id' in kwargs and kwargs['match_id'] in match_library:
        return match_library[kwargs['match_id']]
    # New match: database writes go through matchdb.write_match.
    match = Match(*args, commit_fn=matchdb.write_match, **kwargs)
    await match.initialize()
    if register:
        # Persist and cache only registered matches.
        await match.commit()
        match_library[match.match_id] = match
    return match
def is_anonymous(context: TreeContext) -> bool:
    """Returns ``True`` if the current node is anonymous.

    A node is anonymous when its tag name is empty or begins with a colon.
    """
    tag = context[-1].tag_name
    return (not tag) or tag.startswith(':')
def expand_to_beam_size(tensor, beam_size):
  """Tiles a given tensor by beam_size along a new second axis."""
  expanded = tf.expand_dims(tensor, axis=1)
  # Repeat only along the freshly inserted beam axis.
  multiples = [1] * expanded.shape.ndims
  multiples[1] = beam_size
  return tf.tile(expanded, multiples)
def saveLayer_PDN(fileName: str, layeredImage: LayeredImage) -> None:
    """Save a layered image as .pdn.

    Not supported: pypdn cannot write .pdn files, so this always logs an
    error and raises.

    :raises NotImplementedError: always.
    """
    # Arguments are consumed only to satisfy the common saver signature.
    del fileName, layeredImage
    Logger(FHFormatter()).logPrint("Saving PDNs is not implemented in pypdn", LogType.ERROR)
    raise NotImplementedError
def get_vignettes(
        h5_file,
        bed_file,
        tmp_dir=".",
        importance_filter=True,
        rsid_to_genes=None,
        ensembl_to_hgnc=None):
    """get the example indices at the locations of interest

    Overlaps the scanmotifs h5 examples with bed_file regions (via
    bedtools), then for each overlapping example inspects importance
    scores around the variant position and plots the region when the
    variant carries non-zero importance.

    NOTE(review): this function is Python 2 (print statements); the
    ``importance_filter`` argument is never read, and the bare excepts
    plus ``import ipdb`` below look like leftover debugging.
    """
    # prefix (scanmotifs)
    dirname = h5_file.split("/")[-2]
    print dirname
    # get a bed file from the h5 file and overlap
    with h5py.File(h5_file, "r") as hf:
        metadata = hf["example_metadata"][:,0]
    metadata_bed_file = "{}/{}.metadata.tmp.bed.gz".format(
        tmp_dir, dirname)
    array_to_bed(metadata, metadata_bed_file, name_key="all", merge=False)
    # overlap and read back in
    overlap_file = "{}.overlap.bed.gz".format(metadata_bed_file.split(".bed")[0])
    overlap_cmd = (
        "zcat {} | "
        "awk -F '\t' 'BEGIN{{OFS=\"\t\"}}{{ $2=$2+20; $3=$3-20; print }}' | " # offset
        "bedtools intersect -wo -a stdin -b {} | "
        "gzip -c > {}").format(metadata_bed_file, bed_file, overlap_file)
    print overlap_cmd
    os.system(overlap_cmd)
    overlap_data = pd.read_csv(overlap_file, sep="\t", header=None)
    print overlap_data.shape
    # for each, go back in and find the index, then check importance scores
    total = 0
    for overlap_i in range(overlap_data.shape[0]):
        metadata_i = overlap_data[3][overlap_i]
        h5_i = np.where(metadata == metadata_i)[0][0]
        rsid_i = overlap_data[9][overlap_i]
        variant_pos = overlap_data[7][overlap_i] - overlap_data[1][overlap_i]
        variant_pos -= 1 # offset by 1
        if variant_pos < 0:
            continue
        # filter here, if using rsid to genes
        if (rsid_to_genes is not None) and (len(rsid_to_genes.keys()) > 0):
            try:
                gene_id = rsid_to_genes[rsid_i]
            except:
                # rsid not mapped to a gene: skip this variant
                continue
        else:
            gene_id = "UNKNOWN"
        # and get hgnc
        hgnc_id = ensembl_to_hgnc.get(gene_id, "UNKNOWN")
        with h5py.File(h5_file, "r") as hf:
            #for key in sorted(hf.keys()): print key, hf[key].shape
            # NOTE(review): dead `if False` branch kept from an earlier
            # single-position scoring variant.
            if False:
                variant_impt = hf["sequence-weighted.active"][h5_i,:,variant_pos,:]
                #variant_impt = hf["sequence-weighted"][h5_i,:,420:580][:,variant_pos]
            else:
                # Take a small window (+/-1, clipped) around the variant.
                start_pos = max(variant_pos - 1, 0)
                stop_pos = min(variant_pos + 1, hf["sequence-weighted.active"].shape[2])
                variant_impt = hf["sequence-weighted.active"][h5_i,:,start_pos:stop_pos,:]
            variant_val = hf["sequence.active"][h5_i,:,variant_pos,:]
            variant_ref_bp = idx_to_letter[np.argmax(variant_val)]
        # variant scoring
        try:
            variant_impt_max = np.max(np.abs(variant_impt))
        except:
            # NOTE(review): leftover interactive debugging hook.
            import ipdb
            ipdb.set_trace()
        if variant_impt_max > 0:
            # plot out
            if True:
                with h5py.File(h5_file, "r") as hf:
                    # get the full sequences
                    orig_importances = hf["sequence-weighted"][h5_i][:,420:580]
                    match_importances = hf["sequence-weighted.active"][h5_i]
                    importances = scale_scores(orig_importances, match_importances)
                print gene_id
                metadata_string = metadata_i.split("features=")[-1].replace(":", "_")
                # TODO also add in chrom region
                plot_file = "{}/{}.{}-{}.{}.{}.{}.{}.{}.plot.pdf".format(
                    tmp_dir, dirname, gene_id, hgnc_id, h5_i, rsid_i, variant_pos, variant_ref_bp, metadata_string)
                print plot_file
                plot_weights_group(importances, plot_file)
                total += 1
    print total
    return None
def delete_cluster(access_token, project_id, cluster_id):
    """Delete the given cluster via the BCS CC API."""
    url = f"{BCS_CC_API_PRE_URL}/projects/{project_id}/clusters/{cluster_id}/"
    return http_delete(url, params={"access_token": access_token})
def test_hello():
    """Should call hello end point"""
    test_client = flask_basic_restx_app.app.test_client()
    response = test_client.get("/hello")
    assert response.json == {"hello": "world"}
def modifMail(depute, mail = MAIL):
    """
    Build a personalised e-mail body for one deputy of the chosen committees.

    :param depute: sequence with the deputy's data, indexed by the module
        constants (NOM, FONCTION, COMMISSION_PERMANENTE, SEXE, ...).
    :param mail: path of a text file holding the template, which uses
        ``@tag@`` placeholders and ``#masculine/feminine#`` gendered words.
    :return: the template text with every placeholder substituted.
    """
    # Read the mail template; the with-block closes the handle (the
    # previous version leaked the open file).
    with open(mail, 'r') as f:
        messageMail = f.read()
    # Map each template tag to this deputy's data.
    donnesDeputes = {}
    # 'É' broke downstream processing, hence the replacement.
    donnesDeputes['@nom@'] = depute[NOM].replace('É', 'E')
    donnesDeputes['@fonction@'] = depute[FONCTION].lower()
    donnesDeputes['@commission@'] = depute[COMMISSION_PERMANENTE].lower()
    if(depute[SEXE] == 'F'):
        donnesDeputes['@politesse@'] = 'madame'
    elif(depute[SEXE] == 'H'):
        donnesDeputes['@politesse@'] = 'monsieur'
    # Substitute every tag in the template.
    texteMail = messageMail
    for tag in donnesDeputes.keys():
        texteMail = texteMail.replace(tag, donnesDeputes[tag])
    # Resolve gendered words written as "#masculine/feminine#": keep the
    # side matching the deputy's gender and reassemble the text.
    message = texteMail.split("#")
    while(len(message) > 1):
        mot = message[1].split('/')
        if(depute[SEXE] == 'F'):
            message[0] += mot[1]
        elif(depute[SEXE] == 'H'):
            message[0] += mot[0]
        message.pop(1)
        message[0] += message[1]
        message.pop(1)
    return message[0]
def atoi(s, base=None): # real signature unknown; restored from __doc__
    """
    atoi(s [,base]) -> int
    Return the integer represented by the string s in the given
    base, which defaults to 10. The string s must consist of one
    or more digits, possibly preceded by a sign. If base is 0, it
    is chosen from the leading characters of s, 0 for octal, 0x or
    0X for hexadecimal. If base is 16, a preceding 0x or 0X is
    accepted.
    """
    # Auto-generated stub: the body is a placeholder that always returns 0;
    # the real implementation lives in the binary module this stub mirrors.
    return 0
def add_to_command_lookup(command, entity):
    """Add discovered command to lookup
    """
    # Group entities by command name in the module-level table; a missing
    # command starts a fresh list.
    COMMAND_LOOKUP.setdefault(command, []).append(entity)
def gzip_requested(accept_encoding_header):
    """
    Check to see if the client can accept gzipped output, and whether or
    not it is even the preferred method. If `identity` is higher, then no
    gzipping should occur.
    """
    encodings = parse_encoding_header(accept_encoding_header)
    # Prefer an explicit gzip entry; fall back to the wildcard.
    if 'gzip' in encodings:
        offered_q = encodings['gzip']
    elif '*' in encodings:
        offered_q = encodings['*']
    else:
        return False
    return offered_q >= encodings['identity']
# Model files shared by the VW round-trip test below.
_VW_TEST_MODELS = ('/tmp/testmodel.vw', '/tmp/testmodel2.vw', '/tmp/testmodel3.vw')


def _remove_vw_test_models():
    """Delete any model files left over from a previous (or failed) run."""
    for path in _VW_TEST_MODELS:
        if os.path.exists(path):
            os.remove(path)


def test_vwprocess(model_class):
    """
    Test whether results are same as using vw command directly:
    run 'vw -q :: -p /dev/stdout --quiet' command:
    0 1.0 |u u1 |i i1
    1 1.0 |u u1 |i i2
    1 1.0 |u u1 |i i3
    1 1.0 |u u2 |i i1
    1 1.0 |u u2 |i i2
    0 1.0 |u u2 |i i3
    |u u1 |i i1
    |u u1 |i i2
    |u u1 |i i3
    |u u2 |i i1
    |u u2 |i i2
    |u u2 |i i3
    0 1.0 |u u1 |i i1
    1 1.0 |u u1 |i i2
    1 1.0 |u u1 |i i3
    1 1.0 |u u2 |i i1
    1 1.0 |u u2 |i i2
    0 1.0 |u u2 |i i3
    |u u1 |i i1
    |u u1 |i i2
    |u u1 |i i3
    |u u2 |i i1
    |u u2 |i i2
    |u u2 |i i3
    """
    try:
        # Start from a clean slate (the same helper runs again in finally).
        _remove_vw_test_models()
        # Train an initial model in write-only mode.
        vw_process = VWProcessTest(TestFormatter(), ['-q', '::', '--save_resume', '-f', '/tmp/testmodel.vw', '--quiet'],
                                   write_only=True)
        vw_process.train(common_features=user_features1, items_features=items_features, labels=[0, 1, 1], weights=[1.0, 1.0, 1.0])
        vw_process.train(common_features=user_features2, items_features=items_features, labels=[1, 1, 0], weights=[1.0, 1.0, 1.0])
        vw_process.close()
        assert os.path.exists('/tmp/testmodel.vw')
        # Resume from the saved model, predict, then continue training.
        vw_process = VWProcessTest(TestFormatter(),
                                   ['--save_resume', '-i', '/tmp/testmodel.vw', '-f', '/tmp/testmodel2.vw', '--quiet'])
        predictions = vw_process.predict(common_features=user_features1, items_features=items_features)
        assert list(predictions) == [0.777104, 0.96657, 0.69223]
        predictions = vw_process.predict(common_features=user_features2, items_features=items_features)
        assert list(predictions) == [0.666838, 0.727325, 0.242521]
        vw_process.train(common_features=user_features1, items_features=items_features, labels=[0, 1, 1],
                         weights=[1.0, 1.0, 1.0])
        vw_process.train(common_features=user_features2, items_features=items_features, labels=[1, 1, 0],
                         weights=[1.0, 1.0, 1.0])
        vw_process.close()
        assert os.path.exists('/tmp/testmodel2.vw')
        # 2nd time we test only len(batch) = 1 to test if batches size != batch_size will pass
        vw_process = model_class(TestFormatter(),
                                 ['--save_resume', '-i', '/tmp/testmodel2.vw', '-f', '/tmp/testmodel3.vw', '--quiet'])
        predictions = vw_process.predict(common_features=user_features1, items_features=items_features)
        assert list(predictions) == [0.558472, 1.0, 0.736521]
        vw_process.close()
        assert os.path.exists('/tmp/testmodel3.vw')
    finally:
        _remove_vw_test_models()
async def load(ctx, extension):
    """Load the named cog from the ``cogs`` package and confirm in chat."""
    cog = extension.lower()  # cog modules are lowercase on disk
    bot.load_extension(f'cogs.{cog}')
    await ctx.send(f'{cog.capitalize()} has been loaded.')
def create_logger(name="dummy", level=logging.DEBUG, record_format=None):
    """Create a logger according to the given settings.

    :param name: which handler to attach: 'udp', 'console' or 'dummy'.
    :param level: logging level applied to the logger.
    :param record_format: log record format; a tab-separated default is
        used when None.
    :return: the configured logger.
    :raises Exception: if *name* does not match a known handler.
    """
    fmt = record_format if record_format is not None else \
        "%(asctime)s\t%(levelname)s\t%(message)s"
    # NOTE: the logger name is always "modbus_tk"; *name* only picks the handler.
    logger = logging.getLogger("modbus_tk")
    logger.setLevel(level)
    # Lazy factories so only the requested handler is ever instantiated.
    handler_factories = {
        "udp": lambda: LogitHandler(("127.0.0.1", 1975)),
        "console": ConsoleHandler,
        "dummy": DummyHandler,
    }
    if name not in handler_factories:
        raise Exception("Unknown handler %s" % name)
    log_handler = handler_factories[name]()
    log_handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(log_handler)
    return logger
def precip_units(units):
    """
    Return a standardized name for precip units.

    :param units: units string (matched case-insensitively), e.g. 'mm/day'
        or 'kg m-2 s-1'.
    :return: the canonical spelling, either 'kg m^-2 s^-1' or 'mm day^-1'.
    :raises ValueError: if the units string is not recognized.
    """
    # Recognized aliases; sets give O(1) membership tests.
    kgm2s = {'kg/m2/s', '(kg/m^2)/s', 'kg/m^2/s', 'kg m^-2 s^-1',
             'kg/(m^2 s)', 'kg m-2 s-1'}
    mmday = {'mm/day', 'mm day^-1'}
    u = units.lower()  # normalize case once instead of per comparison
    if u in kgm2s:
        return 'kg m^-2 s^-1'
    elif u in mmday:
        return 'mm day^-1'
    else:
        raise ValueError('Unknown units ' + units)
def validate_cfg(cfg):
    """Validate config content.

    Delegates to ``_validate_agent_cfg`` — presumably that helper raises on
    invalid agent settings and returns nothing; TODO confirm against its
    definition. Only the agent section is checked here.

    :param cfg: parsed configuration object to validate.
    """
    _validate_agent_cfg(cfg)
def exportSiteProperties(context):
    """ Export site properties as an XML file.
    """
    body_exporter = queryMultiAdapter((context.getSite(), context), IBody)
    if body_exporter is None:
        # No export adapter registered for this site; log and bail out.
        context.getLogger('properties').warning('Export adapter missing.')
        return
    context.writeDataFile(_FILENAME, body_exporter.body,
                          body_exporter.mime_type)
def enumerate_quantities(
    df: pd.DataFrame, cols: Union[List[str], None] = None, qty_col: str = "quantity"
) -> pd.DataFrame:
    """Creates new dataframe to convert x,count to x*count.

    Each value in the requested columns is repeated ``qty_col`` times, so a
    row like ``(x, quantity=3)`` contributes ``[x, x, x]`` to its output
    column.

    :param df: source frame containing ``cols`` and ``qty_col``.
    :param cols: columns to expand; must be a non-empty iterable of names.
    :param qty_col: column holding the integer repeat count per row.
    :return: a new frame with one expanded column per entry in ``cols``.
    :raises ValueError: if ``cols`` is None or empty.
    """
    if not cols:
        raise ValueError("parameter cols must be an iterable of strings")
    # Series.repeat is vectorized and linear; the previous
    # sum(list_of_lists, []) idiom was quadratic in the number of rows.
    new_cols: List = [df[col].repeat(df[qty_col]).tolist() for col in cols]
    new_df = pd.DataFrame(new_cols, index=cols).T
    return new_df
def calcul_acc(labels, preds):
    """
    a private function for calculating accuracy
    Args:
        labels (Object): actual labels
        preds (Object): predict labels
    Returns:
        float: fraction of positions where labels and preds agree,
        in [0, 1]; 0.0 when labels is empty.
    """
    # Guard the empty case: the original raised ZeroDivisionError here.
    if not labels:
        return 0.0
    return sum(1 for x, y in zip(labels, preds) if x == y) / len(labels)
def makeMolFromAtomsAndBonds(atoms, bonds, spin=None):
    """
    Create a new Molecule object from a sequence of atoms and bonds.

    :param atoms: iterable of atomic numbers, one per atom.
    :param bonds: iterable of (begin_index, end_index, bond_order) triples,
        with 0-based atom indices (OpenBabel itself is 1-based).
    :param spin: optional total spin multiplicity to set on the molecule.
    :return: the assembled Molecule.
    """
    mol = Molecule(pybel.ob.OBMol())
    obmol = mol.OBMol
    # Add atoms first so bond indices have something to refer to.
    for atomic_number in atoms:
        new_atom = pybel.ob.OBAtom()
        new_atom.SetAtomicNum(atomic_number)
        obmol.AddAtom(new_atom)
    for bond in bonds:
        if len(bond) != 3:
            raise Exception('Bond must be specified by two indices and a bond order')
        begin_idx, end_idx, order = bond
        # Shift to OpenBabel's 1-based atom indexing.
        obmol.AddBond(begin_idx + 1, end_idx + 1, order)
    mol.assignSpinMultiplicity()
    if spin is not None:
        obmol.SetTotalSpinMultiplicity(spin)
    obmol.SetHydrogensAdded()
    return mol
def alpha_040(enddate, index='all'):
    """
    Compute the Alpha#101 factor alpha_040.

    Inputs:
        enddate: required; the trading day for which to compute the factor.
        index: optional; stock index universe, defaults to all stocks ('all').
    Outputs:
        Series: index is the constituent stock codes, values are the
        corresponding factor values.
    Formula:
        ((-1\* rank(stddev(high, 10)))\* correlation(high, volume, 10))
    """
    enddate = to_date_str(enddate)
    # Name of this function ('alpha_040'), read from the current frame so the
    # same body can be reused verbatim across all alpha_* factor functions.
    func_name = sys._getframe().f_code.co_name
    # NOTE: **locals() forwards {'enddate', 'index', 'func_name'} as keyword
    # arguments, so the local variable names in this function are part of the
    # remote call's contract -- do not rename or add locals here.
    return JQDataClient.instance().get_alpha_101(**locals())
def get_preprocessor(examples, tokenize_fn, pad_ids):
    """
    Input:
        examples: [List[str]] input texts
        tokenize_fn: [function] encodes text into IDs
        pad_ids: [List[int]] IDs prepended to every encoded example
    Output:
        a zero-argument generator function; each yielded item is
        pad_ids + tokenize_fn(example)
    """
    def generator():
        for text in examples:
            yield pad_ids + tokenize_fn(text)
    return generator
def is_propositional_effect(eff: BaseEffect) -> bool:
    """ An effect is propositional if it is either an add or a delete effect. """
    # True exactly for AddEffect / DelEffect instances (including subclasses).
    return isinstance(eff, (AddEffect, DelEffect))
def read_user(msg):
    """Read user input.
    :param msg: A message to prompt
    :type msg: ``str``
    :return: ``True`` if user gives 'y' otherwise False.
    :rtype: ``bool``
    """
    answer = input(f"{msg} y/n?: ")
    # Anything other than a literal 'y' (including 'Y' or 'yes') counts as no.
    return answer == 'y'
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.