| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def _transpose_list_array(x):
"""Transposes a list matrix
"""
n_dims = len(x)
assert n_dims > 0
n_samples = len(x[0])
rows = [None] * n_samples
for i in range(n_samples):
r = [None] * n_dims
for j in range(n_dims):
r[j] = x[j][i]
rows[i] = r
return rows
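
# A quick usage sketch (pure Python): the input is a list of columns,
# the output is a list of rows.
# _transpose_list_array([[1, 2, 3], [4, 5, 6]])
# -> [[1, 4], [2, 5], [3, 6]]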
| 21,700
|
def load_businessgroup(request):
""" Business Group Dependent/Chained Dropdown List """
business_type_id = request.GET.get('business_type')
business_group_list = BusinessGroup.objects.filter(
business_type_id=business_type_id).order_by('name')
context = {'business_group_list': business_group_list}
return render(request, 'app_sme12/form_partial/bus_group_dropdown_list_options.html', context)
| 21,701
|
def seed_normalization(train_X, train_Y, test_X, testY, nor_method=0, merge=0, column=0):
"""
0 for MinMaxScaler, 1 for StandardScaler, 2 for Normalizer, 3 for a MinMax+Standard pipeline; anything else leaves the data unscaled
:param nor_method: which scaler to use (see above)
:param merge: whether to fit the scaler on train and test together (0), on train only (1), or fit/transform each set separately (otherwise)
:param column: if nonzero, standardize each sample across its features instead of scaling per feature
:return: (train_X, train_Y, test_X, testY) after imputation and scaling
"""
# imp_mean = SimpleImputer(missing_values=np.nan, strategy="mean")
imp_mean = KNNImputer(n_neighbors=10,weights="uniform")
train_X = imp_mean.fit_transform(train_X)
test_X = imp_mean.transform(test_X)  # reuse the imputer fitted on the training data
if column == 0:
if nor_method == 0:
scaler = MinMaxScaler()
elif nor_method == 1:
scaler = StandardScaler()
elif nor_method == 2:
scaler = Normalizer()
elif nor_method == 3:
scaler = Pipeline([('min_max', MinMaxScaler()),
('standard', StandardScaler())])
else:
return train_X, train_Y, test_X, testY
if merge == 0:
scaler.fit(np.vstack((train_X, test_X)))
train_X = scaler.transform(train_X)
test_X = scaler.transform(test_X)
elif merge == 1:
scaler.fit(train_X)
train_X = scaler.transform(train_X)
test_X = scaler.transform(test_X)
else:
train_X = scaler.fit_transform(train_X)
test_X = scaler.fit_transform(test_X)
#scaler.fit(np.vstack((train_X, test_X)))
return train_X, train_Y, test_X, testY
else:
train_X = train_X.T
x_mean = np.mean(train_X, axis=0)
x_std = np.std(train_X, axis=0)
train_X = (train_X - x_mean) / x_std  # standardize: divide by the std
test_X = test_X.T
x_mean = np.mean(test_X, axis=0)
x_std = np.std(test_X, axis=0)
test_X = (test_X - x_mean) / x_std  # standardize: divide by the std
return train_X.T, train_Y, test_X.T, testY
| 21,702
|
def combine_matrix_runs(path, runs, pacc_file):
"""Combine a set of transition matrix files.
Args:
path: The base path containing the data to combine.
runs: The list of runs to combine.
pacc_file: The name of the file to combine.
Returns:
A TransitionMatrix object with the combined data.
"""
true_path = pathlib.Path(path)
return combine_matrices([read_matrix(true_path / run / pacc_file)
for run in runs])
| 21,703
|
def submit_tweet_with_media(message, mediafile, tweet_to_reply=None, handle=None):
"""
mediafile is the path to a media file
tweet_to_reply is a tweet that you're replying to, if not None
"""
if not handle:
handle = twitter_handle()
media_ids = handle.upload_media(media=open(mediafile, 'rb'))  # binary mode for image/video uploads
if tweet_to_reply is None:
handle.update_status(status=message,
media_ids=media_ids['media_id'])
else:
# must mention user's name for it to be a reply
message += ' @' + tweet_to_reply['user']['screen_name']
handle.update_status(status=message,
in_reply_to_status_id=tweet_to_reply['id'],
media_ids=media_ids['media_id'])
| 21,704
|
def get_pkg_descr(package, version=None, last_modified=None):
"""
Get package description from registry
"""
json_data = fetch_page('http://registry.npmjs.org/%s' % package, last_modified=last_modified)
if json_data is None:  # NB: an empty string is not None, so it slips past this check
return None
else:
return json.loads(json_data)
| 21,705
|
def has_pattern(str_or_strlist):
"""When passed a string, equivalent to calling looks_like_pattern.
When passed a string list, returns True if any one of the strings looks like a pattern,
False otherwise."""
strlist = [str_or_strlist] if isinstance(str_or_strlist, str) else str_or_strlist
return any(looks_like_pattern(s) for s in strlist)
| 21,706
|
def update_hidden_area(*args):
"""update_hidden_area(hidden_area_t ha) -> bool"""
return _idaapi.update_hidden_area(*args)
| 21,707
|
def after_update_forecast_datasets(msg, config, checklist):
"""Calculate the list of workers to launch after the
update_forecast_datasets worker ends.
:arg msg: Nowcast system message.
:type msg: :py:class:`nemo_nowcast.message.Message`
:arg config: :py:class:`dict`-like object that holds the nowcast system
configuration that is loaded from the system configuration
file.
:type config: :py:class:`nemo_nowcast.config.Config`
:arg dict checklist: System checklist: data structure containing the
present state of the nowcast system.
:returns: Worker(s) to launch next
:rtype: list
"""
next_workers = {
"crash": [],
"failure fvcom forecast": [],
"failure nemo forecast": [],
"failure nemo forecast2": [],
"failure wwatch3 forecast": [],
"failure wwatch3 forecast2": [],
"success fvcom forecast": [],
"success nemo forecast": [],
"success nemo forecast2": [],
"success wwatch3 forecast": [],
"success wwatch3 forecast2": [],
}
if msg.type.startswith("success"):
model = msg.type.split()[1]
run_type = msg.type.split()[2]
try:
run_date = checklist[f"{model.upper()} run"][run_type]["run date"]
except KeyError:
# FVCOM run has model config prefixed to run type
run_date = checklist[f"{model.upper()} run"][f"x2 {run_type}"]["run date"]
next_workers[msg.type].append(
NextWorker("nowcast.workers.ping_erddap", args=[f"{model}-forecast"])
)
if model == "nemo":
next_workers[msg.type].extend(
[
NextWorker(
"nowcast.workers.make_plots",
args=["nemo", run_type, "publish", "--run-date", run_date],
),
NextWorker(
"nowcast.workers.make_surface_current_tiles",
args=[run_type, "--run-date", run_date],
),
]
)
return next_workers[msg.type]
| 21,708
|
def index(request):
"""Homepage for this app.
"""
with open('index.html') as fp:
return HttpResponse(fp.read())
| 21,709
|
def SECH(*args) -> Function:
"""
The SECH function returns the hyperbolic secant of an angle.
Learn more: https://support.google.com/docs/answer/9116560
"""
return Function("SECH", args)
| 21,710
|
def read_ATAC_10x(matrix, cell_names='', var_names='', path_file=''):
"""
Load sparse matrix (including matrices corresponding to 10x data) as AnnData objects.
Reads the mtx file, the tsv file corresponding to cell_names, and the bed file containing the variable names.
Parameters
----------
matrix: sparse count matrix
cell_names: optional, tsv file containing cell names
var_names: optional, bed file containing the feature names
Return
------
AnnData object
"""
mat = mmread(''.join([path_file, matrix]))
mat = mat.toarray()
mat = np.matrix(mat.transpose())
with open(path_file+cell_names) as f:
barcodes = f.readlines()
barcodes = [x[:-1] for x in barcodes]
with open(path_file+var_names) as f:
var_names = f.readlines()
var_names = ["_".join(x[:-1].split('\t')) for x in var_names]
adata = ad.AnnData(mat, obs=pd.DataFrame(index=barcodes), var=pd.DataFrame(index=var_names))
adata.uns['omic'] = 'ATAC'
return(adata)
| 21,711
|
def print_header():
"""
Prints header for app
:return:
"""
print('-------------------------------------------')
print(' Weather APP')
print('-------------------------------------------')
print()
| 21,712
|
def _resolve_placeholder(placeholder, original):
"""Resolve a placeholder to the given original object.
:param placeholder: The placeholder to resolve, in place.
:type placeholder: dict
:param original: The object that the placeholder represents.
:type original: dict
"""
new = copy.deepcopy(original)
# We are supposed to be resolving the placeholder,
# not replacing it with another.
assert original["type"] != "placeholder"
# The name remains the same.
new["name"] = placeholder["name"]
new["full_name"] = placeholder["full_name"]
# Record where the placeholder originally came from.
new["original_path"] = original["full_name"]
# The source lines for this placeholder do not exist in this file.
# The keys might not exist if original is a resolved placeholder.
new.pop("from_line_no", None)
new.pop("to_line_no", None)
# Resolve the children
stack = list(new.get("children", ()))
while stack:
child = stack.pop()
# Relocate the child to the new location
assert child["full_name"].startswith(original["full_name"])
suffix = child["full_name"][len(original["full_name"]) :]
child["full_name"] = new["full_name"] + suffix
# The source lines for this placeholder do not exist in this file.
# The keys might not exist if original is a resolved placeholder.
child.pop("from_line_no", None)
child.pop("to_line_no", None)
# Resolve the remaining children
stack.extend(child.get("children", ()))
placeholder.clear()
placeholder.update(new)
| 21,713
|
def get_html_subsection(name):
"""
Return a subsection as HTML, with the given name
:param name: subsection name
:type name: str
:rtype: str
"""
return "<h2>{}</h2>".format(name)
| 21,714
|
def test_load_pandas_df(
size,
num_samples,
num_movies,
movie_example,
title_example,
genres_example,
year_example,
tmp,
):
"""Test MovieLens dataset load as pd.DataFrame"""
# Test if correct data are loaded
header = ["a", "b", "c"]
df = load_pandas_df(size=size, local_cache_path=tmp, header=header)
assert len(df) == num_samples
assert len(df.columns) == len(header)
# Test if raw-zip file, rating file, and item file are cached
assert len(os.listdir(tmp)) == 3
# Test title, genres, and released year load
header = ["a", "b", "c", "d", "e"]
with pytest.warns(Warning):
df = load_pandas_df(
size=size,
header=header,
local_cache_path=tmp,
title_col="Title",
genres_col="Genres",
year_col="Year",
)
assert len(df) == num_samples
assert (
len(df.columns) == 7
) # 4 header columns (user, item, rating, timestamp) and 3 feature columns
assert "e" not in df.columns # only the first 4 header columns are used
# Get two records of the same items and check if the item-features are the same.
head = df.loc[df["b"] == movie_example][:2]
title = head["Title"].values
assert title[0] == title[1]
assert title[0] == title_example
genres = head["Genres"].values
assert genres[0] == genres[1]
assert genres[0] == genres_example
year = head["Year"].values
assert year[0] == year[1]
assert year[0] == year_example
# Test default arguments
df = load_pandas_df(size)
assert len(df) == num_samples
# user, item, rating and timestamp
assert len(df.columns) == 4
| 21,715
|
def compute_norms(items):
"""
Compute the norms of the item vectors provided.
Arguments:
items -- a hashmap which maps itemIDs to the characteristic vectors
"""
norms = {}
for item in items:
norms[item] = np.sqrt(np.sum(np.square(items[item])))
return norms
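
# For example (numpy assumed, as in the function body):
# items = {"a": np.array([3.0, 4.0]), "b": np.array([1.0, 0.0])}
# compute_norms(items)
# -> {'a': 5.0, 'b': 1.0}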
| 21,716
|
def check_cert_path(path):
"""Pass."""
build_path = path.parent / "build_certs.py"
if not path.is_file():
error = "Cert {path!r} does not exist, run {build_path!r}"
error = error.format(path=format(path), build_path=format(build_path))
raise Exception(error)
| 21,717
|
def do_part_1():
"""
Solves part 1
"""
digested_lines = list(map(digest_line, input_lines(2)))
# Poor man's partial
doubles = sum(map(lambda l: contains_nple(l, reps=2), digested_lines))
triples = sum(map(lambda l: contains_nple(l, reps=3), digested_lines))
print(doubles * triples)
return doubles * triples
| 21,718
|
def create_Rz_batch(a):
"""
Creates a batch of rotation matrices about z of angles a.
Input (batch)
Output (batch, 3, 3)
"""
return torch.stack([
torch.stack([torch.cos(a),
torch.sin(a),
torch.zeros_like(a)],
dim=1),
torch.stack([-torch.sin(a),
torch.cos(a),
torch.zeros_like(a)],
dim=1),
torch.stack([torch.zeros_like(a),
torch.zeros_like(a),
torch.ones_like(a)],
dim=1)
], dim=2)
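
# A small sanity-check sketch (assumes torch is imported as above): rotating the
# x unit vector by 90 degrees about z should give the y unit vector.
# import math
# a = torch.tensor([math.pi / 2])
# Rz = create_Rz_batch(a)                       # shape (1, 3, 3)
# Rz[0] @ torch.tensor([1.0, 0.0, 0.0])
# -> approximately tensor([0., 1., 0.])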
| 21,719
|
def get_upsample_filter(size):
"""Make a 2D bilinear kernel suitable for upsampling"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
filter = (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
return torch.from_numpy(filter).float()
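
# For instance (numpy/torch imports assumed as above), size=4 gives the separable
# bilinear profile [0.25, 0.75, 0.75, 0.25] along each axis; the kernel is its outer product.
# k = get_upsample_filter(4)
# k[1, 1].item()   # 0.75 * 0.75 = 0.5625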
| 21,720
|
def lst2gmst(longitude,
hour,
minute=None,
second=None,
longitudeDirection='W',
longitudeUnits='DEGREES'):
"""
Converts Local Sidereal Time to Greenwich Mean Sidereal Time.
Parameters
----------
longitude : float (any numeric type)
The longitude of the site to calculate the Local Sidereal Time. Defaults are
Longitude WEST and units DEGREES, but these can be changed with the optional
parameters lonDirection and lonUnits.
hour : int (or float)
If an integer, the function will expect a minute and second. If a float, it
will ignore minute and second and convert from decimal hours to hh:mm:ss.
minute : int
Ignored if hour is a float.
second : int (any numeric type, to include microseconds)
Ignored if hour is a float.
longitudeDirection : string
Default is longitude WEST, 'W', but you can specify EAST by passing 'E'.
longitudeUnits : string
Default units are 'DEGREES', but this can be switched to radians by passing
'RADIANS' in this parameter.
Returns
-------
hour : int
The hour of the calculated GMST
minute : int
The minutes of the calculated GMST
second: float
The seconds of the calculated GMST
Examples
--------
>>> lst2gmst(70.3425, hour=14, minute=26, second=18)
(19, 7, 40.20000000000607)
>>> lst2gmst(5.055477, hour=14.4383333333333333, longitudeDirection='E', longitudeUnits='RADIANS')
(19, 7, 40.20107388985991)
"""
if minute is not None and second is not None:
hours = sex2dec(hour, minute, second)
elif minute is None and second is None:
hours = hour
else:
raise AssertionError('minute and second must either be both set, or both unset.')
if longitudeUnits.upper() == 'DEGREES':
longitudeTime = longitude / 15.0
elif longitudeUnits.upper() == 'RADIANS':
longitudeTime = longitude * 180.0 / math.pi / 15.0
if longitudeDirection.upper() == 'W':
gmst = hours + longitudeTime
elif longitudeDirection.upper() == 'E':
gmst = hours - longitudeTime
else:
raise AssertionError('longitudeDirection must be W or E')
gmst = gmst % 24.0
return dec2sex(gmst)
| 21,721
|
def find_components(package, base_class):
"""Find components which are subclass of a given base class.
"""
for filename in resource_listdir(package, ''):
basename, extension = os.path.splitext(filename)
if extension != '.py' or basename.startswith('.'):
continue
module_name = "{}.{}".format(package, basename)
__import__(module_name, fromlist='*')
module = sys.modules[module_name]
if not hasattr(module, '__all__'):
continue
yield from scan_module(module, base_class)
| 21,722
|
def main():
"""run this by default"""
bigfix_cli = bescli.bescli.BESCLInterface()
bigfix_cli.do_conf()
# generate MSI uninstallers
# use session relevance to get the name of the property to get the values from:
property_name = bigfix_cli.bes_conn.session_relevance_array(
'unique value of names of bes properties whose(custom flag of it AND name of it contains "DisplayName" AND definition of it contains "ModifyPath")'
)[0]
# print help messages if property not found:
if "ERROR:" in property_name:
print("ERROR: Property not found!", property_name)
print("You may need to create the property that this script uses")
print(
"- Recommended Property Name: `DisplayNames of MSI Applications - Windows`"
)
print("- Recommended Property Evaluation Period: Once a day")
print(
"- Recommended Property Relevance found here: https://bigfix.me/relevance/details/3023371"
)
raise ValueError("ERROR: Property not found!", property_name)
# get the unique set of property results to generate the MSI installers for:
property_results = bigfix_cli.bes_conn.session_relevance_array(
f'unique values of values of results of bes property "{property_name}"'
)
# print(property_results)
template_dict = {}
template_dict["DownloadSize"] = "0"
template_file_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "Uninstall_MSI-Windows.bes.mustache"
)
template_dict["template_file_path"] = template_file_path
template_dict = generate_bes_from_template.generate_bes_from_template.get_missing_bes_values(
template_dict
)
# print(template_dict)
for result in property_results:
# print(result)
# generate the uninstallers:
template_dict["DisplayName"] = result
generated_task = generate_bes_from_template.generate_bes_from_template.generate_content_from_template(
template_dict
)
print(save_item_to_besfile(generated_task))
# generate EXE uninstallers:
template_file_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "Uninstall_EXE-Windows.bes.mustache"
)
template_dict["template_file_path"] = template_file_path
property_name = bigfix_cli.bes_conn.session_relevance_array(
'unique value of names of bes properties whose(custom flag of it AND name of it contains "DisplayName" AND definition of it contains "QuietUninstallString")'
)[0]
# print help messages if property not found:
if "ERROR:" in property_name:
print("ERROR: Property not found!", property_name)
print("You may need to create the property that this script uses")
print(
"- Recommended Property Name: `DisplayNames of MSI Applications - Windows`"
)
print("- Recommended Property Evaluation Period: Once a day")
print(
"- Recommended Property Relevance found here: https://bigfix.me/relevance/details/3023371"
)
raise ValueError("ERROR: Property not found!", property_name)
print(property_name)
property_results = bigfix_cli.bes_conn.session_relevance_array(
f'unique values of values of results of bes property "{property_name}"'
)
# print(property_results)
for result in property_results:
# print(result)
# generate the uninstallers:
template_dict["DisplayName"] = result
generated_task = generate_bes_from_template.generate_bes_from_template.generate_content_from_template(
template_dict
)
print(save_item_to_besfile(generated_task))
| 21,723
|
def filter_funcs(node) -> bool:
"""Filter to get functions names and remove dunder names"""
if not isinstance(node, ast.FunctionDef):
return False
elif node.name.startswith('__') or node.name.endswith('__'):
return False
else:
return True
| 21,724
|
def test_disabled_enable_debug() -> None:
"""Check that enable_debug=False works."""
tc.assertTrue(
is_file_exists(get_absolute_from_current_path(__file__, "0001.png"))
)
copy_file(
get_absolute_from_current_path(__file__, "0001.png"),
get_absolute_from_current_path(__file__, "0001_debug.png"),
)
SeparatePage().treat_file(
get_absolute_from_current_path(__file__, "0001_debug.png"),
debug=DebugImage(DebugImage.Level.OFF),
)
delete_file(get_absolute_from_current_path(__file__, "0001_debug.png"))
| 21,725
|
def main():
"""
Load the list of valid country codes and create output for each country.
Two output files and one plot are created for each country.
"""
with open(DATA_FOLDER+COUNTRY_CODES, "r") as infile:
infile.readline()
info = [(int(_l.split("\t")[0]), _l.split("\t")[3]) for _l in infile]
for c_id, name in info:
if c_id in [554, 643, 840]:
# 554, 643, 840 are the three countries that span the -180/180 degree
# longitude line and hence produce an array of population data that is
# too large to be plotted on a map.
# These countries are:
# - New Zealand (554)
# - Russia (643)
# - USA (840)
print(c_id, "caused errors in the past. Skipping for now...")
continue
if os.path.exists("plots/{0}.png".format(c_id)):
print(c_id, "already present.")
else:
print("Running for country:", c_id)
plot = Plot(c_id)
plot.plot(title=name)
| 21,726
|
def attach(address, log_dir=None, multiprocess=True):
"""Starts a DAP (Debug Adapter Protocol) server in this process,
and connects it to the IDE that is listening for an incoming
connection on a socket with the specified address.
address must be a (host, port) tuple, as defined by the standard
socket module for the AF_INET address family.
If specified, log_dir must be a path to some existing directory;
the debugger will then create its log files in that directory.
A separate log file is created for every process, to accommodate
scenarios involving multiple processes. The log file for a process
with process ID <pid> will be named "ptvsd_<pid>.log".
If multiprocess is true, ptvsd will also intercept child processes
spawned by this process, inject a debug server into them, and
configure it to attach to the same IDE before the child process
starts running any user code.
This function doesn't return until connection to the IDE has been
established.
"""
from ptvsd.server import api
return api.attach(address, log_dir)
| 21,727
|
def create_api_token(
creator_id: UserID,
permissions: set[PermissionID],
*,
description: Optional[str] = None,
) -> ApiToken:
"""Create an API token."""
num_bytes = 40
token = token_urlsafe(num_bytes)
db_api_token = DbApiToken(
creator_id, token, permissions, description=description
)
db.session.add(db_api_token)
db.session.commit()
return _db_entity_to_api_token(db_api_token)
| 21,728
|
def parse_arguments():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description="Update OCFL inventory sidecar file",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("path", type=str, nargs="*",
help="OCFL inventory files or directories containing them")
parser.add_argument("--digest", default=None,
help="Digest algorithm to use overriding any in inventory")
ocfl.add_shared_args(parser)
args = parser.parse_args()
ocfl.check_shared_args(args)
return args
| 21,729
|
def match_image_widths(
image_i1: Image, image_i2: Image
) -> Tuple[Image, Image, Tuple[float, float], Tuple[float, float]]:
"""Automatically chooses the target width (larger of the two inputs), and
scales both images to that width.
Args:
image_i1: 1st image to match width.
image_i2: 2nd image to match width.
Returns:
Scaled image_i1.
Scaled image_i2.
Scaling factor (W, H) for image_i1.
Scaling factor (W, H) for image_i2.
"""
max_width = max(image_i1.width, image_i2.width)
# scale image_i1
new_width = int(max_width)
new_height = int(image_i1.height * new_width / image_i1.width)
scale_factor_i1 = (new_width / image_i1.width, new_height / image_i1.height)
scaled_image_i1 = resize_image(image_i1, new_height, new_width)
# scale image_i2
new_width = int(max_width)
new_height = int(image_i2.height * new_width / image_i2.width)
scale_factor_i2 = (new_width / image_i2.width, new_height / image_i2.height)
scaled_image_i2 = resize_image(image_i2, new_height, new_width)
return scaled_image_i1, scaled_image_i2, scale_factor_i1, scale_factor_i2
| 21,730
|
def get_parameter_value_and_validate_return_type(
domain: Optional[Domain] = None,
parameter_reference: Optional[Union[Any, str]] = None,
expected_return_type: Optional[Union[type, tuple]] = None,
variables: Optional[ParameterContainer] = None,
parameters: Optional[Dict[str, ParameterContainer]] = None,
) -> Optional[Any]:
"""
This method allows for the parameter_reference to be specified as an object (literal, dict, any typed object, etc.)
or as a fully-qualified parameter name. In either case, it can optionally validate the type of the return value.
"""
if isinstance(parameter_reference, dict):
parameter_reference = dict(copy.deepcopy(parameter_reference))
parameter_reference = get_parameter_value(
domain=domain,
parameter_reference=parameter_reference,
variables=variables,
parameters=parameters,
)
if expected_return_type is not None:
if not isinstance(parameter_reference, expected_return_type):
raise ge_exceptions.ProfilerExecutionError(
message=f"""Argument "{parameter_reference}" must be of type "{str(expected_return_type)}" \
(value of type "{str(type(parameter_reference))}" was encountered).
"""
)
return parameter_reference
| 21,731
|
def diff_tags(list_a, list_b):
"""
Return human readable diff string of tags changed between two tag lists
:param list_a: Original tag list
:param list_b: New tag list
:return: Difference string
"""
status_str = text_type("")
tags_added = [tag for tag in list_b if tag not in list_a]
tags_removed = [tag for tag in list_a if tag not in list_b]
if tags_added and tags_removed:
status_str += "added: {0}".format(text_type(tags_added))
status_str += " removed: {0}".format(text_type(tags_removed))
elif tags_added:
status_str += "added: {0}".format(text_type(tags_added))
elif tags_removed:
status_str += "removed: {0}".format(text_type(tags_removed))
if not status_str:
status_str = "no changes required."
return status_str
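
# Usage sketch (assumes text_type is six's text type, i.e. str on Python 3):
# diff_tags(["prod", "web"], ["web", "db"])
# -> "added: ['db'] removed: ['prod']"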
| 21,732
|
def distance_on_great_circle(start_point, direction, distance):
"""compute the location of a point a specified distance along a great circle
NOTE: This assumes a spherical earth. The error introduced in the location
is pretty small (~15 km for a 13000 km path), but it totally screws with
the altitude. YOU SHOULD NOT USE THE ALTITUDE COMING OUT OF THIS, ESPECIALLY
IF YOU HAVE ANY MEANINGFUL DISTANCE
Arguments:
start_point: the starting point of the great circle. The direction is
given in a NED frame at this point. Numpy (3,) array in radians, lla
direction: a NED vector indicating the direction of the great circle
distance: the length of the great circle arc (m)
Returns:
end_point: the end of a great circle path of length <distance> from
<start_point> with initial <direction>
"""
start_xyz = geodesy.conversions.lla_to_xyz(start_point)
direction = geometry.conversions.to_unit_vector(direction)
delta_xyz = geodesy.conversions.ned_to_xyz(
direction, numpy.array(start_point, ndmin=2))
rotation_axis = -geometry.conversions.to_unit_vector(
numpy.cross(start_xyz, delta_xyz))
rotation_magnitude = distance / environments.earth.constants['r0']
rotation_quaternion = geometry.quaternion.Quaternion()
rotation_quaternion.from_axis_and_rotation(
rotation_axis, rotation_magnitude)
end_point_xyz = rotation_quaternion.rot(start_xyz)
end_point = geodesy.conversions.xyz_to_lla(end_point_xyz)
return end_point
| 21,733
|
def bigo():
""" User inputs corresponding Big O of randomly generated expression """
# Init algorithm 1
a1 = randint(0, 2)
if (a1 == 0): a1 = Expression(terms=3)
else : a1 = return_rterm()
# Question user
print(f"\nGive the corresponding Big O of {a1}.")
user_inp = str(input("\nInput answer: ")).lower()
if (user_inp == a1.bigo().lower()): print(f"Correct!")
else: print(f"Incorrect ._. -> Correct Answer: {a1.bigo().lower()}")
| 21,734
|
def test_extra_kwargs_error():
"""Test that unrecognized kwargs gives a FutureWarning."""
with pytest.raises(TypeError) as wrn:
widgets.Label(unknown_kwarg="hi")
assert "unexpected keyword argument" in str(wrn)
| 21,735
|
def construct_tree_framework(bracket):
"""Given the tree in bracket form, creates a tree with labeled leaves
and unlabeled inner nodes."""
if type(bracket)==int: #base case, creates leaf
return Node(bracket)
else: #recursive step, inner nodes
root = Node(None, construct_tree_framework(bracket[0]), construct_tree_framework(bracket[1]))
return root
| 21,736
|
def eckart_transform(atommasses, atomcoords):
"""Compute the Eckart transform.
This transform is described in https://gaussian.com/vib/.
Parameters
----------
atommasses : array-like
Atomic masses in atomic mass units (amu).
atomcoords : array-like
Atomic coordinates.
Returns
-------
array-like
Examples
--------
>>> from overreact import _datasets as datasets
>>> data = datasets.logfiles["tanaka1996"]["Cl·@UMP2/cc-pVTZ"]
>>> eckart_transform(data.atommasses, data.atomcoords)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> data = datasets.logfiles["symmetries"]["dihydrogen"]
>>> eckart_transform(data.atommasses, data.atomcoords)
array([[...]])
>>> data = datasets.logfiles["symmetries"]["water"]
>>> eckart_transform(data.atommasses, data.atomcoords)
array([[-9.42386999e-01, 0.00000000e+00, 0.00000000e+00,
2.99716727e-01, -2.86166258e-06, -7.42376895e-02,
-1.19022276e-02, 4.33736541e-03, -1.28081683e-01],
[-0.00000000e+00, -9.42386999e-01, 0.00000000e+00,
1.40934586e-02, -1.34562803e-07, 1.01850683e-01,
-1.52466204e-01, -2.78628770e-01, -2.13218735e-02],
[-0.00000000e+00, -0.00000000e+00, -9.42386999e-01,
-1.47912143e-01, 1.41224899e-06, -1.40724409e-01,
-3.86450545e-02, -1.77596105e-02, -2.61565554e-01],
[-2.36544652e-01, -0.00000000e+00, -0.00000000e+00,
-5.97037403e-01, -6.33525274e-01, 2.70812665e-02,
-2.34354970e-01, 8.09905642e-02, 3.52169811e-01],
[-0.00000000e+00, -2.36544652e-01, -0.00000000e+00,
-2.80742485e-02, -2.97900030e-02, -6.93753868e-01,
5.78451116e-01, 2.06337502e-01, 2.89647600e-01],
[-0.00000000e+00, -0.00000000e+00, -2.36544652e-01,
2.94641819e-01, 3.12648820e-01, -1.12274948e-02,
-4.19760855e-01, 1.83772848e-01, 7.41205673e-01],
[-2.36544652e-01, -0.00000000e+00, -0.00000000e+00,
-5.97025305e-01, 6.33536675e-01, 2.68679525e-01,
2.81773098e-01, -9.82705016e-02, 1.58103880e-01],
[-0.00000000e+00, -2.36544652e-01, -0.00000000e+00,
-2.80736797e-02, 2.97905391e-02, 2.87983715e-01,
2.89697972e-02, 9.03711399e-01, -2.04701877e-01],
[-0.00000000e+00, -0.00000000e+00, -2.36544652e-01,
2.94635849e-01, -3.12654446e-01, 5.71869440e-01,
5.73721626e-01, -1.13019078e-01, 3.00863871e-01]])
"""
atommasses = np.asarray(atommasses)
natom = len(atommasses)
dof = 3 * natom
moments, axes, atomcoords = inertia(atommasses, atomcoords, align=False)
x = np.block(
[
np.ones(natom)[:, np.newaxis],
np.zeros(natom)[:, np.newaxis],
np.zeros(natom)[:, np.newaxis],
]
)
y = np.block(
[
np.zeros(natom)[:, np.newaxis],
np.ones(natom)[:, np.newaxis],
np.zeros(natom)[:, np.newaxis],
]
)
z = np.block(
[
np.zeros(natom)[:, np.newaxis],
np.zeros(natom)[:, np.newaxis],
np.ones(natom)[:, np.newaxis],
]
)
x *= np.sqrt(atommasses[:, np.newaxis])
y *= np.sqrt(atommasses[:, np.newaxis])
z *= np.sqrt(atommasses[:, np.newaxis])
D_trans = np.block([x.reshape(1, dof).T, y.reshape(1, dof).T, z.reshape(1, dof).T])
D_rot = np.array(
[
np.cross((atomcoords @ axes)[i], axes[:, j]) / np.sqrt(atommasses[i])
for i in range(natom)
for j in range(3)
]
)
D = np.block([D_trans, D_rot])
return np.linalg.qr(D, mode="complete")[0]
| 21,737
|
def plot_rdkit(mol, filename=None):
"""
Plots an RDKit molecule in Matplotlib
:param mol: an RDKit molecule
:param filename: save the image with the given filename
:return:
"""
if rdc is None:
raise ImportError('`plot_rdkit` requires RDKit.')
if filename is not None:
Draw.MolToFile(mol, filename)
img = Draw.MolToImage(mol)
plt.imshow(img)
| 21,738
|
def as_java_array(gateway, java_type, iterable):
"""Creates a Java array from a Python iterable, using the given p4yj gateway"""
java_type = gateway.jvm.__getattr__(java_type)
lst = list(iterable)
arr = gateway.new_array(java_type, len(lst))
for i, e in enumerate(lst):
jobj = as_java_object(gateway, e)
arr[i] = jobj
return arr
| 21,739
|
def match(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Finds the matrix R that minimizes the frobenius norm of RA - B, where
R is orthonormal.
Args:
a (np.ndarray[samples, features]): the first matrix to match
b (np.ndarray[samples, features]): the second matrix to match
Returns:
np.ndarray: the orthonormal matching matrix R
"""
tus.check_ndarrays(
a=(a, ('samples', 'features'), ('float32', 'float64')),
b=(b, (('samples', a.shape[0]), ('features', a.shape[1])), a.dtype)
)
m = b @ a.T
u, _, vh = scipy.linalg.svd(m)
return np.real(u @ vh)
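
# A hedged verification sketch (assumes scipy and the `tus` validation helper used
# above are importable): build b by rotating a, then check that match() recovers a
# matrix mapping a back onto b.
# theta = np.pi / 4
# rot = np.array([[np.cos(theta), -np.sin(theta)],
#                 [np.sin(theta),  np.cos(theta)]], dtype="float64")
# a = np.random.default_rng(0).normal(size=(2, 5)).astype("float64")
# b = rot @ a
# r = match(a, b)
# np.allclose(r @ a, b)   # True (up to numerical precision)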
| 21,740
|
def macro_cons_silver_amount():
"""
Holdings report of the world's largest silver ETF, iShares Silver Trust; data range: 2006-04-29 to the present
:return: pandas.Series
2006-04-29 263651152
2006-05-02 263651152
2006-05-03 445408550
2006-05-04 555123947
2006-05-05 574713264
...
2019-10-17 Show All
2019-10-18 Show All
2019-10-21 Show All
2019-10-22 Show All
2019-10-23 Show All
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总价值(美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_amount"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["总价值"]
temp_append_df.name = "silver_amount"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
| 21,741
|
def shuffle(answers):
"""
Returns mixed answers and the index of the correct one,
assuming the first answer is the correct one.
"""
indices = list(range(len(answers)))
random.shuffle(indices)
correct = indices.index(0)
answers = [answers[i] for i in indices]
return answers, correct
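
# Usage sketch:
# random.seed(0)
# mixed, idx = shuffle(["correct", "wrong A", "wrong B"])
# mixed[idx]   # 'correct', wherever it ended up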
| 21,742
|
def annotations_to_xml(annotations_df: pd.DataFrame, image_path: Union[str, Path],
write_file=True) -> str:
"""
Load annotations from dataframe (retinanet output format) and
convert them into xml format (e.g. RectLabel editor / LabelImg).
Args:
annotations_df (DataFrame): Format [xmin,ymin,xmax,ymax,label,...]
image_path: string/Path path to the file where these bboxes are found
write_file: Writes the xml at the same path as the image it describes.
Overwrites the existent file, if any.
Returns:
XML
<annotation>
<folder>unlabeled_imgs</folder>
<filename>autumn-forest-from-above-2210x1473.jpeg</filename>
<path>/work/trees/unlabeled_imgs/autumn-forest-from-above-2210x1473.jpeg</path>
<source>
<database>Unknown</database>
</source>
<size>
<width>2210</width>
<height>1473</height>
<depth>3</depth>
</size>
<segmented>0</segmented>
<object>
<name>tree</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>718</xmin>
<ymin>603</ymin>
<xmax>792</xmax>
<ymax>705</ymax>
</bndbox>
</object>
</annotation>
"""
image_path = Path(image_path)
out_dict = {
'folder': image_path.parent.name,
'filename': image_path.name,
'path': str(image_path),
'segmented': 0
}
xml_out = '<annotation>\n'
xml_out += dict2xml(out_dict, indent=" ") + '\n'
xml_out += "\n".join([__annotation_row_to_dict(row) for _, row in annotations_df.iterrows()])
xml_out += '\n</annotation>\n'
if write_file:
# annotations file should be near its image
file_path = image_path.parent / f'{image_path.stem}.xml'
with open(file_path, 'w+') as the_file:
the_file.write(xml_out)
return xml_out
| 21,743
|
def depth(sequence, func=max, _depth=0):
"""
Find the nesting depth of a nested sequence
"""
if isinstance(sequence, dict):
sequence = list(sequence.values())
depth_list = [
depth(item, func=func, _depth=_depth + 1)
for item in sequence
if (isinstance(item, dict) or util_type.is_listlike(item))
]
if len(depth_list) > 0:
return func(depth_list)
else:
return _depth
| 21,744
|
def tei_email(elem_text):
"""
create TEI element <email> with given element text
"""
email = etree.Element("email")
email.text = elem_text
return email
| 21,745
|
def rebuild(request):
"""Rebuild ``XPI`` file. It can be provided as POST['location']
:returns: (JSON) contains one field - hashtag it is later used to download
the xpi using :meth:`xpi.views.check_download` and
:meth:`xpi.views.get_download`
"""
# validate entries
secret = request.POST.get('secret', None)
if not secret or secret != settings.AMO_SECRET_KEY:
log.error("Rebuild requested with an invalid key. Rejecting.")
return HttpResponseForbidden('Access denied')
options = request.POST.get('options', None)
location = request.POST.get('location', None)
addons = request.POST.get('addons', None)
upload = request.FILES.get('upload', None)
if not location and not upload and not addons:
log.error("Rebuild requested but files weren't specified. Rejecting.")
return HttpResponseBadRequest('Please provide XPI files to rebuild')
if location and upload:
log.error("Rebuild requested but location and upload provided."
"Rejecting")
return HttpResponseBadRequest('Please provide XPI files to rebuild')
# locate SDK source directory
sdk_version = request.POST.get('sdk_version', None)
if sdk_version:
sdk = get_object_or_404(SDK, version=sdk_version)
sdk_source_dir = sdk.get_source_dir()
else:
sdk_source_dir = (settings.REPACKAGE_SDK_SOURCE
or _get_latest_sdk_source_dir())
sdk_manifest = '%s/packages/%s/package.json' % (sdk_source_dir, 'addon-kit')
try:
handle = open(sdk_manifest)
except Exception, err:
log.critical("Problems loading SDK manifest\n%s" % str(err))
raise
else:
sdk_version = simplejson.loads(handle.read())['version']
handle.close()
pingback = request.POST.get('pingback', None)
priority = request.POST.get('priority', None)
post = request.POST.urlencode()
if priority and priority == 'high':
rebuild_task = tasks.high_rebuild
else:
rebuild_task = tasks.low_rebuild
response = {'status': 'success'}
errors = []
counter = 0
if location or upload:
hashtag = get_random_string(10)
if location:
log.debug('[%s] Single rebuild started for location (%s)' %
(hashtag, location))
else:
log.debug('[%s] Single rebuild started from upload' % hashtag)
filename = request.POST.get('filename', None)
try:
package_overrides = _get_package_overrides(request.POST,
sdk_version)
except BadManifestFieldException, err:
errors.append('[%s] %s' % (hashtag, str(err)))
else:
rebuild_task.delay(
location, upload, sdk_source_dir, hashtag,
package_overrides=package_overrides,
filename=filename, pingback=pingback,
post=post, options=options)
counter = counter + 1
if addons:
try:
addons = simplejson.loads(addons)
except Exception, err:
errors.append('[%s] %s' % (hashtag, str(err)))
else:
for addon in addons:
error = False
filename = addon.get('filename', None)
hashtag = get_random_string(10)
location = addon.get('location', None)
upload_name = addon.get('upload', None)
upload = None
if upload_name:
upload = request.FILES.get(upload_name, None)
if not (location or upload):
errors.append("[%s] Files not specified." % hashtag)
error = True
if location and upload:
errors.append(("[%s] Location and upload provided. "
"Rejecting") % hashtag)
error = True
try:
package_overrides = _get_package_overrides(addon,
sdk_version)
except Exception, err:
errors.append('[%s] %s' % (hashtag, str(err)))
error = True
if not error:
rebuild_task.delay(
location, upload, sdk_source_dir, hashtag,
package_overrides=package_overrides,
filename=filename, pingback=pingback,
post=post)
counter = counter + 1
if errors:
log.error("Errors reported when rebuilding")
response['status'] = 'some failures'
response['errors'] = ''
for e in errors:
response['errors'] = "%s%s\n" % (response['errors'], e)
log.error(" Error: %s" % e)
response['addons'] = counter
uuid = request.POST.get('uuid', 'no uuid')
log.info("%d addon(s) will be created, %d syntax errors, uuid: %s" %
(counter, len(errors), uuid))
return HttpResponse(simplejson.dumps(response),
mimetype='application/json')
| 21,746
|
def get_requirements(req_file: str) -> List[str]:
"""
Extract requirements from provided file.
"""
req_path = Path(req_file)
requirements = req_path.read_text().split("\n") if req_path.exists() else []
return requirements
| 21,747
|
def _generate_ngram_contexts(ngram: str) -> 'List[Acronym]':
"""
Generate a list of contextualized n-grams with a decreasing central n-gram and increasing \
lateral context.
:param ngram:
:return:
"""
tokens = ngram.split(" ")
ngram_size = len(tokens)
contexts = []
# Walk only until half and `max_diff` more.
for i in range(0, int((ngram_size + 1 + MAX_DIFF) / 2)):
# Allow up to `max_diff` difference in size.
for j in range(ngram_size - i + MAX_DIFF, ngram_size - i - MAX_DIFF - 1, -1):
# Do not allow empty acronym.
if i >= j:
break
# Do not walk past the n-gram.
if j > ngram_size:
continue
left = sys.intern(" ".join(tokens[0:i]))
right = sys.intern(" ".join(tokens[j:ngram_size]))
center = sys.intern(" ".join(tokens[i:j]))
contexts.append(Acronym(acronym=center, left_context=left,
right_context=right))
return contexts
| 21,748
|
def get_gmail_account(slug):
"""
Return the details of the given account - just pass in the slug
e.g. get_account('testcity')
"""
service = get_gapps_client()
if not service:
return None
try:
return service.users().get(userKey=make_email(slug)).execute()
except HttpError:
return None
| 21,749
|
def _estimate_components(data, xdata):
"""Not implemented."""
raise NotImplementedError
| 21,750
|
def unisolate_machine_command():
"""Undo isolation of a machine.
Returns:
(str, dict, dict). Human readable, context, raw response
"""
headers = ['ID', 'Type', 'Requestor', 'RequestorComment', 'Status', 'MachineID', 'ComputerDNSName']
machine_id = demisto.args().get('machine_id')
comment = demisto.args().get('comment')
machine_action_response = unisolate_machine_request(machine_id, comment)
machine_action_data = get_machine_action_data(machine_action_response)
entry_context = {
'MicrosoftATP.MachineAction(val.ID === obj.ID)': machine_action_data
}
human_readable = tableToMarkdown("The request to stop the isolation has been submitted successfully:",
machine_action_data, headers=headers, removeNull=True)
return human_readable, entry_context, machine_action_response
| 21,751
|
def func(*listItems):
"""
1. Iterate over every argument passed in.
2. Put every element found inside the list arguments into one flat list.
3. Sort that list (keeping only ints) and return the largest element.
"""
tmp_list=[]
for item in listItems:
if isinstance(item,list):
for i in item:
tmp_list.append(i)
tmp_list=list(filter(lambda k:isinstance(k,int),tmp_list))
tmp_list.sort(reverse=True)
max_value=tmp_list[0]
return max_value
| 21,752
|
def factory(name: str):
"""Factory function to return a processing function for
Part of Speech tagging.
Parameters:
-----------
name : str
Identifier, e.g. 'spacy-de', 'stanza-de', 'flair-de', 'someweta-de',
'someweta-web-de'
Example:
--------
import nlptasks as nt
import nlptasks.pos
sequences = [['Die', 'Kuh', 'ist', 'bunt', '.']]
myfn = nt.pos.factory("spacy-de")
idseqs, TAGSET = myfn(sequences, maxlen=4)
"""
if name in ("spacy", "spacy-de"):
return spacy_de
elif name in ("stanza", "stanza-de"):
return stanza_de
elif name == "flair-de":
return flair_de
elif name in ("someweta", "someweta-de"):
return someweta_de
elif name in ("someweta-web", "someweta-web-de"):
return someweta_web_de
else:
raise Exception(f"Unknown PoS tagger: '{name}'")
| 21,753
|
def backoff_handler(debug_only=True):
"""Backoff logging handler for when polling occurs.
Args:
debug_only (bool): If True (default), log the retry message at DEBUG level;
otherwise log it at INFO level.
Returns:
Callable suitable for backoff's ``on_backoff`` hook; it receives the backoff
``details`` dict (number of tries, target function, wait time, etc.).
"""
def _wrapped(details):
message = '[Backoff]: Calling \'{}\' again in {:f} seconds with {:d} tries so far'.format(
details['target'].__name__,
details['wait'],
details['tries']
)
if not debug_only:
LOGGER.info(message)
else:
LOGGER.debug(message)
return _wrapped
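
# A minimal wiring sketch using the `backoff` library's on_backoff hook (assumes a
# module-level LOGGER as in the handler above; the URL and function are illustrative only).
# import backoff
# import requests
#
# @backoff.on_exception(backoff.expo, requests.exceptions.RequestException,
#                       max_tries=5, on_backoff=backoff_handler(debug_only=False))
# def fetch(url):
#     return requests.get(url, timeout=3)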
| 21,754
|
def align_dataframes(framea, frameb, fill_value = 0.0):
"""Use pandas DataFrame structure to align two-dimensional data
:param framea: First pandas dataframe to align
:param frameb: Other pandas dataframe to align
:param fill_value: default fill value (0.0 float)
return: tuple of aligned frames
"""
zeroframe = frameb.copy()
zeroframe[:] = fill_value
aligneda = framea.add(zeroframe, fill_value = fill_value)
zeroframe = framea.copy()
zeroframe[:] = fill_value
alignedb = frameb.add(zeroframe, fill_value = fill_value)
return aligneda, alignedb
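
# For example (pandas assumed, as above):
# a = pd.DataFrame({"x": [1.0, 2.0]}, index=["r1", "r2"])
# b = pd.DataFrame({"x": [5.0]}, index=["r3"])
# aligned_a, aligned_b = align_dataframes(a, b)
# aligned_a["x"]  # [1.0, 2.0, 0.0] over index [r1, r2, r3]
# aligned_b["x"]  # [0.0, 0.0, 5.0] over index [r1, r2, r3]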
| 21,755
|
def flip_ud(img):
"""
Expects shape to be (num_examples, modalities, depth, width, height)
"""
return np.flip(img.copy(), 3)
| 21,756
|
def download_facescrub((data_dir, genders, names, urls, bboxes)):
"""
download from urls into folder names using wget
"""
assert(len(names) == len(urls))
assert(len(names) == len(bboxes))
# download using external wget
CMD = 'wget -c -t 1 -T 3 "%s" -O "%s"'
for i in range(len(names)):
directory = join(data_dir, genders[i])
if not exists(directory):
print(directory)
os.mkdir(directory)
fname = hashlib.sha1(urls[i]).hexdigest() + "_" + names[i] + '.jpg'
dst = join(directory, fname)
print("downloading", dst)
if exists(dst):
print("already downloaded, skipping...")
continue
else:
res = os.system(CMD % (urls[i], dst))
# get face
face_directory = join(directory, 'face')
if not exists(face_directory):
os.mkdir(face_directory)
img = cv2.imread(dst)
if img is None:
# no image data
os.remove(dst)
else:
face_path = join(face_directory, fname)
face = img[bboxes[i][1]:bboxes[i][3], bboxes[i][0]:bboxes[i][2]]
cv2.imwrite(face_path, face)
#write bbox to file
with open(join(directory, '_bboxes.txt'), 'a') as fd:
bbox_str = ','.join([str(_) for _ in bboxes[i]])
fd.write('%s %s\n' % (fname, bbox_str))
| 21,757
|
def data_context_service_interface_pointuuid_otsi_service_interface_point_spec_otsi_capability_get(uuid): # noqa: E501
"""data_context_service_interface_pointuuid_otsi_service_interface_point_spec_otsi_capability_get
returns tapi.photonic.media.OtsiCapabilityPac # noqa: E501
:param uuid: Id of service-interface-point
:type uuid: str
:rtype: TapiPhotonicMediaOtsiCapabilityPac
"""
return 'do some magic!'
| 21,758
|
def test_select_all_columns():
"""Validate select returning all columns using '*' (the default)."""
_logger.debug(stack()[0][3])
config = deepcopy(_CONFIG)
t = table(config)
data = t.select(container='tuple')
assert len(data[0]) == len(t.columns())
| 21,759
|
def _get_trial_event_times(events, units, trial_cond_name):
"""
Get median event start times from all unit-trials from the specified "trial_cond_name" and "units" - aligned to GO CUE
:param events: list of events
"""
events = list(events) + ['go']
event_types, event_times = (psth.TrialCondition().get_trials(trial_cond_name)
* (experiment.TrialEvent & [{'trial_event_type': eve} for eve in events])
& units).fetch('trial_event_type', 'trial_event_time')
period_starts = [(event_type, np.nanmedian((event_times[event_types == event_type]
- event_times[event_types == 'go']).astype(float)))
for event_type in events[:-1] if len(event_times[event_types == event_type])]
present_events, event_starts = list(zip(*period_starts))
return np.array(present_events), np.array(event_starts)
| 21,760
|
def is_path_exists_or_creatable(pathname=None):
"""
`True` if the passed pathname is a valid pathname for the current OS _and_
either currently exists or is hypothetically creatable; `False` otherwise.
This function is guaranteed to _never_ raise exceptions.
"""
try:
# To prevent "os" module calls from raising undesirable exceptions on
# invalid pathnames, is_pathname_valid() is explicitly called first.
return is_pathname_valid(pathname) and (os.path.exists(pathname) or is_path_creatable(pathname))
# Report failure on non-fatal filesystem complaints (e.g., connection
# timeouts, permissions issues) implying this path to be inaccessible. All
# other exceptions are unrelated fatal issues and should not be caught here.
except OSError:
return False
| 21,761
|
def set_virtualenv_prefix(prefix_tuple):
"""
:return: Sets the virtualenv prefix given a tuple returned from get_virtualenv_prefix()
"""
if prefix_tuple[0] == 'sys.real_prefix' and hasattr(sys, 'real_prefix'):
sys.real_prefix = prefix_tuple[1]
elif prefix_tuple[0] == 'sys.base_prefix' and hasattr(sys, 'base_prefix'):
sys.base_prefix = prefix_tuple[1]
| 21,762
|
def draw_point(framebuffer, x, y, color):
"""
Draw a single pixel of given color at the specified coordinates.
:param framebuffer: An instance of the framebuffer class.
:param int x: The X-coordinate.
:param int y: The Y-coordinate.
:param color: An instance of the color class.
"""
framebuffer.pixel_data[y * framebuffer.width + x] = color.get_value()
| 21,763
|
def select_region(selections, positions, region):
"""
Return the selections whose positions fall inside the given (x, y, z) region.
"""
if not region:
return selections
region = list(region) + [None, None]
assert all([x is None or isinstance(x, Iterable) and len(x) == 2
for x in region]), 'region should be collections of x,y,z region'
output = []
for sel in selections:
# keep a selection only if every specified axis range contains its position
if all((not reg) or (reg[0] <= positions[sel][regi] <= reg[1])
for regi, reg in enumerate(region[:3])):
output.append(sel)
return output
| 21,764
|
def summarize_center_and_dispersion(
analysis_layer,
summarize_type=["CentralFeature"],
ellipse_size=None,
weight_field=None,
group_field=None,
output_name=None,
context=None,
gis=None,
estimate=False,
future=False):
"""
.. image:: _static/images/summarize_center_and_dispersion/summarize_center_and_dispersion.png
The ``summarize_center_and_dispersion`` method finds central features and directional distributions. It can be used to answer questions such as:
* Where is the center?
* Which feature is the most accessible from all other features?
* How dispersed, compact, or integrated are the features?
* Are there directional trends?
==================== =========================================================
**Argument** **Description**
-------------------- ---------------------------------------------------------
analysis_layer Required feature layer. The point, line, or polygon features to be analyzed. See :ref:`Feature Input<FeatureInput>`.
-------------------- ---------------------------------------------------------
summarize_type Required list of strings. The method with which to summarize the ``analysis_layer``.
Choice list: ["CentralFeature", "MeanCenter", "MedianCenter", "Ellipse"]
-------------------- ---------------------------------------------------------
ellipse_size Optional string. The size of the output ellipse in standard deviations.
Choice list: ['1 standard deviations', '2 standard deviations', '3 standard deviations']
The default ellipse size is '1 standard deviations'.
-------------------- ---------------------------------------------------------
weight_field Optional field. A numeric field in the ``analysis_layer`` to be used to
weight locations according to their relative importance.
-------------------- ---------------------------------------------------------
group_field Optional field. The field used to group features for separate directional
distribution calculations. The ``group_field`` can be of
integer, date, or string type.
-------------------- ---------------------------------------------------------
output_name Optional string. If provided, the method will create a feature service of the results.
You define the name of the service. If ``output_name`` is not supplied, the method will return a feature collection.
-------------------- ---------------------------------------------------------
context Optional string. Context contains additional settings that affect task execution. For ``summarize_center_and_dispersion``, there are two settings.
#. Extent (``extent``) - a bounding box that defines the analysis area. Only those features in the input layer that intersect the bounding box will be buffered.
#. Output Spatial Reference (``outSR``) - the output features will be projected into the output spatial reference.
-------------------- ---------------------------------------------------------
estimate Optional boolean. If True, the number of credits to run the operation will be returned.
-------------------- ---------------------------------------------------------
future Optional boolean. If True, the result will be a GPJob object and results will be returned asynchronously.
==================== =========================================================
:returns: list of items if ``output_name`` is supplied else, a Python dictionary with the following keys:
"central_feature_result_layer" : layer (FeatureCollection)
"mean_feature_result_layer" : layer (FeatureCollection)
"median_feature_result_layer" : layer (FeatureCollection)
"ellipse_feature_result_layer" : layer (FeatureCollection)
.. code-block:: python
# USAGE EXAMPLE: To find central features and mean center of earthquake over past months.
central_features = summarize_center_and_dispersion(analysis_layer=earthquakes,
summarize_type=["CentralFeature","MeanCenter"],
ellipse_size='2 standard deviations',
weight_field='mag',
group_field='magType',
output_name='find central features and mean center of earthquake over past months')
"""
gis = _arcgis.env.active_gis if gis is None else gis
return gis._tools.featureanalysis.summarize_center_and_dispersion(
analysis_layer,
summarize_type,
ellipse_size,
weight_field,
group_field,
output_name,
context,
estimate=estimate, future=future)
| 21,765
|
def rename_channel(channel_id, old_name, new_name):
"""Renames channel folders and updates channel.json"""
channel_list_path = archive_path / "channels.json"
with open(channel_list_path, 'r+') as channel_list:
old_channel_list = json.load(channel_list)
channel = next((ch for ch in old_channel_list if ch['id'] == channel_id), None)
if channel is not None:
channel['name'] = new_name
else:
print(f"Renamed channel (id={channel_id}) from {old_name} to {new_name},"
f"but have not found channel with id on channels.json",
file=err_stream, flush=True)
channel_list.seek(0)
json.dump(old_channel_list, channel_list, indent=4)
channel_list.truncate()
print(f"Channel rename event from {old_name} to {new_name}", flush=True,
file=info_stream)
if old_name is not None:
old_path = archive_path / old_name
new_path = archive_path / new_name
old_path.rename(new_path)
else:
print(f"Attempted to rename channel id {channel_id}, but it doesn't exist",
flush=True, file=warn_stream)
| 21,766
|
def length_entropy(r: np.ndarray, minlen: int = 2) -> float:
"""Calculate entropy of diagonal lengths in RQ matrix.
Args:
r (np.ndarray[bool, bool]): Recurrence matrix
minlen (int): Minimum length of a line
Returns:
float: Shannon entropy of distribution of segment lengths
"""
dlens = diagonal_lengths(r, minlen)
counts = _dlen_counts(dlens, minlen, r.shape[0])
return entropy(counts)
| 21,767
|
def verify_scholarship_chair(user):
""" Verify user has Scholarship Chair permissions """
user_id = user.brother.id
if Position.objects.filter(title='President')[0].brother.id == user_id or \
Position.objects.filter(title='Scholarship Chair')[0].brother.id == user_id or \
debug:
return True
else:
return False
| 21,768
|
def float_range(start=0, stop=None, step=1):
"""
Much like the built-in function range, but accepts floats
>>> tuple(float_range(0, 9, 1.5))
(0.0, 1.5, 3.0, 4.5, 6.0, 7.5)
"""
start = float(start)
while start < stop:
yield start
start += step
| 21,769
|
def rotate(posList, axis, angle):
"""Rotate the points about a given axis by a given angle."""
#normalize axis, turn angle into radians
axis = axis/np.linalg.norm(axis)
angle = np.deg2rad(angle)
#rotation matrix construction
ux, uy, uz = axis
sin, cos = np.sin(angle), np.cos(angle)
rotMat = np.array([[cos+ux*ux*(1.-cos), ux*uy*(1.-cos)-uz*sin, ux*uz*(1.-cos)+uy*sin],
[uy*ux*(1.-cos)+uz*sin, cos+uy*uy*(1.-cos), uy*uz*(1.-cos)-ux*sin],
[uz*ux*(1.-cos)-uy*sin, uz*uy*(1.-cos)+ux*sin, cos+uz*uz*(1.-cos)]])
#rotate points
return np.transpose(np.dot(rotMat,np.transpose(posList)))
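
# For example, rotating a point on the x-axis by 90 degrees about z (numpy assumed, as above):
# rotate(np.array([[1.0, 0.0, 0.0]]), np.array([0.0, 0.0, 1.0]), 90.0)
# -> approximately [[0., 1., 0.]]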
| 21,770
|
def _make_frame_with_filename(tb, idx, filename):
"""Return a copy of an existing stack frame with a new filename."""
frame = tb[idx]
return FrameSummary(
filename,
frame.lineno,
frame.name,
frame.line)
| 21,771
|
def do_environment_session_create(mc, args):
"""Creates a new configuration session for environment ID."""
environment_id = args.id
session_id = mc.sessions.configure(environment_id).id
print("Created new session:")
formatters = {"id": utils.text_wrap_formatter}
utils.print_dict({"id": session_id}, formatters=formatters)
| 21,772
|
def median(X):
"""
Middle value after sorting all values by size, or mean of the two middle values.
Parameters
----------
X : np.array
Dataset. Should be a two-dimensional array.
Returns
-------
a: np.array
One-dimensional array that contains the median for each feature.
"""
return np.nanmedian(X, axis=0)
| 21,773
|
def setTextFont(self, strng):
"""
TOWRITE
:param `strng`: TOWRITE
:type `strng`: QString
"""
self.textFontSelector.setCurrentFont(QFont(strng))
self.setSettingsTextFont(strng)
| 21,774
|
def _filter_none_values(d: dict):
"""
Filter out the key-value pairs with `None` as value.
Arguments:
d
dictionary
Returns:
filtered dictionary.
"""
return {key: value for (key, value) in d.items() if value is not None}
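
# For example (note that falsy values other than None are kept):
# _filter_none_values({"a": 1, "b": None, "c": 0})
# -> {'a': 1, 'c': 0}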
| 21,775
|
def new_project(request):
"""
if this is a new project, call crud_project without a slug and
with action set to New
"""
return crud_project(request, slug=None, action="New")
| 21,776
|
def get_Simon_instance(simon_instance):
"""Return an instance of the Simon family as a `Cipher`."""
if simon_instance == SimonInstance.simon_32_64:
default_rounds = 32
n = 16
m = 4
z = "11111010001001010110000111001101111101000100101011000011100110"
elif simon_instance == SimonInstance.simon_48_96:
default_rounds = 36
n = 24
m = 4
z = "10001110111110010011000010110101000111011111001001100001011010"
elif simon_instance == SimonInstance.simon_64_128:
default_rounds = 44
n = 32
m = 4
z = "11011011101011000110010111100000010010001010011100110100001111"
else:
raise ValueError("invalid instance of Simon")
class SimonKeySchedule(RoundBasedFunction):
"""Key schedule function."""
num_rounds = default_rounds
input_widths = [n for _ in range(m)]
output_widths = [n for _ in range(default_rounds)]
@classmethod
def set_num_rounds(cls, new_num_rounds):
cls.num_rounds = new_num_rounds
cls.input_widths = [n for _ in range(min(m, new_num_rounds))]
cls.output_widths = [n for _ in range(new_num_rounds)]
@classmethod
def eval(cls, *master_key):
if cls.num_rounds <= m:
return list(reversed(master_key))[:cls.num_rounds]
k = [None for _ in range(cls.num_rounds)]
k[:m] = list(reversed(master_key))
for i in range(m, cls.num_rounds):
tmp = RotateRight(k[i - 1], 3)
if m == 4:
tmp ^= k[i - 3]
tmp ^= RotateRight(tmp, 1)
k[i] = ~k[i - m] ^ tmp ^ int(z[(i - m) % 62]) ^ 3
return k
class SimonEncryption(Encryption, RoundBasedFunction):
"""Encryption function."""
num_rounds = default_rounds
input_widths = [n, n]
output_widths = [n, n]
round_keys = None
@classmethod
def set_num_rounds(cls, new_num_rounds):
cls.num_rounds = new_num_rounds
@classmethod
def eval(cls, x, y):
for i in range(cls.num_rounds):
x, y = (y ^ SimonRF(x) ^ cls.round_keys[i], x)
cls.add_round_outputs(x, y)
return x, y
class SimonCipher(Cipher):
key_schedule = SimonKeySchedule
encryption = SimonEncryption
_simon_instance = simon_instance
@classmethod
def set_num_rounds(cls, new_num_rounds):
cls.key_schedule.set_num_rounds(new_num_rounds)
cls.encryption.set_num_rounds(new_num_rounds)
@classmethod
def test(cls):
old_num_rounds = cls.num_rounds
cls.set_num_rounds(default_rounds)
if cls._simon_instance == SimonInstance.simon_32_64:
plaintext = (0x6565, 0x6877)
key = (0x1918, 0x1110, 0x0908, 0x0100)
assert cls(plaintext, key) == (0xc69b, 0xe9bb)
elif cls._simon_instance == SimonInstance.simon_48_96:
plaintext = (0x726963, 0x20646e)
key = (0x1a1918, 0x121110, 0x0a0908, 0x020100)
assert cls(plaintext, key) == (0x6e06a5, 0xacf156)
elif cls._simon_instance == SimonInstance.simon_64_128:
plaintext = (0x656b696c, 0x20646e75)
key = (0x1b1a1918, 0x13121110, 0x0b0a0908, 0x03020100)
assert cls(plaintext, key) == (0x44c8fc20, 0xb9dfa07a)
else:
raise ValueError("invalid instance of Simon")
cls.set_num_rounds(old_num_rounds)
return SimonCipher
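# Usage sketch (hedged): assuming the SimonInstance enum and the Cipher framework
# referenced above are importable, the returned class can be checked against the
# published Simon test vectors via its test() hook, e.g.:
# Simon32_64 = get_Simon_instance(SimonInstance.simon_32_64)
# Simon32_64.test()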
| 21,777
|
def load_mnist(path, kind="train"):
"""
Documentation:
---
Description:
Load MNIST images and labels from unzipped source files.
---
Parameters:
path : str
Directory containing the unzipped MNIST image and label files.
kind : str
Used to identify training data vs. validation data. Pass
"train" to load training data, and "t10k" to load validation
data.
---
Returns
images : Numpy array
Numpy array containing all images in dataset. Has shape N by
784, where N is the number of samples and 784 is the number
of pixels.
targets : Numpy array
Numpy array containing all targets associated with images.
Has shape N by 1, where N is the number of samples.
"""
labels_path = os.path.join(path,
"{}-labels-idx1-ubyte".format(kind))
images_path = os.path.join(path,
"{}-images-idx3-ubyte".format(kind))
with open(labels_path, "rb") as lbpath:
magic, n = struct.unpack(">II",
lbpath.read(8))
targets = np.fromfile(lbpath,
dtype=np.uint8)
with open(images_path, "rb") as imgpath:
magic, num, rows, cols = struct.unpack(">IIII",
imgpath.read(16))
images = np.fromfile(imgpath,
dtype=np.uint8).reshape(
len(targets), 784)
return images, targets
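# Usage sketch (hedged): assumes the unzipped MNIST files (e.g.
# train-images-idx3-ubyte / train-labels-idx1-ubyte) live in ./data, a
# hypothetical path used only for illustration.
# X_train, y_train = load_mnist("./data", kind="train")
# X_valid, y_valid = load_mnist("./data", kind="t10k")
# X_train.shape -> (60000, 784); y_train.shape -> (60000,)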
| 21,778
|
def chop(data):
"""Split given data stream in normalized chunks.
:param data: Data stream to be divided.
:type data: python:bytes
:returns: A three items tuple with the chunk start index, the chunk length
and the chunk data.
:rtype: ~typing.Tuple[python:int, python:int, python:bytes]
"""
data_size = len(data) # Maximum length of the data.
ck_start = 0 # Chunk start index within the data.
ck_end = 0 # Chunk end index within the data.
ct_idx = c.GHASH_CHUNK_HI # Current cutting index.
while ct_idx != ck_end:
ck_end = ck_start + cut(data[ck_start:ct_idx])
yield ck_start, ck_end, data[ck_start:ck_end]
ck_start, ct_idx = ck_end, min(ck_end + c.GHASH_CHUNK_HI, data_size)
| 21,779
|
def run_server():
"""Run server."""
uvicorn.run(
"hive_gns.server.serve:app",
host=config['server_host'],
port=int(config['server_port']),
log_level="info",
reload=False,
workers=10
)
| 21,780
|
def clamp(val: float) -> int:
"""Clamp a number to that expected by a reasonable RGB component
This ensures that we don't have negative values, or values exceeding one byte
Additionally, in-range inputs are rounded down to the nearest integer
Args:
val (float): Raw float value to clamp
Returns:
int: Clamped R/G/B value
"""
return floor(min(max(0, val), 255))
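# Usage sketch: out-of-range values are pinned to [0, 255]; in-range floats are floored.
assert clamp(-3.2) == 0
assert clamp(127.9) == 127
assert clamp(300.0) == 255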
| 21,781
|
def componental_mfpt(trans: np.ndarray, **kwargs) -> np.ndarray:
"""Compute Markov mean first passage times per connected component of the chain."""
n_comps, comp_labels = scipy.sparse.csgraph.connected_components(
trans, **kwargs
)
hier_trans = transition_matrix(trans)
absorbing = np.isclose(np.diag(hier_trans), 1)
if n_comps == 1 and not absorbing.any():
print('shortcut')
return mfpt(hier_trans)
else:
print('longrun')
times = np.full_like(hier_trans, fill_value=np.inf)
# for each autonomous subsystem
for comp_i in range(n_comps):
is_comp = (comp_labels == comp_i)
absorbing_i = np.flatnonzero(absorbing & is_comp)
nonabsorbing_i = np.flatnonzero(~absorbing & is_comp)
times[nonabsorbing_i[:, None], nonabsorbing_i] = mfpt(
hier_trans[nonabsorbing_i[:, None], nonabsorbing_i]
)
times[absorbing_i, absorbing_i] = 1
return times
| 21,782
|
def get_snippet(path):
"""Get snippet source string"""
current_file_dir = os.path.dirname(__file__)
absolute_path = os.path.join(current_file_dir, path)
with open(absolute_path) as src:
return src.read()
| 21,783
|
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst
higher-dimensional inputs are preserved.
Parameters
----------
array1, array2, ... : array_like
One or more input arrays.
Returns
-------
ret : ndarray
An array, or sequence of arrays, each with ``a.ndim >= 1``.
Copies are made only if necessary.
See Also
--------
atleast_2d, atleast_3d
Examples
--------
>>> np.atleast_1d(1.0)
array([ 1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.]])
>>> np.atleast_1d(x) is x
True
>>> np.atleast_1d(1, [3, 4])
[array([1]), array([3, 4])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if len(ary.shape) == 0 :
result = ary.reshape(1)
else :
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
| 21,784
|
def post_inbox():
"""
POST /v3/inbox/tests
:return:
"""
data = {
'domain': 'domain.com',
'from': 'user@sending_domain.com',
'subject': 'testSubject',
'html': '<html>HTML version of the body</html>' }
req = client.inbox_tests.create(domain=domain, data=data)
print(req.json())
| 21,785
|
def test_list_negative_integer_white_space_nistxml_sv_iv_list_negative_integer_white_space_1_4(mode, save_output, output_format):
"""
Type list/negativeInteger is restricted by facet whiteSpace with value
collapse.
"""
assert_bindings(
schema="nistData/list/negativeInteger/Schema+Instance/NISTSchema-SV-IV-list-negativeInteger-whiteSpace-1.xsd",
instance="nistData/list/negativeInteger/Schema+Instance/NISTXML-SV-IV-list-negativeInteger-whiteSpace-1-4.xml",
class_name="NistschemaSvIvListNegativeIntegerWhiteSpace1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
)
| 21,786
|
def plot_fig_1(f, axs):
"""
Plot the figure labelled Figure 1 in our paper given the data.
"""
idx = 0
N = 5
x_values = [i + 1 for i in range(N)]
axs[idx].plot(
x_values,
create_weights(N, "uniform"),
CB_color_cycle[1],
marker="o",
label=r"\texttt{uniform}",
linewidth=linewidth,
markersize=markersize,
)
axs[idx].plot(
x_values,
create_weights(N, "centred"),
CB_color_cycle[7],
marker="o",
label=r"\texttt{centred}",
linewidth=linewidth,
markersize=markersize,
)
axs[idx].plot(
x_values,
create_weights(N, "increasing"),
CB_color_cycle[2],
marker="o",
label=r"\texttt{increasing}",
linewidth=linewidth,
markersize=markersize,
)
axs[idx].plot(
x_values,
create_weights(N, "decreasing"),
CB_color_cycle[0],
marker="o",
label=r"\texttt{decreasing}",
linewidth=linewidth,
markersize=markersize,
)
axs[idx].tick_params(
axis="x", which="both", bottom=False, top=False, labelbottom=False
)
axs[idx].set_ylabel("Weights", labelpad=10)
axs[idx].set_title(str(N) + " bandwidths", pad=8, fontsize=fs)
axs[idx].set_yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5])
axs[idx].set_ylim([-0.025, 0.525])
idx = 1
N = 6
x_values = [i + 1 for i in range(N)]
axs[idx].plot(
x_values,
create_weights(N, "uniform"),
CB_color_cycle[1],
marker="o",
label=r"\texttt{MMDAgg uniform}",
linewidth=linewidth,
markersize=markersize,
)
axs[idx].plot(
x_values,
create_weights(N, "centred"),
CB_color_cycle[7],
marker="o",
label=r"\texttt{MMDAgg centred}",
linewidth=linewidth,
markersize=markersize,
)
axs[idx].plot(
x_values,
create_weights(N, "increasing"),
CB_color_cycle[2],
marker="o",
label=r"\texttt{MMDAgg increasing}",
linewidth=linewidth,
markersize=markersize,
)
axs[idx].plot(
x_values,
create_weights(N, "decreasing"),
CB_color_cycle[0],
marker="o",
label=r"\texttt{MMDAgg decreasing}",
linewidth=linewidth,
markersize=markersize,
)
axs[idx].tick_params(
axis="x", which="both", bottom=False, top=False, labelbottom=False
)
axs[idx].set_title(str(N) + " bandwidths", pad=8, fontsize=fs)
f.text(0.5, 0.065, "Bandwidths sorted in increasing order", ha="center")
| 21,787
|
def cli(codelabel):
"""Click interface."""
try:
code = Code.get_from_string(codelabel)
except NotExistent:
print("The code '{}' does not exist.".format(codelabel))
sys.exit(1)
example_dft_atomic_kinds(code)
| 21,788
|
def write_formatted_mons(pkmn_string):
"""
Writes string with all Pokémon to txt file.
params: pkmn_string (string)
returns: None
"""
try:
with open('pokemon_list.txt', 'w', encoding='utf8') as pkfile:
pkfile.write(pkmn_string)
pkfile.close()
print("File saved successfully.")
except Exception as e:
print(f"Something went wrong while attempting to create the file: {e}")
return
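# Usage sketch: writes the given text to pokemon_list.txt in the current working directory.
# write_formatted_mons("Bulbasaur\nIvysaur\nVenusaur\n")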
| 21,789
|
def _encode_mapping(mapping, f):
"""Encodes the mapping items in lexical order (spec)"""
f.write(_TYPE_DICT)
for key, value in sorted(mapping.items()):
_encode_buffer(key, f)
bencode(value, f)
f.write(_TYPE_END)
| 21,790
|
def lines2str(lines, sep = "\n"):
"""Merge a list of lines into a single string
Args:
lines (list, str, other): a list of lines or a single object
sep (str, optional): a separator
Returns:
str: a single string which is either a concatenated lines (using
a custom or the default separator) or a str(lines) result
"""
if isinstance(lines, str):
return lines
if hasattr(lines, '__iter__'):
return sep.join(lines)
return str(lines)
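# Usage sketch: lists are joined with the separator, strings pass through, and
# anything else is coerced with str().
assert lines2str(["a", "b", "c"]) == "a\nb\nc"
assert lines2str("already a string") == "already a string"
assert lines2str(42) == "42"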
| 21,791
|
def tail(fname, n=10, with_tail='tail'):
"""Get the last lines in a file.
Parameters
----------
fname : str
File name.
n : int, optional
Number of lines to get (default is 10).
with_tail : str, optional
The 'tail' command to use (default is `tail`).
Returns
-------
str
The last lines in file, or None on error.
"""
fname = os.path.abspath(fname)
try:
lines = subprocess.check_output(
[with_tail, '-n{n}'.format(n=n), fname])
except subprocess.CalledProcessError:
raise RuntimeError('Unable to get status. Please try again.')
except Exception:
raise
else:
return lines.strip()
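# Usage sketch (hedged): requires a POSIX tail binary on PATH; the log path is
# illustrative only. Note that subprocess.check_output returns bytes, so decode before printing.
# last_lines = tail("/var/log/syslog", n=5)
# print(last_lines.decode())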
| 21,792
|
def caplog_handler_at_level(caplog_fixture, level, logger=None):
"""
Helper function to set the caplog fixture's handler to a certain level as well, otherwise records won't be captured,
e.g. if caplog.set_level(logging.INFO) but caplog.handler is at logging.CRITICAL, anything below CRITICAL won't be
captured.
"""
starting_handler_level = caplog_fixture.handler.level
caplog_fixture.handler.setLevel(level)
with caplog_fixture.at_level(level, logger=logger):
yield
caplog_fixture.handler.setLevel(starting_handler_level)
| 21,793
|
def evaluate(board):
"""
Evaluates chess board
input parameter(s):
board --> The chess board to be evaluated
return parameter(s):
score --> The board evaluation
"""
score = 0
for i in range(len(board)):
for j in range(len(board[i])):
# Add piece value and its current square value (a Queen on d4 will be worth 900 + 5)
piece_value = piece_values[board[i][j]] + \
square_values[board[i][j]][i][j]
# Add piece value to overall board score
score += piece_value
return score
| 21,794
|
def get_atom(value):
"""atom = [CFWS] 1*atext [CFWS]
An atom could be an rfc2047 encoded word.
"""
atom = Atom()
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
if value and value[0] in ATOM_ENDS:
raise errors.HeaderParseError(
"expected atom but found '{}'".format(value))
if value.startswith('=?'):
try:
token, value = get_encoded_word(value)
except errors.HeaderParseError:
# XXX: need to figure out how to register defects when
# appropriate here.
token, value = get_atext(value)
else:
token, value = get_atext(value)
atom.append(token)
if value and value[0] in CFWS_LEADER:
token, value = get_cfws(value)
atom.append(token)
return atom, value
| 21,795
|
def create_page():
"""新增页面"""
tags = dbutils.get_tags()
return render_template('edit.html', title='新建', edit=False, tags=tags)
| 21,796
|
def test_init_values(test_dbm_fx):
"""
Test and verify all values required for instantiation are not None.
"""
assert test_dbm_fx._initialized
assert test_dbm_fx._dbhost
assert test_dbm_fx._dbpass
assert test_dbm_fx._dbname
assert test_dbm_fx._dbport
assert test_dbm_fx._dbuser
| 21,797
|
def calculate_class_weights():
"""
:return: class-wise true-label-area / false-label-area as a dictionary
"""
df = collect_stats()
df = df.fillna(0)
df = df.pivot(index = 'Class', columns = 'ImageId', values = 'TotalArea')
df = df.sum(axis=1)
df = df / (2500. - df)
return df.to_dict()
| 21,798
|
def get_data_layer(roidb, num_classes):
"""return a data layer."""
if cfg.TRAIN.HAS_RPN:
if cfg.IS_MULTISCALE:
layer = GtDataLayer(roidb)
else:
layer = RoIDataLayer(roidb, num_classes)
else:
layer = RoIDataLayer(roidb, num_classes)
return layer
| 21,799
|