import functools
import logging

logger = logging.getLogger(__name__)


def log_exception(function):
    """Exception logging wrapper."""
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except Exception:
            # Log the traceback instead of silently swallowing the error;
            # the wrapper then implicitly returns None.
            err = "There was an exception in "
            err += function.__name__
            logger.exception(err)
    return wrapper
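# --- Usage sketch (added; not part of the original snippet) ---
# Relies on the `logging` setup shown above; `_divide` is a hypothetical
# helper used only to demonstrate the decorator.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    @log_exception
    def _divide(a, b):
        return a / b

    # The ZeroDivisionError is logged with its traceback instead of
    # propagating, and the wrapper returns None.
    _divide(1, 0)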
| 19,600
|
def test_forecast_example_inputs():
"""Note: this test is extremely minimal, and only checks if a solution is
similar to a previous result.
    This test may also fail if parallelisation is used (which may be the
    case in MKL versions of numpy), since the order of random number access
    from different processes/threads could differ.
"""
np.random.seed(42) # This should not be changed
    launch_datetime = datetime(2017, 4, 24, hour=12, minute=15)
simEnvironment = forecastEnvironment(launchSiteLat=29.2108, # deg
launchSiteLon=-81.0228, # deg
launchSiteElev=4, # m
dateAndTime=launch_datetime,
forceNonHD=True,
debugging=True)
# Set up the example input data files (from 24/04/2017, Daytona Beach)
fileDict = {}
for paramName in GFS_Handler.weatherParameters.keys():
fileDict[paramName] = os.path.join(os.path.dirname(astra.__file__),
'../test/example_data',
'gfs_0p50_06z.ascii?{}[12:15][0:46][231:245][545:571]'.format(paramName))
simEnvironment.loadFromNOAAFiles(fileDict)
output_dir = tempfile.mktemp(suffix='')
inputs = {'environment': simEnvironment,
'balloonGasType': 'Helium',
'balloonModel': 'TA800',
'nozzleLift': 1, # kg
'payloadTrainWeight': 0.433, # kg
'parachuteModel': 'SPH36',
'trainEquivSphereDiam': 0.1,
'outputFile': output_dir}
simFlight = flight(**inputs)
simFlight.run()
# TODO: Add more checks here to compare the path obtained with a reference
# solution. This will require some saved forecast files
test_fname = os.path.join(output_dir, 'out.kml')
assert(os.path.isfile(test_fname))
# check that the kml is a close match to the reference solution
reference_fname = os.path.join(os.path.dirname(__file__), 'example_data/expected_output/out.kml')
assert(filecmp.cmp(reference_fname, test_fname))
| 19,601
|
def calculate_triad_connectivity(tt1, tt2, tt3, ipi1, ipi2, tau_z_pre, tau_z_post,
base_time, base_ipi, resting_time, n_patterns):
"""
    This function gives you the connectivity among a triad, assuming that all the other temporal structure outside of
    the trial is homogeneous.
:param tt1:
:param tt2:
:param tt3:
:param ipi1:
:param ipi2:
:param tau_z_pre:
:param tau_z_post:
:param base_time:
:param base_ipi:
:param resting_time:
:param n_patterns:
:return:
"""
Tt = (n_patterns - 3) * base_time + tt1 + tt2 + tt3 + ipi1 + ipi2 + \
(n_patterns - 2) * base_ipi + resting_time
# Single probabilities
p1_pre = calculate_probability_theo(Tp=tt1, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_pre)
p2_pre = calculate_probability_theo(Tp=tt2, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_pre)
p3_pre = calculate_probability_theo(Tp=tt3, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_pre)
p1_post = calculate_probability_theo(Tp=tt1, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_post)
p2_post = calculate_probability_theo(Tp=tt2, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_post)
p3_post = calculate_probability_theo(Tp=tt3, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_post)
# joint-self probabilities
p11 = calculate_self_probability_theo(T1=tt1, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
p22 = calculate_self_probability_theo(T1=tt2, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
p33 = calculate_self_probability_theo(T1=tt3, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
# Joint probabilities
Ts = tt1 + ipi1
p21 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt2, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
Ts = tt1 + ipi1 + tt2 + ipi2
p31 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
Ts = tt1 + ipi1
p12 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt2, Tt=Tt, tau1=tau_z_post, tau2=tau_z_pre)
Ts = tt2 + ipi2
p32 = calculate_joint_probabilities_theo(T1=tt2, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
Ts = tt1 + ipi1 + tt2 + ipi2
p13 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_post, tau2=tau_z_pre)
Ts = tt2 + ipi2
p23 = calculate_joint_probabilities_theo(T1=tt2, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_post, tau2=tau_z_pre)
# Weights
w11 = np.log10(p11 / (p1_pre * p1_post))
w12 = np.log10(p12 / (p1_pre * p2_post))
w13 = np.log10(p13 / (p1_pre * p3_post))
w21 = np.log10(p21 / (p2_pre * p1_post))
w22 = np.log10(p22 / (p2_pre * p2_post))
w23 = np.log10(p23 / (p2_pre * p3_post))
w31 = np.log10(p31 / (p3_pre * p1_post))
w32 = np.log10(p32 / (p3_pre * p2_post))
w33 = np.log10(p33 / (p3_pre * p3_post))
# Betas
beta1 = np.log10(p1_post)
beta2 = np.log10(p2_post)
beta3 = np.log10(p3_post)
# Bs (un-normalized)
B12 = w22 - w12 + beta2 - beta1
B13 = w33 - w13 + beta3 - beta1
B21 = w11 - w21 + beta1 - beta2
B23 = w33 - w32 + beta3 - beta2
B31 = w11 - w31 + beta1 - beta3
B32 = w22 - w32 + beta2 - beta3
return locals()
| 19,602
|
def random_date_from(date,
min_td=datetime.timedelta(seconds=0),
max_td=datetime.timedelta(seconds=0)):
"""
Produces a datetime at a random offset from date.
Parameters:
date: datetime
The reference datetime.
min_td: timedelta, optional
The minimum offset from the reference datetime (could be negative).
max_td: timedelta, optional
The maximum offset from the reference datetime (could be negative).
Return:
datetime
        A new_date such that (date + min_td) <= new_date <= (date + max_td).
"""
min_s = min(min_td.total_seconds(), max_td.total_seconds())
max_s = max(min_td.total_seconds(), max_td.total_seconds())
offset = random.uniform(min_s, max_s)
return date + datetime.timedelta(seconds=offset)
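# --- Usage sketch (added; not part of the original snippet) ---
# The function above already requires `import datetime` and `import random`
# at the top of its module; they are repeated here so the sketch stands alone.
import datetime
import random

if __name__ == "__main__":
    ref = datetime.datetime(2020, 1, 1, 12, 0, 0)
    # A datetime roughly between 1 hour before and 2 hours after `ref`.
    print(random_date_from(ref,
                           min_td=datetime.timedelta(hours=-1),
                           max_td=datetime.timedelta(hours=2)))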
| 19,603
|
def generate_log_normal_dist_value(frequency, mu, sigma, draws, seed_value):
    """
    Generates random values using a lognormal distribution,
    given a specific mean (mu) and standard deviation (sigma).
    https://stackoverflow.com/questions/51609299/python-np-lognormal-gives-infinite-results-for-big-average-and-st-dev
    The parameters mu and sigma in np.random.lognormal are not the mean
    and STD of the lognormal distribution. They are the mean and STD
    of the underlying normal distribution.
    Parameters
    ----------
    frequency : float
        Frequency used (together with seed_value) to derive the random seed.
    mu : int
        Mean of the desired distribution.
    sigma : int
        Standard deviation of the desired distribution.
    draws : int
        Number of required values.
    seed_value : int
        Seed for the random number generator (no seeding if None).
    Returns
    -------
    random_variation : float
        Mean of the random variation over the specified iterations.
    """
    if seed_value is not None:
        frequency_seed_value = seed_value * frequency * 100
        np.random.seed(int(str(frequency_seed_value)[:2]))
    # The normal-to-lognormal parameter conversion uses the natural logarithm.
    normal_std = np.sqrt(np.log(1 + (sigma / mu)**2))
    normal_mean = np.log(mu) - normal_std**2 / 2
    hs = np.random.lognormal(normal_mean, normal_std, draws)
    return round(np.mean(hs), 2)
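# --- Usage sketch (added; not part of the original snippet) ---
# Assumes numpy, which the function above uses as `np`.
import numpy as np

if __name__ == "__main__":
    # Mean of 1000 lognormal draws with mean 2 and std 1, seeded via a
    # hypothetical frequency value of 3.5 (e.g. GHz).
    print(generate_log_normal_dist_value(3.5, 2, 1, 1000, seed_value=42))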
| 19,604
|
def enableStandardSearchPath( enable = True ):
"""Whether or not the standard search path should be searched. This standard
search path is is by default searched *after* the standard data library,
and is built by concatenating entries in the NCRYSTAL_DATA_PATH
environment variables with entries in the compile time definition of the
same name (in that order). Note that by default the standard search path
is searched *after* the standard data library.
"""
_rawfct['ncrystal_enable_stdsearchpath'](1 if enable else 0)
| 19,605
|
def do(args):
""" Main entry point for depends action. """
build_worktree = qibuild.parsers.get_build_worktree(args, verbose=(not args.graph))
project = qibuild.parsers.get_one_build_project(build_worktree, args)
collected_dependencies = get_deps(
build_worktree, project, args.direct, args.runtime, args.reverse)
# create title
label = project.name
if args.runtime:
label = label + " run time"
else:
label = label + " build time"
if args.direct:
label = label + " direct"
if args.reverse:
label = label + " reverse dependencies"
else:
label = label + " dependencies"
# display
if args.graph:
print_deps_graph(project.name, label, collected_dependencies)
else:
qisys.ui.info(qisys.ui.green, label)
if args.tree:
print_deps_tree(collected_dependencies)
else:
print_deps_compressed(collected_dependencies)
| 19,606
|
def test_sign_image_same_image_source(
registry_v2_image_source: RegistryV2ImageSource, image: str
):
"""Test image signing."""
src_image_name = ImageName.parse(image)
dest_image_name = copy.deepcopy(src_image_name)
dest_image_name.tag = "{0}_signed".format(dest_image_name.tag)
def assertions(result: dict):
assert result
image_config = result["image_config"]
assert image_config
assert "FAKE SIGNATURE" in str(image_config)
signature_value = result["signature_value"]
assert signature_value
assert "FAKE SIGNATURE" in signature_value
verify_image_data = result["verify_image_data"]
assert verify_image_data
assert image_config == verify_image_data["image_config"]
manifest = verify_image_data["manifest"]
assert manifest
manifest_signed = result["manifest_signed"]
assert manifest_signed
assert manifest_signed.get_config_digest() == image_config.get_config_digest()
assert len(manifest_signed.get_layers()) == len(image_config.get_image_layers())
# 1. Single signature
assertions(
registry_v2_image_source.sign_image(
FakeSigner(), src_image_name, registry_v2_image_source, dest_image_name
)
)
# TODO: Test signing image twice (with same key, with different keys ...)
# Can we do this here (using dockerhub), or do we need to do this in test_imageconfig.py???
| 19,607
|
def test_definition_list_single_item():
"""A definition list with a single item."""
content = ";Foo : Bar"
wikicode = mwparserfromhell.parse(content)
assert compose(wikicode) == "<dl><dt>Foo </dt><dd> Bar</dd></dl>"
| 19,608
|
def auto_regenerate_axes(elements: List[int]) -> None:
"""regenerate element axis system
Args:
elements (List[int]): element IDs
"""
| 19,609
|
def compute_mean_field(
grain_index_field,
field_data,
field_name,
vx_size=(1.0, 1.0, 1.0),
weighted=False,
compute_std_dev=False,
):
"""
    Compute the mean shear field per grain.
    Args:
        grain_index_field : VTK field containing the grain indices
        field_data : VTK field containing the shear field
        field_name : the requested name of the field
        vx_size=(1.,1.,1.) : the voxel size
        weighted=False : whether or not the mean and stddev are weighted
                         by the grain volume ratio
        compute_std_dev=False : whether we compute the standard deviation
                                for `field_name`
    Returns:
        value_by_grain: 2D numpy array with the mean value for each grain
        mean_field: 3D numpy array containing the mean shear field
        std_field: 3D numpy array containing the per-grain standard deviation
                   field, if compute_std_dev is True
"""
real_indx_grains = np.unique(grain_index_field)
field = field_data.PointData[field_name]
field_dimension = field_data.GetDimensions()
mean_field = np.zeros_like(field)
std_field = np.zeros_like(field)
# volume_grains = np.zeros_like(grain_index_field)
vx_vol = np.prod(vx_size) # vx_size[0]*vx_size[1]*vx_size[2]
# print(np.prod(vx_size))
# if weighted:
volume_total = vx_vol * np.prod(field_dimension)
# else:
# volume_total = 1.0
# print(" volume_total ", volume_total)
# print(" np.prod(field_dimension) ", np.prod(field_dimension))
volume = 1.0
for index in real_indx_grains:
mask_grains = np.nonzero(grain_index_field == index)
# if weighted:
# volume = np.count_nonzero(grain_index_field == index) * vx_vol
mean = algs.mean(field[mask_grains], axis=0) # * volume / volume_total
if VERBOSE:
print(
"- index {} v_i {} v_t {} mean {} mean {}".format(
index,
volume,
volume_total,
algs.mean(field[mask_grains], axis=0),
mean,
)
)
if compute_std_dev:
std_dev = np.std(field[mask_grains], axis=0) # * volume / volume_total
std_field[mask_grains] = std_dev
# volume_grains[mask_grains] = volume
mean_field[mask_grains] = mean
# gamma_by_grain = np.row_stack(gamma_by_grain)
value_by_grain = np.unique(mean_field, axis=0)
# print(" gamma_by_grain ", gamma_by_grain.shape)
# mean_by_grains = np.column_stack((real_indx_grains,gamma_by_grain))
return value_by_grain, mean_field, std_field
| 19,610
|
def evaluate_argument_value(xpath_or_tagname, datafile):
"""This function takes checks if the given xpath_or_tagname exists in the
datafile and returns its value. Else returns None."""
tree = ET.parse(datafile)
root = tree.getroot()
if xpath_or_tagname.startswith(root.tag + "/"):
xpath_or_tagname = xpath_or_tagname[len(root.tag + "/"):]
try:
xpath_or_tagname = root.find(xpath_or_tagname).text
except Exception:
print_error("Invalid xpath: {0}".format(root.tag + "/" + xpath_or_tagname))
xpath_or_tagname = None
else:
print_error("Invalid xpath: {0}".format(xpath_or_tagname))
xpath_or_tagname = None
return xpath_or_tagname
| 19,611
|
def test_savings_flexible_user_left_quota():
"""Tests the API endpoint to get left daily purchase quota of flexible product"""
client = Client(key, secret)
response = client.savings_flexible_user_left_quota(productId=1)
response.should.equal(mock_item)
| 19,612
|
def update():
"""Update a resource"""
pass
| 19,613
|
def normalise_genome_position(x):
"""
Normalise position (circular genome)
"""
x['PositionNorm0'] = np.where(x['Position'] > (x['GenomeLength'] / 2),
(x['GenomeLength'] - x['Position']),
x['Position'])
x['PositionNorm'] = x['PositionNorm0']**(1/2)
# Reference position
n_reads = x['readCount'].max()
start_position_ref = int(1)
end_position_ref = x['GenomeLength'].iloc[0]
end_position_ref = end_position_ref + n_reads
increase_by = (end_position_ref / n_reads)
x['ref_Position'] = list(frange(start_position_ref, end_position_ref,
increase_by))
x['ref_Position'] = x['ref_Position'].astype(int)
x['PositionNorm_ref0'] = np.where(x['ref_Position'] > (x['GenomeLength'] / 2),
(x['GenomeLength'] - x['ref_Position']),
x['ref_Position'])
x['PositionNorm_ref'] = x['PositionNorm_ref0'].astype(int)
return x
| 19,614
|
def random_address(invalid_data):
"""
    Generate a random imitation postal address.
    return: a string containing the imitation postal address (returned
            together with global_valid_data).
"""
fake = Faker(['en_CA']) # localized to Canada
return fake.address().replace('\n',', '), global_valid_data
| 19,615
|
def rot_poly(angle, polygon, n):
"""rotate polygon into 2D plane in order to determine if a point exists
within it. The Shapely library uses 2D geometry, so this is done in order
to use it effectively for intersection calculations.
Parameters
----------
angle : float
Euler angle to rotate a vector with respect to n
polygon : NumPy array
Coordinates encompassing a polygon (i.e. a boundary)
n : NumPy array
Normal vector of a boundary
Returns
-------
poly_2d : Shapely Polygon object
Shapely Polygon object in 2D coordinates
Notes
-----
This is not an elegant way of doing this. This works for surfaces that are
tilted with respect to the x-axis, and will work for surfaces with a normal
that is parallel to the y-axis, but will not allow for anything else. For
the code to be fully generalizable, this function will need to be expanded.
"""
xvect = np.array([1,0,0])
frontbacktest = lc.incidence_angle(n,xvect)
# if this is a front or back surface of the LSC, rotate with respect to y
if frontbacktest == 0 or frontbacktest == math.pi:
poly_2d = rot_poly_y(angle, polygon)
# otherwise, rotate with respect to x
else:
poly_2d = rot_poly_x(angle, polygon)
return poly_2d
| 19,616
|
def bytes_isspace(x: bytes) -> bool:
"""Checks if given bytes object contains only whitespace elements.
Compiling bytes.isspace compiles this function.
This function is only intended to be executed in this compiled form.
Args:
x: The bytes object to examine.
Returns:
Result of check.
"""
if len(x) == 0:
return False
for i in x:
if i != ord(' ') and i != ord('\t') and i != ord('\n') and i != ord('\r') and i != 0x0b and i != ord('\f'):
return False
return True
| 19,617
|
def snake_case(name: str):
"""
https://stackoverflow.com/a/1176023/1371716
"""
name = re.sub('(\\.)', r'_', name)
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
name = re.sub('__([A-Z])', r'_\1', name)
name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name)
return name.lower()
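# --- Usage sketch (added; not part of the original snippet) ---
# The function above relies on `import re` at module level.
import re

if __name__ == "__main__":
    print(snake_case("CamelCaseHTTPRequest"))  # camel_case_http_request
    print(snake_case("module.ClassName"))      # module_class_name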
| 19,618
|
def generate_aggregate_plots(countries):
"""
    Generate a set of plots visualizing settlement distribution and size.
"""
# all_data = []
for country in countries:
iso3 = country[0]
data = pd.read_csv(os.path.join(RESULTS, iso3, 'aggregate_results.csv'))#[:100]
data['strategy'] = data['strategy'].replace({
'baseline': 'Baseline',
'smart_diesel_generators': 'Smart Diesel',
'smart_solar': 'Smart Solar',
'pure_solar': 'Solar',
})
data['population_m'] = data['population'] / 1e6
data['phones_m'] = data['phones'] / 1e6
data['smartphones_m'] = data['smartphones'] / 1e6
data['data_consumption_PB'] = data['data_consumption_GB'] / 1e6
data['electricity_consumption_GWh'] = data['electricity_consumption_kWh'] / 1e6
data['carbon_t'] = data['carbon_kgs'] / 1e3
data['nitrogen_oxides_t'] = data['nitrogen_oxides_kgs'] / 1e3
data['sulpher_oxides_t'] = data['sulpher_oxides_kgs'] / 1e3
data['pm10_t'] = data['pm10_kgs'] / 1e3
data = data[['strategy', 'population_m', 'phones_m', 'smartphones_m',
'data_consumption_PB', 'electricity_consumption_GWh',
'carbon_t', 'nitrogen_oxides_t', 'sulpher_oxides_t', 'pm10_t'
]]
data.columns = ['Strategy', 'Population (Mn)', 'Phones (Mn)', 'Smartphones (Mn)',
'Data Consumption (PB)', 'Power Consumption (GWh)',
'Carbon (T)', 'Nitrogen Oxides (T)', 'Sulpher Oxides (T)', 'PM10 (T)'
]
long_data = pd.melt(data,
id_vars=['Strategy'],
value_vars=['Population (Mn)', 'Phones (Mn)', 'Smartphones (Mn)',
'Data Consumption (PB)', 'Power Consumption (GWh)',
'Carbon (T)', 'Nitrogen Oxides (T)', 'Sulpher Oxides (T)', 'PM10 (T)'])
long_data.columns = ['Strategy', 'Metric', 'Value']
pairplot = sns.catplot(x="Strategy", y='Value', #hue="Frequency (GHz)",
col="Metric", col_wrap=3, palette=sns.color_palette("husl", 6),
kind="bar",
data=long_data, sharex=False, sharey=False, orient='v',
# facet_kws=dict(sharex=False, sharey=False),
legend="full")
plt.subplots_adjust(hspace=0.3, wspace=0.3, bottom=0.07)
pairplot.savefig(os.path.join(VIS, 'boxplot_{}'.format(iso3)))
| 19,619
|
def run():
"""Log in and store credentials"""
args = parse_args()
appname = sys.argv[0]
hostname = platform.node()
codetools.setup_logging(args.debug)
password = ''
if args.token_path is None and args.delete_role is True:
cred_path = os.path.expanduser('~/.sq_github_token_delete')
elif args.token_path is None and args.delete_role is False:
cred_path = os.path.expanduser('~/.sq_github_token')
else:
cred_path = os.path.expandvars(os.path.expanduser(args.token_path))
if not os.path.isfile(cred_path):
print("""
Type in your password to get an auth token from github
It will be stored in {0}
and used in subsequent occasions.
""".format(cred_path))
while not password:
password = getpass('Password for {0}: '.format(args.user))
note = textwrap.dedent("""\
{app} via bored^H^H^H^H^H terrified opossums[1]
on {host}
by {user} {creds}
[1] https://youtu.be/ZtLrn2zPTxQ?t=1m10s
""").format(
app=appname,
host=hostname,
user=args.user,
creds=cred_path
)
note_url = 'https://www.youtube.com/watch?v=cFvijBpzD_Y'
if args.delete_role:
scopes = ['repo', 'user', 'delete_repo', 'admin:org']
else:
scopes = ['repo', 'user']
global g
g = github.Github(args.user, password)
u = g.get_user()
try:
auth = u.create_authorization(
scopes=scopes,
note=note,
note_url=note_url,
)
except github.TwoFactorException:
auth = u.create_authorization(
scopes=scopes,
note=note,
note_url=note_url,
# not a callback
onetime_password=codetools.github_2fa_callback()
)
g = github.Github(auth.token)
with open(cred_path, 'w') as fdo:
fdo.write(auth.token + '\n')
fdo.write(str(auth.id))
print('Token written to {0}'.format(cred_path))
else:
print("You already have an auth file: {0} ".format(cred_path))
print("Delete it if you want a new one and run again")
print("Remember to also remove the corresponding token on Github")
| 19,620
|
def get_loss_data():
"""
This function returns a list of paths to all .npy loss
files.
Returns
-------
path_list : list of strings
The list of paths to output files
"""
path = "./data/*_loss.npy"
path_list = glob.glob(path, recursive=True)
return path_list
| 19,621
|
def autocommit(connection, value):
"""
Set autocommit
:Parameters:
`value` : ``bool``
yes or no?
"""
connection.autocommit(int(bool(value)))
| 19,622
|
def ranked_avg_knn_scores(batch_states, memory, k=10, knn=batch_count_scaled_knn):
"""
Computes ranked average KNN score for each element in batch of states
\sum_{i = 1}^{K} (1/i) * d(x, x_i)
Parameters
----------
k: k neighbors
batch_states: numpy array of size [batch_size x state_size]
memory: numpy array of size [memory_size x state_size]
Returns
-------
numpy array of scores of dims [batch_size]
"""
nearest_neighbor_scores = knn(batch_states, memory, k=k)
k = nearest_neighbor_scores.shape[1]
scales = 1 / np.expand_dims(np.arange(1, k + 1), axis=0).repeat(batch_states.shape[0], axis=0)
# There may be the edge case where the number of unique distances for this particular batch
# is less than k. If that's the case, we need to reduce our scales dimension.
# This means one of two things:
# 1. you either have a very small map, or
# 2. your representation has collapsed into less than k points.
ranked_avg_scores = np.multiply(nearest_neighbor_scores, scales)
return np.sum(ranked_avg_scores, axis=-1)
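# --- Usage sketch (added; not part of the original snippet) ---
# The snippet above assumes `np` and a `batch_count_scaled_knn` helper defined
# elsewhere. Here a simple Euclidean k-nearest-neighbour distance function is
# passed explicitly, just to show the expected `knn` signature.
import numpy as np


def _euclidean_knn_dists(batch_states, memory, k=10):
    # Distances from each batch state to its k nearest memory entries,
    # sorted in ascending order: shape [batch_size, k].
    dists = np.linalg.norm(batch_states[:, None, :] - memory[None, :, :], axis=-1)
    return np.sort(dists, axis=1)[:, :k]


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    batch = rng.normal(size=(4, 8))
    memory = rng.normal(size=(100, 8))
    print(ranked_avg_knn_scores(batch, memory, k=10, knn=_euclidean_knn_dists))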
| 19,623
|
def export_geopackage(
eopatch,
geopackage_path,
feature,
geometry_column: str = "geometry",
columns: Optional[List[str]] = None,
):
"""A utility function for exporting
:param eopatch: EOPatch to save
:param geopackage_path: Output path where Geopackage will be written.
:param feature: A vector feature from EOPatches that will be exported to Geopackage
:param geometry_column: Name of a column that will be taken as a geometry column.
:param columns: Columns from dataframe that will be written into Geopackage besides geometry column. By default
all columns will be taken.
Note: in the future it could be implemented as an eo-learn task, the main problem is that writing has to be
consecutive.
"""
existing_layers = fiona.listlayers(geopackage_path) if os.path.exists(geopackage_path) else []
gdf = eopatch[feature]
layer_name = f"{feature[1]}_{gdf.crs.to_epsg()}"
mode = "a" if layer_name in existing_layers else "w"
if not len(gdf.index):
return
# Only one geometry column can be saved to a Geopackage
if isinstance(gdf[geometry_column].iloc[0], str):
gdf[geometry_column] = gdf[geometry_column].apply(loads)
gdf = gdf.set_geometry(geometry_column)
if columns is not None:
gdf = gdf.filter(columns + [geometry_column], axis=1)
gdf.to_file(geopackage_path, mode=mode, layer=layer_name, driver="GPKG", encoding="utf-8")
| 19,624
|
def login_exempt(view_func):
"""登录豁免,被此装饰器修饰的action可以不校验登录."""
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.login_exempt = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
| 19,625
|
def to_uint8_image(message : ImageMessage) -> ImageMessage:
"""Convert image type to uint8.
Args:
message (ImageMessage): Image to be converted
Returns:
        ImageMessage: Resulting image
"""
message.image = np.uint8(message.image*255)
if message.mask is not None:
message.mask = np.uint8(message.mask*255)
return message
| 19,626
|
def list_zero_alphabet() -> list:
"""Build a list: 0, a, b, c etc."""
score_dirs = ['0']
for char in string.ascii_lowercase:
score_dirs.append(char)
return score_dirs
| 19,627
|
def proxy(values=(0,), names=('constant',), types=('int8',)):
""" Create a proxy image with the given values, names and types
:param values: list of values for every band of the resulting image
:type values: list
:param names: list of names
:type names: list
:param types: list of band types. Options are 'int8', 'int16', 'int32',
'int64', 'uint8', 'uint16', 'uint32', 'byte', 'short', 'int', 'long',
'float' and 'double'
:type types: list
:rtype: ee.Image
"""
values = list(values)
names = list(names)
types = list(types)
tps = dict(zip(names, types))
im = ee.Image(values).rename(names).cast(tps)
# update mask
for v, n in zip(values, names):
if v is None:
band = im.select(n)
masked = band.selfMask()
im = im.addBands(masked, overwrite=True)
return im
| 19,628
|
def aiff2mp3(infile_path: str) -> None:
"""Convert AIFF to MP3 using lame."""
args = [_LAME_CLT]
args.append(infile_path)
subprocess.run(args)
| 19,629
|
def _to_versions(raw_ls_remote_lines, version_join, tag_re, tag_filter_re):
"""Converts raw ls-remote output lines to a sorted (descending)
list of (Version, v_str, git_hash) objects.
This is used for source:git method to find latest version and git hash.
"""
ret = []
for line in raw_ls_remote_lines:
git_hash, ref = six.ensure_text(line).split('\t')
if ref.startswith('refs/tags/'):
tag = ref[len('refs/tags/'):]
if tag_filter_re and not tag_filter_re.match(tag):
continue
m = tag_re.match(tag)
if not m:
continue
v_str = m.group(1)
if version_join:
v_str = '.'.join(v_str.split(version_join))
ret.append((parse_version(v_str), v_str, git_hash))
return sorted(ret, reverse=True)
| 19,630
|
from typing import List


def arraysum(x: List[int]) -> int:
    """
    This function gives the sum of all elements of a list by iterating
    through it and adding them.
    Input: list of integers
    Output: integer
    """
    total = 0
    for i in x:
        total += i
    return total
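# --- Usage sketch (added; not part of the original snippet) ---
if __name__ == "__main__":
    print(arraysum([1, 2, 3, 4]))  # 10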
| 19,631
|
def get_sensitivity_scores(model, features, top_n):
"""
Finds the sensitivity of each feature in features for model. Returns the top_n
feature names, features_top, alongside the sensitivity values, scores_top.
"""
# Get just the values of features
x_train = features.values
# Apply min max normalization
scaler = MinMaxScaler().fit(x_train)
x_train = scaler.transform(x_train)
# Find mean and standard deviation of each feature
x_train_avg = np.mean(x_train, axis=0).reshape(1, -1)
x_train_std = np.std(x_train, axis=0).reshape(1, -1)
prediction_mean = model.predict(x_train_avg)
scores_max = []
# Iterate over each feature
for i in range(x_train_avg.shape[1]):
# Copy x_train_avg
x_train_i = x_train_avg.copy()
# Add the standard deviation of i to that column
x_train_i[:, i] = x_train_i[:, i] + x_train_std[:, i]
result_i = model.predict(x_train_i)
# Take the difference and divide by standard deviation
diff = (result_i - prediction_mean) / x_train_std[:, i]
scores_max.append(diff.flatten()[0])
scores_max = np.absolute(scores_max)
indices_top = np.argsort(scores_max)[-top_n:]
features_top = features.iloc[:, indices_top].columns
scores_top = scores_max[indices_top]
return features_top, scores_top
| 19,632
|
def mad(data):
"""Median absolute deviation"""
m = np.median(np.abs(data - np.median(data)))
return m
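# --- Usage sketch (added; not part of the original snippet) ---
# Assumes numpy, which the function above uses as `np`.
import numpy as np

if __name__ == "__main__":
    data = np.array([1.0, 2.0, 2.0, 3.0, 100.0])
    # The single outlier barely moves the MAD (median 2.0, deviations
    # [1, 0, 0, 1, 98], so the MAD is 1.0).
    print(mad(data))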
| 19,633
|
def test_larger_cases(arg, expected) -> None:
"""Test a few other integers."""
assert fib(arg) == expected
| 19,634
|
async def get_intents(current_user: User = Depends(Authentication.get_current_user_and_bot)):
"""
Fetches list of existing intents for particular bot
"""
return Response(data=mongo_processor.get_intents(current_user.get_bot())).dict()
| 19,635
|
def pars(f):
"""
>>> list(pars(StringIO('a\nb\nc\n\nd\ne\nf\n')))
[['a\n','b\n','c\n'],['d\n','e\n','f\n']]
"""
par = []
for line in f:
if line == '\n':
yield par
par = []
else:
par.append(line)
if par != []:
yield par
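# --- Usage sketch (added; not part of the original snippet), mirroring the doctest ---
from io import StringIO

if __name__ == "__main__":
    print(list(pars(StringIO('a\nb\nc\n\nd\ne\nf\n'))))
    # [['a\n', 'b\n', 'c\n'], ['d\n', 'e\n', 'f\n']]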
| 19,636
|
def launch(result_images, source):
"""
Launch the GUI.
"""
entries = find_failing_tests(result_images, source)
if len(entries) == 0:
print("No failed tests")
sys.exit(0)
app = QtWidgets.QApplication(sys.argv)
dialog = Dialog(entries)
dialog.show()
filter = EventFilter(dialog)
app.installEventFilter(filter)
sys.exit(app.exec_())
| 19,637
|
def test_fsp_id_range():
"""
Verify that fsp id is in the range of 1 to 27
"""
fsp_id = 0
with pytest.raises(ValueError):
_ = FSPConfiguration(fsp_id, FSPFunctionMode.CORR, 1, 140, 0)
fsp_id = 28
with pytest.raises(ValueError):
_ = FSPConfiguration(fsp_id, FSPFunctionMode.CORR, 1, 140, 0)
| 19,638
|
def test_makedirs_with_extant_directories(tmpdir):
"""
``makedirs_exist_ok`` doesn't care if the directories already exist.
"""
d = tmpdir.join('a', 'b', 'c')
d.ensure(dir=True)
files.makedirs_exist_ok(d.strpath)
assert d.exists()
| 19,639
|
def open_beneath(
path: Union[AnyStr, "os.PathLike[AnyStr]"],
flags: int,
*,
mode: int = 0o777,
dir_fd: Optional[int] = None,
no_symlinks: bool = False,
remember_parents: bool = False,
audit_func: Optional[Callable[[str, int, AnyStr], None]] = None,
) -> int:
"""
Open a file "beneath" a given directory.
This function guarantees that no ``..`` component in ``path``, or in a symbolic link encountered
in resolving ``path``, will ever be allowed to escape the "root" directory specified by
``dir_fd``. (In very specific circumstances, race conditions may allow multiple ``..``
components in a row to cause ``open_beneath()`` to temporarily leave the directory in question,
but it will check for such an escape before continuing and resolving any non-``..`` components).
Currently, ``open_beneath()`` is able to take advantage of OS-specific path resolution features
on the following platforms:
- Linux 5.6+
The ``path``, ``flags``, and ``mode`` arguments are as for ``os.open(...)``.
If ``dir_fd`` is given and not ``None``, it is used to determine the directory relative to which
paths will be resolved. Otherwise, the current working directory is used.
``path`` can be an absolute path, or it can contain references to symlinks that target absolute
paths. In either case, the path is interpreted as if the process had ``chroot()``ed to the
directory referenced by ``dir_fd`` (or the current working directory, as described above).
If ``no_symlinks`` is True, no symlinks will be allowed during resolution of the path.
If ``audit_func`` is not ``None``, it indicates a function that will be called to "audit"
components of the path as they are resolved. The function will be called with three arguments:
a "description" string indicating the context, a file descriptor referring to the most recently
resolved directory, and a path whose meaning depends on the "description". The following
"descriptions" are currently used (though more may be added):
- ``"before"``: This is called at each stage of the path resolution, just before the next
component is resolved. In this case, the third argument is the component that is about to be
resolved (which may be ``/`` or ``..``).
- ``"symlink"``: This is called immediately after encountering a symbolic link. In this case,
the third argument is the target of the symlink that was encountered.
The function should NOT perform any operations on the given file descriptor, or behavior is
undefined. Additionally, it should always return ``None``; other return values may have special
meanings in the future.
If an exception is raised in an ``audit_func``, ``open_beneath()`` will clean up properly and
pass the exception up to the caller.
Here is an example ``audit_func`` that blocks ``..`` components in symlinks::
def audit(desc, cur_fd, path):
if desc == "symlink":
while path:
path, part = os.path.split(path.rstrip("/"))
if part == "..":
raise RuntimeError("'..' component encountered")
If ``remember_parents`` is True, it triggers an alternate escape prevention strategy. This flag
makes ``open_beneath()`` retain open file descriptors to all of the directories it has
previously seen. This allows it to simply rewind back to those directories when encountering a
``..`` element, instead of having to perform potentially inefficient escape detection. (By
default, after a series of ``..`` elements, ``open_beneath()`` has to check that the current
directory is still contained within the "root".)
This is more efficient, but it requires a large number of file descriptors, and a malicious
attacker in control of the specified ``path`` *or* the filesystem could easily cause
``open_beneath()`` to exhaust all the available file descriptors. Use with caution!
Note: If ``open_beneath`` is able to take advantage of OS-specific path resolution features,
then ``remember_parents`` is ignored.
"""
path = os.fspath(path)
flags |= os.O_NOCTTY
if audit_func is None and _try_open_beneath is not None:
fd = _try_open_beneath(path, flags, mode=mode, dir_fd=dir_fd, no_symlinks=no_symlinks)
if fd is not None:
return fd
slash: AnyStr
dot: AnyStr
if isinstance(path, bytes):
slash = b"/"
dot = b"."
else:
slash = "/"
dot = "."
# We need a file descriptor that won't move (the current directory might) that we can use to
# perform lookups from.
new_dir_fd = os.open(".", DIR_OPEN_FLAGS) if dir_fd is None else dir_fd
try:
return _open_beneath(
path,
new_dir_fd,
flags,
mode,
no_symlinks,
slash=slash,
dot=dot,
remember_parents=remember_parents,
audit_func=audit_func,
)
finally:
if new_dir_fd != dir_fd:
os.close(new_dir_fd)
| 19,640
|
def train():
"""Train CIFAR-10/100 for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [], dtype=tf.int32,
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (cifar.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
(FLAGS.batch_size * FLAGS.num_gpus))
decay_steps = int(num_batches_per_epoch * cifar.NUM_EPOCHS_PER_DECAY)
lr = learning_rate_fn(num_batches_per_epoch, global_step)
if FLAGS.alt_optimizer != '':
# Create an alternate optimizer
opt = alt_optimizer(lr, FLAGS.alt_optimizer)
else:
# Create an optimizer that performs gradient descent.
opt = tf.train.GradientDescentOptimizer(lr)
# Calculate the gradients for each model tower.
tower_grads = []
tower_losses = []
tower_images = []
tower_labels = []
tower_images_pl = []
tower_labels_pl = []
with tf.variable_scope(tf.get_variable_scope()):
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (cifar.TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the CIFAR model. This function
# constructs the entire CIFAR model but shares the variables across
# all towers.
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
loss, images, labels, images_pl, labels_pl, precision = cifar_common.tower_loss(scope)
tower_losses.append(loss)
tower_images.append(images)
tower_labels.append(labels)
tower_images_pl.append(images_pl)
tower_labels_pl.append(labels_pl)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Retain the Batch Normalization updates operations only from the
# final tower. Ideally, we should grab the updates from all towers
# but these stats accumulate extremely fast so we can ignore the
# other stats from the other towers without significant detriment.
batchnorm_updates = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = cifar_common.average_gradients(tower_grads)
loss = tf.add_n(tower_losses)
loss = tf.divide(loss, FLAGS.num_gpus)
# Add a summary to track the learning rate.
summaries.append(tf.summary.scalar('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
batchnorm_updates_op = tf.group(*batchnorm_updates)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
cifar.MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables() +
tf.moving_average_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op, batchnorm_updates_op)
# Create a saver.
saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
max_steps = int(FLAGS.num_epochs * num_batches_per_epoch)
print('Max Training Steps: ', max_steps)
for step in xrange(max_steps):
start_time = time.time()
_, loss_value, lrate = sess.run([train_op, loss, lr])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = ('%s: step %d, loss = %.2f, lrate = %.4f, (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), step, loss_value, lrate,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
return loss_value
| 19,641
|
def find_offset(
ax: Numbers, ay: Numbers, bx: Numbers, by: Numbers, upscale: bool = True
) -> float:
"""Finds value, by which the spectrum should be shifted along x-axis to best
overlap with the first spectrum. If resolution of spectra is not identical,
one of them will be interpolated to match resolution of the other one. By default
interpolation is done on the lower-resolution spectra. This can be changed
by passing ``upscale = False`` to function call.
Parameters
----------
ax
Abscissa of the first spectrum.
ay
Values of the first spectrum.
bx
Abscissa of the second spectrum.
by
Values of the second spectrum.
upscale
If interpolation should be done on more loosely spaced spectrum (default).
When set to False, spectrum with lower resolution will be treated as reference
for density of data points.
Returns
-------
float
Value, by which second spectrum should be shifted, in appropriate units.
"""
ax, ay, bx, by = unify_abscissa(ax, ay, bx, by, upscale=upscale)
shift = idx_offset(ay, by)
if shift < 0:
offset = ax[0] - bx[abs(shift)]
else:
offset = ax[shift] - bx[0]
return offset
| 19,642
|
def And(*xs, simplify=True):
"""Expression conjunction (product, AND) operator
If *simplify* is ``True``, return a simplified expression.
"""
xs = [Expression.box(x).node for x in xs]
y = exprnode.and_(*xs)
if simplify:
y = y.simplify()
return _expr(y)
| 19,643
|
def test_lif_builtin(rng):
"""Test that the dynamic model approximately matches the rates."""
dt = 1e-3
t_final = 1.0
N = 10
lif = nengo.LIF()
gain, bias = lif.gain_bias(
rng.uniform(80, 100, size=N), rng.uniform(-1, 1, size=N))
x = np.arange(-2, 2, .1).reshape(-1, 1)
J = gain * x + bias
voltage = np.zeros_like(J)
reftime = np.zeros_like(J)
spikes = np.zeros((int(t_final / dt),) + J.shape)
for i, spikes_i in enumerate(spikes):
lif.step_math(dt, J, spikes_i, voltage, reftime)
math_rates = lif.rates(x, gain, bias)
sim_rates = spikes.mean(0)
assert np.allclose(sim_rates, math_rates, atol=1, rtol=0.02)
| 19,644
|
def get_assignment_submissions(course_id, assignment_id):
""" return a list of submissions for an assignment """
return api.get_list('courses/{}/assignments/{}/submissions'.format(course_id, assignment_id))
| 19,645
|
def send_email(to, content=None, title=None, mail_from=None,
attach=None, cc=None, bcc=None, text=None, html=None, headers=None):
"""
    :param to: recipient(s), e.g. 'linda@gmail.com' or 'linda@gmail.com, tom@gmail.com' or ['linda@gmail.com, tom@gmail.com']
    :param content: email body, plain text or HTML, str
    :param title: email subject, str or list
    :param mail_from: sender, str
    :param attach: list of attachments, e.g. ["@/tmp/test.txt"]
    :param cc: carbon-copy recipients, same format as `to`
    :param bcc: blind-carbon-copy recipients, same format as `to`
    :param text: plain-text body, str
    :param html: HTML body, str
    :param headers: additional MIME header attributes, dict
    :return: {} dict on success
"""
arg_dict = dict()
if isinstance(to, list):
to = ', '.join(to)
arg_dict['to'] = to
if isinstance(cc, list):
cc = ', '.join(cc)
arg_dict['cc'] = cc
if isinstance(bcc, list):
bcc = ', '.join(bcc)
arg_dict['bcc'] = bcc
if isinstance(title, list):
title = ''.join(title)
arg_dict['title'] = title
arg_dict['mail_from'] = mail_from
arg_dict['content'] = content
arg_dict['attach'] = attach
arg_dict['text'] = text
arg_dict['html'] = html
arg_dict['headers'] = headers or {}
e = Email()
msg = e.build_email(arg_dict)
return e.send_email(msg)
| 19,646
|
def get_abbreviation(res_type, abbr):
"""
Returns abbreviation value from data set
@param res_type: Resource type (html, css, ...)
@type res_type: str
@param abbr: Abbreviation name
@type abbr: str
@return dict, None
"""
return get_settings_resource(res_type, abbr, 'abbreviations')
| 19,647
|
def get_stock_list(month_before=12, trade_date='20200410', delta_price=(10, 200), total_mv=50, pe_ttm=(10, 200)):
"""
    month_before : get the stock list of all companies that were listed n months ago;
                   by default, the list of companies listed one year ago
    delta_price : used to drop stocks whose price is greater than delta_price; no filtering if None
    TIPS : delta_price is compared against today's stock price
"""
stock_list = pro.stock_basic(exchange='', list_status='L', fields='ts_code,name,market,list_date')
    # Remove ChiNext (创业板) and STAR Market (科创板) stocks
stock_list1 = stock_list[~stock_list['market'].isin(["科创板","创业板"])].reset_index(drop=True)
    # Remove ST, bank (银行) and securities (证券) stocks
index_list = []
for i in range(len(stock_list1)):
if '银行' in stock_list1.iloc[i]['name'] \
or 'ST' in stock_list1.iloc[i]['name'] \
or '证券' in stock_list1.iloc[i]['name'] :
index_list.append(i)
for i in index_list:
stock_list1 = stock_list1.drop(i)
stock_list1 = stock_list1.reset_index(drop=True)
    # Remove stocks that have been listed for less than one year (default)
delta_date = date_util.get_date_months_before(month_before)
stock_list2 = stock_list1[stock_list1["list_date"] <= delta_date].reset_index(drop=True)
stock_list = stock_list2.drop(['market', 'list_date'], axis=1)
    # Remove companies whose market cap is below total_mv * 100 million yuan
if total_mv is not None:
for i in range(len(stock_list)):
try:
df = pro.daily_basic(ts_code=stock_list["ts_code"][i], \
trade_date=trade_date, fields='ts_code,total_mv')
stock_list.loc[i, "total_mv"] = df.loc[0, "total_mv"] if df.empty is False else 0
except:
time.sleep(3)
stock_list = stock_list[stock_list["total_mv"] > total_mv * 10000].reset_index(drop=True)
    # Remove companies whose pe_ttm is None or outside the given range
if pe_ttm is not None:
for i in range(len(stock_list)):
try:
df = pro.daily_basic(ts_code=stock_list["ts_code"][i], \
trade_date=trade_date, fields='ts_code,pe_ttm')
stock_list.loc[i, "pe_ttm"] = df.loc[0, "pe_ttm"] if df.empty is False else None
except:
time.sleep(3)
stock_list = stock_list[stock_list['pe_ttm'] > pe_ttm[0]].reset_index(drop=True)
stock_list = stock_list[stock_list['pe_ttm'] < pe_ttm[1]].dropna().reset_index(drop=True)
    # Remove stocks whose price at trade_date is outside the given range
if delta_price is not None:
stock_list['price'] = np.zeros(len(stock_list))
for i in range(len(stock_list)):
stock_code = stock_list.iloc[i]["ts_code"]
try:
current_df = ts.pro_bar(ts_code=stock_code, adj='qfq',
start_date=trade_date, end_date=trade_date)
if current_df.empty:
continue
stock_list.loc[i, "price"] = (current_df.loc[0, "close"] + current_df.loc[0, "pre_close"]) / 2
except:
time.sleep(3)
stock_list = stock_list[stock_list['price'] > delta_price[0]].reset_index(drop=True)
stock_list = stock_list[stock_list['price'] < delta_price[1]].reset_index(drop=True)
stock_list.to_csv("./data_pulled/stock_date_delta_price{}.csv".format(delta_price), index=False)
return stock_list
| 19,648
|
def dim_axis_label(dimensions, separator=', '):
"""
Returns an axis label for one or more dimensions.
"""
if not isinstance(dimensions, list): dimensions = [dimensions]
return separator.join([d.pprint_label for d in dimensions])
| 19,649
|
def get_random_idx(k: int, size: int) -> np.ndarray:
"""
Get `k` random values of a list of size `size`.
:param k: number or random values
:param size: total number of values
:return: list of `k` random values
"""
return (np.random.rand(k) * size).astype(int)
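# --- Usage sketch (added; not part of the original snippet) ---
# The annotation above already requires numpy imported as `np` at module top;
# it is repeated here so the sketch stands alone.
import numpy as np

if __name__ == "__main__":
    np.random.seed(0)
    print(get_random_idx(5, 100))  # five random indices in [0, 100)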
| 19,650
|
def bitmask_8bit(array, pad_value=None):
"""Return 8-bit bitmask for cardinal and diagonal neighbours."""
shape, padded = shape_padded(array, pad_value=pad_value)
cardinals = get_cardinals(shape, padded)
diagonals = get_diagonals(shape, padded)
# TODO: https://forum.unity.com/threads/2d-tile-bitmasking.513840/#post-3366221
bitmask = cardinals + (diagonals << 4)
return bitmask
| 19,651
|
def fhir_search_path_meta_info(path: str) -> Union[tuple, NoneType]:
""" """
resource_type = path.split(".")[0]
properties = path.split(".")[1:]
model_cls = resource_type_to_resource_cls(resource_type)
result = None
for prop in properties:
for (
name,
jsname,
typ,
is_list,
of_many,
not_optional,
) in model_cls().elementProperties():
if prop != name:
continue
if typ not in (int, float, bool, str):
model_cls = typ
result = (jsname, is_list, of_many)
break
return result
| 19,652
|
def _is_camel_case_ab(s, index):
"""Determine if the index is at 'aB', which is the start of a camel token.
For example, with 'workAt', this function detects 'kA'."""
return index >= 1 and s[index - 1].islower() and s[index].isupper()
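# --- Usage sketch (added; not part of the original snippet) ---
if __name__ == "__main__":
    s = "workAt"
    # Only index 4 (the 'A' of 'kA') starts a camel token.
    print([i for i in range(len(s)) if _is_camel_case_ab(s, i)])  # [4]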
| 19,653
|
def global_ox_budget(
devstr,
devdir,
devrstdir,
year,
dst='./1yr_benchmark',
is_gchp=False,
overwrite=True,
spcdb_dir=None
):
"""
    Main program to compute the global Ox budget.
    Arguments:
        devstr: str
            Denotes the "Dev" benchmark version.
        devdir: str
            Directory containing the "Dev" diagnostic output files.
        devrstdir: str
            Directory containing the "Dev" restart files.
        year: int
            The year of the benchmark simulation (e.g. 2016).
Keyword Args (optional):
dst: str
Directory where budget tables will be created.
Default value: './1yr_benchmark'
is_gchp: bool
Denotes if data is from GCHP (True) or GCC (false).
Default value: False
overwrite: bool
Denotes whether to ovewrite existing budget tables.
Default value: True
spcdb_dir: str
Directory where species_database.yml is stored.
Default value: GCPy directory
"""
# Store global variables in a private class
globvars = _GlobVars(
devstr,
devdir,
devrstdir,
year,
dst,
is_gchp,
overwrite,
spcdb_dir
)
# ==================================================================
# Compute Ox budget [Tg a-1]
# ==================================================================
# Mass from initial & final restart file
mass = init_and_final_mass(
globvars,
["O3", "Ox"]
)
# Sources and sinks
prodloss = annual_average_prodloss(
globvars
)
wetdep = annual_average_wetdep(
globvars
)
drydep = annual_average_drydep(
globvars
)
# Dynamics, net Ox, lifetime in days
metrics = annual_metrics(
globvars,
mass,
prodloss,
wetdep,
drydep
)
# ==================================================================
# Print budgets to file
# ==================================================================
print_budget(
globvars,
mass,
prodloss,
wetdep,
drydep,
metrics
)
| 19,654
|
def test_xgb_boston(tmpdir, model_format, objective):
# pylint: disable=too-many-locals
"""Test XGBoost with Boston data (regression)"""
X, y = load_boston(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=False)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
param = {'max_depth': 8, 'eta': 1, 'silent': 1, 'objective': objective}
num_round = 10
xgb_model = xgb.train(param, dtrain, num_boost_round=num_round,
evals=[(dtrain, 'train'), (dtest, 'test')])
if model_format == 'json':
model_name = 'boston.json'
model_path = os.path.join(tmpdir, model_name)
xgb_model.save_model(model_path)
tl_model = treelite.Model.load(filename=model_path, model_format='xgboost_json')
else:
tl_model = treelite.Model.from_xgboost(xgb_model)
out_pred = treelite.gtil.predict(tl_model, X_test)
expected_pred = xgb_model.predict(dtest)
np.testing.assert_almost_equal(out_pred, expected_pred, decimal=5)
| 19,655
|
def create_folder():
"""
    This function creates the required empty folders at startup.
    :return: folder status as boolean
"""
folder_flag = 0
list_of_folders = os.listdir(SOURCE_DIR)
for i in ["doc", "image", "output", "font"]:
if i not in list_of_folders:
os.mkdir(i)
folder_flag += 1
if i == "doc":
file = open(os.path.join(DOC_DIR, "index.txt"), "w")
if read_lorem() is None:
file.write("This is For First Page . . .")
else:
file.write(read_lorem())
file.close()
return bool(folder_flag)
| 19,656
|
def create_bulleted_tool_list(tools):
"""
Helper function that returns a text-based bulleted list of the given tools.
Args:
tools (OrderedDict): The tools whose names (the keys) will be added to the
text-based list.
Returns:
str: A bulleted list of tool names.
"""
return TOOL_LIST_HEADER + create_bulleted_list(tools.keys())
| 19,657
|
def _c3_merge(sequences, cls, context):
"""Merges MROs in *sequences* to a single MRO using the C3 algorithm.
Adapted from http://www.python.org/download/releases/2.3/mro/.
"""
result = []
while True:
sequences = [s for s in sequences if s] # purge empty sequences
if not sequences:
return result
for s1 in sequences: # find merge candidates among seq heads
candidate = s1[0]
for s2 in sequences:
if candidate in s2[1:]:
candidate = None
break # reject the current head, it appears later
else:
break
if not candidate:
# Show all the remaining bases, which were considered as
# candidates for the next mro sequence.
raise exceptions.InconsistentMroError(
message="Cannot create a consistent method resolution order "
"for MROs {mros} of class {cls!r}.",
mros=sequences, cls=cls, context=context)
result.append(candidate)
# remove the chosen candidate
for seq in sequences:
if seq[0] == candidate:
del seq[0]
| 19,658
|
def rgb2hex(rgb: tuple) -> str:
"""
Converts RGB tuple format to HEX string
:param rgb:
:return: hex string
"""
return '#%02x%02x%02x' % rgb
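# --- Usage sketch (added; not part of the original snippet) ---
if __name__ == "__main__":
    print(rgb2hex((255, 0, 128)))  # '#ff0080'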
| 19,659
|
def min_var_portfolio(cov_mat, allow_short=False):
"""
Computes the minimum variance portfolio.
Note: As the variance is not invariant with respect
to leverage, it is not possible to construct non-trivial
market neutral minimum variance portfolios. This is because
the variance approaches zero with decreasing leverage,
i.e. the market neutral portfolio with minimum variance
is not invested at all.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
Returns
-------
weights: pandas.Series
Optimal asset weights.
"""
if not isinstance(cov_mat, pd.DataFrame):
raise ValueError("Covariance matrix is not a DataFrame")
n = len(cov_mat)
P = opt.matrix(cov_mat.values)
q = opt.matrix(0.0, (n, 1))
# Constraints Gx <= h
if not allow_short:
# x >= 0
G = opt.matrix(-np.identity(n))
h = opt.matrix(0.0, (n, 1))
else:
G = None
h = None
# Constraints Ax = b
# sum(x) = 1
A = opt.matrix(1.0, (1, n))
b = opt.matrix(1.0)
# Solve
optsolvers.options['show_progress'] = False
sol = optsolvers.qp(P, q, G, h, A, b)
if sol['status'] != 'optimal':
warnings.warn("Convergence problem")
# Put weights into a labeled series
weights = pd.Series(sol['x'], index=cov_mat.index)
return weights
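# --- Usage sketch (added; not part of the original snippet) ---
# Assumes the imports the function above relies on: pandas as pd, numpy as np,
# warnings, and cvxopt bound to the names `opt` / `optsolvers`.
import warnings

import cvxopt as opt
import cvxopt.solvers as optsolvers
import numpy as np
import pandas as pd

if __name__ == "__main__":
    # A toy 3-asset covariance matrix; the lowest-variance asset receives the
    # largest weight in the long-only minimum variance portfolio.
    cov = pd.DataFrame(
        [[0.040, 0.006, 0.000],
         [0.006, 0.090, 0.010],
         [0.000, 0.010, 0.160]],
        index=list("ABC"), columns=list("ABC"),
    )
    print(min_var_portfolio(cov))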
| 19,660
|
def validate(doc,method):
"""checks if the mobile number is unique if email and mobile number are same then it allows to save the customer """
doc.date=datetime.datetime.now()
points_earned=0
points_consumed=0
total_points=0
remaining_points=0
if doc.get("points_table"):
for raw in doc.get("points_table"):
# if raw.points_earned:
if raw.status=="Active" or raw.status=="Partially Consumed":
remaining_points += raw.remaining_points
# points_earned+=int(raw.points_earned)
# else:
# raw.points_earned=0
# points_consumed+=int(raw.points_consumed)
# doc.total_points=points_earned - points_consumed
#self.pos_customer_id=self.name
doc.total_points=remaining_points
| 19,661
|
def relu(fd: DahliaFuncDef) -> str:
"""tvm.apache.org/docs/api/python/relay/nn.html#tvm.relay.nn.relu"""
data, res = fd.args[0], fd.dest
num_dims = get_dims(data.comp)
args = data.comp.args
indices = ""
var_name = CHARACTER_I
for _ in range(num_dims):
indices += f'[{var_name}]'
var_name = next_character(var_name)
data_type = fd.data_type
zero = f'({"0.0" if "fix" in data_type else "0"} as {data_type})'
input = f'{data.id.name}{indices}'
result = f'{res.id.name}{indices}'
loop_body = f"""if ({input} > {zero}) {{ {result} := {input}; }}
else {{ {result} := {zero}; }}"""
return emit_dahlia_definition(
fd,
emit_dahlia_loop(data, loop_body)
)
| 19,662
|
def detect_on_image(img_path, img_is_path=False, threshold=0.5, rect_th=1, text_th=1, text_size=1):
"""
img_path: absolute path, or an RGB tensor.
threshold: determines minimum confidence in order to consider prediction.
img_is_path: toggles if img_path is an absolute path or an RGB tensor.
"""
if img_is_path:
img = Image.open(img_path).convert("RGB")
else:
img = img_path
img = np.array(img)
#pointer to transformation function
#after transforming into pytorch tensor, puts it into composition
transform = T.Compose([T.ToTensor()])
    # applies transformations, sends image to gpu defined on device 'cuda'
#forward pass, gets network output
pred = model([transform(img).cuda()])
#accesses the network prediction scores, detaches it, brings it to CPU and converts it into np array
pred_scores = list(pred[0]['scores'].detach().cpu().numpy())
#list of indices of every score above threshold
pred_t_list = [pred_scores.index(x) for x in pred_scores if x > threshold]
#index of the worst acceptable prediction score
if len(pred_t_list) == 0:
return None, None
pred_t = pred_t_list[-1]
masks = (pred[0]['masks'] > 0.5).squeeze().detach().cpu().numpy()
#gets the coco categories names of labels
pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].cpu().numpy())]
#list of tuples with x and y coordinates for boxes to be drawn
pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().cpu().numpy())]
#BUG: what if the worst is the last
masks = masks[:pred_t+1]
pred_boxes = pred_boxes[:pred_t+1]
pred_class = pred_class[:pred_t+1]
#RETURNED THIS::: masks, pred_boxes, pred_class, pred_scores[:pred_t+1]
pred_scores = pred_scores[:pred_t+1]
# for i in range(len(masks)):
# #rgb_mask = random_colour_masks(masks[i])
# if len(masks[i].shape) < 2:
# continue
# rgb_mask = get_coco_category_color_mask(masks[i], pred_class[i])
# img = cv2.addWeighted(img, 1, rgb_mask, 0.5, 0)
# img = cv2.rectangle(img, pred_boxes[i][0], pred_boxes[i][1], color=(0, 255, 0), thickness=rect_th)
# img = cv2.putText(img, f"{pred_class[i]}: {pred_scores[i]:.2f} >= {threshold:.2f}",
# pred_boxes[i][0], cv2.FONT_HERSHEY_SIMPLEX,
# text_size, (0, 255, 0), thickness=text_th)
person_pred_boxes = []
for idx, box in enumerate(pred_boxes):
if pred_class[idx] == 'person' and pred_scores[idx] >= threshold:
person_pred_boxes.append(box)
return person_pred_boxes
| 19,663
|
def base_hillclimb(base_sol: tuple, neighbor_method: str, max_fevals: int, searchspace: Searchspace, all_results, kernel_options, tuning_options, runner, restart=True, randomize=True, order=None):
""" Hillclimbing search until max_fevals is reached or no improvement is found
Base hillclimber that evaluates neighbouring solutions in a random or fixed order
and possibly immediately moves to the neighbour if it is an improvement.
:params base_sol: Starting position for hillclimbing
:type base_sol: list
:params neighbor_method: Method to use to select neighboring parameter configurations to visit
during hillclimbing, either "Hamming", "strictly-adjacent" or "adjacent" are supported.
:type neighbor_method: string
:params max_fevals: Maximum number of unique function evaluations that is allowed
during the search.
:type max_fevals: int
:params searchspace: The searchspace object.
:type searchspace: Seachspace
:params all_results: List of dictionaries with all benchmarked configurations
:type all_results: list(dict)
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
    :params restart: Boolean that controls whether to greedily restart hillclimbing
from a new position as soon as an improved position is found. True by default.
:type restart: bool
:params randomize: Boolean that controls whether the dimensions of the tunable
parameters are randomized.
:type randomize: bool
:params order: Fixed order among the dimensions of the tunable parameters are
to be evaluated by the hillclimber.
:type order: list
:returns: The final position that was reached when hillclimbing halted.
:rtype: list
"""
if randomize and order:
raise ValueError("Using a preset order and randomize at the same time is not supported.")
tune_params = tuning_options.tune_params
# measure start point score
best_score = _cost_func(base_sol, kernel_options, tuning_options, runner, all_results, check_restrictions=False)
found_improved = True
while found_improved:
child = list(base_sol[:])
found_improved = False
current_results = []
vals = list(tune_params.values())
if order is None:
indices = list(range(len(vals)))
else:
indices = order
if randomize:
random.shuffle(indices)
# in each dimension see the possible values
for index in indices:
neighbors = searchspace.get_param_neighbors(tuple(child), index, neighbor_method, randomize)
# for each value in this dimension
for val in neighbors:
orig_val = child[index]
child[index] = val
# get score for this position
score = _cost_func(child, kernel_options, tuning_options, runner, current_results, check_restrictions=False)
# generalize this to other tuning objectives
if score < best_score:
best_score = score
base_sol = child[:]
found_improved = True
if restart:
break
else:
child[index] = orig_val
fevals = len(tuning_options.unique_results)
if fevals >= max_fevals:
all_results += current_results
return base_sol
if found_improved and restart:
break
# append current_results to all_results
all_results += current_results
return base_sol
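
# Illustrative sketch only: a self-contained toy version of the same greedy-restart
# hillclimbing pattern, without kernel_tuner's Searchspace/runner/_cost_func machinery.
# The cost function and neighbour generator below are assumptions for demonstration.
def toy_hillclimb(start, cost, neighbors, max_fevals=200, restart=True):
    best = list(start)
    best_score = cost(best)
    fevals = 1
    improved = True
    while improved and fevals < max_fevals:
        improved = False
        for cand in neighbors(best):
            score = cost(cand)
            fevals += 1
            if score < best_score:  # lower is better, as in base_hillclimb
                best, best_score = list(cand), score
                improved = True
                if restart:  # greedily restart from the improved position
                    break
            if fevals >= max_fevals:
                return best
    return best

# Example: minimise a simple quadratic over integer parameters; converges towards [3, -1].
_cost = lambda p: (p[0] - 3) ** 2 + (p[1] + 1) ** 2
_neigh = lambda p: [[p[0] + dx, p[1] + dy] for dx in (-1, 0, 1) for dy in (-1, 0, 1) if (dx, dy) != (0, 0)]
print(toy_hillclimb([0, 0], _cost, _neigh))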
| 19,664
|
def filter_dates(dates):
"""filter near dates"""
j = 0
while j < len(dates):
date = dates[j]
i = 3
j += 1
while True:
date += timedelta(days=1)
if date in dates:
i += 1
else:
if i > 2:
del dates[j:j+i-1]
break
return dates
| 19,665
|
def tar_and_gzip(dir, out_path, filter=None, prefix=''):
"""Tar and gzip the given *dir* to a tarball at *out_path*.
If we encounter symlinks, include the actual file, not the symlink.
:type dir: str
:param dir: dir to tar up
:type out_path: str
    :param out_path: where to write the tarball to
    :type filter: function
    :param filter: if defined, a function that takes paths (relative to *dir*) and returns ``True`` if we should keep them
:type prefix: str
:param prefix: subdirectory inside the tarball to put everything into (e.g. ``'mrjob'``)
"""
if not os.path.isdir(dir):
raise IOError('Not a directory: %r' % (dir,))
if not filter:
filter = lambda path: True
# supposedly you can also call tarfile.TarFile(), but I couldn't
# get this to work in Python 2.5.1. Please leave as-is.
tar_gz = tarfile.open(out_path, mode='w:gz')
for dirpath, dirnames, filenames in os.walk(dir):
for filename in filenames:
path = os.path.join(dirpath, filename)
# janky version of os.path.relpath() (Python 2.6):
rel_path = path[len(os.path.join(dir, '')):]
if filter(rel_path):
# copy over real files, not symlinks
real_path = os.path.realpath(path)
path_in_tar_gz = os.path.join(prefix, rel_path)
tar_gz.add(real_path, arcname=path_in_tar_gz, recursive=False)
tar_gz.close()
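
# Hedged usage sketch: the directory and file names below are illustrative assumptions.
# Archive a project directory, skipping compiled files and logs, under a 'mrjob/' prefix.
tar_and_gzip(
    dir='my_project',
    out_path='my_project.tar.gz',
    filter=lambda rel_path: not rel_path.endswith(('.pyc', '.log')),
    prefix='mrjob',
)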
| 19,666
|
def biweight_location(a, c=6.0, M=None, axis=None, eps=1e-8):
"""
Copyright (c) 2011-2016, Astropy Developers
Compute the biweight location for an array.
Returns the biweight location for the array elements.
The biweight is a robust statistic for determining the central
location of a distribution.
The biweight location is given by the following equation
.. math::
        C_{bl} = M + \\frac{\\sum_{|u_i|<1} (x_i-M)(1-u_i^2)^2}
                 {\\sum_{|u_i|<1} (1-u_i^2)^2}
where M is the sample mean or if run iterative the initial guess,
and u_i is given by
.. math::
u_{i} = \\frac{(x_i-M)}{cMAD}
where MAD is the median absolute deviation.
For more details, see Beers, Flynn, and Gebhardt, 1990, AJ, 100, 32B
Parameters
----------
a : array-like
Input array or object that can be converted to an array.
c : float, optional
Tuning constant for the biweight estimator. Default value is 6.0.
M : float, optional
Initial guess for the biweight location.
axis : tuple, optional
        tuple of the integer axis values to calculate over. Should be sorted.
Returns
-------
biweight_location : float
Returns the biweight location for the array elements.
Examples
--------
This will generate random variates from a Gaussian distribution and return
the biweight location of the distribution::
>>> from utils import biweight_location
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> cbl = biweight_location(randvar)
See Also
--------
median_absolute_deviation, biweight_midvariance
Note
--------
Copy of the astropy function with the "axis" argument added appropriately.
"""
if M is None:
if isinstance(a, np.ma.MaskedArray):
func = np.ma.median
else:
a = np.array(a, copy=False)
func = np.median
M = func(a, axis=axis)
else:
a = np.array(a, copy=False)
N = M*1.
# set up the difference
if axis is not None:
for i in axis:
N = np.expand_dims(N, axis=i)
d = a - N
# set up the weighting
if axis is not None:
MAD = median_absolute_deviation(a, axis=axis)
for i in axis:
MAD = np.expand_dims(MAD, axis=i)
else:
MAD = median_absolute_deviation(a)
u = np.where(MAD < eps, 0., d / c / MAD)
# now remove the outlier points
    if isinstance(a, np.ma.MaskedArray):
        mask = (np.abs(u) < 1).astype(int) * (1 - a.mask.astype(int))
    else:
        mask = (np.abs(u) < 1).astype(int)
u = (1 - u ** 2) ** 2
return M + (d * u * mask).sum(axis=axis) / (u * mask).sum(axis=axis)
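
# Brief sketch of the added `axis` behaviour (synthetic data; assumes the companion
# median_absolute_deviation in this module also accepts a tuple `axis`):
# compute one robust centre per column of a 2-D array despite injected outliers.
data = np.random.randn(1000, 3)
data[:10, 0] += 50.0  # contaminate the first column with outliers
centers = biweight_location(data, axis=(0,))
print(centers.shape)  # -> (3,), one robust centre per column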
| 19,667
|
def update_copy(src, dest):
"""
Possibly copy `src` to `dest`. No copy unless `src` exists.
    Copy if `dest` does not exist, or the mtime of `dest` is older than
    that of `src`.
Returns: None
"""
if os.path.exists(src):
if (not os.path.exists(dest) or
os.path.getmtime(dest) < os.path.getmtime(src)):
shutil.copy(src, dest)
return None
| 19,668
|
def model_21(GPUS = 1):
""" one dense: 3000 """
model = Sequential()
    model.add(Convolution3D(60, kernel_size = (3, 3, 3), strides = (1, 1, 1), input_shape = (9, 9, 9, 20))) # 60 output filters, kernel_size is the moving window, input shape = (9, 9, 9, 20)
model.add(BatchNormalization())
model.add(Activation(activation = 'relu'))
model.add(Convolution3D(60, (3, 3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation = 'relu'))
model.add(Convolution3D(60, (3, 3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation = 'relu'))
model.add(Flatten()) # now our layers have been combined to one
    model.add(Dense(3000)) # 3000 nodes in the last hidden layer
model.add(BatchNormalization())
model.add(Activation(activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(20, activation = 'softmax')) # output layer has 20 possible classes (amino acids 0 - 19)
if GPUS >= 2:
model = multi_gpu_model(model, gpus=GPUS)
return model
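
# Minimal usage sketch (assumes the Keras imports used above are in scope):
# build the single-GPU variant and compile it for 20-class classification.
model = model_21(GPUS=1)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()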
| 19,669
|
def test_genomic_dup4(test_normalize, genomic_dup4_default,
genomic_dup4_rse_lse, genomic_dup4_free_text_default,
genomic_dup4_free_text_rse_lse):
"""Test that genomic duplication works correctly."""
q = "NC_000020.11:g.(?_30417576)_(31394018_?)dup" # 38
resp = test_normalize.normalize(q, "default")
assertion_checks(resp, genomic_dup4_default)
resp = test_normalize.normalize(q, "cnv")
assertion_checks(resp, genomic_dup4_default)
resp = test_normalize.normalize(q, "repeated_seq_expr")
assertion_checks(resp, genomic_dup4_rse_lse)
resp = test_normalize.normalize(q, "literal_seq_expr")
assertion_checks(resp, genomic_dup4_rse_lse)
q = "NC_000020.10:g.(?_29652252)_(29981821_?)dup" # 37
resp = test_normalize.normalize(q, "default")
assertion_checks(resp, genomic_dup4_default, ignore_id=True)
resp = test_normalize.normalize(q, "cnv")
assertion_checks(resp, genomic_dup4_default, ignore_id=True)
genomic_dup4_rse_lse.variation.definition = q
resp = test_normalize.normalize(q, "repeated_seq_expr")
assertion_checks(resp, genomic_dup4_rse_lse, ignore_id=True)
resp = test_normalize.normalize(q, "literal_seq_expr")
assertion_checks(resp, genomic_dup4_rse_lse, ignore_id=True)
# Free Text
for q in [
"PRPF8 g.(?_1577736)_(1587865_?)dup", # 37
"PRPF8 g.(?_1674442)_(1684571_?)dup" # 38
]:
resp = test_normalize.normalize(q, "default")
assertion_checks(resp, genomic_dup4_free_text_default, ignore_id=True)
resp = test_normalize.normalize(q, "cnv")
assertion_checks(resp, genomic_dup4_free_text_default, ignore_id=True)
genomic_dup4_rse_lse.variation.definition = q
resp = test_normalize.normalize(q, "repeated_seq_expr")
genomic_dup4_free_text_rse_lse.variation.definition = q
assertion_checks(resp, genomic_dup4_free_text_rse_lse, ignore_id=True)
resp = test_normalize.normalize(q, "literal_seq_expr")
assertion_checks(resp, genomic_dup4_free_text_rse_lse, ignore_id=True)
# Invalid
invalid_queries = [
"NC_000020.10:g.(?_29652252)_(63025530_?)dup",
"NC_000020.11:g.(?_29652252)_(64444169_?)dup",
"PRPF8 g.(?_1650628)_(1684571_?)dup"
]
assert_text_variation(invalid_queries, test_normalize)
| 19,670
|
def strWeekday(
date: str,
target: int,
after: bool = False,
) -> str:
"""
    Given an ISO string `date`, return the nearest `target` weekday.
    **Parameters**
    - `date`: The date around which the caller would like target searched.
    - `target`: Weekday number as in the `datetime` Standard Library Module.
    - `after`: If True, search for the nearest target weekday after `date`;
      otherwise search for the one before it. False by default.
    **Returns**
    The ISO YYYY-MM-DD string representation of the nearest given weekday.
"""
dtdate = pd.to_datetime(date)
if datetime.datetime.weekday(dtdate) != target:
if not after:
date = str(dtdate - pd.offsets.Week(weekday=target)).split(" ")[0]
else:
date = str(dtdate + pd.offsets.Week(weekday=target)).split(" ")[0]
return date
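
# Illustrative calls (dates chosen arbitrarily): weekday 4 is Friday in the
# datetime convention, so these should return the nearest Friday on either side.
print(strWeekday("2021-06-16", 4))              # expected "2021-06-11" (previous Friday)
print(strWeekday("2021-06-16", 4, after=True))  # expected "2021-06-18" (next Friday)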
| 19,671
|
def main():
"""Main runner
"""
if len(sys.argv) < 2:
exit_with_error("No file or folder specified.")
path = sys.argv[1]
compress_path(path)
| 19,672
|
def combine_bincounts_kernelweights(
xcounts, ycounts, gridsize, colx, coly, L, lenkernel, kernelweights, mid, binwidth
):
"""
This function combines the bin counts (xcounts) and bin averages (ycounts) with
kernel weights via a series of direct convolutions. As a result, binned
approximations to X'W X and X'W y, denoted by weigthedx and weigthedy, are computed.
Recall that the local polynomial curve estimator beta_ and its derivatives are
minimizers to a locally weighted least-squares problem. At each grid
point g = 1,..., M in the grid, beta_ is computed as the solution to the
linear matrix equation:
X'W X * beta_ = X'W y,
where W are kernel weights approximated by the Gaussian density function.
X'W X and X'W y are approximated by weigthedx and weigthedy,
which are the result of a direct convolution of bin counts (xcounts) and kernel
weights, and bin averages (ycounts) and kernel weights, respectively.
The terms "kernel" and "kernel function" are used interchangeably
throughout.
For more information see the documentation of the main function locpoly
under KernReg.locpoly.
Parameters
----------
    xcounts: np.ndarray
        1-D array of binned x-values ("bin counts") of length gridsize.
    ycounts: np.ndarray
        1-D array of binned y-values ("bin averages") of length gridsize.
    gridsize: int
        Number of equally-spaced grid points.
    colx: int
        Number of columns of output array weigthedx, i.e. the binned approximation to X'W X.
    coly: int
        Number of columns of output array weigthedy, i.e. the binned approximation to X'W y.
    lenkernel: int
        Length of 1-D array kernelweights.
    kernelweights: np.ndarray
        1-D array of length lenkernel containing
approximated weights for the Gaussian kernel
(W in the notation above).
L: int
Parameter defining the number of times the kernel function
has to be evaluated.
Note that L < N, where N is the total number of observations.
mid: int
Midpoint of kernelweights.
binwidth: float
Bin width.
Returns
-------
    weigthedx: np.ndarray
        Dimensions (gridsize, colx). Binned approximation to X'W X.
    weigthedy: np.ndarray
        Dimensions (gridsize, coly). Binned approximation to X'W y.
"""
weigthedx = np.zeros((gridsize, colx))
weigthedy = np.zeros((gridsize, coly))
for g in range(gridsize):
if xcounts[g] != 0:
for i in range(max(0, g - L - 1), min(gridsize, g + L)):
if 0 <= i <= gridsize - 1 and 0 <= g - i + mid - 1 <= lenkernel - 1:
fac_ = 1
weigthedx[i, 0] += xcounts[g] * kernelweights[g - i + mid - 1]
weigthedy[i, 0] += ycounts[g] * kernelweights[g - i + mid - 1]
for j in range(1, colx):
fac_ = fac_ * binwidth * (g - i)
weigthedx[i, j] += (
xcounts[g] * kernelweights[g - i + mid - 1] * fac_
)
if j < coly:
weigthedy[i, j] += (
ycounts[g] * kernelweights[g - i + mid - 1] * fac_
)
return weigthedx, weigthedy
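
# Small illustrative call; every number below is an assumption, not a value taken
# from locpoly. A 10-point grid, a Gaussian kernel evaluated at 2*L + 1 = 5 points,
# and colx=3 / coly=2 output columns.
gridsize, L, binwidth = 10, 2, 0.5
lenkernel, mid = 2 * L + 1, L + 1
kernelweights = np.exp(-0.5 * (np.arange(-L, L + 1) * binwidth) ** 2)
xcounts = np.random.poisson(5, gridsize).astype(float)
ycounts = 2.0 * xcounts + np.random.randn(gridsize)
wx, wy = combine_bincounts_kernelweights(
    xcounts, ycounts, gridsize, colx=3, coly=2, L=L,
    lenkernel=lenkernel, kernelweights=kernelweights, mid=mid, binwidth=binwidth)
print(wx.shape, wy.shape)  # -> (10, 3) (10, 2)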
| 19,673
|
def test_z_ratios_theory():
""" Test that the theoretical shear changes properly with redshift"""
nfw_1 = offset_nfw.NFWModel(cosmo, delta=200, rho='rho_c')
base = nfw_1.gamma_theory(1., 1.E14, 4, 0.1, 0.15)
new_z = numpy.linspace(0.15, 1.1, num=20)
new_gamma = nfw_1.gamma_theory(1, 1.E14, 4, 0.1, new_z)
new_gamma /= base
numpy.testing.assert_allclose(new_gamma, cosmo.angular_diameter_distance_z1z2(0.1, new_z)/cosmo.angular_diameter_distance_z1z2(0.1, 0.15)*cosmo.angular_diameter_distance(0.15)/cosmo.angular_diameter_distance(new_z))
base = nfw_1.kappa_theory(1., 1.E14, 4, 0.1, 0.15)
new_sigma = nfw_1.kappa_theory(1, 1.E14, 4, 0.1, new_z)
new_sigma /= base
numpy.testing.assert_allclose(new_sigma, cosmo.angular_diameter_distance_z1z2(0.1, new_z)/cosmo.angular_diameter_distance_z1z2(0.1, 0.15)*cosmo.angular_diameter_distance(0.15)/cosmo.angular_diameter_distance(new_z))
#TODO: do again, miscentered
| 19,674
|
def get_icon_for_group(group):
"""Get the icon for an AOVGroup."""
    # Group has a custom icon path, so use it.
if group.icon is not None:
return QtGui.QIcon(group.icon)
if isinstance(group, IntrinsicAOVGroup):
return QtGui.QIcon(":ht/rsc/icons/aovs/intrinsic_group.png")
return QtGui.QIcon(":ht/rsc/icons/aovs/group.png")
| 19,675
|
@contextlib.contextmanager  # requires `import contextlib`; needed so the cleanup runs on exit
def tempdir():
"""Creates and returns a temporary directory, deleting it on exit."""
path = tempfile.mkdtemp(suffix='bbroll')
try:
yield path
finally:
shutil.rmtree(path)
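
# Typical usage sketch, relying on the context-manager decorator above
# (assumes `os` is imported at module level):
with tempdir() as path:
    with open(os.path.join(path, 'scratch.txt'), 'w') as fh:
        fh.write('temporary data')
# the directory and everything inside it is removed here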
| 19,676
|
def close_and_commit(cur, conn):
"""Closes the cursor and connection used to query a db,
also commits the transaction.
Args:
        cur (cursor): the cursor used to retrieve results from the query
        conn (connection): the connection used to connect to the db DB_NAME
"""
cur.close()
conn.commit()
conn.close()
| 19,677
|
def listCtdProfilesJson(request):
""" Generates a JSON file containing a list of datasets and their properties. """
| 19,678
|
def kde(ctx, fxyz, design_matrix, use_atomic_descriptors, only_use_species,
prefix, savexyz, savetxt):
"""
Kernel density estimation using the design matrix.
This command function evaluated before the specific ones,
we setup the general stuff here, such as read the files.
"""
if not fxyz and design_matrix[0]:
return
if prefix is None: prefix = "ASAP-kde"
ctx.obj['kde_options'] = {'prefix': prefix,
'savexyz': savexyz,
'savetxt': savetxt,
'use_atomic_descriptors': use_atomic_descriptors,
'only_use_species': only_use_species
}
ctx.obj['asapxyz'], ctx.obj['design_matrix'], _ = read_xyz_n_dm(fxyz, design_matrix, use_atomic_descriptors,
only_use_species, False)
| 19,679
|
def unix_to_windows_path(path_to_convert, drive_letter='C'):
"""
For a string representing a POSIX compatible path (usually
starting with either '~' or '/'), returns a string representing an
equivalent Windows compatible path together with a drive letter.
Parameters
----------
path_to_convert : string
A string representing a POSIX path
drive_letter : string (Default : 'C')
A single character string representing the desired drive letter
Returns
-------
string
A string representing a Windows compatible path.
"""
if path_to_convert.startswith('~'):
path_to_convert = path_to_convert[1:]
if path_to_convert.startswith('/'):
path_to_convert = path_to_convert[1:]
path_to_convert = '{}{}{}'.format(drive_letter,
':\\',
path_to_convert).replace('/', '\\')
return path_to_convert
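
# Quick illustration of the conversion:
print(unix_to_windows_path('~/data/results.csv'))               # -> C:\data\results.csv
print(unix_to_windows_path('/opt/tool/bin', drive_letter='D'))  # -> D:\opt\tool\bin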
| 19,680
|
def pool_stats(ctx, poolid):
"""Get statistics about a pool"""
ctx.initialize_for_batch()
convoy.fleet.action_pool_stats(
ctx.batch_client, ctx.config, pool_id=poolid)
| 19,681
|
def recordview_create_values(
coll_id="testcoll", view_id="testview", update="RecordView", view_uri=None,
view_entity_type="annal:Test_default",
num_fields=4, field3_placement="small:0,12",
extra_field=None, extra_field_uri=None
):
"""
Entity values used when creating a record view entity
"""
view_values = (
{ 'annal:type': "annal:View"
, 'rdfs:label': "%s %s/%s"%(update, coll_id, view_id)
, 'rdfs:comment': "%s help for %s in collection %s"%(update, view_id, coll_id)
, 'annal:view_entity_type': view_entity_type
, 'annal:open_view': True
, 'annal:view_fields':
[ { 'annal:field_id': layout.FIELD_TYPEID+"/Entity_id"
, 'annal:field_placement': "small:0,12;medium:0,6"
}
, { 'annal:field_id': layout.FIELD_TYPEID+"/Entity_type"
, 'annal:field_placement': "small:0,12;medium:6,6"
}
, { 'annal:field_id': layout.FIELD_TYPEID+"/Entity_label"
, 'annal:field_placement': "small:0,12"
}
, { 'annal:field_id': layout.FIELD_TYPEID+"/Entity_comment"
# , 'annal:field_placement': field3_placement
}
]
})
if view_uri:
view_values['annal:uri'] = view_uri
if field3_placement:
view_values['annal:view_fields'][3]['annal:field_placement'] = field3_placement
if extra_field:
efd = (
{ 'annal:field_id': extra_field
, 'annal:field_placement': "small:0,12"
})
if extra_field_uri:
efd['annal:property_uri'] = extra_field_uri
view_values['annal:view_fields'].append(efd)
if num_fields == 0:
view_values['annal:view_fields'] = []
return view_values
| 19,682
|
def calculateZ(f, t2, a0, a1, a2=0, a3=0):
""" given the frequency array and the filter coefficients,
return Z(s) as a np.array()
"""
    s = np.array(f) * 2 * math.pi * 1j  # s = j*2*pi*f (Laplace variable on the jw axis)
z = (1 + s*t2)/(s*(a3*s**3 + a2*s**2 + a1*s + a0))
return z
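
# Illustrative call with made-up loop-filter coefficients (not a real design):
# evaluate |Z(s)| on a log-spaced frequency grid from 10 Hz to 1 MHz.
f = np.logspace(1, 6, 200)
z = calculateZ(f, t2=1e-4, a0=1.0, a1=2e-5)
print(np.abs(z)[:3])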
| 19,683
|
def get_team_project_default_permissions(team, project):
"""
Return team role for given project.
"""
perms = get_perms(team, project)
return get_role(perms, project) or ""
| 19,684
|
def update_secrets(newsecrets_ldb, secrets_ldb, messagefunc):
"""Update secrets.ldb
:param newsecrets_ldb: An LDB object that is connected to the secrets.ldb
of the reference provision
:param secrets_ldb: An LDB object that is connected to the secrets.ldb
of the updated provision
"""
messagefunc(SIMPLE, "Update of secrets.ldb")
reference = newsecrets_ldb.search(base="@MODULES", scope=SCOPE_BASE)
current = secrets_ldb.search(base="@MODULES", scope=SCOPE_BASE)
assert reference, "Reference modules list can not be empty"
if len(current) == 0:
# No modules present
delta = secrets_ldb.msg_diff(ldb.Message(), reference[0])
delta.dn = reference[0].dn
secrets_ldb.add(reference[0])
else:
delta = secrets_ldb.msg_diff(current[0], reference[0])
delta.dn = current[0].dn
secrets_ldb.modify(delta)
reference = newsecrets_ldb.search(expression="objectClass=top", base="",
scope=SCOPE_SUBTREE, attrs=["dn"])
current = secrets_ldb.search(expression="objectClass=top", base="",
scope=SCOPE_SUBTREE, attrs=["dn"])
hash_new = {}
hash = {}
listMissing = []
listPresent = []
empty = ldb.Message()
for i in range(0, len(reference)):
hash_new[str(reference[i]["dn"]).lower()] = reference[i]["dn"]
# Create a hash for speeding the search of existing object in the
# current provision
for i in range(0, len(current)):
hash[str(current[i]["dn"]).lower()] = current[i]["dn"]
for k in hash_new.keys():
        if k not in hash:
listMissing.append(hash_new[k])
else:
listPresent.append(hash_new[k])
for entry in listMissing:
reference = newsecrets_ldb.search(expression="distinguishedName=%s" % entry,
base="", scope=SCOPE_SUBTREE)
current = secrets_ldb.search(expression="distinguishedName=%s" % entry,
base="", scope=SCOPE_SUBTREE)
delta = secrets_ldb.msg_diff(empty, reference[0])
for att in hashAttrNotCopied:
delta.remove(att)
messagefunc(CHANGE, "Entry %s is missing from secrets.ldb" %
reference[0].dn)
for att in delta:
messagefunc(CHANGE, " Adding attribute %s" % att)
delta.dn = reference[0].dn
secrets_ldb.add(delta)
for entry in listPresent:
reference = newsecrets_ldb.search(expression="distinguishedName=%s" % entry,
base="", scope=SCOPE_SUBTREE)
current = secrets_ldb.search(expression="distinguishedName=%s" % entry, base="",
scope=SCOPE_SUBTREE)
delta = secrets_ldb.msg_diff(current[0], reference[0])
for att in hashAttrNotCopied:
delta.remove(att)
for att in delta:
if att == "name":
messagefunc(CHANGE, "Found attribute name on %s,"
" must rename the DN" % (current[0].dn))
identic_rename(secrets_ldb, reference[0].dn)
else:
delta.remove(att)
for entry in listPresent:
reference = newsecrets_ldb.search(expression="distinguishedName=%s" % entry, base="",
scope=SCOPE_SUBTREE)
current = secrets_ldb.search(expression="distinguishedName=%s" % entry, base="",
scope=SCOPE_SUBTREE)
delta = secrets_ldb.msg_diff(current[0], reference[0])
for att in hashAttrNotCopied:
delta.remove(att)
for att in delta:
if att == "msDS-KeyVersionNumber":
delta.remove(att)
if att != "dn":
messagefunc(CHANGE,
"Adding/Changing attribute %s to %s" %
(att, current[0].dn))
delta.dn = current[0].dn
secrets_ldb.modify(delta)
res2 = secrets_ldb.search(expression="(samaccountname=dns)",
scope=SCOPE_SUBTREE, attrs=["dn"])
if len(res2) == 1:
messagefunc(SIMPLE, "Remove old dns account")
secrets_ldb.delete(res2[0]["dn"])
| 19,685
|
def readPlistFromResource(path, restype='plst', resid=0):
"""Read plst resource from the resource fork of path.
"""
from Carbon.File import FSRef, FSGetResourceForkName
from Carbon.Files import fsRdPerm
from Carbon import Res
fsRef = FSRef(path)
resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdPerm)
Res.UseResFile(resNum)
plistData = Res.Get1Resource(restype, resid).data
Res.CloseResFile(resNum)
return readPlistFromString(plistData)
| 19,686
|
def plot_confusion_matrix(
cm, classes, normalize=False, title="Confusion matrix", cmap=plt.cm.Blues
):
"""Plots a confusion matrix.
Args:
cm (np.array): The confusion matrix array.
        classes (list): List with the class names.
normalize (bool): Flag to normalize data.
title (str): Title of the plot.
cmap (matplotlib.cm): `Matplotlib colormap <https://matplotlib.org/api/cm_api.html>`_
**Examples**::
>> a = np.array([[10, 3, 0],[1, 2, 3],[1, 5, 9]])
>> classes = ['cl1', 'cl2', 'cl3']
>> plot_confusion_matrix(a, classes, normalize=False)
>> plot_confusion_matrix(a, classes, normalize=True)
"""
cm_max = cm.max()
cm_min = cm.min()
if cm_min > 0:
cm_min = 0
if normalize:
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
cm_max = 1
plt.imshow(cm, interpolation="nearest", cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cm_max / 2.0
plt.clim(cm_min, cm_max)
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j,
i,
round(cm[i, j], 3), # round to 3 decimals if they are float
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
| 19,687
|
def simple_satunet(
input_shape,
kernel=(2, 2),
num_classes=1,
activation="relu",
use_batch_norm=True,
dropout=0.1,
dropout_change_per_layer=0.0,
dropout_type="standard",
use_dropout_on_upsampling=False,
filters=8,
num_layers=4,
strides=(1, 1),
):
"""
Customisable UNet architecture (Ronneberger et al. 2015 https://arxiv.org/abs/1505.04597)
input_shape: shape (x, y, num_channels)
num_classes (int): 1 for binary segmentation
activation (str): A keras.activations.Activation to use. ReLu by default.
use_batch_norm (bool): Whether to use Batch Normalisation across the channel axis between convolutions
    dropout (float, between 0. and 1.): dropout after the first convolutional block. 0. = no dropout
    dropout_change_per_layer (float, between 0. and 1.): Factor to add to the Dropout after each convolutional block
dropout_type (one of "spatial" or "standard"): Spatial is recommended by https://arxiv.org/pdf/1411.4280.pdf
use_dropout_on_upsampling (bool): Whether to use dropout in the decoder part of the network
filters (int): Convolutional filters in the initial convolutional block. Will be doubled every block
num_layers (int): Number of total layers in the encoder not including the bottleneck layer
"""
upconv_filters = int(1.5 * filters)
# Build U-Net model
inputs = tf.keras.layers.Input(input_shape)
x = inputs
down_layers = []
for l in range(num_layers):
x = conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides, # (1,1),
)
down_layers.append(x)
x = tf.keras.layers.MaxPooling2D(kernel)(x)
dropout += dropout_change_per_layer
# filters = filters * 2 # double the number of filters with each layer
x = conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides, # (1,1),
)
if not use_dropout_on_upsampling:
dropout = 0.0
dropout_change_per_layer = 0.0
for conv in reversed(down_layers):
filters //= 2 # decreasing number of filters with each layer
dropout -= dropout_change_per_layer
# x = upsample(filters, kernel, strides=(2,2), padding="same")(x)#(2, 2)
x = tf.keras.layers.UpSampling2D(kernel)(x)
x = tf.keras.layers.concatenate([x, conv])
x = conv2d_block(
inputs=x,
filters=upconv_filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
)
# outputs = tf.keras.layers.Conv2D(num_classes, (1, 1), activation=output_activation)(x)
# ## classify
if num_classes == 1:
outputs = tf.keras.layers.Conv2D(
num_classes, (1, 1), padding="same", activation="sigmoid"
)(x)
else:
outputs = tf.keras.layers.Conv2D(
num_classes, (1, 1), padding="same", activation="softmax"
)(x)
model = tf.keras.models.Model(inputs=[inputs], outputs=[outputs])
return model
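
# Hedged usage sketch: shapes and hyperparameters below are illustrative only, and the
# conv2d_block helper referenced above is assumed to use 'same' padding so sizes line up.
model = simple_satunet(input_shape=(256, 256, 3), num_classes=1, filters=8, num_layers=4)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()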
| 19,688
|
def fracday2datetime(tdata):
"""
Takes an array of dates given in %Y%m%d.%f format and returns a
corresponding datetime object
"""
dates = [datetime.strptime(str(i).split(".")[0], "%Y%m%d").date()
for i in tdata]
frac_day = [i - np.floor(i) for i in tdata]
ratios = [(Fraction(i).limit_denominator().numerator,
Fraction(i).limit_denominator().denominator) for i in frac_day]
times = [datetime.strptime(
str(timedelta(seconds=timedelta(days=i[0]/i[1]).total_seconds())),
'%H:%M:%S').time() for i in ratios]
date_times = [datetime.combine(d, t) for d, t in zip(dates, times)]
return date_times
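
# Example: the fraction after the YYYYMMDD part encodes the time of day
# (.5 is 12:00, .25 is 06:00), so this should print
# [datetime(2021, 1, 1, 12, 0), datetime(2021, 1, 2, 6, 0)].
print(fracday2datetime([20210101.5, 20210102.25]))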
| 19,689
|
def set_env_variables_permanently_win(key_value_pairs, whole_machine = False):
"""
    Similar to os.environ[var_name] = var_value for all pairs provided, but instead of setting the variables in the
    current process, sets the environment variables permanently at the OS level (machine-wide or per-user,
    depending on `whole_machine`).
    NOTE: the process must be "elevated" before making this call. Use "sudo" first.
Original Recipe from http://code.activestate.com/recipes/416087/
:param key_value_pairs: a dictionary of variable name+value to set
:param whole_machine: if True the env variables will be set at the MACHINE (HKLM) level.
If False it will be done at USER level (HKCU)
:return:
"""
if not isinstance(key_value_pairs, dict):
raise ValueError('{!r} must be {}'.format(key_value_pairs, dict))
if os.name != 'nt':
raise ModuleNotFoundError('Attempting Windows operation on non-Windows')
subkey = r'SYSTEM\CurrentControlSet\Control\Session Manager\Environment' if whole_machine else r'Environment'
with winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE if whole_machine else winreg.HKEY_CURRENT_USER,
subkey, 0, winreg.KEY_ALL_ACCESS) as key:
for name, value in key_value_pairs.items():
print(' setting environment variable -->', name, '=', value)
try:
present, value_type = winreg.QueryValueEx(key, name)
except OSError:
present = '<Not defined>'
value_type = winreg.REG_SZ
print('old value was {} = {}'.format(name, present))
if name.upper() in ['PATH', 'PATHEXT']:
if value.upper() in present.split(';'): # these two keys will always be present and contain ";"
print('Value {} already in {}'.format(value, present))
continue
else:
print('"{}" will not be entirely changed. "{}" will be appended at the end.'.format(
name, value))
value = '{};{}'.format(present, value)
if value:
print("Setting ENVIRONMENT VARIABLE '{}' to '{}'".format(name, value))
winreg.SetValueEx(key, name, 0, value_type, value)
else:
print("Deleting ENV VARIABLE '{}'".format(name))
try:
winreg.DeleteValue(key, name)
except FileNotFoundError:
pass # ignore if already deleted
# tell all the world that a change has been made
win32gui.SendMessageTimeout(win32con.HWND_BROADCAST, win32con.WM_SETTINGCHANGE, 0, 'Environment',
win32con.SMTO_ABORTIFHUNG, 1000)
if has_context():
input('Hit <Enter> to continue . . .')
| 19,690
|
def train(ctx, cids, msday, meday, acquired):
"""Trains a random forest model for a set of chip ids
Args:
ctx: spark context
cids (sequence): sequence of chip ids [(x,y), (x1, y1), ...]
msday (int): ordinal day, beginning of training period
        meday (int): ordinal day, end of training period
acquired (str): ISO8601 date range
Returns:
A trained model or None
"""
name = 'random-forest-training'
log = logger(ctx, name)
# wire everything up
aux = timeseries.aux(ctx=ctx,
cids=cids,
acquired=acquired)\
.filter('trends[0] NOT IN (0, 9)')\
.repartition(ccdc.PRODUCT_PARTITIONS).persist()
aid = aux.select(aux.cx, aux.cy).distinct()
ccd = pyccd.read(ctx, aid).filter('sday >= {} AND eday <= {}'.format(msday, meday))
fdf = features.dataframe(aux, ccd).persist()
if fdf.count() == 0:
log.info('No features found to train model')
return None
else:
log.debug('sample feature:{}'.format(fdf.first()))
log.debug('feature row count:{}'.format(fdf.count()))
log.debug('feature columns:{}'.format(fdf.columns))
model = pipeline(fdf).fit(fdf)
# manage memory
aux.unpersist()
fdf.unpersist()
return model
| 19,691
|
def leave_out(y, model, path, leave=1, **kwargs):
"""
Parameters
----------
    y: test set
    model: model fit by training set
    path: prefix used to build the output file path of each fold
    leave: how many neurons are left out per fold
    Returns
    -------
    None; the posterior fit of each fold is written to "{path}_leave_{leave}_out_{fold}"
"""
from scipy.linalg import svd
_, nbin, z_dim = model["mu"].shape
y_dim = y.shape[-1]
# Z = USV'
# Za = USV'a = (US)(V'a) = (USV'V)(V'a)
u, s, vt = svd(model["mu"].reshape(-1, z_dim), full_matrices=False)
a_orth = vt @ model["a"]
b = model["b"]
nfold = y_dim // leave
if 0 < nfold < y_dim:
y_perm = np.random.permutation(y_dim)
elif nfold == y_dim:
y_perm = np.arange(y_dim)
else:
raise ValueError("invalid leave: {}".format(leave))
folds = np.array_split(y_perm, nfold) # k-fold
for i, fold in enumerate(folds):
in_mask = np.ones(y_dim, dtype=bool)
in_mask[fold] = False
y_in = y[:, :, in_mask]
a_in = a_orth[:, in_mask] # orth
b_in = b[:, in_mask]
lik = ["spike"] * y_in.shape[-1]
fold_path = "{}_leave_{}_out_{}".format(path, leave, i)
# DEBUG
print("{}".format(fold_path))
fit(
y=y_in,
z_dim=z_dim,
lik=lik,
a=a_in,
b=b_in,
history=model["history"],
sigma=model["sigma"],
omega=model["omega"],
rank=model["rank"],
path=fold_path,
learn_param=False,
learn_post=True,
learn_hyper=False,
**kwargs
)
with h5py.File(fold_path, "a") as fout:
fout["fold"] = fold
| 19,692
|
def install():
"""Routine to be run by the win32 installer with the -install switch."""
from IPython.core.release import version
# Get some system constants
prefix = sys.prefix
python = pjoin(prefix, 'python.exe')
# Lookup path to common startmenu ...
ip_start_menu = pjoin(get_special_folder_path('CSIDL_COMMON_PROGRAMS'), 'IPython')
# Create IPython entry ...
if not os.path.isdir(ip_start_menu):
os.mkdir(ip_start_menu)
directory_created(ip_start_menu)
# Create .py and .bat files to make things available from
# the Windows command line. Thanks to the Twisted project
# for this logic!
programs = [
'ipython',
'iptest',
'ipcontroller',
'ipengine',
'ipcluster',
'irunner'
]
scripts = pjoin(prefix,'scripts')
for program in programs:
raw = pjoin(scripts, program)
bat = raw + '.bat'
py = raw + '.py'
# Create .py versions of the scripts
shutil.copy(raw, py)
# Create .bat files for each of the scripts
bat_file = file(bat,'w')
bat_file.write("@%s %s %%*" % (python, py))
bat_file.close()
# Now move onto setting the Start Menu up
ipybase = pjoin(scripts, 'ipython')
link = pjoin(ip_start_menu, 'IPython.lnk')
cmd = '"%s"' % ipybase
mkshortcut(python,'IPython',link,cmd)
link = pjoin(ip_start_menu, 'pysh.lnk')
cmd = '"%s" -p sh' % ipybase
mkshortcut(python,'IPython (command prompt mode)',link,cmd)
link = pjoin(ip_start_menu, 'scipy.lnk')
cmd = '"%s" -p scipy' % ipybase
mkshortcut(python,'IPython (scipy profile)',link,cmd)
link = pjoin(ip_start_menu, 'ipcontroller.lnk')
cmd = '"%s" -xy' % pjoin(scripts, 'ipcontroller')
mkshortcut(python,'IPython controller',link,cmd)
link = pjoin(ip_start_menu, 'ipengine.lnk')
cmd = '"%s"' % pjoin(scripts, 'ipengine')
mkshortcut(python,'IPython engine',link,cmd)
# Create documentation shortcuts ...
t = prefix + r'\share\doc\ipython\manual\ipython.pdf'
f = ip_start_menu + r'\Manual in PDF.lnk'
mkshortcut(t,r'IPython Manual - PDF-Format',f)
t = prefix + r'\share\doc\ipython\manual\html\index.html'
f = ip_start_menu + r'\Manual in HTML.lnk'
mkshortcut(t,'IPython Manual - HTML-Format',f)
| 19,693
|
def flatten(iterable):
"""
Unpacks nested iterables into the root `iterable`.
Examples:
```python
from flashback.iterating import flatten
for item in flatten(["a", ["b", ["c", "d"]], "e"]):
print(item)
#=> "a"
#=> "b"
#=> "c"
#=> "d"
#=> "e"
assert flatten([1, {2, 3}, (4,), range(5, 6)]) == (1, 2, 3, 4, 5)
```
Params:
iterable (Iterable<Any>): the iterable to flatten
Returns:
tuple<Any>: the flattened iterable
"""
items = []
for item in iterable:
if isinstance(item, (list, tuple, set, frozenset, range)):
for nested_item in flatten(item):
items.append(nested_item)
else:
items.append(item)
return tuple(items)
| 19,694
|
def test_top_level_domains_db_is_loaded():
"""The TLD database should be loaded."""
assert dnstwist.DB_TLD
| 19,695
|
def get_file_info(bucket, filename):
"""Returns information about stored file.
Arguments:
bucket: a bucket that contains the file.
filename: path to a file relative to bucket root.
Returns:
FileInfo object or None if no such file.
"""
try:
stat = cloudstorage.stat(
'/%s/%s' % (bucket, filename), retry_params=_make_retry_params())
return FileInfo(size=stat.st_size)
except cloudstorage.errors.NotFoundError:
return None
| 19,696
|
def generate_tree(depth, max_depth, max_args):
"""Generate tree-like equations.
Args:
depth: current depth of the node, int.
max_depth: maximum depth of the tree, int.
max_args: maximum number of arguments per operator, int.
Returns:
        A tuple (tree, length): the root node of the generated tree structure and its length.
"""
if depth < max_depth:
r = random.random()
else:
r = 1
if r > VALUE_P:
value = random.choice(VALUES)
return value, 1
else:
length = 2
num_values = random.randint(2, max_args)
values = []
for _ in range(num_values):
sub_t, sub_l = generate_tree(depth + 1, max_depth, max_args)
values.append(sub_t)
length += sub_l
op = random.choice(OPERATORS)
t = (op, values[0])
for value in values[1:]:
t = (t, value)
t = (t, END)
return t, length
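
# Hedged sketch: generate_tree relies on module-level constants; the values below are
# illustrative assumptions and may differ from the original module's definitions.
import random
VALUES = ['1', '2', '3']
OPERATORS = ['+', '*']
VALUE_P = 0.9  # larger values make internal (operator) nodes more likely, since leaves need r > VALUE_P
END = '<END>'
random.seed(0)
tree, length = generate_tree(depth=0, max_depth=3, max_args=3)
print(length, tree)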
| 19,697
|
def create_experiment(body): # noqa: E501
"""create a experiment
instantiate/start experiment # noqa: E501
:param body: Experiment Object
:type body: dict | bytes
:rtype: ApiResponse
"""
if connexion.request.is_json:
req = Experiment.from_dict(connexion.request.get_json()) # noqa: E501
urn = req.cluster
if 'urn' not in urn:
urn = os.getenv('URN_' + req.cluster)
elif 'authority+cm' not in urn:
urn = urn + '+authority+cm'
logger.info('urn = {}'.format(urn))
if ',' not in req.profile:
req.profile = emulab.EMULAB_PROJ + ',' + req.profile
if req.username is None:
req.username = emulab.EMULAB_EXPERIMENT_USER
if req.project is None:
req.project = emulab.EMULAB_PROJ
# update the profile from repo
update_repo_cmd = '{} sudo -u {} manage_profile updatefromrepo {}'.format(
emulab.SSH_BOSS, req.username, req.profile)
emulab.send_request(update_repo_cmd)
emulab_cmd = '{} sudo -u {} start-experiment -a {} -w --name {} --project {} {}'.format(
emulab.SSH_BOSS, req.username, urn, req.name, req.project, req.profile)
emulab_stdout = emulab.send_request(emulab_cmd)
return ApiResponse(code=0, output="Please use getExperiment to check whether success or fail")
| 19,698
|
def test_lookup_capacity():
"""
Unit test.
"""
with pytest.raises(KeyError):
lookup_capacity({}, 'test', 'test', 'test', 'test','test')
| 19,699
|