content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def hdfs_open(server, username, path, **args):
    """Read a file from HDFS via WebHDFS.

    Extra keyword arguments are forwarded to ``datanode_url`` when building
    the datanode request URL.

    Returns a filelike object (specifically, an httplib response object).
    Raises via ``_raise_error`` when the datanode answers anything other
    than 200 OK.
    """
    # BUG FIX: the original assigned the result to a local variable named
    # ``datanode_url``, shadowing the module-level function of the same name
    # and raising UnboundLocalError before the call could execute.
    url = datanode_url(server, username, path, **args)
    response = _datanode_request(server, username, 'GET', url)
    if response.status == httplib.OK:
        return response
    else:
        # Consume the body so the raised error can include the server message.
        content = response.read()
        _raise_error(response.status, content)
def get_serv_loader(desc_file=SERVICES_FILE):
    """Return a ServiceLoader for the service descriptions in *desc_file*.

    The loader built from the default `SERVICES_FILE` is cached in the
    module-level `_serv_loader` singleton; any other file is re-read on
    every call.
    """
    global _serv_loader
    if desc_file != SERVICES_FILE:
        # Non-default file: always build a fresh loader.
        with open(desc_file, "r") as fp:
            return ServiceLoader(fp)
    if _serv_loader is None:
        # First request for the default file: populate the singleton.
        with open(desc_file, "r") as fp:
            _serv_loader = ServiceLoader(fp)
    return _serv_loader
def check_reverse_lookup():
    """
    Return True when the host's FQDN resolves to the same IP as its hostname.
    """
    try:
        ip_of_hostname = socket.gethostbyname(socket.gethostname().lower())
        ip_of_fqdn = socket.gethostbyname(socket.getfqdn().lower())
    except socket.error:
        # Name resolution failed entirely; report the lookup as broken.
        return False
    return ip_of_hostname == ip_of_fqdn
def render_pages(site_data: SiteData) -> dict[Path, str]:
    """Create page content.

    Renders the homepage, one page per record, and one placeholder page per
    table-of-contents entry, then post-processes every page body with
    ``rendering.apply_links`` before returning the path -> content mapping.
    """
    homepage = (site_data.homepage, rendering.render_homepage(site_data))
    record_pages = [
        (page, rendering.render_record_page(site_data, uri))
        for uri, page in site_data.record_pages.items()
    ]
    # TOC rendering is not implemented yet, hence the placeholder body.
    # FIX: only the page objects are used, so iterate values() instead of
    # items() with an unused record_type variable.
    toc_pages = [(page, "TODO") for page in site_data.toc_pages.values()]
    all_pages = [homepage] + record_pages + toc_pages
    return {
        path: rendering.apply_links(content, site_data) for path, content in all_pages
    }
def download_file(clean_url, download_folder, downloaded_file_name, depth=0, error_output=True):
    """ Downloads a specific file.
    :param clean_url: Decoded URL to the file.
    :param download_folder: Folder to place the downloaded file in.
    :param downloaded_file_name: File name to save the download to.
    :param depth: (optional) Hierarchy depth of the handled Confluence page.
    :param error_output: (optional) Set to False if you do not want to see any error outputs
    :returns: Path to the downloaded file.
        NOTE(review): the path is returned even when the download fails, so the
        file may not exist at the returned location — callers should check.
    """
    downloaded_file_path = '%s/%s' % (download_folder, downloaded_file_name)
    # Download file if it does not exist yet (acts as a simple cache: an
    # existing file is never re-fetched).
    if not os.path.exists(downloaded_file_path):
        absolute_download_url = '%s%s' % (settings.CONFLUENCE_BASE_URL, clean_url)
        print('%sDOWNLOAD: %s' % ('\t'*(depth+1), downloaded_file_name))
        try:
            utils.http_download_binary_file(absolute_download_url, downloaded_file_path,
                auth=settings.HTTP_AUTHENTICATION, headers=settings.HTTP_CUSTOM_HEADERS,
                verify_peer_certificate=settings.VERIFY_PEER_CERTIFICATE,
                proxies=settings.HTTP_PROXIES)
        # Download errors are reported but deliberately never re-raised.
        except utils.ConfluenceException as e:
            if error_output:
                error_print('%sERROR: %s' % ('\t'*(depth+2), e))
            else:
                print('%sWARNING: %s' % ('\t'*(depth+2), e))
    return downloaded_file_path
def compact_map_save(hpmap, outname, exclude=lambda x: x > 0):
    """
    Save maps in a pixel, signal format

    Parameters
    ----------
    hpmap: healpix map
        Healpix map
    outname: str
        Output name
    exclude: function
        Function to mask pixels to be excluded, must have hpmap as argument

    Returns
    -------
    None
    """
    # The mask returned by ``exclude`` selects which pixels are written out.
    keep = exclude(hpmap)
    pixels = np.arange(hpmap.size, dtype=int)[keep]
    signal = hpmap[keep]
    Table([pixels, signal], names=['pixel', 'signal']).write(outname)
def _clean_bar_plot_data(df_in: pd.DataFrame,
                         sweep_vars: Sequence[Text] = None) -> pd.DataFrame:
    """Clean the summary data for bar plot comparison of agents."""
    df = df_in.copy()
    # Fix the categorical ordering so plots show experiments/types in the
    # canonical order rather than alphabetically.
    df['env'] = pd.Categorical(
        df.bsuite_env, categories=_ORDERED_EXPERIMENTS, ordered=True)
    df['type'] = pd.Categorical(
        df['type'], categories=_ORDERED_TYPES, ordered=True)
    # Build a per-row agent label from the sweep variables.
    if sweep_vars is None:
        df['agent'] = 'agent'
    elif len(sweep_vars) == 1:
        df['agent'] = df[sweep_vars[0]].astype(str)
    else:
        as_text = df[sweep_vars].astype(str)
        labelled = as_text.apply(lambda col: col.name + '=' + col, axis=0)
        df['agent'] = labelled.apply(lambda row: '\n'.join(row), axis=1)
    return df
def reset_db(ml_dag_repository: MLDagRepository) -> None:
    """ Resets DB before each test to initial testing state """
    # Drop every table known to the repository metadata, then recreate the
    # full (empty) schema so each test starts from a clean slate.
    ml_dag_repository.metadata.drop_all()
    ml_dag_repository.metadata.create_all()
def file_resources():
    """File Resources.

    Returns the resource registry: each entry wraps a fresh FileService
    instance in its corresponding resource class.
    """
    return {
        'mock_file': CustomFileResource(service=FileService()),
        'mock_file_action': CustomFileActionResource(service=FileService()),
    }
def p_node_test(p):
    """NodeTest : NameTest
    | NODETYPE '(' ')'
    | NODETYPE '(' Literal ')'
    """
    # NOTE: the docstring above IS the PLY grammar rule for this production —
    # do not edit it casually. len(p) identifies which alternative matched
    # (p[0] is the result slot, so a 1-symbol production has len(p) == 2).
    if len(p) == 2:
        # Plain NameTest: pass the child node through unchanged.
        p[0] = p[1]
    elif len(p) == 4:
        # NODETYPE '(' ')': node type test with no argument.
        p[0] = ast.NodeType(p[1])
    else:
        # NODETYPE '(' Literal ')': node type test with a literal argument.
        p[0] = ast.NodeType(p[1], p[3])
def test_get_resampled_multiscene(
        sMf, tmp_path, fake_multiscene_empty, fake_multiscene2):
    """Test getting a resampled multiscene from files."""
    from sattools.scutil import get_resampled_multiscene
    # ``sMf`` is the mocked scene-factory; make it hand back the empty
    # multiscene fixture so loading can be faked below.
    sMf.return_value = fake_multiscene_empty
    # Fake Scene.load: copy the requested datasets from the populated
    # fixture (fake_multiscene2) into the empty fixture's scenes.
    def load(ds_all, unload=None):
        for (sc, ref_sc) in zip(
                fake_multiscene_empty.scenes, fake_multiscene2.scenes):
            for ds in ds_all:
                sc[ds] = ref_sc[ds]
    fake_multiscene_empty.load = load
    # Case 1: load by channel name with a derived product.
    ms = get_resampled_multiscene(
            [str(tmp_path / f"in{i:d}") for i in (1, 2, 3)],
            ["glm", "abi"],
            "C14",
            ["C14_flash_extent_density"])
    assert ms[0] is fake_multiscene_empty
    assert "C14" in ms[0].first_scene
    assert "C10" not in ms[0].first_scene
    # The factory must be invoked exactly once with the expected grouping.
    sMf.assert_called_once_with(
        [str(tmp_path / f"in{i:d}") for i in (1, 2, 3)],
        reader=["glm", "abi"],
        ensure_all_readers=True,
        scene_kwargs={},
        group_keys=["start_time"],
        time_threshold=35)
    # Case 2: different reference channel / dataset combination.
    ms = get_resampled_multiscene(
            [str(tmp_path / f"in{i:d}") for i in (1, 2, 3)],
            ["glm", "abi"],
            "C08",
            ["C10"])
    assert "C10" in ms[0].first_scene
    # Case 3: wavelength (float) identifiers instead of channel names.
    ms = get_resampled_multiscene(
            [str(tmp_path / f"in{i:d}") for i in (1, 2, 3)],
            ["glm", "abi"],
            6.2,
            [7.3])
    assert "C10" in ms[0].first_scene
def prepare_data(features=None):
    """Prepare data for analysis

    Args:
        features (list of str): list with features; when None, every column
            of sheet 1 not listed in the module-level ``not_features`` is used.

    Returns:
        X_train (np.matrix): train X
        X_test (np.matrix): test X
        y_train (np.matrix): train y
        y_test (np.matrix): test y

    Side effects: pickles all matrices to 'rf_data.dat'.
    """
    # Read data
    xls = pd.ExcelFile('Database.xlsx')
    db1 = xls.parse(1)
    db2 = xls.parse(2)
    db1.loc[np.isnan(db1['Sales']), 'Sales'] = 0
    # FIX: DataFrame/Series.as_matrix() was removed in pandas 1.0; to_numpy()
    # is the supported equivalent.
    y = db1['Sales'].to_numpy()
    # Fill the premium column in db2
    db2['Premium Offered'] = db1['Premium Offered'].mean()
    # To get all columns in X, we need to mix it with the training data
    if features is None:
        features = [x for x in db1.columns if x not in not_features]
    db3 = pd.concat([db1[features], db2[features]], axis=0)
    # Generate an X matrix
    Xall = proccess_X(db3, features)
    X = Xall[:db1.shape[0], :]
    X2 = Xall[db1.shape[0]:, :]
    # Train and test sets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=42)
    # Pickle the data.
    # FIX: the original used pickle.dump(data, open(...)) which leaks the
    # file handle; a with-block closes it deterministically.
    data = {'X1': X, 'X_train': X_train, 'X_test': X_test, 'y_train': y_train, 'y_test': y_test, 'X2': X2}
    with open('rf_data.dat', 'wb') as fh:
        pickle.dump(data, fh)
    return X_train, X_test, y_train, y_test
def choose_my_art_date(my_location, google_maps_key, mapping = False, search_range = 500, min_rating = 4.3):
    """
    Function to select an artsy date and dinner; randomly selects local arts event from NY ArtBeat API
    found at https://www.nyartbeat.com/resources/doc/api, and uses the arts event data to determine a nearby restaurant.
    Args
    ----
    Required:
        my_location(str): Starting point address - must be within NYC Metro Location
        google_maps_key (str): Optional google maps API key needed to geocode your location
            To obtain a google maps API key, please refer to https://developers.google.com/maps
    Optional:
        search_range(float): Distance from starting point (radius for search, meters)
            Default: 500
        min_rating(float): should be 1-5
            Default: 4.3
            NOTE(review): min_rating is accepted but never used in the body —
            confirm whether it should be forwarded to find_my_dinner.
        mapping(bool): Boolean param specifying whether user wants a simple interactive map returned of matching locations
            Default: False
    Returns
    ---
    DataFrame with [max_results] art events in the New York Metro area in the [radius] of the [specified location]
    Fields:
        Event_Name(str): Name of Event
        Event_Description(str): Details about event
        Event_Price_Adult(float): Price for tickets
        DateEnd(date): Last date for exhibit or installation
        Event_Lat(float): Latitude of event
        Event_Lon(float): Longitude of event
        Event_Address(str): Address for event - requires geocoding.
        Restaurant_Name(str): Name of restaurant
        Price_Level(str): $ - $$$$
        Restaurant_Rating(float): 1-5
        Restaurant_Address(str): Distance from starting point (my location)
        Restaurant_Lat(float): Latitude of restaurant
        Restaurant_Lon(float): Longitude of restaurant
    Map (Optional): Interactive Google Maps Output with Markers for selected restaurant and selected event.
    Usage Example
    ---
    [in]:
        choose_my_art_date("Met Museum", google_maps_key)
    [out]:
        df
        | Event_Name | Eugène Leroy “About Marina”
        | Event_Description | Michael Werner Gallery, New York presents an e...
        | Price | Free
        | DateEnd | 2021-12-23
        | Distance | 438.962726
        | Event_Lat | 40.775625
        ...
    [out]:
        Interactive Map
    """
    # Geocode the user's starting point, then pull candidate events near it.
    lat,lon = geocoding(my_location = my_location, google_maps_key = google_maps_key)
    df_events = find_my_art_events(my_location = my_location, google_maps_key = google_maps_key, lat = lat, lon = lon, mapping = False, search_range = search_range)
    # Pick one event at random; search for dinner near the EVENT, not the user.
    selected_event_row = df_events.sample(n = 1)
    event_lat = selected_event_row['Event_Lat'].values
    event_lon = selected_event_row['Event_Lon'].values
    df_dinner = find_my_dinner(lat = event_lat, lon = event_lon, google_maps_key = google_maps_key, mapping = False, search_range = search_range)
    selected_restaurant_row = df_dinner.sample(n = 1)
    # Reshape the two single-row frames into one long (Field, Value) table.
    date_night_df = pd.concat([selected_event_row,selected_restaurant_row], axis=1).unstack().reset_index().dropna().drop(columns = ['level_1']).rename(columns = {'level_0':'Field',0:'Value'})
    if mapping == True:
        # Build a two-row frame (event + restaurant) with unified column names
        # so both can be plotted as markers on one map.
        lat_lon_df = pd.concat([selected_event_row[['Event_Name','Event_Lat','Event_Lon']].rename(columns = {'Event_Name':'Name','Event_Lat':'Lat','Event_Lon':'Lon'}),\
                                selected_restaurant_row[['Restaurant_Name','Restaurant_Lat','Restaurant_Lon']].rename(columns = {'Restaurant_Name':'Name','Restaurant_Lat':'Lat','Restaurant_Lon':'Lon'})], axis=0).reset_index()
        nymap = map_events(lat_lon_df, google_maps_key, name_column = 'Name', start_lat = lat, start_lon = lon, lat_column = 'Lat', long_column = 'Lon')
        return date_night_df,nymap
    else:
        return date_night_df
def flatten_repo_tree(d, parent_key=''):
    """Flatten a dict to so that keys become nodes in a path.

    A subtree whose contents validate as a ``Repo`` is kept whole under its
    path key; other mappings are recursed into, and plain leaves are copied.
    """
    flat = {}
    for key, value in d.items():
        path_key = os.path.join(parent_key, key)
        # Probe whether this subtree describes a repository.
        try:
            repo_obj = Repo(**value)
        except (ValidationError, TypeError):
            repo_obj = None
        if repo_obj:
            flat[path_key] = value
        elif isinstance(value, MutableMapping):
            flat.update(flatten_repo_tree(value, path_key))
        else:
            flat[path_key] = value
    return flat
def hash_text(message: str, hash_alg: str = 'keccak256') -> str:
    """get the hash of text data

    :param message: str
    :param hash_alg: str, `keccak256` or `sha256`, the default value is `keccak256`
    :return: hex str, digest message with `keccak256` or `sha256`
    :raises ValueError: if ``hash_alg`` is not a supported algorithm name
    """
    if hash_alg == 'keccak256':
        _hash = keccak256(text=message)
    elif hash_alg == 'sha256':
        _hash = hashlib.sha256(message.encode()).hexdigest()
    else:
        # FIX: corrected the grammar of the error message
        # ('unsupport' -> 'unsupported').
        raise ValueError(f'unsupported hash_alg param, hash_alg: {hash_alg}')
    return _hash
def get_grains_connected_to_face(mesh, face_set, node_id_grain_lut):
    """
    Find the grains connected to the face set given as argument.

    Three nodes on a grain boundary can all be intersected by one grain
    (the grain face lies on the boundary) or by two grains, so it suffices
    to intersect the grain sets of the nodes of any single triangle in the
    face set.

    :param mesh: The mesh
    :type mesh: :class:`Mesh`
    :param face_set: The face set to find grains connected to
    :type face_set: :class:`ElementSet`
    :return: The grain identifiers that intersect the face.
    :rtype: list of ints
    """
    # Fast path: an interior face-set name encodes both grain ids as
    # "face<a>_<b>"; decode them directly.
    encoded = face_set.name[4:].split("_")
    if len(encoded) == 2:
        return [int(g) for g in encoded]
    # Otherwise intersect the grain sets of one triangle's vertices.
    first_triangle = mesh.elements[face_set.ids[0]]
    per_node_grains = [
        set(node_id_grain_lut[node_id]) for node_id in first_triangle.vertices
    ]
    return list(set.intersection(*per_node_grains))
def get_git_projects(git_worktree, args,
                     default_all=False,
                     use_build_deps=False,
                     groups=None):
    """ Get a list of git projects to use """
    git_parser = GitProjectParser(git_worktree)
    # NOTE(review): the ``groups`` parameter is immediately overwritten by the
    # value parsed from ``args`` — a caller-supplied groups= argument is
    # silently ignored. Confirm whether this is intentional.
    groups = vars(args).get("groups")
    if groups:
        # Explicit group selection takes precedence over build-dep resolution.
        use_build_deps = False
    if use_build_deps:
        # To avoid getting all the projects when no project is given
        # and running from the subdir of a build project
        if not at_top_worktree(git_worktree):
            default_all = False
        build_worktree = qibuild.worktree.BuildWorkTree(git_worktree.worktree)
        build_parser = GitBuildProjectParser(git_worktree, build_worktree)
        return build_parser.parse_args(args, default_all=default_all)
    if groups:
        return git_worktree.get_git_projects(groups=groups)
    # Default: plain git-project parsing of the command-line arguments.
    return git_parser.parse_args(args, default_all=default_all)
def RmZ(
    ps: Table,
    r_band: Literal["9601", "9602"] = "9602",
    z_band: Literal["9801", "9901"] = "9901",
    **kw
) -> units.mag:
    """R-Z color.

    Parameters
    ----------
    ps : astropy.table.Table
        need arguments for r(z)_band functions
    r_band: {'9601', '9602'}
        R band to use
        (default '9602')
    z_band: {'9801', '9901'}
        Z band to use
        (default '9901')
    kwargs
        passes to R & Z-band functions

    Returns
    -------
    R-Z color
    """
    # Delegate to the generic band-difference helper: magnitude in the chosen
    # R epoch minus magnitude in the chosen Z epoch.
    return _b1mb2(ps, "R_MP" + r_band, "Z_MP" + z_band, **kw)
def get_table_names(connection: psycop.extensions.connection) -> List[str]:
    """
    Report the name of the tables.

    Queries information_schema for all base tables in the 'public' schema.
    E.g., tables=['entities', 'events', 'stories', 'taxonomy']
    """
    query = """
    SELECT table_name
        FROM information_schema.tables
        WHERE table_type = 'BASE TABLE'
            AND table_schema = 'public'
    """
    cursor = connection.cursor()
    try:
        cursor.execute(query)
        tables = [x[0] for x in cursor.fetchall()]
    finally:
        # FIX: always release the cursor, even if execute/fetch raises; the
        # original leaked it on error.
        cursor.close()
    return tables
def mrpxmrp(sigmaset1, sigmaset2):
    """in work; returns transformation [FN] = [FB(s2)][BN(s1)]

    Composes two Modified Rodrigues Parameter sets, following the reference
    formula kept in the comments at the bottom of the function.
    """
    # NOTE(review): q1, q2 and scalar3 are computed but never used below.
    q1 = np.array(sigmaset1)
    q2 = np.array(sigmaset2)
    sig1_norm = norm(sigmaset1)
    sig2_norm = norm(sigmaset2)
    scalar1 = 1 - sig1_norm**2
    scalar2 = 1 - sig2_norm**2
    scalar3 = 2.
    # Denominator of the MRP composition formula (see commented reference).
    denom = 1 + sig1_norm**2*sig2_norm**2-2*vec.vdotv(sigmaset1, sigmaset2)
    term1 = vec.vxs(scalar1, sigmaset2)
    term2 = vec.vxs(scalar2, sigmaset1)
    term3 = vec.vxs(2, vec.vcrossv(sigmaset2, sigmaset1))
    numer = vec.vxadd(term1, vec.vxadd(term2, -term3))
    # NOTE(review): the reference formula below DIVIDES by the denominator,
    # but vec.vxs looks like vector-times-scalar — confirm whether this
    # should be vec.vxs(1.0/denom, numer).
    sigma = vec.vxs(denom, numer)
    # sigma = (1-(q1.T*q1))*q2+(1-(q2*q2.T))*q1+2*np.cross(q1.T,q2.T).T;
    # sigma = sigma/(1+q1.T*q1 * q2.T*q2-2*q1.T*q2);
    return np.array(sigma)
def continuous_partition_data(data, bins='auto', n_bins=10):
    """Convenience method for building a partition object on continuous data

    Args:
        data (list-like): The data from which to construct the estimate.
        bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto' (for automatically spaced bins)
        n_bins (int): Ignored if bins is auto.

    Returns:
        A new partition_object::
        {
            "bins": (list) The endpoints of the partial partition of reals,
            "weights": (list) The densities of the bins implied by the partition.
        }
    """
    # Resolve the bin specification: explicit edges for 'uniform'/'ntile',
    # or the string 'auto' passed straight through to np.histogram.
    if bins == 'uniform':
        edges = np.linspace(start=np.min(data), stop=np.max(data), num=n_bins + 1)
    elif bins == 'ntile':
        edges = np.percentile(data, np.linspace(start=0, stop=100, num=n_bins + 1))
    elif bins == 'auto':
        edges = 'auto'
    else:
        raise ValueError("Invalid parameter for bins argument")
    hist, bin_edges = np.histogram(data, edges, density=False)
    return {
        "bins": bin_edges,
        "weights": hist / len(data),
    }
def create_pulsed_programming_accuracy_plot_2(pulsed_programmings, directory_name):
    """
    This function creates a plot showing the impact of the tolerance on the
    accuracy of the pulsed programming.

    Parameters
    ----------
    pulsed_programmings : list of list of PulsedProgramming.PulsedProgramming
    directory_name : string
        The directory name where the plots will be save

    Returns
    ----------
    None (writes 'accuracy_2_plot.jpg' into directory_name)
    """
    if not os.path.isdir(f'{directory_name}'):
        os.mkdir(f'{directory_name}')
    ax = plt.subplot(1, 1, 1)
    x = []
    y = []
    error_bar = []
    colors = ['blue', 'orange', 'green', 'red', 'purple', 'brown', 'pink', 'olive', 'cyan', 'gray']
    linestyle = [(0, (1, 10)), (0, (1, 5)), (0, (1, 1)),
                 (0, (3, 10, 1, 10)), (0, (3, 5, 1, 5)), (0, (3, 1, 1, 1))]
    counter = 0
    # Group simulations by (pulse algorithm, read variability %), then by
    # tolerance, so each group becomes one curve with one point per tolerance.
    dict_ = {}
    for current_conf in pulsed_programmings:
        for current in current_conf:
            key = f'{current.pulse_algorithm}_{current.variance_read*300}'
            if dict_.get(f'{key}') is None:
                dict_[f'{key}'] = {}
            if dict_.get(f'{key}').get(float(current.tolerance)) is None:
                dict_[f'{key}'][float(current.tolerance)] = []
            dict_[f'{key}'][float(current.tolerance)].append(current)
    for i in dict_.keys():
        dict_[i] = dict(sorted(dict_.get(i).items()))
    y_ = []
    for key in dict_.keys():
        for current_key in dict_.get(key).keys():
            # Accuracy = relative deviation (%) of the practical resistance
            # state from the target state, averaged over the group.
            for current in dict_.get(key).get(current_key):
                accuracy = 100 * (current.res_states_practical[0][1] - current.res_states[0][1]) / current.res_states[0][1]
                y_.append(accuracy)
            x.append(dict_.get(key).get(current_key)[0].tolerance)
            y.append(np.mean(y_[:]))
            error_bar.append(np.std(y_[:]))
            y_.clear()
        label = f'{key.split("_")[0]} {key.split("_")[1]}% read'
        ax.plot(x, y, color=colors[counter], marker='o', label=label, linestyle=linestyle[counter])
        counter += 1
        x.clear()
        y.clear()
        error_bar.clear()
    # Annotate the figure with the parameters shared by all simulations
    # (taken from the first group's tolerance-1 entries).
    for key in dict_.keys():
        pulsed_programmings_ = dict_.get(key).get(1)
        nb_simulations = len(pulsed_programmings_)
        textstr = 'Constants\n---------------\n'
        textstr += f'Number of reading : {pulsed_programmings_[0].number_of_reading}\n'
        textstr += f'Variability write : {pulsed_programmings_[0].variance_write * 300} %\n'
        textstr += f'Tolerance : {pulsed_programmings_[0].tolerance} '
        textstr += '%\n' if pulsed_programmings_[0].is_relative_tolerance else 'Ohm\n'
        break
    plt.figtext(0.70, 0.35, textstr, fontsize=8)
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
    plt.title(f'{nb_simulations} simulations per point')
    ax.set_ylabel('Accuracy (%)')
    ax.set_xlabel('Tolerance (%)')
    filename = f'accuracy_2_plot.jpg'
    plt.tight_layout()
    # FIX: the original hard-coded a broken output path and never used the
    # ``filename`` variable; also use os.path.join instead of a Windows-only
    # backslash separator.
    plt.savefig(os.path.join(directory_name, filename), dpi=1200)
    plt.close('all')
def readInput(infile, genefile, segfile):
    """
    Reads input files.

    Parameters:
        infile (str): File containing list of genes to be analyzed
        genefile (str): File containing gene range definitions
        segfile (str): File containing cell line intervals and copy number data

    Returns:
        genes (list): List of genes
        genedef (dict): Dictionary of genes mapping to corresponding intervals
        interval_dict (dict): Dictionary of dictionary of interval trees containing cell line ranges
    """
    # One gene name per line.
    with open(infile) as inf:
        genes = [i.strip() for i in inf.readlines()]
    with open(genefile) as genef:
        dictgenes = csv.DictReader(genef, delimiter="\t")
        genedef = {}
        for d in dictgenes:
            # Skip genes without defined coding-sequence boundaries.
            if d["cds_from"] != "-" and d["cds_to"] != "-":
                genedef[d["gene"]] = (d["#chromosome"], Interval(int(d["cds_from"]), int(d["cds_to"])))
    with open(segfile) as seg:
        interval_dict = {}
        dictseg = csv.DictReader(seg, delimiter="\t")
        for d in dictseg:
            d = dict(d)
            if "e" in d["End"]:
                # Replace one incorrect exponential value
                d["End"] = 115000000
            # FIX: the original duplicated the tree-insert logic across three
            # nested branches; setdefault collapses them into one path with
            # identical behavior.
            chrom_trees = interval_dict.setdefault(d["CCLE_name"], {})
            tree = chrom_trees.setdefault(d["Chromosome"], IntervalTree())
            tree[int(d["Start"]):int(d["End"])] = float(d["Segment_Mean"])
    return genes, genedef, interval_dict
def list_rulesets(command):
    """
    tests if the list rulesets command is running properly
    """
    namespace = app.main(command)
    # Both the short and the long command aliases are acceptable.
    assert namespace.command in ('lr', 'listrulesets')
def library_name(name, suffix=SHLIB_SUFFIX, is_windows=is_windows):
    """
    Convert a file basename `name` to a library name (no "lib" and ".so" etc.)

    >>> library_name("libpython3.7m.so") # doctest: +SKIP
    'python3.7m'
    >>> library_name("libpython3.7m.so", suffix=".so", is_windows=False)
    'python3.7m'
    >>> library_name("libpython3.7m.dylib", suffix=".dylib", is_windows=False)
    'python3.7m'
    >>> library_name("python37.dll", suffix=".dll", is_windows=True)
    'python37'
    """
    # POSIX-style libraries carry a "lib" prefix; Windows DLLs do not.
    prefix = "lib"
    if not is_windows and name.startswith(prefix):
        name = name[len(prefix):]
    # Strip the shared-library suffix when one is configured and present.
    if suffix and name.endswith(suffix):
        name = name[:-len(suffix)]
    return name
def main():
    """Entry point.

    Builds a TaskGraph pipeline that (1) aligns the population raster to the
    target pixel size, (2) for each habitat spreads habitat and population
    out to that habitat's protective distance via a flat-disk convolution,
    (3) masks population by total habitat coverage (people protected by any
    habitat), and (4) accumulates per-habitat "people protected per habitat
    pixel" layers, normalizing the total so it sums to the protected
    population.
    """
    parser = argparse.ArgumentParser(description='People protected by coastal habitat analysis')
    parser.add_argument(
        '--population', type=str, required=True,
        help='path to the population raster')
    parser.add_argument(
        '--reefs', type=str, required=True,
        help='path to the reefs raster')
    parser.add_argument(
        '--mangroves_forest', type=str, required=True,
        help='path to the mangroves_forest raster')
    parser.add_argument(
        '--saltmarsh_wetland', type=str, required=True,
        help='path to the saltmarsh_wetland raster')
    parser.add_argument(
        '--seagrass', type=str, required=True,
        help='path to the seagrass raster')
    parser.add_argument(
        '--shrub', type=str, required=True,
        help='path to the shrub raster')
    parser.add_argument(
        '--sparse', type=str, required=True,
        help='path to the sparse raster')
    parser.add_argument(
        '--prefix', type=str, required=True,
        help='path to the output prefix')
    args = parser.parse_args()
    # Map of habitat key -> (raster path, protective distance in meters).
    hab_layers = {
        'reefs': (args.reefs, 2000.0),
        'mangroves_forest': (args.mangroves_forest, 2000.1),
        'saltmarsh_wetland': (args.saltmarsh_wetland, 1000.0),
        'seagrass': (args.seagrass, 500.0),
        'shrub': (args.shrub, 2000.01),
        'sparse': (args.sparse, 500.01),
    }
    workspace_dir = f'workspace_{args.prefix}'
    churn_dir = os.path.join(workspace_dir, 'churn')
    for dir_path in [churn_dir, workspace_dir]:
        os.makedirs(dir_path, exist_ok=True)
    task_graph = taskgraph.TaskGraph(
        workspace_dir, multiprocessing.cpu_count(), 15.0)
    # FIX: removed a stray argument-less ``task_graph.add_task()`` call that
    # only created an empty no-op task.
    pop_aligned_raster_path = os.path.join(churn_dir, "pop_aligned.tif")
    # FIX: renamed from ``hab_warp_task`` — this warps the POPULATION raster,
    # and the old name shadowed the per-habitat warp tasks in the loop below.
    pop_warp_task = task_graph.add_task(
        func=warp_by_area,
        args=(
            churn_dir, args.population, TARGET_PIXEL_SIZE,
            pop_aligned_raster_path),
        target_path_list=[pop_aligned_raster_path],
        task_name=f'align and resample {pop_aligned_raster_path}')
    # Block until the aligned population exists; its raster info seeds the
    # bounding box for every habitat warp below.
    pop_warp_task.join()
    pop_raster_info = geoprocessing.get_raster_info(pop_aligned_raster_path)
    hab_coverage_task_list = []
    hab_raster_path_list = []
    hab_warp_task_list = []
    pop_coverage_on_raster_list = []
    hab_pop_coverage_task_list = []
    hab_mask_cover_path_list = []
    for hab_key, (unaligned_hab_raster_path, prot_dist) in hab_layers.items():
        hab_raster_path = os.path.join(
            churn_dir, '%s_aligned%s' % os.path.splitext(os.path.basename(
                unaligned_hab_raster_path)))
        # align the habitat to the population using max so we get all the
        # high to low resolution pixel coverage
        hab_warp_task = task_graph.add_task(
            func=geoprocessing.warp_raster,
            args=(
                unaligned_hab_raster_path, TARGET_PIXEL_SIZE,
                hab_raster_path, 'max'),
            kwargs={'target_bb': pop_raster_info['bounding_box']},
            target_path_list=[hab_raster_path],
            task_name=f'align {unaligned_hab_raster_path}')
        hab_raster_path_list.append((hab_raster_path, 1))
        hab_warp_task_list.append(hab_warp_task)
        pixel_size_degree = TARGET_PIXEL_SIZE[0]
        kernel_raster_path = os.path.join(
            churn_dir, f'{hab_key}_{prot_dist}_kernel.tif')
        # this convolution is a flat disk and picks up partial pixels right
        # on the edges of the circle
        kernel_task = task_graph.add_task(
            func=create_flat_radial_convolution_mask,
            args=(
                pixel_size_degree, prot_dist, kernel_raster_path),
            target_path_list=[kernel_raster_path],
            task_name=f'make kernel for {hab_key}')
        # project habitat coverage out the distance that it should cover
        # the values don't matter here just the coverage
        hab_mask_cover_raster_path = os.path.join(
            churn_dir, f'{hab_key}_coverage.tif')
        hab_coverage_task = task_graph.add_task(
            func=geoprocessing.convolve_2d,
            args=(
                (hab_raster_path, 1), (kernel_raster_path, 1),
                hab_mask_cover_raster_path),
            kwargs={
                'mask_nodata': False,
            },
            dependent_task_list=[kernel_task, hab_warp_task],
            target_path_list=[hab_mask_cover_raster_path],
            task_name=f'create hab coverage for {hab_key}')
        hab_coverage_task_list.append(hab_coverage_task)
        hab_mask_cover_path_list.append((hab_mask_cover_raster_path, 1))
        # project population out the distance that habitat protects so we
        # can see where the population will intersect with the habitat
        hab_pop_coverage_raster_path = os.path.join(
            churn_dir, f'{hab_key}_pop_coverage.tif')
        pop_coverage_task = task_graph.add_task(
            func=geoprocessing.convolve_2d,
            args=(
                (pop_aligned_raster_path, 1), (kernel_raster_path, 1),
                hab_pop_coverage_raster_path),
            kwargs={'mask_nodata': False},
            dependent_task_list=[kernel_task],
            target_path_list=[hab_pop_coverage_raster_path],
            task_name=f'create pop coverage for {hab_key}')
        # mask projected population to hab to see how much population
        # intersects with habitat, the result will be a hab shaped splotch
        # where each pixel represents the number of people within protective
        # distance
        # FIX: the original referenced ``args.workspace_dir``, which argparse
        # never defines (only --prefix exists); use the local workspace_dir.
        pop_coverage_on_hab_raster_path = os.path.join(
            workspace_dir, f'{hab_key}_pop_on_hab.tif')
        hab_mask_pop_task = task_graph.add_task(
            func=geoprocessing.raster_calculator,
            args=(
                [(hab_raster_path, 1), (hab_pop_coverage_raster_path, 1)],
                _mask_op, pop_coverage_on_hab_raster_path,
                gdal.GDT_Float32, -1),
            dependent_task_list=[hab_warp_task, pop_coverage_task],
            target_path_list=[pop_coverage_on_hab_raster_path],
            task_name=f'mask pop by hab effect layer {hab_key}')
        hab_pop_coverage_task_list.append(hab_mask_pop_task)
        pop_coverage_on_raster_list.append(
            (pop_coverage_on_hab_raster_path, 1))
    # combine all the hab coverages into one big raster for total coverage
    total_hab_mask_raster_path = os.path.join(
        churn_dir, 'total_hab_mask_coverage.tif')
    total_hab_mask_task = task_graph.add_task(
        func=geoprocessing.raster_calculator,
        args=(
            hab_mask_cover_path_list, _union_op, total_hab_mask_raster_path,
            gdal.GDT_Byte, 0),
        dependent_task_list=hab_coverage_task_list,
        target_path_list=[total_hab_mask_raster_path],
        task_name='total hab mask coverage')
    # mask the population raster by the total hab coverage, this shows
    # how many total people are protected by any habitat
    affected_pop_raster_path = os.path.join(
        churn_dir, 'affected_population.tif')
    total_affectd_pop_task = task_graph.add_task(
        func=geoprocessing.raster_calculator,
        args=(
            [(total_hab_mask_raster_path, 1), (pop_aligned_raster_path, 1)],
            _mask_op, affected_pop_raster_path, gdal.GDT_Float32, -1),
        dependent_task_list=[total_hab_mask_task],
        target_path_list=[affected_pop_raster_path],
        task_name='mask pop by hab effect layer')
    # sum the protected population
    sum_mask_pop_task = task_graph.add_task(
        func=_sum_raster,
        args=(affected_pop_raster_path,),
        dependent_task_list=[total_affectd_pop_task],
        store_result=True,
        task_name=f'sum up {affected_pop_raster_path}')
    # calculate the total number of people protected by each habitat pixel
    # all together
    total_pop_coverage_raster_path = os.path.join(
        churn_dir, 'total_pop_coverage_on_hab.tif')
    total_pop_coverage_mask_task = task_graph.add_task(
        func=geoprocessing.raster_calculator,
        args=(
            pop_coverage_on_raster_list, _sum_rasters_op,
            total_pop_coverage_raster_path, gdal.GDT_Float32, -1),
        dependent_task_list=hab_pop_coverage_task_list,
        target_path_list=[total_pop_coverage_raster_path],
        task_name='combined population coverage')
    # sum the protected population
    sum_hab_mask_pop_task = task_graph.add_task(
        func=_sum_raster,
        args=(total_pop_coverage_raster_path,),
        dependent_task_list=[total_pop_coverage_mask_task],
        store_result=True,
        task_name=f'sum up {total_pop_coverage_raster_path}')
    # normalize the total population on habitat by the sum of total people
    # protected / sum of total pop hab mask layer
    # (the .get() calls below block until both sum tasks finish)
    norm_total_pop_hab_mask_raster_path = os.path.join(
        churn_dir, 'norm_total_pop_hab_mask_coverage.tif')
    norm_total_pop_hab_mask_task = task_graph.add_task(
        func=geoprocessing.raster_calculator,
        args=([
            (total_pop_coverage_raster_path, 1),
            (sum_mask_pop_task.get()/sum_hab_mask_pop_task.get(), 'raw')],
            _mult_by_scalar_op, norm_total_pop_hab_mask_raster_path,
            gdal.GDT_Float32, -1),
        dependent_task_list=[total_pop_coverage_mask_task],
        target_path_list=[norm_total_pop_hab_mask_raster_path],
        task_name=f'normalize final pop coverage {norm_total_pop_hab_mask_raster_path}')
    task_graph.join()
    task_graph.close()
    LOGGER.info('all done')
def gen_v2v_spmv_schedule(adj, spmv_pairs, nft, eft, eid, out):
    """Generate v2v spmv schedule.

    For each (message fn, reduce fn) pair, emits either a plain SpMV or an
    SpMV with edge data into the IR program, writing the result column into
    ``out``.

    Parameters
    ----------
    adj : tuple (sparse matrix, utils.Index)
    spmv_pairs : list of pair
    nft : var.Var
        input node features
    eft : var.Var
        input edge features
    eid : var.Var
        eid index
    out : var.Var
        output node features
    """
    adjmat, shuffle_idx = adj
    adj_var = var.SPMAT(adjmat)
    # If the sparse matrix was built in shuffled order, permute the edge ids
    # to match before reading edge features.
    if shuffle_idx is not None:
        new_eid = utils.reorder_index(eid.data, shuffle_idx)
        eid = var.IDX(new_eid)
    for mfn, rfn in spmv_pairs:
        if mfn.use_edge_feature:
            # Edge-weighted SpMV: gather the edge column, then multiply.
            ftedge = ir.READ(eft, eid, var.STR(mfn.edge_field))
            ftsrc = ir.READ_COL(nft, var.STR(mfn.src_field))
            ftdst = ir.SPMV_WITH_DATA(adj_var, ftedge, ftsrc)
        else:
            # Unweighted SpMV over the source-node feature column.
            ftsrc = ir.READ_COL(nft, var.STR(mfn.src_field))
            ftdst = ir.SPMV(adj_var, ftsrc)
        # save for merge
        ir.WRITE_COL_(out, var.STR(rfn.out_field), ftdst)
def grid_search_dict(org_params: Dict[str, Any]) -> Iterator[Tuple[str, Dict[str, Any]]]:
    """
    Iterate list in dict to do grid search.

    Every list-valued entry of *org_params* is treated as a grid axis; the
    cartesian product of all axes is yielded, with scalar entries copied
    into every point.

    Examples
    --------
    >>> test_dict = dict(a=[1,2], b = [1,2,3], c = 4)
    >>> list(grid_search_dict(test_dict))
    [('a:1-b:1', {'c': 4, 'a': 1, 'b': 1}),
     ('a:1-b:2', {'c': 4, 'a': 1, 'b': 2}),
     ('a:1-b:3', {'c': 4, 'a': 1, 'b': 3}),
     ('a:2-b:1', {'c': 4, 'a': 2, 'b': 1}),
     ('a:2-b:2', {'c': 4, 'a': 2, 'b': 2}),
     ('a:2-b:3', {'c': 4, 'a': 2, 'b': 3})]
    >>> test_dict = dict(a=1, b = 2, c = 3)
    >>> list(grid_search_dict(test_dict))
    [('one', {'a': 1, 'b': 2, 'c': 3})]

    Parameters
    ----------
    org_params : Dict
        Dictionary to be grid searched

    Yields
    ------
    name : str
        Name that describes the parameter of the grid
    param: Dict[str, Any]
        Dictionary that contains the parameter at grid
    """
    grid_keys = [k for k, v in org_params.items() if isinstance(v, list)]
    fixed = {k: v for k, v in org_params.items() if not isinstance(v, list)}
    if not grid_keys:
        # Nothing to search over: the input is the single grid point.
        yield "one", org_params
        return
    for combo in product(*(org_params[k] for k in grid_keys)):
        point = dict(zip(grid_keys, combo))
        name = "-".join(k + ":" + str(point[k]) for k in grid_keys)
        params = dict(fixed)
        params.update(point)
        yield name, params
def add_light(light_type: str = 'POINT') -> str:
    """
    Add a light of the given type to the scene, return
    the name key of the newly added light
    :param light_type: Blender light type (e.g. 'POINT'); passed straight
        to the add operator
    :return: The named key used to index the object
    """
    # Blender 2.80 renamed the operator lamp_add -> light_add; choose the
    # one matching the running API version.
    if utils.is_new_api():
        bpy.ops.object.light_add(type=light_type)
    else:
        bpy.ops.object.lamp_add(type=light_type)
    # The add operator leaves the new light as the selected object.
    light_obj = bpy.context.selected_objects[0]
    # Enable contact shadows
    if utils.is_new_api():
        light_obj.data.use_contact_shadow = True
    # Return the name
    return light_obj.name
def test_twosinesum():
    """checks that the S function is commensurate with direct calculation
    for t = 1, n = 2, T = 2pi"""
    # Two-term Fourier series of the square wave, evaluated directly.
    expected = (4 / pi) * (sin(1) + sin(3) / 3)
    close_enough = abs(S(1, 2, 2 * pi) - expected) < 1e-3
    assert close_enough, "Sums do not match."
def set_constants(ze=40, p=0.4,
                  kc_min=0.01, kc_max=1.0,
                  snow_alpha=0.2, snow_beta=11.0,
                  ke_max=1.0,
                  a_min=0.45, a_max=0.90):
    """
    Build the dictionary of model constants, including fixed monsoon dates.

    :param ze: depth of the evaporable soil layer (presumably mm -- TODO confirm units)
    :param p: the fraction of TAW that a crop can extract from the root zone without suffering water stress; ASCE pg 226
    :param kc_min: lower bound of the crop coefficient
    :param kc_max: upper bound of the crop coefficient
    :param snow_alpha: snow parameter (semantics not evident here -- confirm against model docs)
    :param snow_beta: snow parameter (semantics not evident here -- confirm against model docs)
    :param ke_max: upper bound of the evaporation coefficient
    :param a_min: minimum albedo
    :param a_max: maximum albedo
    :return: dict of all constants plus monsoon start/end dates (s_mon, e_mon)
    """
    d = dict(s_mon=datetime(1900, 7, 1),
             e_mon=datetime(1900, 10, 1),
             ze=ze, p=p,
             kc_min=kc_min, kc_max=kc_max,
             snow_alpha=snow_alpha, snow_beta=snow_beta,
             ke_max=ke_max,
             a_min=a_min, a_max=a_max)
    # Parenthesized print call is valid under both Python 2 and Python 3;
    # the original statement form broke Python 3 parsing of this module.
    print('constants dict: {}\n'.format(pformat(d, indent=2)))
    return d
def project_poses(poses, P):
    """Compute projected poses x = Pp."""
    assert poses.ndim == 2 and poses.shape[-1] == 3, \
        'Invalid pose dim at ext_proj {}'.format(poses.shape)
    assert P.shape == (3, 4), 'Invalid projection shape {}'.format(P.shape)
    # Homogenize: append a ones column so the 3x4 projection applies directly.
    ones_col = np.ones((len(poses), 1))
    homogeneous = np.concatenate([poses, ones_col], axis=-1)
    projected = np.matmul(P, homogeneous.T)
    return projected.T
def print_latest_failures(server, job_names):
    """Get a list of all tests that failed on the most recent build of the given jobs."""
    build_urls = {}
    test_failures = {}
    for job_name in job_names:
        job = server.get_job(job_name)
        for build in recent_builds(job, num=1):
            build_urls[build.name] = build.baseurl
            for result in test_results(build):
                # Anything not passing/fixed/skipped counts as a failure.
                if result.status in ('PASSED', 'FIXED', 'SKIPPED'):
                    continue
                entry = test_failures.setdefault(result.identifier(), {'builds': []})
                entry['builds'].append(build.name)
    # Order failures from most to least
    ordered = sorted(test_failures.items(),
                     key=lambda t: len(t[1]['builds']), reverse=True)
    print('---------------Markdown---------------')
    for test_name, info in ordered:
        print('* `{test_name}`'.format(test_name=test_name))
        build_links = [
            '[{name}]({url})'.format(name=build_name, url=build_urls[build_name])
            for build_name in info['builds']
        ]
        print(' * ' + ', '.join(build_links))
    print('---------------Markdown---------------')
def argmin(x):
    """
    Returns the index of the smallest element of the iterable `x`.
    If two or more elements equal the minimum value, the index of the first
    such element is returned.
    >>> argmin([1, 3, 2, 0])
    3
    >>> argmin(abs(x) for x in range(-3, 4))
    3
    """
    # min() over (index, value) pairs keyed on value returns the first
    # minimal pair, matching the documented tie-breaking rule.
    try:
        return min(enumerate(x), key=lambda pair: pair[1])[0]
    except ValueError:
        # Empty iterable: preserve the original behavior of returning None.
        return None
def detect_scenes(files: Collection[str], pipeline: PipelineContext, progress=ProgressMonitor.NULL):
    """Detect scenes for the given files.

    Videos that already have scene results are skipped. Missing frame-level
    features are computed first (budgeted at 90% of the progress monitor),
    then scenes are extracted and persisted to the database and/or a CSV
    file depending on the pipeline configuration.
    """
    files = tuple(files)
    # Only videos without existing scene results need processing.
    remaining_video_paths = tuple(missing_scenes(files, pipeline))
    # Ensure dependencies are satisfied
    if not frame_features_exist(remaining_video_paths, pipeline):
        extract_frame_level_features(remaining_video_paths, pipeline, progress=progress.subtask(0.9))
        progress = progress.subtask(0.1)
    # Skip step if required results already exist
    if not remaining_video_paths:
        logger.info("Scene detection is not required. Skipping...")
        progress.complete()
        return
    logger.info("Starting scene detection for %s of %s files", len(remaining_video_paths), len(files))
    config = pipeline.config
    frame_features = pipeline.repr_storage.frame_level
    repr_keys = tuple(map(pipeline.reprkey, remaining_video_paths))
    # Do extract scenes
    scenes = extract_scenes(repr_keys, frame_features, min_scene_duration=config.proc.minimum_scene_duration)
    scene_metadata = pd.DataFrame(asdict(scenes))
    if config.database.use:
        result_storage = pipeline.result_storage
        result_storage.add_scenes(zip(scenes.video_filename, scenes.video_sha256, scenes.scene_duration_seconds))
    if config.save_files:
        # Also write a CSV copy of the scene metadata into the repr directory.
        scene_metadata_output_path = os.path.join(config.repr.directory, "scene_metadata.csv")
        scene_metadata.to_csv(scene_metadata_output_path)
        logger.info("Scene Metadata saved in: %s", scene_metadata_output_path)
    logger.info("Done scene detection.")
    progress.complete()
def find_spot(entry, list):
    """
    return index of entry in list
    """
    # NOTE: parameter name `list` shadows the builtin but is kept for
    # backward compatibility with keyword callers.
    spot_index = next(
        (index for index, spot in enumerate(list) if spot == entry), None)
    if spot_index is None:
        raise ValueError("could not find entry: "+ str(entry)+ " in list: "+ str(list))
    return spot_index
def add_texts_lower_right(image: numpy.array, texts: List[str]):
    """add texts into lower right corner of cv2 object

    The anchor position is derived from the longest line (assuming roughly
    18 px per character) and the number of lines (30 px per line).

    Args:
        image (numpy.array): cv2 image object
        texts (List[str]): texts
    """
    W, H = image.shape[1], image.shape[0]
    # max() with default=0 avoids a ValueError when `texts` is empty.
    str_len = max((len(text) for text in texts), default=0)
    pos = (W - (18 * str_len), H - 15 - (30 * len(texts)))
    add_texts(image, texts, pos)
def parse_file(path, game=None, path_relative_to_game=True, verbose=False):
    """
    Parse a single file and return a Tree.
    path, game:
        If game is None, path is a full path and the game is determined from that.
        Or game can be supplied, in which case path is a path relative to the game directory.
    """
    # Resolve the path/game pair unless the caller already passed a full path.
    # (The original `if not ...: pass / else:` inversion did the same thing.)
    if path_relative_to_game:
        path, game = pyradox.config.combine_path_and_game(path, game)
    encodings = game_encodings[game]
    lines = readlines(path, encodings)
    if verbose: print('Parsing file %s.' % path)
    token_data = lex(lines, path)
    return parse_tree(token_data, path)
def test_reactivate_unituser_as_unitadmin(module_client):
    """Reactivate unituser as Unit Admin"""
    user_auth = tests.UserAuth(tests.USER_CREDENTIALS[unituser["username"]])
    # Token request must fail while the account is deactivated.
    token_response = module_client.get(
        tests.DDSEndpoint.ENCRYPTED_TOKEN,
        auth=user_auth.as_tuple(),
        headers=tests.DEFAULT_HEADER,
    )
    assert token_response.status_code == http.HTTPStatus.UNAUTHORIZED
    # Reactivate the account as a Unit Admin.
    activation_response = module_client.post(
        tests.DDSEndpoint.USER_ACTIVATION,
        headers=tests.UserAuth(tests.USER_CREDENTIALS["unitadmin"]).token(module_client),
        json={**unituser, "action": "reactivate"},
    )
    assert activation_response.status_code == http.HTTPStatus.OK
    expected_message = (
        f"You successfully reactivated the account {unituser['username']} "
        f"({unituser['email']}, {unituser['role']})!"
    )
    assert expected_message in activation_response.json["message"]
    # Token request must now succeed.
    token_response = module_client.get(
        tests.DDSEndpoint.ENCRYPTED_TOKEN,
        auth=user_auth.as_tuple(),
        headers=tests.DEFAULT_HEADER,
    )
    assert token_response.status_code == http.HTTPStatus.OK
def test_DateEncoder():
    """Tests encoding.DateEncoder"""
    # Dummy frame with one date column and one numeric target column.
    df = pd.DataFrame()
    df['a'] = ['1999/09', '2001/01', '2040/12', '1987/05']
    df['y'] = np.random.randn(4)
    # Split 'a' into year and month columns.
    de = DateEncoder({'a': ('%Y/%m', ['year', 'month'])})
    dfo = de.fit_transform(df)
    assert dfo.shape[0] == 4
    assert dfo.shape[1] == 3
    assert 'a' not in dfo
    assert 'a_year' in dfo
    assert 'a_month' in dfo
    assert 'y' in dfo
    expected = [(1999, 9), (2001, 1), (2040, 12), (1987, 5)]
    for row, (year, month) in enumerate(expected):
        assert dfo.loc[row, 'a_year'] == year
        assert dfo.loc[row, 'a_month'] == month
def svn_repos_fs_commit_txn(*args):
    """svn_repos_fs_commit_txn(svn_repos_t * repos, svn_fs_txn_t * txn, apr_pool_t pool) -> svn_error_t"""
    # Thin SWIG wrapper: delegates directly to the C binding in _repos.
    return _repos.svn_repos_fs_commit_txn(*args)
async def run_command(
        cmd: list, cwd: str = None, log_path=None, environ=None) -> str:
    """
    Run a command.

    If 'log_path' is provided, stdout and stderr will be written to this
    location regardless of the end result.

    :param cmd: Command and its arguments as a list (executed without a shell)
    :param cwd: Working directory for the subprocess, or None to inherit
    :param log_path: Optional path; command line, stdout and stderr are appended
    :param environ: Environment mapping for the subprocess; defaults to a copy
        of the current process environment when None
    :raises subprocess.CalledProcessError: If the command returned an error
    :returns: Command stdout
    """
    # `is None` rather than truthiness: an explicitly-passed empty mapping
    # should run the command with an empty environment, not be replaced.
    if environ is None:
        environ = os.environ.copy()
    process = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        cwd=cwd,
        env=environ
    )
    stdout, stderr = await process.communicate()
    if log_path:
        # Append-mode binary log: command header, then raw stdout/stderr.
        async with aiofiles.open(log_path, "ab") as file_:
            now = datetime.datetime.now(datetime.timezone.utc)
            await file_.write(
                f"\n===COMMAND===\n{now.isoformat()}\n{cmd}".encode("utf-8")
            )
            await file_.write(b"\n===STDOUT===\n")
            await file_.write(stdout)
            await file_.write(b"\n===STDERR===\n")
            await file_.write(stderr)
    if process.returncode != 0:
        raise CalledProcessError(
            returncode=process.returncode,
            cmd=cmd,
            output=stdout,
            stderr=stderr
        )
    logger.debug(
        "Command %s completed.\nOUTPUT: %s\n",
        " ".join(cmd), stdout
    )
    return stdout.decode("utf-8")
def iter_dir(temp_dir, blast_db, query_name, iteration):
    """
    Get the work directory for the current iteration.

    We need to call this function in child processes so it cannot be in an
    object.
    """
    # Directory name: <db basename>_<query basename>_<zero-padded iteration>.
    stem = f'{basename(blast_db)}_{basename(query_name)}_{iteration:02d}'
    return join(temp_dir, stem)
def assert_allclose(
    actual: numpy.ndarray,
    desired: numpy.ndarray,
    atol: float,
    err_msg: Literal["OneClassSVM"],
):
    """
    usage.sklearn: 1
    """
    # Auto-generated API-usage stub (records one observed call signature from
    # sklearn); the ellipsis body is intentional.
    ...
def zoomSurface(src, zoomx, zoomy, smooth):
    """Zooms a surface with different x & y scaling factors.
    This function renders to a new surface, with optional anti-aliasing. If a
    zoom factor is negative, the image will be flipped along that axis. If the
    surface is not 8-bit or 32-bit RGBA/ABGR, it will be converted into a 32-bit
    RGBA format on the fly.
    Args:
        src (:obj:`SDL_Surface`): The surface to zoom.
        zoomx (float): The x-axis (horizontal) zoom factor.
        zoomy (float): The y-axis (vertical) zoom factor.
        smooth (int): If set to 1, the output image will be anti-aliased. If set
            to 0, no anti-aliasing will be performed. Must be either 0 or 1.
    Returns:
        :obj:`SDL_Surface`: A new output surface with zoom applied.
    """
    # Delegate to the SDL2_gfx binding registered in the module function table.
    return _funcs["zoomSurface"](src, zoomx, zoomy, smooth)
def array(*cols: Column) -> Column:
    """
    Return column of arrays
    """
    # Pack the columns into a struct, convert each row to a list, and alias
    # the result as "[name1, name2, ...]".
    alias_names = ", ".join([Column.getName(c) for c in cols])
    combined = struct(*cols).apply(list)
    return combined.alias(f"[{alias_names}]")
def addtodo(request):
    """
    :param request: HttpRequest object
    :return: None. Redirects to Webpage.
    if pk is not available defaults to id=0.
    """
    pk = request.POST.get("id_todo", 0)
    if pk:
        # Editing an existing item: bind the form to the stored instance.
        form = NewTodoForm(request.POST, instance=Todo.objects.get(pk=pk))
    else:
        # Creating a new item.
        form = NewTodoForm(request.POST)
    if form.is_valid():
        form.save()
    return redirect("todoindex")
def prepare_inputs(boxes, digits_occurrence):
    """
    Prepare detected digits for classification by the CNN model.

    :param boxes:
        2D list of 81 gray OpenCV images (2D numpy arrays)
    :param digits_occurrence:
        2D numpy array of booleans flagging which boxes contain a digit
    :return:
        None when no digit was found (or cropping failed); otherwise a 4D
        numpy array of shape (digits count, 28, 28, 1) holding cropped,
        scaled, mass-centered digits normalized to [0, 1]
    """
    # Count how many cells actually contain a digit.
    digits_count = sum(int(flag) for row in digits_occurrence for flag in row)
    if digits_count == 0:
        return None
    cropped_boxes_with_digits = get_cropped_boxes_with_digits(boxes, digits_occurrence)
    digits = get_cropped_digits(cropped_boxes_with_digits)
    if digits is None:
        return None
    # Scale, pad to 28x28 and center by mass, then add the channel axis.
    resize(digits)
    digits = add_margins(digits, 28, 28)
    center_using_mass_centers(digits)
    digits = digits.reshape((digits.shape[0], 28, 28, 1))
    return digits / 255
def evaluate_constants(const_arrays, expr):  # pragma: no cover
    """Convert constant arguments to cupy arrays, and perform any possible
    constant contractions.

    :param const_arrays: constant array arguments to move onto the GPU
    :param expr: contraction expression; called with evaluate_constants=True
        so it folds the constants into a partially-evaluated expression
    :return: whatever `expr` returns in constant-evaluation mode
    """
    return expr(*[to_cupy(x) for x in const_arrays], backend='cupy', evaluate_constants=True)
def setup_family(dompc, family, create_liege=True, create_vassals=True,
                 character=None, srank=None, region=None, liege=None,
                 num_vassals=2):
    """
    Create a ruler for `family` (retrieving or creating its house
    organization), optionally with an npc liege above it and npc vassals
    below it.

    :return: tuple of (family ruler, liege ruler or None, list of vassal rulers)
    """
    # Create an npc liege only when requested and none was supplied.
    if create_liege and not liege:
        liege = setup_ruler("Liege of %s" % family)
    ruler = setup_ruler(family, dompc, liege)
    vassals = []
    if create_vassals:
        vassals = setup_vassals(family, ruler, region, character, srank, num=num_vassals)
    return ruler, liege, vassals
def info_cmd(ctx):
    """
    Display information in the blockchain.
    """
    # CLI command stub: intentionally empty -- presumably subcommands or a
    # decorator (not visible here) provide the behavior.
    pass
async def async_setup_platform(hass, config, async_add_entites, discovery_info=None):
    """Set up Buspro switch devices."""
    # noinspection PyUnresolvedReferences
    from pybuspro.devices import Switch

    hdl = hass.data[DATA_BUSPRO].hdl
    devices = []
    for address, device_config in config[CONF_DEVICES].items():
        name = device_config[CONF_NAME]
        # Address format: "<subnet>.<device>.<channel>".
        parts = address.split('.')
        device_address = (int(parts[0]), int(parts[1]))
        channel_number = int(parts[2])
        _LOGGER.debug("Adding switch '{}' with address {} and channel number {}".format(name, device_address,
                                                                                       channel_number))
        devices.append(BusproSwitch(hass, Switch(hdl, device_address, channel_number, name)))
    async_add_entites(devices)
def normalize_line(line: dict, lang: str):
    """Apply normalization to a line of OCR.
    The normalization rules that are applied depend on the language in which
    the text is written. This normalization is necessary because Olive, unlike
    e.g. Mets, does not encode explicitly the presence/absence of whitespaces.
    :param dict line: A line of OCR text.
    :param str lang: Language of the text.
    :return: A new line of text.
    :rtype: dict
    """
    mw_tokens = [
        token
        for token in line["t"]
        if "qid" in token
    ]
    # apply normalization only to those lines that contain at least one
    # multi-word token (denoted by presence of `qid` field)
    if len(mw_tokens) > 0:
        line = merge_pseudo_tokens(line)
        line = normalize_hyphenation(line)

    tokens = line["t"]
    last = len(tokens) - 1
    for i, token in enumerate(tokens):
        # Drop helper fields that are no longer needed downstream.
        if "qid" not in token and "nf" in token:
            del token["nf"]
        if "qid" in token:
            del token["qid"]
        # The whitespace decision looks at the neighboring tokens' text;
        # None at either boundary. This collapses the original four-way
        # first/last branch ladder into two neighbor expressions.
        next_tx = tokens[i + 1]["tx"] if i != last else None
        prev_tx = tokens[i - 1]["tx"] if i != 0 else None
        insert_ws = insert_whitespace(token["tx"], next_tx, prev_tx, lang)
        if not insert_ws:
            # "gn" marks tokens glued to the next one (no whitespace).
            token["gn"] = True
    return line
def dump(self, option):
    """Dump information.

    Placeholder: not implemented yet, does nothing.

    Parameters
    ----------
    option : object
        Unused; presumably selects what to dump -- confirm when implemented.
    Returns
    -------
    None
    """
    pass
    return
def new(template,directory,url_template):
    """
    Generates the necessary files for a new PreTeXt project.
    Supports `pretext new book` (default) and `pretext new article`,
    or generating from URL with `pretext new --url-template [URL]`.
    """
    directory_fullpath = os.path.abspath(directory)
    existing_project = utils.project_path(directory_fullpath)
    if existing_project is not None:
        log.warning(f"A project already exists in `{existing_project}`.")
        log.warning(f"No new project will be generated.")
        return
    log.info(f"Generating new PreTeXt project in `{directory_fullpath}` using `{template}` template.")
    # Obtain the template archive either from the given URL or from the
    # bundled static templates.
    if url_template is not None:
        response = requests.get(url_template)
        archive = zipfile.ZipFile(io.BytesIO(response.content))
    else:
        archive = zipfile.ZipFile(static.path('templates', f'{template}.zip'))
    # Locate the (first) project.ptx inside the archive; its directory is
    # used as the root of the generated project.
    member_names = archive.namelist()
    member_basenames = [os.path.basename(member) for member in member_names]
    project_ptx_path = member_names[member_basenames.index('project.ptx')]
    project_dir_path = os.path.dirname(project_ptx_path)
    with tempfile.TemporaryDirectory() as tmpdirname:
        wanted = [member for member in member_names if member.startswith(project_dir_path)]
        for member in wanted:
            archive.extract(member, path=tmpdirname)
        shutil.copytree(os.path.join(tmpdirname, project_dir_path), directory, dirs_exist_ok=True)
    log.info(f"Success! Open `{directory_fullpath}/source/main.ptx` to edit your document")
    log.info(f"Then try to `pretext build` and `pretext view` from within `{directory_fullpath}`.")
def attribute_summary(attribute_value, item_type, limit=None):
    """Summarizes the information in fields attributes where content is
    written as an array of arrays like tag_cloud, items, etc.
    """
    if attribute_value is None:
        return None
    items = ["%s (%s)" % (name, count) for name, count in attribute_value]
    total = len(items)
    # Show everything when no limit was given or it exceeds the item count.
    shown = total if limit is None or limit > total else limit
    return "%s %s: %s" % (total, type_singular(item_type, total == 1),
                          ", ".join(items[:shown]))
def create_rgb_vrt(outname, infiles, overviews, overview_resampling):
    """
    Creation of the color composite VRT file.

    Builds a 3-band VRT: co-pol, cross-pol, and a third derived band that
    multiplies the two (wrapped in a nested inverse-VRT), then sets RGB color
    interpretation and overview levels.

    Parameters
    ----------
    outname: str
        Full path to the output VRT file.
    infiles: list[str]
        A list of paths pointing to the linear scaled measurement backscatter files.
    overviews: list[int]
        Internal overview levels to be defined for the created VRT file.
    overview_resampling: str
        Resampling method applied to overview pyramids.
    """
    print(outname)
    # make sure order is right and co-polarization (VV or HH) is first
    pols = [re.search('[hv]{2}', os.path.basename(f)).group() for f in infiles]
    if pols[1] in ['vv', 'hh']:
        infiles.reverse()
        pols.reverse()
    # format overview levels: "[2, 4, 8]" -> "2 4 8"
    ov = str(overviews)
    for x in ['[', ']', ',']:
        ov = ov.replace(x, '')
    # create VRT file and change its content
    gdalbuildvrt(src=infiles, dst=outname, options={'separate': True})
    tree = etree.parse(outname)
    root = tree.getroot()
    srs = tree.find('SRS').text
    geotrans = tree.find('GeoTransform').text
    bands = tree.findall('VRTRasterBand')
    # Third band: derived pixel-function band multiplying bands 1 and 2.
    new_band = etree.SubElement(root, 'VRTRasterBand',
                                attrib={'dataType': 'Float32', 'band': '3', 'subClass': 'VRTDerivedRasterBand'})
    new_band.append(deepcopy(bands[0].find('NoDataValue')))
    pxfun_type = etree.SubElement(new_band, 'PixelFunctionType')
    pxfun_type.text = 'mul'
    new_band.append(deepcopy(bands[0].find('ComplexSource')))
    new_band.append(deepcopy(bands[1].find('ComplexSource')))
    src = new_band.findall('ComplexSource')[1]
    fname = src.find('SourceFilename')
    fname_old = fname.text
    nodata = src.find('NODATA').text
    src_attr = src.find('SourceProperties').attrib
    # Replace the second source with a nested in-memory VRT that applies the
    # 'inv' pixel function, so band 3 = band1 * inv(band2).
    fname.text = etree.CDATA("""
    <VRTDataset rasterXSize="{rasterxsize}" rasterYSize="{rasterysize}">
        <SRS dataAxisToSRSAxisMapping="1,2">{srs}</SRS>
        <GeoTransform>{geotrans}</GeoTransform>
        <VRTRasterBand dataType="{dtype}" band="1" subClass="VRTDerivedRasterBand">
            <PixelFunctionType>{px_fun}</PixelFunctionType>
            <ComplexSource>
                <SourceFilename relativeToVRT="1">{fname}</SourceFilename>
                <SourceBand>1</SourceBand>
                <SourceProperties RasterXSize="{rasterxsize}" RasterYSize="{rasterysize}" DataType="{dtype}" BlockXSize="{blockxsize}" BlockYSize="{blockysize}"/>
                <SrcRect xOff="0" yOff="0" xSize="{rasterxsize}" ySize="{rasterysize}"/>
                <DstRect xOff="0" yOff="0" xSize="{rasterxsize}" ySize="{rasterysize}"/>
                <NODATA>{nodata}</NODATA>
            </ComplexSource>
        </VRTRasterBand>
        <OverviewList resampling="{ov_resampling}">{ov}</OverviewList>
    </VRTDataset>
    """.format(rasterxsize=src_attr['RasterXSize'], rasterysize=src_attr['RasterYSize'], srs=srs, geotrans=geotrans,
               dtype=src_attr['DataType'], px_fun='inv', fname=fname_old,
               blockxsize=src_attr['BlockXSize'], blockysize=src_attr['BlockYSize'],
               nodata=nodata, ov_resampling=overview_resampling.lower(), ov=ov))
    bands = tree.findall('VRTRasterBand')
    # Assign RGB color interpretation in band order.
    for band, col in zip(bands, ['Red', 'Green', 'Blue']):
        color = etree.Element('ColorInterp')
        color.text = col
        band.insert(0, color)
    for i, band in enumerate(bands):
        if i in [0, 1]:
            band.remove(band.find('NoDataValue'))
    ovr = etree.SubElement(root, 'OverviewList', attrib={'resampling': overview_resampling.lower()})
    ovr.text = ov
    etree.indent(root)
    tree.write(outname, pretty_print=True, xml_declaration=False, encoding='utf-8')
def add_profile(db, profile, xon, xoff, size, dynamic_th, pool):
    """Add or modify a buffer profile"""
    config_db = db.cfgdb
    ctx = click.get_current_context()
    # Refuse to overwrite an existing profile entry.
    if config_db.get_entry('BUFFER_PROFILE', profile):
        ctx.fail("Profile {} already exist".format(profile))
    update_profile(ctx, config_db, profile, xon, xoff, size, dynamic_th, pool)
def markdown_inside_fence(contents, fence="yaml"):
    """
    Iterate a Markdown file's lines and yield only those inside the chosen
    fenced block type, together with their 0-based index into `contents`.

    Lines outside the block, and the block's own start/end markers, are
    skipped.

    # Parameters
    contents:list(str)
        - Every line of the Markdown file (may include YAML blocks and code
          fences).
    fence:str
        - Which block type to examine: "yaml" or "code". Default "yaml".

    # Return
    Tuples of (index, line), e.g. (12, "UUID: 12345...").
    """
    if contents is None:
        return  # effectively raising a StopIteration

    tracker = MDFence()
    for index, line in enumerate(contents):
        # in_block() also advances the fence-tracking state, so it must be
        # called for every line.
        if not tracker.in_block(line):
            continue
        if not tracker.in_block_type[fence]:
            continue
        # Skip the fence marker lines themselves.
        if tracker.yaml_rule.match(line):
            continue
        yield index, line
def generate_groups(message: message.Message, fields: Iterable[str],
                    max_repeated_group: int, **kwargs) -> Iterator[str]:
  """Yields possible group keys given the message and field.
  This function will generate group keys based on the fields provided in
  `fields`. Each field in `fields` may be repeated in `message`; if this is the
  case, the group keys will come from all possible combinations of the field
  values in the repeated field up to `max_repeated_group`.
  If more than one field is provided in `fields`, the same operation will be
  applied in the given order to create a nested group key until all items in
  `fields` are processed. Basic implementation idea is as follows:
  1. Extract field as an iterable.
  2. Get all possible combination of the field, up to `max_repeated_group`.
  3. Repeat steps 1 and 2 for the next field and combine it with all
     combinations we have for the current field.
  4. Repeat step 3 until we go through each field in `fields`.
  Implementation-wise, we memoize step 2.
  Args:
    message: The message that contains the desired field to get the keys.
    fields: The fields to extract the fields to generate the group keys.
      NOTE(review): indexed via fields[0]/fields[1:] below, so despite the
      Iterable annotation this must actually be a sequence -- confirm.
    max_repeated_group: The maximum group size; must be at least 1.
    **kwargs: For internal use.
  Yields:
    Iterables of group keys generated from the message and field.
  """
  # Internal recursion state passed via kwargs: groups accumulated from the
  # fields already processed, and the shared combinations memo.
  if 'parent_groups' in kwargs:
    parent_groups = tuple(kwargs['parent_groups'])
  else:
    parent_groups = None
  if 'combinations_cache' in kwargs:
    combinations_cache = kwargs['combinations_cache']
  else:
    combinations_cache = {}
  extracted_fields = tuple(extract_field_as_iterable(message, fields[0]))
  for group_size in range(
      1, 1 + min((max_repeated_group or 1), len(extracted_fields))):
    # Memoize itertools.combinations per (values, size) pair.
    combinations_key = (extracted_fields, group_size)
    if combinations_key not in combinations_cache:
      combinations_cache[combinations_key] = tuple(
          itertools.combinations(extracted_fields, group_size))
    groups = combinations_cache[combinations_key]
    if len(fields) == 1:
      # Base case: combine this field's groups with the accumulated parents.
      yield from _generate_combined_groups(groups, parent_groups)
    else:
      # Recurse on the remaining fields, nesting the current combinations.
      yield from generate_groups(
          message,
          fields[1:],
          max_repeated_group,
          parent_groups=_generate_combined_groups(groups, parent_groups),
          combinations_cache=combinations_cache)
def test_bfs_returns_false():
    """Test that the breadth first search method returns false."""
    from CTCI_4_1 import is_route_bfs
    # 'd' has no incoming edges, so no route exists from 'a' to 'd'.
    adjacency = {'a': ['b', 'c'],
                 'b': ['c'],
                 'c': ['a', 'b'],
                 'd': ['a']}
    assert is_route_bfs(adjacency, 'a', 'd') is False
def root():
    """Base view."""
    # Serialize the module-level UTC value as a JSON string literal.
    return json.dumps(str(utc_value))
def plc_read_db(plc_client, db_no, entry_offset, entry_len):
    """
    Read specified amount of bytes at offset from a DB on a PLC

    Exits the process on read failure; otherwise returns the bytes
    interpreted as a big-endian 32-bit float.
    """
    try:
        db_var = plc_client.db_read(db_no, entry_offset, entry_len)
    except Exception as err:
        # Fatal: report and terminate (Python 2 print statement).
        print "[-] DB read error:", err
        sys.exit(1)
    # NOTE(review): hexlify(...).decode('hex') round-trips back to the
    # original bytes (Python 2 only); unpacking db_var directly looks
    # equivalent -- confirm before simplifying.
    db_val = struct.unpack('!f', binascii.hexlify(db_var).decode('hex'))[0]
    return db_val
def test_cli_parse_args_empty(mocker, change_curdir_fixtures):
    """When no arg is given we should find the closest tackle file."""
    update_source_mock = mocker.patch(
        "tackle.main.update_source", autospec=True, return_value={})
    main([])
    assert update_source_mock.called
    # First positional argument of the patched call is the Context.
    context_arg = update_source_mock.call_args[0][0]
    assert isinstance(context_arg, Context)
    assert context_arg.dict()['input_string'] == os.path.abspath('.tackle.yaml')
def reload_subs(verbose=True):
    """ Reloads ibeis and submodules """
    import_subs()
    rrr(verbose=verbose)
    # Each getattr guard tolerates submodules that do not define rrr/reload_subs.
    getattr(constants, 'rrr', lambda verbose: None)(verbose=verbose)
    getattr(main_module, 'rrr', lambda verbose: None)(verbose=verbose)
    getattr(params, 'rrr', lambda verbose: None)(verbose=verbose)
    getattr(other, 'reload_subs', lambda verbose: None)(verbose=verbose)
    getattr(dbio, 'reload_subs', lambda verbose: None)(verbose=verbose)
    getattr(algo, 'reload_subs', lambda verbose: None)(verbose=verbose)
    getattr(control, 'reload_subs', lambda verbose: None)(verbose=verbose)
    getattr(viz, 'reload_subs', lambda: None)()
    getattr(gui, 'reload_subs', lambda verbose: None)(verbose=verbose)
    # NOTE(review): algo and viz are reloaded a second time below -- possibly
    # intentional to settle cross-module references; confirm before removing.
    getattr(algo, 'reload_subs', lambda verbose: None)(verbose=verbose)
    getattr(viz, 'reload_subs', lambda verbose: None)(verbose=verbose)
    getattr(web, 'reload_subs', lambda verbose: None)(verbose=verbose)
    rrr(verbose=verbose)
def validate_overlap_for(doc, doctype, fieldname, value=None):
	"""Checks overlap for specified field.
	:param fieldname: Checks Overlap for this field
	"""
	conflicting = get_overlap_for(doc, doctype, fieldname, value)
	if not conflicting:
		return
	label = doc.meta.get_label(fieldname) if not value else fieldname
	frappe.throw(_("This {0} conflicts with {1} for {2} {3}").format(
		doc.doctype, conflicting.name, label, value or doc.get(fieldname)), OverlapError)
def __fail(copied_files):
    """
    Report failure and remove any files copied so far.

    :param copied_files: files handed to __cleanup for removal
    :return: None
    """
    for message in ('An error has occurred.', 'Cleaning up copied files...'):
        print(message)
    __cleanup(copied_files)
def getPath(path=__file__):
    """
    Get standard path from path. It supports ~ as home directory.

    :param path: it can be to a folder or file. Default is __file__ or module's path.
        If the path's final component has an extension it is treated as a
        file and its containing folder is selected.
    :return: dirname (path to a folder)

    .. note:: For file paths this equals os.path.dirname(os.path.abspath(path)).
    """
    if path.startswith("~"):
        path = os.path.expanduser("~") + path[1:]
    # Inspect the extension of the final component only: the previous
    # `"." in path` test misclassified folders whose parent directories
    # contained a dot (e.g. "/home/user.name/dir").
    if os.path.splitext(os.path.basename(path))[1]:
        return os.path.dirname(os.path.abspath(path))  # just use os
    return os.path.abspath(path)
def rank_adjust(t, c=None):
    """
    Mean-order-number rank adjustment for right-censored data.

    Currently limited to only Mean Order Number; room to expand to Modal
    and Median Order Numbers. For the derivation see:
    http://reliawiki.org/index.php/Parameter_Estimation
    Assumes good input. Censored entries get NaN ranks.
    """
    N = len(t)
    # No censoring vector means every observation is a failure.
    if c is None:
        c = np.zeros(N)
    ranks = np.zeros(N)
    # "Previous Mean Order Number" carried across successive failures.
    prev_mon = 0
    for idx in range(N):
        flag = c[idx]
        if flag == 0:
            # Number of items before the present suspended set.
            items_before = N - idx
            ranks[idx] = prev_mon + (N + 1 - prev_mon) / (1 + items_before)
            prev_mon = ranks[idx]
        elif flag == 1:
            ranks[idx] = np.nan
        else:
            raise ValueError("Censoring flag must be 0 or 1 with rank_adjust")
    return ranks
def print_AF(newAttacksFrom, fileformat):
    """
    Invoked when the user uses stdout as a wanted output.
    This method prints the AF resulting from the reductions asked on the terminal.
    dict*str --> None

    :param newAttacksFrom: mapping attacker -> list of attacked arguments
    :param fileformat: output format extension, "tgf" or "apx"
    :raises UnsupportedFormatException: for any other format
    """
    assert type(newAttacksFrom) is dict, "The first argument of this method must be a dictionary containing the attacks in the AF, needed to be printed. (type Dict)"
    assert type(fileformat) is str, "The second argument of this method must be the extension of the outputFile. (type str)"
    # NOTE(review): `args` is read from module scope (the argument set of the
    # framework, presumably) -- confirm it is defined before this is called.
    if fileformat == "tgf":
        for argument in args:
            print(argument)
        print("#")
        for attacker in newAttacksFrom:
            for target in newAttacksFrom[attacker]:
                print(' '.join([attacker, target]))
    elif fileformat == "apx":
        for argument in args:
            print("arg({})".format(argument))
        for attacker in newAttacksFrom:
            for target in newAttacksFrom[attacker]:
                print("att({},{})".format(attacker, target))
    else:
        print("Unsupported format ", fileformat, ", supported formats are : ")
        print_formats()
        raise UnsupportedFormatException("Unsupported format : ", fileformat)
def login_captcha(username, password, sid):
    """
    bilibili login with captcha.
    depend on captcha recognize service, please do not use this as first choice.
    Args:
        username: plain text username for bilibili.
        password: plain text password for bilibili.
        sid: session id
    Returns:
        code: login response code (0: success, -105: captcha error, ...).
        access_token: token for further operation.
        refresh_token: token for refresh access_token.
        sid: session id.
        mid: member id.
        expires_in: access token expire time (30 days)
    """
    # Fetch a captcha image for this session and run it through the
    # external recognition service.
    jsessionid, captcha_img = get_capcha(sid)
    captcha_str = recognize_captcha(captcha_img)
    # Obtain the RSA public key + hash salt used to encrypt the password.
    hash, pubkey, sid = get_key(sid, jsessionid)
    encrypted_password = cipher.encrypt_login_password(password, hash, pubkey)
    url_encoded_username = parse.quote_plus(username)
    url_encoded_password = parse.quote_plus(encrypted_password)
    # The request signature is computed over the URL-encoded values.
    post_data = {
        'appkey': APPKEY,
        'captcha': captcha_str,
        'password': url_encoded_password,
        'platform': "pc",
        'ts': str(int(datetime.now().timestamp())),
        'username': url_encoded_username
    }
    post_data['sign'] = cipher.sign_dict(post_data, APPSECRET)
    # avoid multiple url parse
    post_data['username'] = username
    post_data['password'] = encrypted_password
    post_data['captcha'] = captcha_str
    headers = {
        'Connection': 'keep-alive',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'User-Agent': '',
        'Accept-Encoding': 'gzip,deflate',
    }
    r = requests.post(
        "https://passport.bilibili.com/api/oauth2/login",
        headers=headers,
        data=post_data,
        cookies={
            'JSESSIONID': jsessionid,
            'sid': sid
        }
    )
    response = r.json()
    if response['code'] == 0:
        login_data = response['data']
        return response['code'], login_data['access_token'], login_data['refresh_token'], sid, login_data['mid'], login_data["expires_in"]
    else:
        # Failure: propagate the error code with empty token fields.
        return response['code'], None, None, sid, None, None
def ml_transitions(game, attach=True, verbose=False):
    """
    dataframe to directional line movement arrays
    """
    # Classify the transition between consecutive (away, home) moneylines.
    transition_classes = []
    previous_lines = [None, None]
    for _, row in game.iterrows():
        current_lines = list(row[["a_ml", "h_ml"]])
        transition_classes.append(
            analyze.classify_transition(previous_lines, current_lines))
        previous_lines = current_lines
    if attach:
        trans_df = pd.DataFrame(transition_classes).add_prefix("trans_class_")
        ret = pd.concat([game, trans_df], axis=1)
    else:
        ret = transition_classes
    if verbose:
        strings = dict(enumerate(bm.TRANSITION_CLASS_STRINGS))
        for idx, trans in enumerate(transition_classes):
            print(f"{idx}: {strings[np.argmax(trans)]}")
    return ret
def initialize_leftcorners(grammar, trace=None):
    """
    Calculate the left-corner tables from the grammar.
    The tables are stored in grammar['leftcorner'] and grammar['lcword'].
    It requires that grammar['topdown'], grammar['sequences'] and
    grammar['emptycats'] are already calculated.
    """
    td_grammar = grammar['topdown']
    sequences = grammar['sequences']
    emptycats = grammar['emptycats']
    leftcorner_words = grammar['lcwords'] = defaultdict(set)
    leftcorner = grammar['leftcorner'] = defaultdict(set)
    if trace: ctr = TracedCounter("Leftcorners:")
    # Pass 1: record the direct (one-step) left corners of every RHS.
    # Scanning continues past a symbol only while it can derive the empty
    # string (emptycats), since then the next symbol is also a left corner.
    leftcorner_parents = defaultdict(set)
    for tdrules in td_grammar.itervalues():
        for rule in tdrules:
            rhss = sequences[rule.fun]
            for lbl, rhs in rhss.iteritems():
                parent = Symbol(rule.cat, lbl)
                for sym in rhs:
                    if trace: ctr.inc()
                    if type(sym) is not RHSSymbol:
                        # Terminal word: goes into the word table.
                        leftcorner_words[sym].add(parent)
                        break
                    sym = sym.toSymbol(rule.args)
                    leftcorner_parents[sym].add(parent)
                    if sym not in emptycats:
                        break
    # Pass 2: transitive closure -- propagate ancestors down to each symbol.
    for sym, parents in leftcorner_parents.iteritems():
        agenda = list(parents)
        while agenda:
            parent = agenda.pop()
            if parent not in leftcorner[sym]:
                leftcorner[sym].add(parent)
                if trace: ctr.inc()
                if leftcorner[parent]:
                    # Parent's closure is already known: reuse it wholesale.
                    leftcorner[sym].update(leftcorner[parent])
                elif parent in leftcorner_parents:
                    agenda.extend(leftcorner_parents[parent])
    # Every category symbol is trivially its own left corner.
    for (cat, lbls) in grammar['catlabels'].iteritems():
        for lbl in lbls:
            sym = Symbol(cat, lbl)
            leftcorner[sym].add(sym)
            if trace: ctr.inc()
    if trace: ctr.finalize()
    # Sanity checks on the table contents.
    assert all(type(first) is Symbol and type(parent) is Symbol
               for first, parents in grammar['leftcorner'].iteritems()
               for parent in parents)
    assert all(not isinstance(word, (tuple, list, set, dict)) and type(parent) is Symbol
               for word, parents in grammar['lcwords'].iteritems()
               for parent in parents)
def plugin_uninstall(plugin, flags=None, kvflags=None):
    """
    Uninstall a Helm plugin.

    Return True if succeed, else the error message.

    plugin
        (string) The plugin to uninstall.

    flags
        (list) Flags in argument of the command without values. ex: ['help', '--help']

    kvflags
        (dict) Flags in argument of the command with values. ex: {'v': 2, '--v': 4}

    CLI Example:

    .. code-block:: bash

        salt '*' helm.plugin_uninstall PLUGIN
    """
    uninstall_cmd = ["plugin", "uninstall", plugin]
    return _exec_true_return(commands=uninstall_cmd, flags=flags, kvflags=kvflags)
def test_consistency_GPUA_parallel():
    """
    Verify that the random numbers generated by GPUA_mrg_uniform, in
    parallel, are the same as the reference (Java) implementation by
    L'Ecuyer et al.
    """
    # Imported lazily so the GPU backend is only required when the test runs.
    from theano.sandbox.gpuarray.tests.test_basic_ops import \
        mode_with_gpu as mode
    from theano.sandbox.gpuarray.type import gpuarray_shared_constructor
    seed = 12345
    n_samples = 5
    n_streams = 12
    n_substreams = 7  # 7 samples will be drawn in parallel
    samples = []
    # MRG state: six 32-bit integers, all initialised to the seed.
    curr_rstate = numpy.array([seed] * 6, dtype='int32')
    for i in range(n_streams):
        stream_samples = []
        # One substream state per parallel draw, each obtained by
        # skipping ahead 2**72 steps from the previous one.
        rstate = [curr_rstate.copy()]
        for j in range(1, n_substreams):
            rstate.append(rng_mrg.ff_2p72(rstate[-1]))
        rstate = numpy.asarray(rstate)
        rstate = gpuarray_shared_constructor(rstate)
        new_rstate, sample = rng_mrg.GPUA_mrg_uniform.new(rstate, ndim=None,
                                                          dtype='float32',
                                                          size=(n_substreams,))
        rstate.default_update = new_rstate
        # Not really necessary, just mimicking
        # rng_mrg.MRG_RandomStreams' behavior
        sample.rstate = rstate
        sample.update = (rstate, new_rstate)
        # We need the sample back in the main memory
        cpu_sample = tensor.as_tensor_variable(sample)
        f = theano.function([], cpu_sample, mode=mode)
        for k in range(n_samples):
            s = f()
            stream_samples.append(s)
        # Transpose so values are grouped per-substream, matching the
        # ordering of the reference samples.
        samples.append(numpy.array(stream_samples).T.flatten())
        # next stream
        curr_rstate = rng_mrg.ff_2p134(curr_rstate)
    samples = numpy.array(samples).flatten()
    # java_samples is a module-level reference array (defined elsewhere).
    assert(numpy.allclose(samples, java_samples))
def geom_to_tuple(geom):
    """
    Takes a lat/long point (or geom) from KCMO style csvs.
    Returns (lat, long) tuple

    The input looks like ``"POINT (39.0997 -94.5786)"``: the 6-character
    ``"POINT "`` prefix is stripped, then the two coordinates are parsed.

    Args:
        geom (str): geometry string from the CSV.

    Returns:
        tuple[float, float]: the parsed coordinates.
    """
    # Strip the "POINT " prefix and surrounding parentheses, then parse the
    # whitespace-separated numbers.  This replaces the previous eval()-based
    # parsing, which executed arbitrary text coming from the CSV file.
    body = geom[6:].strip().lstrip("(").rstrip(")")
    return tuple(float(coord) for coord in body.split())
def b_s_Poole(s, V_max, z, halo_type, bias_type):
    """ This function expresses Equation (2) of Poole et al (2014)
        and fetches the parameters needed to compute it.

        Args:
            s (numpy.ndarray)  : scale values
            V_max (float)      : halo maximum circular velocity
            z (float)          : redshift of interest
            halo_type (str)    : halo type
            bias_type (str)    : bias type

        Returns:
            A list containing two arrays with the values of `b_s`
            and `b_x` at each scale
    """
    # Fetch the fit parameters for this halo/bias combination.
    b_x, s_o, V_SF = set_b_s_Poole_params(V_max, z, halo_type, bias_type)

    # The sign inside the square root flips at the threshold velocity V_SF;
    # velocities are normalised to 220 km/s.
    if V_max / 220. < V_SF:
        b_s = b_x * (1. - (s_o / s)) ** 0.5
    else:
        b_s = b_x * (1. + (s_o / s)) ** 0.5
    return [b_s, b_x]
def find_places_location():
    """Finds the location of the largest places.sqlite file

    Scans every user's Firefox profile directory under /Users (macOS
    layout) and returns the path of the biggest places.sqlite found,
    or "" if none exists.
    """
    best_path = ""
    best_size = 0
    for user in listdir('/Users'):
        profiles_dir = '/Users/' + user + '/Library/Application Support/Firefox/Profiles/'
        if not path.exists(profiles_dir):
            continue
        for profile in listdir(profiles_dir):
            candidate = profiles_dir + profile + "/places.sqlite"
            if path.exists(candidate):
                candidate_size = path.getsize(candidate)
                if candidate_size > best_size:
                    best_size = candidate_size
                    best_path = candidate
    return best_path
def argumentParser(listOfArguments):
    """Parses arguments

    Maps each entry of *listOfArguments* (flag -> result key) to a boolean
    telling whether the flag is present in sys.argv.
    """
    return {
        listOfArguments[flag]: flag in sys.argv
        for flag in listOfArguments
    }
def _install_one(
    repo_url, branch, destination, commit='', patches=None,
    exclude_modules=None, include_modules=None, base=False, work_directory=''
):
    """ Install a third party odoo add-on

    :param string repo_url: url of the repo that contains the patch.
    :param string branch: name of the branch to checkout.
    :param string destination: the folder where the add-on should end up at.
    :param string commit: Optional commit rev to checkout to. If mentioned, that take over the branch
    :param string work_directory: the path to the directory of the yaml file.
    :param list patches: Optional list of patches to apply.
    """
    # Build patch objects: a dict with a 'file' key becomes a FilePatch,
    # anything else is forwarded to Patch as keyword arguments.
    prepared = []
    for patch in (patches or []):
        if 'file' in patch:
            prepared.append(
                core.FilePatch(file=patch['file'], work_directory=work_directory))
        else:
            prepared.append(core.Patch(**patch))
    addon_cls = core.Base if base else core.Addon
    addon_cls(
        repo_url, branch, commit=commit, patches=prepared,
        exclude_modules=exclude_modules, include_modules=include_modules,
    ).install(destination)
def test_create_board_for_other_organization(api_client_factory, board_data):
    """A resource outside the caller's control (i.e. organization) must not be
    found, so the API should answer 400."""
    client = api_client_factory()
    response = client.post(reverse("api:board-list"), board_data)
    expected_status = 400  # bad request (body)
    assert response.status_code == expected_status, (
        "Organization inside body should not be found and raise ValidationError (400)"
    )
def rasterize_polygon(poly_as_array, shape, geo_ref):
    """
    Return a boolean numpy mask with 1 for cells within polygon.
    Args:
        poly_as_array: A polygon as returned by ogrpoly2array (list of numpy arrays / rings)
        shape: Shape (nrows, ncols) of output array
        geo_ref: GDAL style georeference of grid.
    Returns:
        Numpy boolean 2d array.
    """
    # Cell-center coordinates for every grid cell, then a point-in-polygon
    # test, reshaped back onto the grid.
    cell_centers = mesh_as_points(shape, geo_ref)
    inside = points_in_polygon(cell_centers, poly_as_array)
    return inside.reshape(shape)
def _model_columns(ins):
    """ Get columns info

        :type ins: sqlalchemy.orm.mapper.Mapper
        :rtype: list[SaColumnDoc]
    """
    column_docs = []
    for attr in ins.column_attrs:
        # Skip protected attributes (leading underscore).
        if attr.key.startswith('_'):
            continue
        # Column type; stringify, compiling with a dialect when required.
        attr_type = attr.columns[0].type  # FIXME: support multi-column properties
        try:
            type_str = str(attr_type)
        except sa_exc.UnsupportedCompilationError:
            # Got to compile it using a dialect
            # TODO: support other dialects in addition to Postgres
            type_str = attr_type.compile(dialect=postgresql.dialect())
        except sa_exc.CompileError:
            type_str = '?'
        column_docs.append(SaColumnDoc(
            key=attr.key,
            doc=attr.doc or '',
            type=type_str,
            null=_is_attribute_nullable(attr),
        ))
    return column_docs
def get_reg_part(reg_doc):
    """
    Depending on source, the CFR part number exists in different places. Fetch
    it, wherever it is.
    """
    candidates = []
    # FR notice
    candidates.extend(
        node.attrib['PART'] for node in reg_doc.xpath('//REGTEXT'))
    # e-CFR XML, under PART/EAR
    candidates.extend(
        node.text.replace('Pt.', '').strip()
        for node in reg_doc.xpath('//PART/EAR')
        if 'Pt.' in node.text)
    # e-CFR XML, under FDSYS/HEADING
    candidates.extend(
        node.text.replace('PART', '').strip()
        for node in reg_doc.xpath('//FDSYS/HEADING')
        if 'PART' in node.text)
    # e-CFR XML, under FDSYS/GRANULENUM
    candidates.extend(
        node.text.strip() for node in reg_doc.xpath('//FDSYS/GRANULENUM'))
    # Keep non-blank candidates; first match wins (None when nothing found).
    candidates = [part for part in candidates if part.strip()]
    if candidates:
        return candidates[0]
def load(vocab_name, corpus_reader, sconn):
"""
Given a solr connection, load terms from a Getty vocab
"""
idx = 0
bulk_size = 1000
docs = []
for term in corpus_reader.terms():
idx += 1
term_text = term.findtext('Term_Text'),
docs.append({
'id': term.findtext('Term_ID'),
'vocab_name_s': vocab_name,
'term_text_s': term_text,
'term_text_t': term_text,
'term_type_s': term.findtext('Term_Type'),
'historic_flag_s': term.findtext('Historic_Flag'),
'display_order_i': term.findtext('Display_Order'),
'preferred_term_b': term.tag == 'Preferred_Term'
})
if idx % bulk_size == 0:
sconn.add_many(docs)
sconn.commit()
docs = []
print ' %s: Processed %d' % (vocab_name, idx)
# perform a final commit
sconn.add_many(docs)
sconn.commit()
print 'Successfully imported %d records from %s' % (idx, vocab_name) | 30,384 |
def lazy_validate(request_body_schema, resource_to_validate):
    """A non-decorator way to validate a request, to be used inline.

    :param request_body_schema: a schema to validate the resource reference
    :param resource_to_validate: dictionary to validate
    :raises keystone.exception.ValidationError: if `resource_to_validate` is
                                                None. (see wrapper method below).
    :raises TypeError: at decoration time when the expected resource to
                       validate isn't found in the decorated method's
                       signature
    """
    validators.SchemaValidator(request_body_schema).validate(resource_to_validate)
def load_configuration(module: str, configs_path=None) -> dict:
    """
    Load the configuration and return the dict of the configuration loaded

    :param module: The module name to load the configuration.
    :type module: str
    :param configs_path: path where to check configs. Default `configs/modules/`
    :type configs_path: str
    :return: Dict of the configuration if present.
    :rtype: dict
    :raise FileNotFoundError: If configuration file not found
    """
    Validator().string(module)
    module = module.lower()
    if configs_path:
        # Explicit path given: the config must live exactly there.
        module_path = Path(f"{configs_path}{module}.json")
        if not module_path.exists():
            raise FileNotFoundError(
                f"Couldn't find the configuration file of the module {module_path.absolute()}"
            )
    else:
        # Default lookup: server configs first, then android configs.
        search_order = (
            Path(f"configs{sep}modules{sep}server{sep}{module}.json"),
            Path(f"configs{sep}modules{sep}android{sep}{module}.json"),
        )
        module_path = next((p for p in search_order if p.exists()), None)
        if module_path is None:
            raise FileNotFoundError(
                f"Couldn't find the configuration file of the module {module}.json"
            )
    with module_path.open() as mod_file:
        return json.load(mod_file)
def startup(update: Update) -> None:
    """Send a message when the bot is started
    to the users listed in the user_ids."""
    # First line of `whoami` output identifies the account running the bot.
    identity = ps_command('whoami')[0].split('\n')[0]
    message = f"{identity} -> Online"
    for chat_id in chat_ids:
        update.bot.sendMessage(text=message, chat_id=chat_id)
def main():
    """
    Hand the TermeApp class over to the controller.

    :rtype: object
    """
    app = control.TermeApp()
    # The start point is the StartScreen / main menu.
    app.run()
async def process_resource_event(
        lifecycle: lifecycles.LifeCycleFn,
        registry: registries.OperatorRegistry,
        memories: containers.ResourceMemories,
        resource: resources.Resource,
        event: bodies.Event,
        replenished: asyncio.Event,
        event_queue: posting.K8sEventQueue,
) -> None:
    """
    Handle a single custom object low-level watch-event.

    Convert the low-level events, as provided by the watching/queueing tasks,
    to the high-level causes, and then call the cause-handling logic.

    All the internally provoked changes are intercepted, do not create causes,
    and therefore do not call the handling logic.

    :param lifecycle: strategy that selects which handlers run and in what order.
    :param registry: registered handlers for all resources.
    :param memories: per-object state remembered between watch-events.
    :param resource: the resource kind this event belongs to.
    :param event: the raw watch-event (type + object body).
    :param replenished: set when new events arrive; interrupts delayed sleeps.
    :param event_queue: queue for posting k8s events about this object.
    """
    body: bodies.Body = event['object']
    patch: patches.Patch = patches.Patch()
    delay: Optional[float] = None
    # Each object has its own prefixed logger, to distinguish parallel handling.
    logger = logging_engine.ObjectLogger(body=body)
    posting.event_queue_loop_var.set(asyncio.get_running_loop())
    posting.event_queue_var.set(event_queue)  # till the end of this object's task.
    # Recall what is stored about that object. Share it in little portions with the consumers.
    # And immediately forget it if the object is deleted from the cluster (but keep in memory).
    memory = await memories.recall(body, noticed_by_listing=event['type'] is None)
    if event['type'] == 'DELETED':
        await memories.forget(body)
    # Invoke all silent spies. No causation, no progress storage is performed.
    if registry.resource_watching_handlers[resource]:
        resource_watching_cause = causation.detect_resource_watching_cause(
            event=event,
            resource=resource,
            logger=logger,
            patch=patch,
            memo=memory.user_data,
        )
        await process_resource_watching_cause(
            lifecycle=lifecycles.all_at_once,
            registry=registry,
            memory=memory,
            cause=resource_watching_cause,
        )
    # Object patch accumulator. Populated by the methods. Applied in the end of the handler.
    # Detect the cause and handle it (or at least log this happened).
    if registry.resource_changing_handlers[resource]:
        extra_fields = registry.resource_changing_handlers[resource].get_extra_fields()
        old, new, diff = lastseen.get_essential_diffs(body=body, extra_fields=extra_fields)
        resource_changing_cause = causation.detect_resource_changing_cause(
            event=event,
            resource=resource,
            logger=logger,
            patch=patch,
            old=old,
            new=new,
            diff=diff,
            memo=memory.user_data,
            initial=memory.noticed_by_listing and not memory.fully_handled_once,
        )
        # The returned delay, if any, asks to re-trigger handling later.
        delay = await process_resource_changing_cause(
            lifecycle=lifecycle,
            registry=registry,
            memory=memory,
            cause=resource_changing_cause,
        )
    # Whatever was done, apply the accumulated changes to the object.
    # But only once, to reduce the number of API calls and the generated irrelevant events.
    if patch:
        logger.debug("Patching with: %r", patch)
        await patching.patch_obj(resource=resource, patch=patch, body=body)
    # Sleep strictly after patching, never before -- to keep the status proper.
    # The patching above, if done, interrupts the sleep instantly, so we skip it at all.
    # Note: a zero-second or negative sleep is still a sleep, it will trigger a dummy patch.
    if delay and patch:
        logger.debug(f"Sleeping was skipped because of the patch, {delay} seconds left.")
    elif delay is None and not patch:
        logger.debug(f"Handling cycle is finished, waiting for new changes since now.")
    elif delay is not None:
        if delay > 0:
            logger.debug(f"Sleeping for {delay} seconds for the delayed handlers.")
            # Cap the sleep so the keep-alive patch below fires periodically.
            limited_delay = min(delay, handling.WAITING_KEEPALIVE_INTERVAL)
            unslept_delay = await sleeping.sleep_or_wait(limited_delay, replenished)
        else:
            unslept_delay = None  # no need to sleep? means: slept in full.
        if unslept_delay is not None:
            logger.debug(f"Sleeping was interrupted by new changes, {unslept_delay} seconds left.")
        else:
            # Any unique always-changing value will work; not necessary a timestamp.
            dummy_value = datetime.datetime.utcnow().isoformat()
            dummy_patch = patches.Patch({'status': {'kopf': {'dummy': dummy_value}}})
            logger.debug("Provoking reaction with: %r", dummy_patch)
            await patching.patch_obj(resource=resource, patch=dummy_patch, body=body)
def load_sprites(obj_list: list, paths: list):
    """Load the sprites for each of the items in parallel

    Args:
        obj_list: objects to receive sprites; paired index-wise with paths.
        paths: sprite paths, one per object in obj_list.
    """
    async def _load_all():
        # Schedule every add_to_sprite coroutine on one event loop so the
        # loads actually overlap.  The previous code called asyncio.run()
        # once per item, which created a fresh loop each time and ran the
        # coroutines strictly sequentially.
        await asyncio.gather(
            *(add_to_sprite(obj, path) for obj, path in zip(obj_list, paths))
        )

    asyncio.run(_load_all())
def loadFileOBJ(model, fileName):
    """Loads an obj file with associated mtl file to produce Buffer object
    as part of a Shape. Arguments:
      *model*
        Model object to add to.
      *fileName*
        Path and name of obj file relative to top directory.
    """
    # NOTE: Python 2 code (print statements).
    model.coordinateSystem = "Y-up"
    model.parent = None
    model.childModel = [] # don't really need parent and child pointers but will speed up traversing tree
    model.vNormal = False
    model.vGroup = {} # holds the information for each vertex group
    # read in the file and parse into some arrays
    filePath = os.path.split(os.path.abspath(fileName))[0]
    print filePath
    f = open(fileName, 'r')
    vertices = []
    normals = []
    uvs = []
    faces = {}
    materials = {}
    material = ""
    mcounter = 0
    mcurrent = 0
    numv = [] #number of vertices for each material (nb each vertex will have three coords)
    numi = [] #number of indices (triangle corners) for each material
    mtllib = ""
    # current face state
    group = 0
    object = 0
    smooth = 0
    for l in f:
        chunks = l.split()
        if len(chunks) > 0:
            # Vertices as (x,y,z) coordinates
            # v 0.123 0.234 0.345
            if chunks[0] == "v" and len(chunks) == 4:
                x = float(chunks[1])
                y = float(chunks[2])
                z = -float(chunks[3]) # z direction away in gl es 2.0 shaders
                vertices.append((x,y,z))
            # Normals in (x,y,z) form; normals might not be unit
            # vn 0.707 0.000 0.707
            if chunks[0] == "vn" and len(chunks) == 4:
                x = float(chunks[1])
                y = float(chunks[2])
                z = -float(chunks[3]) # z direction away in gl es 2.0 shaders
                normals.append((x,y,z))
            # Texture coordinates in (u,v)
            # vt 0.500 -1.352
            if chunks[0] == "vt" and len(chunks) >= 3:
                u = float(chunks[1])
                v = float(chunks[2])
                uvs.append((u,v))
            # Face
            if chunks[0] == "f" and len(chunks) >= 4:
                vertex_index = []
                uv_index = []
                normal_index = []
                # Precompute vert / normal / uv lists
                # for negative index lookup
                vertlen = len(vertices) + 1
                normlen = len(normals) + 1
                uvlen = len(uvs) + 1
                # Grow the per-material counters lazily as materials appear.
                if len(numv) < (mcurrent+1): numv.append(0)
                if len(numi) < (mcurrent+1): numi.append(0)
                for v in chunks[1:]:
                    numv[mcurrent] += 1
                    numi[mcurrent] += 3
                    vertex = parse_vertex(v)
                    if vertex['v']:
                        if vertex['v'] < 0:
                            # Negative OBJ indices are relative to the end.
                            vertex['v'] += vertlen
                        vertex_index.append(vertex['v'])
                    if vertex['t']:
                        if vertex['t'] < 0:
                            vertex['t'] += uvlen
                        uv_index.append(vertex['t'])
                    if vertex['n']:
                        if vertex['n'] < 0:
                            vertex['n'] += normlen
                        normal_index.append(vertex['n'])
                numi[mcurrent] -= 6 # number of corners of triangle = (n-2)*3 where n is the number of corners of face
                if not mcurrent in faces: faces[mcurrent] = []
                faces[mcurrent].append({
                    'vertex':vertex_index,
                    'uv':uv_index,
                    'normal':normal_index,
                    'group':group,
                    'object':object,
                    'smooth':smooth,
                    })
            # Group
            if chunks[0] == "g" and len(chunks) == 2:
                group = chunks[1]
            # Object
            if chunks[0] == "o" and len(chunks) == 2:
                object = chunks[1]
            # Materials definition
            if chunks[0] == "mtllib" and len(chunks) == 2:
                mtllib = chunks[1]
            # Material
            if chunks[0] == "usemtl":
                if len(chunks) > 1:
                    material = chunks[1]
                else:
                    material = ""
                if not material in materials:
                    mcurrent = mcounter
                    materials[material] = mcounter
                    mcounter += 1
                else:
                    mcurrent = materials[material]
            # Smooth shading
            if chunks[0] == "s" and len(chunks) == 2:
                smooth = chunks[1]
    if VERBOSE:
        print "materials:  ", materials
        print "numv: ", numv
    # Build one Buffer per material group, fan-triangulating each face.
    for g in faces:
        # NOTE(review): these decrements look like leftover bookkeeping --
        # numv/numi are not used below this point; confirm before removing.
        numv[g] -= 1
        numi[g] -= 1
        g_vertices = []
        g_normals = []
        g_tex_coords = []
        g_indices = []
        i = 0 # vertex counter in this material
        j = 0 # triangle vertex count in this material
        if VERBOSE:
            print "len uv=",len(vertices)
        for f in faces[g]:
            iStart = i
            for v in range(len(f['vertex'])):
                g_vertices.append(vertices[f['vertex'][v]-1])
                g_normals.append(normals[f['normal'][v]-1])
                if (len(f['uv']) > 0 and len(uvs[f['uv'][v]-1]) == 2):
                    g_tex_coords.append(uvs[f['uv'][v]-1])
                i += 1
            n = i - iStart - 1
            # Triangle fan: (first, t+1, t) for each interior vertex t.
            for t in range(1,n):
                g_indices.append((iStart, iStart + t + 1, iStart + t))
        model.buf.append(Buffer(model, g_vertices, g_tex_coords, g_indices, g_normals))
        n = len(model.buf) - 1
        model.vGroup[g] = n
        model.buf[n].indicesLen = len(model.buf[n].indices)
        model.buf[n].material = (0.0, 0.0, 0.0, 0.0)
        model.buf[n].ttype = GL_TRIANGLES
        #for i in range(len(model.vGroup[g].normals)):
        #  print model.vGroup[g].normals[i],
        if VERBOSE:
            print
            print "indices=",len(model.buf[n].indices)
            print "vertices=",len(model.buf[n].vertices)
            print "normals=",len(model.buf[n].normals)
            print "tex_coords=",len(model.buf[n].tex_coords)
    # Resolve material colours/textures from the companion .mtl file.
    material_lib = parse_mtl(open(os.path.join(filePath, mtllib), 'r'))
    for m in materials:
        if VERBOSE:
            print m
        if 'mapDiffuse' in material_lib[m]:
            tfileName = material_lib[m]['mapDiffuse']
            model.buf[model.vGroup[materials[m]]].texFile = tfileName
            model.buf[model.vGroup[materials[m]]].textures = [Texture(os.path.join(filePath, tfileName), False, True)] # load from file
        else:
            model.buf[model.vGroup[materials[m]]].texFile = None
            model.buf[model.vGroup[materials[m]]].textures = []
        if 'colorDiffuse' in material_lib[m]:#TODO don't create this array if texture being used though not exclusive.
            #TODO check this works with appropriate mtl file
            redVal = material_lib[m]['colorDiffuse'][0]
            grnVal = material_lib[m]['colorDiffuse'][1]
            bluVal = material_lib[m]['colorDiffuse'][2]
            model.buf[model.vGroup[materials[m]]].material = (redVal, grnVal, bluVal, 1.0)
            model.buf[model.vGroup[materials[m]]].unib[3:6] = [redVal, grnVal, bluVal]
def direct(input_writer, script_str, run_dir, prog,
           geo, charge, mult, method, basis, **kwargs):
    """ Generates an input file for an electronic structure job and
        runs it directly.

        :param input_writer: elstruct writer module function for desired job
        :type input_writer: elstruct function
        :param script_str: string of bash script that contains
            execution instructions electronic structure job
        :type script_str: str
        :param run_dir: name of directory to run electronic structure job
        :type run_dir: str
        :param prog: electronic structure program to run
        :type prog: str
        :param geo: cartesian or z-matrix geometry
        :type geo: tuple
        :param charge: molecular charge
        :type charge: int
        :param mult: spin multiplicity
        :type mult: int
        :param method: electronic structure method
        :type method: str
        :returns: the input string, the output string, and the run directory
        :rtype: (str, str)
    """
    # Write the job input, run it, and keep only the first output file.
    inp_str = input_writer(
        prog=prog,
        geo=geo, charge=charge, mult=mult, method=method, basis=basis,
        **kwargs)
    out_str = from_input_string(script_str, run_dir, inp_str)[0]
    return inp_str, out_str
def get_ids(records, key):
    """Utility method to extract list of Ids from Bulk API insert/query result.

    Args:
        records (:obj:`list`): List of records from a Bulk API insert or SOQL query.
        key (:obj:`str`): Key to extract - 'Id' for queries or 'id' for inserted data.

    Returns:
        (:obj:`list`) of inserted record Ids in form [{'Id':'001000000000001'},...]
    """
    extracted = []
    for record in records:
        extracted.append({'Id': record[key]})
    return extracted
def read(*parts):
    """
    Build an absolute path from *parts* and return the contents of the
    resulting file.  Assume UTF-8 encoding.
    """
    target = os.path.join(HERE, *parts)
    with codecs.open(target, "rb", "utf-8") as handle:
        return handle.read()
def auto_help(func):
    """Automatically registers a help command for this group."""
    if not isinstance(func, commands.Group):
        raise TypeError('Auto help can only be applied to groups.')
    # Attach a hidden `help` subcommand that delegates to _call_help.
    help_command = commands.Command(_call_help, name='help', hidden=True)
    func.add_command(help_command)
    return func
def unique_justseen(iterable, key=None):
    """
    List unique elements, preserving order. Remember only the element just seen.
    >>> [x for x in unique_justseen('AAAABBBCCDAABBB')]
    ['A', 'B', 'C', 'D', 'A', 'B']
    >>> [x for x in unique_justseen('ABBCcAD', str.lower)]
    ['A', 'B', 'C', 'A', 'D']
    """
    # NOTE: Python 2 code -- itertools.imap is the lazy map(); py3 would use map().
    imap = itertools.imap
    itemgetter = operator.itemgetter
    groupby = itertools.groupby
    # groupby collapses consecutive equal elements (per `key`); next() then
    # takes the first member of each run, yielding one element per run.
    return imap(next, imap(itemgetter(1), groupby(iterable, key)))
def unique_rows(arr, thresh=0.0, metric='euclidean'):
    """Returns subset of rows that are unique, in terms of Euclidean distance

    http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
    """
    # Full pairwise distance matrix, thresholded into a boolean "close" mask.
    pairwise = squareform(pdist(arr, metric=metric))
    # Each row of the mask identifies a group of mutually-close rows; keep
    # one representative (the first index) per distinct group.
    close_groups = {tuple(np.nonzero(mask_row)[0]) for mask_row in pairwise <= thresh}
    representatives = [group[0] for group in close_groups]
    return arr[representatives]
def backdoors_listing(request, option=None):
    """
    Generate the Backdoor listing page.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', 'csv', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    request.user._setup()
    user = request.user
    # Guard clause: no READ access means no listing at all.
    if not user.has_access_to(BackdoorACL.READ):
        return render_to_response("error.html",
                                  {'error': 'User does not have permission to view backdoor listing.'},
                                  RequestContext(request))
    if option == "csv":
        return generate_backdoor_csv(request)
    if option == "jtdelete" and not user.has_access_to(BackdoorACL.DELETE):
        # NOTE: the 'sucess' key spelling is kept as-is -- clients may key on it.
        result = {'sucess': False,
                  'message': 'User does not have permission to delete Backdoor.'}
        return HttpResponse(json.dumps(result, default=json_handler),
                            content_type="application/json")
    return generate_backdoor_jtable(request, option)
def writeConfigs(pathfile: str, options: dict[str, dict[str, object]]) -> ConfigParser:
    """
    Write the given options to an INI file at *pathfile*.

    Args:
        pathfile: destination path of the INI file (overwritten if present).
        options: mapping of section name -> {option name: value}.

    Returns:
        The populated ConfigParser that was written to disk.
    """
    config = ConfigParser()
    for section, entries in options.items():
        if not config.has_section(section):
            config.add_section(section)
        for option, value in entries.items():
            config.set(section, option, value)
    # Use a context manager so the handle is always closed (the previous
    # code passed a bare open() to write() and leaked the file handle).
    with open(pathfile, mode="w", encoding="utf8") as config_file:
        config.write(config_file)
    # Return the parser, honouring the declared ConfigParser return type
    # (the previous code implicitly returned None).
    return config
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.