content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def default_sv2_sciencemask():
    """Returns default mask of bits for science targets in the SV2 survey.

    Combines the dark-time (LRG, ELG, QSO), bright-time (BGS_ANY, MWS_ANY)
    and secondary (SCND_ANY) target bits from ``sv2_mask``.
    """
    sciencemask = 0
    sciencemask |= sv2_mask["LRG"].mask
    sciencemask |= sv2_mask["ELG"].mask
    sciencemask |= sv2_mask["QSO"].mask
    sciencemask |= sv2_mask["BGS_ANY"].mask
    sciencemask |= sv2_mask["MWS_ANY"].mask
    sciencemask |= sv2_mask["SCND_ANY"].mask
    return sciencemask
def upsert(biomodelos, geoserver, models_info, models_folder):
    """Create or update the model file in Geoserver for an existing model
    in BioModelos
    MODELS_INFO \t csv file that maps the tax_id, model_id and model_file
    for each model to upload
    MODELS_FOLDER \t Path to folder that contains all the files specified
    in MODEL_INFO
    """
    df = pd.read_csv(models_info)
    for row in df.itertuples():
        click.echo(f"Uploading {row.model_file} to Geoserver")
        # Workspace name is the zero-padded (width 5) taxon id.
        ws_name = f"taxid-{row.tax_id:0>5}"
        try:
            geoserver.create_ws(name=ws_name)
            geoserver.load_layer(
                file_path=os.path.join(models_folder, row.model_file),
                workspace_name=ws_name,
                layer_name=row.model_id,
            )
            click.echo(f"Updating {row.model_id} layer name")
            biomodelos.update_model_layer(
                model_id=row.model_id, layer=f"{ws_name}:{row.model_id}"
            )
        except Exception as e:
            click.secho(str(e), fg="red")
            # NOTE(review): this aborts the whole upload on the first
            # failing row — presumably intentional, but confirm that
            # `continue` was not meant instead.
            return
def solver_softmax(K, R):
    """Softmax (Boltzmann) arm selection for a K-armed bandit.

    K = the number of arms (domains)
    R = the sequence of past rewards, a mapping arm index -> rewards

    Returns a dict mapping every arm index to 0.0, except the sampled
    arm which is set to 1.0.
    """
    mean_rewards = np.zeros(K, dtype=float)
    for arm, rewards in R.items():
        mean_rewards[arm] = np.mean(rewards)
    # Softmax-normalise the mean rewards into sampling probabilities.
    weights = np.exp(mean_rewards)
    probabilities = weights / weights.sum()
    chosen = np.random.choice(np.arange(0, K, 1), size=1, p=probabilities)[0]
    selection = dict.fromkeys(range(K), 0.0)
    selection[chosen] = 1.0
    return selection
def mapview(request):
    """Map view.

    Builds the shared base context (with 'map' as the active section)
    and renders the ``map.html`` template.
    """
    context = basecontext(request, 'map')
    return render(request, 'map.html', context=context)
def adapt_coastdat_weather_to_pvlib(weather, loc):
    """
    Adapt the coastdat weather data sets to the needs of the pvlib.
    Parameters
    ----------
    weather : pandas.DataFrame
        Coastdat2 weather data set.
    loc : pvlib.location.Location
        The coordinates of the weather data point.
    Returns
    -------
    pandas.DataFrame : Adapted weather data set.
    Examples
    --------
    >>> cd_id=1132101
    >>> cd_weather=fetch_coastdat_weather(2014, cd_id)
    >>> c=fetch_data_coordinates_by_id(cd_id)
    >>> location=pvlib.location.Location(**getattr(c, '_asdict')())
    >>> pv_weather=adapt_coastdat_weather_to_pvlib(cd_weather, location)
    >>> 'ghi' in cd_weather.columns
    False
    >>> 'ghi' in pv_weather.columns
    True
    """
    # Work on a copy so the caller's DataFrame is left untouched.
    w = pd.DataFrame(weather.copy())
    # Kelvin -> degrees Celsius.
    w["temp_air"] = w.temp_air - 273.15
    # Global horizontal irradiance = direct horizontal + diffuse.
    w["ghi"] = w.dirhi + w.dhi
    clearskydni = loc.get_clearsky(w.index).dni
    # Derive direct normal irradiance from ghi/dhi and solar position,
    # capped by the clear-sky DNI.
    w["dni"] = pvlib.irradiance.dni(
        w["ghi"],
        w["dhi"],
        pvlib.solarposition.get_solarposition(
            w.index, loc.latitude, loc.longitude
        ).zenith,
        clearsky_dni=clearskydni,
    )
    return w
def bprop_distribute(arr, shp, out, dout):
    """Backpropagator for primitive `distribute`.

    The gradient w.r.t. the broadcast array is the output gradient
    reduced (summed) back to the array's original shape; the shape
    argument receives a zero gradient.
    """
    return (array_reduce(scalar_add, dout, shape(arr)),
            zeros_like(shp))
def ids_to_non_bilu_label_mapping(labelset: LabelSet) -> BiluMappings:
    """Mapping from ids to BILU and non-BILU mapping. This is used to remove the BILU labels to regular labels"""
    target_names = list(labelset["ids_to_label"].values())
    # Strip the BILU prefix ("B-", "I-", "L-", "U-") from each label name.
    wo_bilu = [bilu_label.split("-")[-1] for bilu_label in target_names]
    non_bilu_mapping = bilu_to_non_bilu(wo_bilu)
    # NOTE(review): this assigns to attributes of the BiluMappings CLASS
    # (not an instance), so repeated calls share/overwrite state — confirm
    # this is intentional.
    BiluMappings.non_bilu_label_to_bilu_ids = {}
    BiluMappings.non_bilu_label_to_id = {}
    for target_name, labels_list in non_bilu_mapping.items():
        # e.g. 'upper_bound': ([1, 2, 3, 4], 1)
        BiluMappings.non_bilu_label_to_bilu_ids[target_name] = labels_list, labels_list[0]
        # e.g. 'upper_bound': 1 — the first BILU id stands for the plain label
        BiluMappings.non_bilu_label_to_id[target_name] = labels_list[0]
    return BiluMappings
def generate_input_types():
    """
    Define the different input types that are used in the factory
    :return: list of items
    """
    return [
        "Angle_irons", "Tubes", "Channels", "Mig_wire", "Argon_gas",
        "Galvanised_sheets", "Budget_locks", "Welding_rods", "Body_filler",
        "Grinding_discs", "Drill_bits", "Primer", "Paints", "Thinner",
        "Sand_paper", "Masking_tapes", "Carpet", "Pop_rivets",
        "Electrical_wires", "Bulbs", "Switch", "Insulation_tapes",
        "Fasteners", "Adhesives", "Reflectors", "Accessories", "Rubbers",
        "Aluminum_mouldings", "Glasses", "Window_locks",
    ]
def ase_tile(cell, tmat):
    """Create supercell from primitive cell and tiling matrix
    Args:
        cell (pyscf.Cell): cell object
        tmat (np.array): 3x3 tiling matrix e.g. 2*np.eye(3)
    Return:
        pyscf.Cell: supercell
    """
    try:
        from qharv.inspect.axes_elem_pos import ase_tile as atile
    except ImportError:
        msg = 'tiling with non-diagonal matrix require the "ase" package'
        raise RuntimeError(msg)
    # get crystal (lattice vectors, element labels, positions) from cell object
    axes = cell.lattice_vectors()
    elem = [atom[0] for atom in cell._atom]
    pos = cell.atom_coords()
    axes1, elem1, pos1 = atile(axes, elem, pos, tmat)
    # re-make cell object with the tiled geometry
    cell1 = cell.copy()
    cell1.atom = list(zip(elem1, pos1))
    cell1.a = axes1
    # NOTE(review): the FFT mesh is scaled by the diagonal of tmat only,
    # which is presumably correct only for diagonal tiling matrices —
    # confirm the intended behaviour for non-diagonal tmat.
    ncopy = np.diag(tmat)
    cell1.mesh = np.array([ncopy[0]*cell.mesh[0],
                           ncopy[1]*cell.mesh[1],
                           ncopy[2]*cell.mesh[2]])
    cell1.build(False, False, verbose=0)
    cell1.verbose = cell.verbose
    return cell1
def test_missing_access_fn() -> None:
    """
    This test shows that the plugin needs an `access` provided or else it raises a type error.
    """
    # Plugin constructed without the required `access` callable.
    slot_filler = RuleBasedSlotFillerPlugin(rules=rules)
    workflow = Workflow([slot_filler])
    intent = Intent(name="intent", score=0.8)
    body = "12th december"
    entity = BaseEntity(
        range={"from": 0, "to": len(body)},
        body=body,
        dim="default",
        type="basic",
        values=[{"key": "value"}],
    )
    workflow.output = {const.INTENTS: [intent], const.ENTITIES: [entity]}
    # Running the workflow must fail because no access function was given.
    with pytest.raises(TypeError):
        workflow.run("")
def in_order(root):
    """Print every node value of a binary tree using in-order traversal
    (left subtree, then the node itself, then the right subtree).
    """
    if not root:
        return
    in_order(root.left)
    print(root.value)
    in_order(root.right)
def timeout(timeout_sec, timeout_callback=None):
    """Decorator for timing out a function after 'timeout_sec' seconds.
    To be used like, for a 7 seconds timeout:
    @timeout(7, callback):
    def foo():
        ...
    Args:
        timeout_sec: duration to wait for the function to return before timing out
        timeout_callback: function to call in case of a timeout
    Returns:
        The wrapped function's result, or None if it timed out.
    """
    def decorator(f):
        def timeout_handler(signum, frame):
            # Raised inside the wrapped call when SIGALRM fires.
            raise TimeoutError(os.strerror(errno.ETIME))
        def wrapper(*args, **kwargs):
            # NOTE(review): SIGALRM-based, so this only works on Unix and
            # in the main thread — confirm that constraint holds for callers.
            signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(timeout_sec)
            result = None
            try:
                result = f(*args, **kwargs)
            except TimeoutError:
                if timeout_callback:
                    timeout_callback()
                pass
            finally:
                # Always cancel the pending alarm, success or timeout.
                signal.alarm(0)
            return result
        return wraps(f)(wrapper)
    return decorator
def p_anonymous_method_expression(p):
    """anonymous_method_expression : DELEGATE explicit_anonymous_function_signature_opt block
    """
    # PLY grammar action: the docstring above IS the production rule and
    # must not be edited. The body is intentionally empty — no AST node
    # is built for this production.
def parse_chat_logs(input_path, user, self):
    """
    Get messages from a person, or between that person and yourself.
    "self" does not necessarily have to be your name.
    Args:
        input_path (str): Path to chat log HTML file
        user (str): Full name of person, as appears in Messenger app
        self (str): Your name, as appears in Messenger app
    Returns:
        list[str]: Each element is a message, i.e. what gets sent when the
            enter key is pressed
    """
    data = []
    current_user = None   # sender of the messages currently being read
    user_found = False    # True once `user` has spoken in this thread
    skip_thread = False   # True for group threads (more than two people)
    for element in etree.parse(input_path).iter():
        tag = element.tag
        content = element.text
        cls = element.get("class")
        if tag == "div" and cls == "thread":
            # Do not parse threads with more than two people
            # (thread header lists participants separated by commas).
            skip_thread = content.count(",") > 1
            if user_found:
                user_found = False
        elif tag == "span" and cls == "user" and not skip_thread:
            current_user = content
            if current_user == user:
                user_found = True
        elif tag == "p" and not skip_thread:
            # Keep the user's own messages, and `self`'s replies once the
            # user has appeared in the thread.
            if (current_user == user) or (current_user == self and user_found):
                data.append(content)
    return data
def remove_undesired_text_from_xml(BLAST_xml_file):
    """Removes undesired text from malformed XML files delivered by NCBI
    server via biopython wrapper, rewriting the file in place.
    """
    unwanted = "CREATE_VIEW\n"
    # Move the malformed file aside, then stream it back without the
    # offending lines; finally drop the temporary original.
    backup = BLAST_xml_file[:-4] + "_orig.xml"
    move(BLAST_xml_file, backup)
    with open(backup, "r") as src:
        with open(BLAST_xml_file, "w") as dst:
            dst.writelines(line for line in src if line != unwanted)
    Path(backup).unlink()
def on_get(req, resp, schedule_id):
    """
    Get schedule information. Detailed information on schedule parameters is provided in the
    POST method for /api/v0/team/{team_name}/rosters/{roster_name}/schedules.
    **Example request**:
    .. sourcecode:: http
       GET /api/v0/schedules/1234 HTTP/1.1
       Host: example.com
    **Example response**:
    .. sourcecode:: http
        HTTP/1.1 200 OK
        Content-Type: application/json
        {
            "advanced_mode": 1,
            "auto_populate_threshold": 30,
            "events": [
                {
                    "duration": 259200,
                    "start": 0
                }
            ],
            "id": 1234,
            "role": "primary",
            "role_id": 1,
            "roster": "roster-foo",
            "roster_id": 2922,
            "team": "asdf",
            "team_id": 2121,
            "timezone": "US/Pacific"
        }
    """
    # Look up the single schedule by id; an optional ?fields=... query
    # parameter restricts which columns are returned.
    resp.body = json_dumps(get_schedules({'id': schedule_id}, fields=req.get_param_as_list('fields'))[0])
def make_labels(dest_folder, zoom, country, classes, ml_type, bounding_box, sparse, **kwargs):
    """Create label data from OSM QA tiles for specified classes
    Perform the following operations:
    - If necessary, re-tile OSM QA Tiles to the specified zoom level
    - Iterate over all tiles within the bounding box and produce a label for each
    - Save the label file as labels.npz
    - Create an output for previewing the labels (GeoJSON or PNG depending upon ml_type)
    Parameters
    ------------
    dest_folder: str
        Folder to save labels and example tiles into
    zoom: int
        The zoom level to create tiles at
    country: list
        The OSM QA Tile extracts to download. Each value should be a country string matching a value found in
        `label_maker/countries.txt`
    classes: list
        A list of classes for machine learning training. Each class is defined as a dict
        with two required properties:
          - name: class name
          - filter: A Mapbox GL Filter.
        See the README for more details
    ml_type: str
        Defines the type of machine learning. One of "classification", "object-detection", or "segmentation"
    bounding_box: list
        The bounding box to create images from. This should be given in the form: `[xmin, ymin, xmax, ymax]`
        as longitude and latitude values between `[-180, 180]` and `[-90, 90]` respectively
    sparse: boolean
        Limit the total background tiles to write based on `background_ratio` kwarg.
    geojson: str
        Filepath to optional geojson label input
    **kwargs: dict
        Other properties from CLI config passed as keywords to other utility functions
    """
    for ctr_idx, ctr in enumerate(country):
        mbtiles_file = op.join(dest_folder, '{}.mbtiles'.format(ctr))
        mbtiles_file_zoomed = op.join(dest_folder, '{}-z{!s}.mbtiles'.format(ctr, zoom))
        if not op.exists(mbtiles_file_zoomed):
            filtered_geo = kwargs.get('geojson') or op.join(dest_folder, '{}.geojson'.format(ctr))
            fast_parse = []
            if not op.exists(filtered_geo):
                fast_parse = ['-P']
                print('Retiling QA Tiles to zoom level {} (takes a bit)'.format(zoom))
                # Decode the mbtiles to GeoJSON, clip to the bounding box
                # with stream_filter.py, then re-tile at the target zoom.
                ps = Popen(['tippecanoe-decode', '-c', '-f', mbtiles_file], stdout=PIPE)
                stream_filter_fpath = op.join(op.dirname(label_maker.__file__), 'stream_filter.py')
                run([sys.executable, stream_filter_fpath, json.dumps(bounding_box)],
                    stdin=ps.stdout, stdout=open(filtered_geo, 'w'))
                ps.wait()
            run(['tippecanoe', '--no-feature-limit', '--no-tile-size-limit'] + fast_parse +
                ['-l', 'osm', '-f', '-z', str(zoom), '-Z', str(zoom), '-o',
                 mbtiles_file_zoomed, filtered_geo])
        # Call tilereduce
        print('Determining labels for each tile')
        mbtiles_to_reduce = mbtiles_file_zoomed
        tilereduce(dict(zoom=zoom, source=mbtiles_to_reduce, bbox=bounding_box,
                        args=dict(ml_type=ml_type, classes=classes)),
                   _mapper, _callback, _done)
        # Add empty labels to any tiles which didn't have data
        empty_label = _create_empty_label(ml_type, classes)
        for tile in tiles(*bounding_box, [zoom]):
            index = '-'.join([str(i) for i in tile])
            # tile_results is a module-level accumulator filled by the
            # tilereduce callbacks above.
            global tile_results
            if tile_results.get(index) is None:
                tile_results[index] = empty_label
        # Print a summary of the labels
        _tile_results_summary(ml_type, classes)
        # If the --sparse flag is provided, limit the total background tiles to write
        if sparse:
            pos_examples, neg_examples = [], []
            for k in tile_results.keys():
                # if we don't match any class, this is a negative example
                if not sum([class_match(ml_type, tile_results[k], i + 1) for i, c in enumerate(classes)]):
                    neg_examples.append(k)
                else:
                    pos_examples.append(k)
            # Choose random subset of negative examples
            n_neg_ex = int(kwargs['background_ratio'] * len(pos_examples))
            neg_examples = np.random.choice(neg_examples, n_neg_ex, replace=False).tolist()
            tile_results = {k: tile_results.get(k) for k in pos_examples + neg_examples}
            print('Using sparse mode; subselected {} background tiles'.format(n_neg_ex))
        # write out labels as numpy arrays
        labels_file = op.join(dest_folder, 'labels.npz')
        print('Writing out labels to {}'.format(labels_file))
        np.savez(labels_file, **tile_results)
        # write out labels as GeoJSON or PNG
        if ml_type == 'classification':
            features = []
            # Accumulate per-country label presence/area; written once after
            # the last country has been processed.
            if ctr_idx == 0:
                label_area = np.zeros((len(list(tile_results.values())[0]), len(tile_results), len(country)), dtype=float)
                label_bool = np.zeros((len(list(tile_results.values())[0]), len(tile_results), len(country)), dtype=bool)
            for i, (tile, label) in enumerate(tile_results.items()):
                label_bool[:, i, ctr_idx] = np.asarray([bool(l) for l in label])
                label_area[:, i, ctr_idx] = np.asarray([float(l) for l in label])
                # if there are no classes, activate the background
                if ctr == country[-1]:
                    if all(v == 0 for v in label_bool[:, i, ctr_idx]):
                        label_bool[0, i, ctr_idx] = 1
                    feat = feature(Tile(*[int(t) for t in tile.split('-')]))
                    features.append(Feature(geometry=feat['geometry'],
                                            properties=dict(feat_id=str(tile),
                                                            label=np.any(label_bool[:, i, :], axis=1).astype(int).tolist(),
                                                            label_area=np.sum(label_area[:, i, :], axis=1).tolist())))
            if ctr == country[-1]:
                json.dump(fc(features), open(op.join(dest_folder, 'classification.geojson'), 'w'))
        elif ml_type == 'object-detection':
            label_folder = op.join(dest_folder, 'labels')
            if not op.isdir(label_folder):
                makedirs(label_folder)
            for tile, label in tile_results.items():
                # if we have at least one bounding box label
                if bool(label.shape[0]):
                    label_file = '{}.png'.format(tile)
                    img = Image.new('RGB', (256, 256))
                    draw = ImageDraw.Draw(img)
                    for box in label:
                        draw.rectangle(((box[0], box[1]), (box[2], box[3])), outline=class_color(box[4]))
                    print('Writing {}'.format(label_file))
                    if op.isfile(op.join(label_folder, label_file)):
                        old_img = Image.open(op.join(label_folder, label_file))
                        img.paste(old_img)
                    else:
                        img.save(op.join(label_folder, label_file))
        elif ml_type == 'segmentation':
            label_folder = op.join(dest_folder, 'labels')
            if not op.isdir(label_folder):
                makedirs(label_folder)
            for tile, label in tile_results.items():
                # if we have any class pixels
                if np.sum(label):
                    label_file = '{}.png'.format(tile)
                    visible_label = np.array([class_color(l) for l in np.nditer(label)]).reshape(256, 256, 3)
                    img = Image.fromarray(visible_label.astype(np.uint8))
                    print('Writing {}'.format(label_file))
                    if op.isfile(op.join(label_folder, label_file)):
                        old_img = Image.open(op.join(label_folder, label_file))
                        img.paste(old_img)
                    else:
                        img.save(op.join(label_folder, label_file))
def while_e():
    """Print a lower-case letter 'e' as a 7-row by 6-column '*' pattern."""
    for row in range(7):
        for col in range(6):
            # Left edge, the three horizontal bars, or the short right edge.
            star = (
                (col == 0 and 0 < row < 6)
                or (0 < col < 5 and row % 3 == 0)
                or (col == 5 and row in (1, 2))
            )
            print('*' if star else ' ', end=' ')
        print()
def pressure_correction(pressure, rigidity):
    """
    function to get pressure correction factors, given a pressure time series and rigidity value for the station
    :param pressure: time series of pressure values over the time of the data observations
    :param rigidity: cut-off rigidity of the station making the observations
    :return: series of correction factors
    """
    # Reference pressure: mean over the observation period (NaNs ignored).
    p_0 = np.nanmean(pressure)
    pressure_diff = pressure - p_0
    # g cm^-2. See Desilets & Zreda 2003
    mass_attenuation_length = attenuation_length(p_0, rigidity)
    # NOTE(review): the correction is exp(dP * L); confirm whether
    # attenuation_length returns the inverse length (1/lambda), otherwise
    # a division would be expected here.
    exponent = pressure_diff * mass_attenuation_length
    pressure_corr = np.exp(exponent)
    return pressure_corr
def get_file_size(file_name):
    """Return the size of *file_name* as a human-readable string.

    :param file_name: path to the file to inspect
    :rtype: str -- humanized size (e.g. "1.2 MB"), not the raw byte count
        (the original docstring wrongly advertised a numeric result)
    """
    # Renamed local so the builtin `bytes` is not shadowed.
    size_in_bytes = os.path.getsize(file_name)
    return humanize_bytes(size_in_bytes)
def test_cumulative_return():
    """
    Test for the cumulative return calculation.
    :return:
    """
    # Four days of P&L; the summary should report the net return over
    # TOTAL_CAPITAL as a formatted percentage.
    data = pd.DataFrame({'date': ['2020-10-04 12:09:07', '2020-10-05 16:10:05',
                                  '2020-10-06 12:01:00', '2020-10-07 17:00:00'],
                         'pnl': [100000, 90000, 75000, -150000]})
    result = app.pnl_summary(data)
    ann_return = num_format(round(((100000+90000+75000-150000) / TOTAL_CAPITAL) * 100, 2)) + '%'
    # Row 1 of the summary table is expected to hold the cumulative return.
    assert ann_return == result['Value'].iloc[1]
def get_project_path_info():
    """Return the project directory paths.

    ``project_path`` is the root directory of the whole git project;
    ``poseidon_path`` is the directory named ``poseidon`` inside it.
    """
    poseidon_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    project_dir = os.path.dirname(poseidon_dir)
    return {
        "project_path": project_dir,
        "poseidon_path": poseidon_dir,
    }
def main(argv=sys.argv):
    """
    Main method called by the eggsecutable.
    :param argv: command-line arguments (unused; VIP handles its own args)
    :return: None -- delegates to the VOLTTRON agent main loop
    """
    utils.vip_main(init_volttron_central, identity=VOLTTRON_CENTRAL,
                   version=__version__)
def prepare_data(song: dict) -> dict:
    """
    Prepares song data for database insertion to cut down on duplicates
    by upper-casing and stripping the artist and title fields.
    :param song: Song data (mutated in place)
    :return: The song data
    """
    for key in ('artist', 'title'):
        song[key] = song[key].upper().strip()
    return song
def com(struct):
    """
    Calculates center of mass of the system.

    :param struct: structure object exposing get_geo_array() (Cartesian
        coordinates) and geometry['element'] (element symbols)
    :return: 3-vector of the mass-weighted mean position
    """
    geo_array = struct.get_geo_array()
    element_list = struct.geometry['element']
    # Look up each atom's IUPAC 2016 atomic mass via its atomic number.
    mass = np.array([atomic_masses_iupac2016[atomic_numbers[x]]
                     for x in element_list]).reshape(-1)
    total = np.sum(mass)
    com = np.sum(geo_array*mass[:,None], axis=0)
    com = com / total
    return com
def distance_matrix(values, metric):
    """Generate a matrix of distances based on the `metric` calculation.

    :param values: list of sequences, e.g. list of strings, list of tuples
    :param metric: function (value, value) -> number between 0.0 and 1.0
    :return: numpy array of shape (len(values), len(values))
    """
    tracker = ProgressTracker(len(values))
    rows = []
    for row_idx, left in enumerate(values):
        tracker.tick(row_idx)
        rows.append([metric(left, right) for right in values])
    return np.array(rows)
def tic():
    """ Python implementation of Matlab tic() function """
    # Record the current wall-clock time in a module-level global; the
    # matching toc() reads it to report the elapsed interval.
    global __time_tic_toc
    __time_tic_toc = time.time()
def txt_write(lines: List[str], path: str, model: str = "w", encoding: str = "utf-8"):
    """
    Write Line of list to file
    Args:
        lines: lines of list<str> which need save
        path: path of save file, such as "txt"
        model: type of write, such as "w", "a+"
        encoding: type of encoding, such as "utf-8", "gbk"
    """
    try:
        # `with` guarantees the handle is closed even if writelines raises
        # (the original leaked the open handle on write errors).
        with open(path, model, encoding=encoding) as file:
            file.writelines(lines)
    except Exception as e:
        logging.info(str(e))
def _Pack(content, offset, format_string, values):
    """Pack values into the content at the offset.
    Args:
        content: Bytes to be packed.
        offset: Offset from the beginning of the file.
        format_string: Format string of struct module.
        values: Values to struct.pack.
    Returns:
        Updated content (bytes).
    """
    # struct.pack returns bytes; the original joined it into a str with
    # ''.join, which raises TypeError on Python 3. Join as bytes instead.
    size = struct.calcsize(format_string)
    return b''.join([content[:offset],
                     struct.pack(format_string, *values),
                     content[offset + size:]])
def get_rotation_matrix(orientation):
    """
    Build the rotation matrix for a rotation around the x axis.
    Args:
    - (float) orientation in radian
    Return:
    - (np.array) 3x3 rotation matrix for a rotation around the x axis
    """
    cos_a = math.cos(orientation)
    sin_a = math.sin(orientation)
    return np.array([
        [1, 0, 0],
        [0, cos_a, -sin_a],
        [0, sin_a, cos_a],
    ])
def logs_for_build(
    build_id, session, wait=False, poll=10
):  # noqa: C901 - suppress complexity warning for this method
    """Display the logs for a given build, optionally tailing them until the
    build is complete.
    Args:
        build_id (str): The ID of the build to display the logs for.
        wait (bool): Whether to keep looking for new log entries until the build completes (default: False).
        poll (int): The interval in seconds between polling for new log entries and build completion (default: 10).
        session (boto3.session.Session): A boto3 session to use
    Raises:
        ValueError: If waiting and the build fails.
    """
    codebuild = session.client("codebuild")
    description = codebuild.batch_get_builds(ids=[build_id])["builds"][0]
    status = description["buildStatus"]
    log_group = description["logs"].get("groupName")
    stream_name = description["logs"].get("streamName")  # The list of log streams
    position = Position(
        timestamp=0, skip=0
    )  # The current position in each stream, map of stream name -> position
    # Increase retries allowed (from default of 4), as we don't want waiting for a build
    # to be interrupted by a transient exception.
    config = botocore.config.Config(retries={"max_attempts": 15})
    client = session.client("logs", config=config)
    # Idiom fix: direct boolean expression instead of `False if ... else True`.
    job_already_completed = status != "IN_PROGRESS"
    state = LogState.STARTING if wait and not job_already_completed else LogState.COMPLETE
    dot = True
    # Idiom fix: compare to None with `is None`, not `==`.
    while state == LogState.STARTING and log_group is None:
        time.sleep(poll)
        description = codebuild.batch_get_builds(ids=[build_id])["builds"][0]
        log_group = description["logs"].get("groupName")
        stream_name = description["logs"].get("streamName")
    if state == LogState.STARTING:
        state = LogState.TAILING
    # The loop below implements a state machine that alternates between checking the build status and
    # reading whatever is available in the logs at this point. Note, that if we were called with
    # wait == False, we never check the job status.
    #
    # If wait == TRUE and job is not completed, the initial state is STARTING
    # If wait == FALSE, the initial state is COMPLETE (doesn't matter if the job really is complete).
    #
    # The state table:
    #
    # STATE               ACTIONS                        CONDITION           NEW STATE
    # ----------------    ----------------               -------------------  ----------------
    # STARTING            Pause, Get Status              Valid LogStream Arn  TAILING
    #                                                    Else                 STARTING
    # TAILING             Read logs, Pause, Get status   Job complete         JOB_COMPLETE
    #                                                    Else                 TAILING
    # JOB_COMPLETE        Read logs, Pause               Any                  COMPLETE
    # COMPLETE            Read logs, Exit                                     N/A
    #
    # Notes:
    # - The JOB_COMPLETE state forces us to do an extra pause and read any items that got to Cloudwatch after
    #   the build was marked complete.
    last_describe_job_call = time.time()
    dot_printed = False
    while True:
        for event, position in log_stream(client, log_group, stream_name, position):
            print(event["message"].rstrip())
            if dot:
                dot = False
                if dot_printed:
                    print()
        if state == LogState.COMPLETE:
            break
        time.sleep(poll)
        if dot:
            print(".", end="")
            sys.stdout.flush()
            dot_printed = True
        if state == LogState.JOB_COMPLETE:
            state = LogState.COMPLETE
        elif time.time() - last_describe_job_call >= 30:
            description = codebuild.batch_get_builds(ids=[build_id])["builds"][0]
            status = description["buildStatus"]
            last_describe_job_call = time.time()
            status = description["buildStatus"]
            if status != "IN_PROGRESS":
                print()
                state = LogState.JOB_COMPLETE
    if wait:
        if dot:
            print()
def prepare_labels(label_type, org_xml_dir=ORG_XML_DIR, label_dir=LABEL_DIR):
    """ Prepare the neural network output targets.

    :param label_type: transcription label granularity; also used as the
        output file extension
    :param org_xml_dir: directory of source Pangloss XML files
    :param label_dir: output directory; one file per sentence is written
        under a TEXT/ or WORDLIST/ subdirectory
    """
    if not os.path.exists(os.path.join(label_dir, "TEXT")):
        os.makedirs(os.path.join(label_dir, "TEXT"))
    if not os.path.exists(os.path.join(label_dir, "WORDLIST")):
        os.makedirs(os.path.join(label_dir, "WORDLIST"))
    for path in Path(org_xml_dir).glob("*.xml"):
        fn = path.name
        prefix, _ = os.path.splitext(fn)
        rec_type, sents, _, _ = pangloss.get_sents_times_and_translations(str(path))
        # Write the sentence transcriptions to file
        sents = [preprocess_na(sent, label_type) for sent in sents]
        for i, sent in enumerate(sents):
            if sent.strip() == "":
                # Then there's no transcription, so ignore this.
                continue
            # Output file: <recording>.<sentence index>.<label type>
            out_fn = "%s.%d.%s" % (prefix, i, label_type)
            sent_path = os.path.join(label_dir, rec_type, out_fn)
            with open(sent_path, "w") as sent_f:
                print(sent, file=sent_f)
def machinesize(humansize):
    """Convert a human-readable size string to a machine-size in bytes.

    :param humansize: a string like "<value> <unit>" (e.g. "10 MB"), a
        bare numeric value, or the UNKNOWN_SIZE sentinel
    :return: size in bytes as a float (0 for UNKNOWN_SIZE)
    """
    if humansize == UNKNOWN_SIZE:
        return 0
    try:
        size_str, size_unit = humansize.split(" ")
    except AttributeError:
        # Not a string (already numeric) -- coerce directly.
        return float(humansize)
    unit_converter = {
        'Byte': 0, 'Bytes': 0, 'kB': 1, 'MB': 2, 'GB': 3, 'TB': 4, 'PB': 5
    }
    # Renamed the result variable: the original shadowed the function name.
    size_in_bytes = float(size_str) * (1000 ** unit_converter[size_unit])
    return size_in_bytes
def generate_erdos_renyi_netx(p, N):
    """ Generate random Erdos Renyi graph

    :param p: edge probability
    :param N: number of nodes
    :return: (networkx graph, dense float adjacency matrix as torch tensor)
    """
    g = networkx.erdos_renyi_graph(N, p)
    W = networkx.adjacency_matrix(g).todense()
    return g, torch.as_tensor(W, dtype=torch.float)
def kmc_algorithm(process_list):
    """
    :param process_list: List of elements dict(center, process, new molecule).
        Each process provides its own rate constant via get_rate_constant().
    Chooses a process using the list of rates and associates a time with this process using
    the BKL Kinetic Monte-Carlo algorithm. The algorithm uses 2 random numbers, one to choose the process and the other
    for the time. The usage of each random number is in an independent function.
    :return: chosen_process: The chosen process and the new molecule affected
             time: the duration of the process
    """
    # Rates are collected in the same order as process_list, so the index
    # chosen by select_process maps directly back to its process.
    rates_list = [proc.get_rate_constant() for proc in process_list]
    process_index = select_process(rates_list)
    chosen_process = process_list[process_index]
    time = time_advance(rates_list)
    return chosen_process, time
def _update_changed_time(cnn: Connection) -> None:
    """Update task changed time.

    Upserts the singleton row (id=1) of the PeriodicTasks table with the
    current timestamp.
    """
    t: Table = PeriodicTasks.__table__
    rv = cnn.execute(t.select(t.c.id == 1)).fetchone()
    if not rv:
        # First run: the singleton row does not exist yet.
        cnn.execute(t.insert().values(id=1, changed_at=datetime.now()))
    else:
        cnn.execute(
            t.update().where(t.c.id == 1).values(changed_at=datetime.now()))
def __matlab_round(x: float = None) -> int:
    """Workaround to cope the rounding differences between MATLAB and python

    Rounds halves up (e.g. 2.5 -> 3) instead of Python's banker's rounding.
    """
    floor_x = np.floor(x)
    return int(floor_x) if x - floor_x < 0.5 else int(np.ceil(x))
def db_remove(src_path, db, chat_id):
    """Removes a string entry from a database file and the given set variable.
    If it does not already exist in the set, do not take any action.
    """
    if chat_id not in db:
        return
    target_line = str(chat_id) + "\n"
    with open(src_path, "r+") as f:
        kept = [line for line in f.readlines() if line != target_line]
        # Rewrite the surviving lines in place and drop the leftover tail.
        f.seek(0)
        f.writelines(kept)
        f.truncate()
    db.remove(chat_id)
def rayleightest(circ_data, dim='time'):
    """Returns the p-value for the Rayleigh test of uniformity
    This test is used to identify a non-uniform distribution, i.e. it is
    designed for detecting an unimodal deviation from uniformity. More
    precisely, it assumes the following hypotheses:
    - H0 (null hypothesis): The population is distributed uniformly around the
    circle.
    - H1 (alternative hypothesis): The population is not distributed uniformly
    around the circle.
    Parameters
    ----------
    circ_data : xarray DataArray
        circular data [radian]
    dim : str, optional
        name of the core dimension (the default is 'time')

    Note: weighted data is not currently supported -- the weights keyword
    argument is commented out in the implementation below.
    Returns
    -------
    xarray DataArray
        p-value
    """
    # Apply the scalar test along `dim`, letting dask parallelise over the
    # remaining dimensions.
    p_value = xr.apply_ufunc(_rayleightest, circ_data, #kwargs={'weights':weights},
                             input_core_dims=[[dim]], dask='parallelized', output_dtypes=[float])
    p_value.name = 'rayleigh_p'
    p_value.attrs.update(unit='', description='p-value for rayleigh test of uniformity')
    return p_value
def load_cfg(cfg_file: Union[str, Path]) -> dict:
    """Load the PCC algs config file in YAML format with custom tag
    !join.
    Parameters
    ----------
    cfg_file : `Union[str, Path]`
        The YAML config file.
    Returns
    -------
    `dict`
        A dictionary object loaded from the YAML config file.
    """
    # [ref.] https://stackoverflow.com/a/23212524
    ## define custom tag handler: `!join [a, b, c]` -> "abc"
    def join(loader, node):
        seq = loader.construct_sequence(node)
        return ''.join([str(i) for i in seq])
    ## register the tag handler
    # NOTE(review): yaml.add_constructor mutates global PyYAML state on
    # every call — presumably harmless here since the handler is
    # idempotent, but confirm if other code registers '!join' too.
    yaml.add_constructor('!join', join)
    with open(cfg_file, 'r') as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    return cfg
def generate(args):
    """Generates sample data for writing CBNs using the cli_main CLI utlity.
    Args:
        args: dict containing:
          - log_type: string in the LOG_TYPE format
          - start_date: optional string in the YYYY-MM-DD format The following
            directory structure will be created (if it doesn't
            exist) and sample data will be generated as follows: -
            ~/cbn/<log_type>/log_type_1.conf - ~/cbn/<log_type>/log_type_10.conf -
            ~/cbn/<log_type>/log_type_1k.conf - etc.
    """
    # Requested sample sizes and the matching filename suffixes.
    sample_sizes = ['1', '10', '1000']
    sample_names = ['1', '10', '1k']
    # Collect data from yesterday if specific date not provided
    start_date = args.start_date
    end_date = args.end_date
    # Verify directory structure exists or create it
    sample_dir = pathlib.Path('{0}/cbn/{1}'.format(pathlib.Path.home(),
                                                   args.log_type.lower()))
    sample_dir.mkdir(parents=True, exist_ok=True)
    # Generate sample data of given sizes
    for i, size in enumerate(sample_sizes):
        outfile = '{0}/{1}_{2}.log'.format(sample_dir, args.log_type.lower(),
                                           sample_names[i])
        print(
            '\nGenerating sample size: {}... '.format(sample_names[i]),
            end='',
            flush=True)
        call_get_sample_logs(args, args.log_type.upper(), start_date, end_date,
                             int(size), outfile)
    print('\nGenerated sample data ({0}); run this to go there:'.format(
        args.log_type.upper()))
    print(f'cd {sample_dir}')
def test_api_exception():
    """Test API response Exception

    A 422 response with an error payload from the accounts endpoint must
    be surfaced as a BigoneAPIException.
    """
    with pytest.raises(BigoneAPIException):
        with requests_mock.mock() as m:
            json_obj = {
                'errors': [{
                    'code': 20102,
                    'message': 'Unsupported currency ABC'
                }]
            }
            m.get('https://big.one/api/v2/accounts/ABC', json=json_obj, status_code=422)
            client.get_account('ABC')
def pdist_triu(x, f=None):
    """Pairwise distance.

    Arguments:
        x: A set of points.
            shape=(n,d)
        f (optional): A kernel function that computes the similarity
            or dissimilarity between two vectors. The function must
            accept two matrices with shape=(m,d).

    Returns:
        Upper triangular pairwise distances in "unrolled" form.
    """
    if f is None:
        def f(a, b):
            # Default metric: Euclidean distance.
            return np.sqrt(np.sum((a - b) ** 2, axis=1))
    # Indices of the strict upper triangle (diagonal excluded); the kernel
    # is evaluated on all pairs at once.
    rows, cols = np.triu_indices(x.shape[0], 1)
    return f(x[rows], x[cols])
def simulate_in_dymola(heaPum, data, tableName, tableFileName):
    """ Evaluate the heat pump performance from the model in Dymola.
    :param heaPum: Heat pump model (object).
    :param data: Reference performance data (object).
    :param tableName: Name of the combiTimeTable.
    :param tableFileName: Name of the text file containing the combiTimeTable.
    :return: Performance data of the modeled heat pump (object).
    .. note:: Performance data from the model is evaluated at the same
              operating conditions (inlet water temperatures and mass flow
              rates at the source and load sides) as in the reference data.
    """
    import buildingspy.simulate.Simulator as si
    from buildingspy.io.outputfile import Reader
    from scipy.interpolate import interp1d
    from builtins import str
    import getpass
    import os
    import tempfile
    # Find absolute path to buildings library
    packagePath = os.path.normpath(
        os.path.join(os.path.normpath(os.path.dirname(__file__)),
                     '..', '..', '..', '..', '..', '..'))
    # Create temporary directory for simulation files
    dirPrefix = tempfile.gettempprefix()
    tmpDir = tempfile.mkdtemp(prefix=dirPrefix + '-'
                              + 'HeatPumpCalibration' + '-'
                              + getpass.getuser() + '-')
    # Set parameters for simulation in Dymola
    calModelPath = heaPum.modelicaCalibrationModelPath()
    s = si.Simulator(calModelPath,
                     'dymola',
                     outputDirectory=tmpDir,
                     packagePath=packagePath)
    s = heaPum.set_ModelicaParameters(s)
    m1_flow_nominal = min(data.flowSource)
    m2_flow_nominal = min(data.flowLoad)
    # Modelica expects forward slashes in file paths, even on Windows.
    tableFilePath = \
        str(os.path.join(tmpDir, tableFileName).replace(os.sep, '/'))
    s.addParameters({'m1_flow_nominal': m1_flow_nominal,
                     'm2_flow_nominal': m2_flow_nominal,
                     'calDat.fileName': tableFilePath})
    # Write CombiTimeTable for dymola
    data.write_modelica_combiTimeTable(tableName, tmpDir,
                                       tableFileName, heaPum.CoolingMode)
    # Simulation parameters: one second of simulated time per data point.
    s.setStopTime(len(data.EWT_Source))
    s.setSolver('dassl')
    # Kill the process if it does not finish in 2 minutes
    s.setTimeOut(120)
    s.showProgressBar(False)
    s.printModelAndTime()
    # s.showGUI(show=True)
    # s.exitSimulator(exitAfterSimulation=False)
    s.simulate()
    # Read results
    modelName = heaPum.modelicaModelName()
    ofr = Reader(os.path.join(tmpDir, modelName), 'dymola')
    (time1, QCon) = ofr.values('heaPum.QCon_flow')
    (time1, QEva) = ofr.values('heaPum.QEva_flow')
    (time1, P) = ofr.values('heaPum.P')
    # Sample each trajectory at the midpoint of every data interval.
    t = [float(i) + 0.5 for i in range(len(data.EWT_Source))]
    f_P = interp1d(time1, P)
    P = f_P(t)
    f_QCon = interp1d(time1, QCon)
    QCon = f_QCon(t)
    f_QEva = interp1d(time1, QEva)
    QEva = f_QEva(t)
    # # Clean up
    # shutil.rmtree('calibrationModel')
    # In cooling mode the evaporator provides the useful capacity;
    # in heating mode it is the condenser.
    if heaPum.CoolingMode:
        Capacity = -QEva
        HR = QCon
    else:
        Capacity = QCon
        HR = -QEva
    dymRes = SimulationResults(data.EWT_Source,
                               data.EWT_Load,
                               data.flowSource,
                               data.flowLoad,
                               Capacity,
                               HR,
                               P,
                               'Modelica')
    return dymRes
def get_model_defaults(cls):
    """Collect the default values declared on a model class.

    Column-like attributes are recognised by their ``expression`` attribute;
    a callable default is invoked with ``cls.db`` (meant for simple factories
    such as datetime and uuid), a plain value is used as-is.

    Args:
        cls: (obj) : A Model class.
    Returns:
        dict : attribute name -> resolved default value.
    """
    defaults = {}
    for name, column in cls.__dict__.items():
        # Skip anything that is not a column-like object with a declared default.
        if not hasattr(column, "expression"):
            continue
        if column.expression.default is None:
            continue
        arg = column.expression.default.arg
        defaults[name] = arg(cls.db) if callable(arg) else arg
    return defaults
def mask_clip(mask_file, data_files, out_dir='.', pfb_outs=1, tif_outs=0) -> None:
    """Clip a list of data files against a full-dimension mask.

    Parameters
    ----------
    mask_file : str
        full_dim_mask file generated by the shapefile-to-mask utility
        (no_data, 0's = bbox, 1's = mask)
    data_files : list
        data files (tif, pfb) to clip from
    out_dir : str, optional
        output directory (Default value = '.')
    pfb_outs : int, optional
        write pfb files as outputs (Default value = 1)
    tif_outs : int, optional
        write tif files as outputs (Default value = 0)

    Returns
    -------
    None
    """
    # Load the full-dimension mask, build a clipper around it, then clip
    # every input file and write the requested output formats.
    subset_mask = SubsetMask(mask_file)
    clipper = MaskClipper(subset_mask=subset_mask, no_data_threshold=-1)
    clip_inputs(clipper,
                input_list=data_files,
                out_dir=out_dir,
                pfb_outs=pfb_outs,
                tif_outs=tif_outs)
def add_response_headers(headers=None):
    """Decorator factory: attach the given headers to every response."""
    headers = headers or {}

    def decorator(f):
        @wraps(f)
        def decorated_function(*args, **kwargs):
            # Coerce whatever the view returned into a real response object
            # so that headers can be attached to it.
            resp = make_response(f(*args, **kwargs))
            for name, value in headers.items():
                resp.headers[name] = value
            return resp
        return decorated_function

    return decorator
def delete(path:pathlike) -> None:
    """Attempt to delete the canonical source file.

    Dispatches on the protocol prefix: local files are unlinked, ``s3``
    objects are removed via boto3. Any failure during deletion is
    re-raised as DeleteError; unknown protocols raise
    UnsupportedProtocolError.
    """
    protocol, fpath = _decompose(str(path))
    if protocol not in ("file", "s3"):
        raise UnsupportedProtocolError(protocol)
    try:
        if protocol == "file":
            pathlib.Path(fpath).unlink()
        else:
            bucket_and_key = _decomposeS3(fpath)
            boto3.resource("s3").Object(*bucket_and_key).delete()
    except Exception as e:
        raise DeleteError(str(path), str(e))
def modules_in_pkg(pkg):
    """Yield the names of the modules in a python package (a module with a
    __init__.py file).

    :return: a generator of strings such as `['list', 'check']` that
        correspond to the module names in the package.
    """
    for module_info in pkgutil.walk_packages(pkg.__path__):
        # ModuleInfo is (finder, name, ispkg); only the name is of interest.
        yield module_info.name
def restore(request: Request, project_id: int) -> JSONResponse:  # pylint: disable=invalid-name,redefined-builtin
    """Restore a project and return its representation.

    Args:
        request {Request}: incoming HTTP request (logged for auditing)
        project_id {int}: project id
    Returns:
        starlette.responses.JSONResponse
    """
    log_request(request, {'project_id': project_id})
    manager = ProjectManager()
    manager.restore(project_id)
    restored = manager.get_project(project_id)
    return JSONResponse(restored, HTTPStatus.OK)
def _broadcast_arrays(x, y):
"""Broadcast arrays."""
# Cast inputs as numpy arrays
# with nonzero dimension
x = np.atleast_1d(x)
y = np.atleast_1d(y)
# Get shapes
xshape = list(x.shape)
yshape = list(y.shape)
# Get singltons that mimic shapes
xones = [1] * x.ndim
yones = [1] * y.ndim
# Broadcast
x = np.tile(np.reshape(x, xshape + yones), xones + yshape)
y = np.tile(np.reshape(y, xones + yshape), xshape + yones)
# Return broadcast arrays
return x, y | 33,750 |
def Output(primitive_spec):
    """Mark a typespec as output."""
    # Build the typespec, then stamp its signal direction as OUTPUT.
    spec = BuildTypespec(primitive_spec)
    spec.meta.sigdir = M.SignalDir.OUTPUT
    return spec
def test__conformer_trunk():
    """ test dir_.conformer_trunk
    """
    ddir = dir_.conformer_trunk()
    # The trunk directory must not pre-exist, and must exist after create().
    assert not ddir.exists(PREFIX)
    ddir.create(PREFIX)
    assert ddir.exists(PREFIX)
def cb_init_hook(optname, value):
    """exec arbitrary code to set sys.path for instance"""
    # SECURITY NOTE(review): `value` is executed verbatim. This is only safe
    # while the option value comes from a trusted local configuration —
    # confirm it can never be attacker-controlled.
    exec(value)
def save_predictions_df(predictions_df: np.ndarray,
                        directory: str,
                        last_observation_date: str,
                        forecast_horizon: int,
                        model_description: Optional[Dict[str, str]],
                        dataset_name: str,
                        dataset_index_key: str,
                        cadence: int,
                        extra_info: Optional[Dict[str, str]],
                        features_used: Optional[Sequence[str]] = None) -> str:
    """Saves a formatted predictions dataframe and updates a forecast indexer.
    Args:
        predictions_df: a dataframe of predictions, with columns ['date', 'site_id',
            'prediction', 'target_name']
        directory: the base directory to store indexes and forecasts.
        last_observation_date: the date string corresponding to the last date of
            data that the model had access to during training.
        forecast_horizon: the maximum number of days into the future that the model
            predicts.
        model_description: optional description of the model.
        dataset_name: the name of the dataset.
        dataset_index_key: the unique key into the dataset index that contains the
            training dataset that the model was trained on.
        cadence: the cadence in days of the predictions. i.e. daily predictions have
            a cadence of 1, weekly predictions have a cadence of 7.
        extra_info: a dict of any additional information to store with the
            forecasts.
        features_used: the features that were used as inputs to produce the
            forecasts.
    Returns:
        the unique forecast ID that this forecast is saved under.
    """
    # NOTE(review): despite the np.ndarray annotation, predictions_df is used
    # as a pandas DataFrame (to_csv below) — consider correcting the hint.
    unique_key = base_indexing.get_unique_key()
    forecast_directory = os.path.join(directory, "forecasts")
    if not os.path.exists(forecast_directory):
        os.makedirs(forecast_directory)
    output_filepath = os.path.join(forecast_directory,
                                   f"forecasts_{unique_key}.csv")
    # Guard against a unique-key collision silently overwriting a forecast.
    assert not os.path.exists(output_filepath), (
        f"Forecasts already exist at {output_filepath}")
    with open(output_filepath, "w") as fid:
        predictions_df.to_csv(fid, index=False)
    logging.info("Saved model forecasts with forecast ID %s to %s", unique_key,
                 output_filepath)
    # NOTE: when a dict is passed in, extra_info is mutated in place below.
    extra_info = extra_info or {}
    extra_info["forecast_horizon"] = forecast_horizon
    if model_description is not None:
        extra_info["model_description"] = model_description
    current_datetime = datetime.datetime.utcnow()
    # Resolve the training dataset's file location from the dataset index.
    dataset_index = dataset_indexing.DatasetIndex(directory, dataset_name)
    dataset_location = dataset_index.get_entry(dataset_index_key)["file_location"]
    entry = build_entry(
        forecast_id=unique_key,
        file_location=output_filepath,
        dataset_name=dataset_name,
        last_observation_date=last_observation_date,
        creation_timestamp=current_datetime.strftime(constants.DATETIME_FORMAT),
        dataset_index_key=dataset_index_key,
        dataset_location=dataset_location,
        cadence=cadence,
        features_used=features_used,
        extra_info=extra_info)
    base_indexing.open_index_and_add_entry(
        directory, dataset_name, index_class=ForecastIndex, key=unique_key,
        entry=entry)
    return unique_key
def partitionFromMask(mask):
    """ Return the start and end address of the first substring without
    wildcards.

    A wildcard is the character '*'.  For ``"ab*cd"`` the result is
    ``(0, 1)``; for a wildcard-free mask it is ``(0, len(mask) - 1)``.

    The previous implementation returned the LAST wildcard-free run
    (contradicting this docstring), returned garbage such as (2, 1) for
    ``"ab*"``, and raised NameError for empty or all-wildcard masks.

    Raises:
        ValueError: if `mask` contains no non-wildcard character at all.
    """
    start = None
    for i, ch in enumerate(mask):
        if ch == '*':
            if start is not None:
                # First wildcard-free run ends just before this '*'.
                return start, i - 1
        elif start is None:
            # First non-wildcard character: the run begins here.
            start = i
    if start is None:
        raise ValueError("mask contains no wildcard-free substring")
    # The run extends to the end of the mask.
    return start, len(mask) - 1
def test_invalid_method_in_check_parameters(method: str) -> None:
    """Test error in check_parameters when invalid method is selected."""
    estimator = MapieRegressor(DummyRegressor(), method=method)
    # Fitting with an unknown method must fail with a descriptive ValueError.
    with pytest.raises(ValueError, match=r".*Invalid method.*"):
        estimator.fit(X_boston, y_boston)
def add_documents_to_index(documents, index, retries=DEFAULT_NUM_RETRIES):
    """Adds a document to an index.
    Args:
      - documents: a list of documents. Each document should be a dictionary.
          Every key in the document is a field name, and the corresponding
          value will be the field's value.
          If there is a key named 'id', its value will be used as the
          document's id.
          If there is a key named 'rank', its value will be used as
          the document's rank.
          By default, search results are returned ordered by descending rank.
          If there is a key named 'language_code', its value will be used as
          the document's language. Otherwise, constants.DEFAULT_LANGUAGE_CODE is
          used.
      - index: the name of the index to insert the document into, a string.
      - retries: the number of times to retry inserting the documents.
    Returns:
        returns a list of document ids of the documents that were added.
    Raises:
      - SearchFailureError: raised when the indexing fails. If it fails for any
        document, none will be inserted.
      - ValueError: raised when invalid values are given.
    """
    if not isinstance(index, python_utils.BASESTRING):
        raise ValueError(
            'Index must be the unicode/str name of an index, got %s'
            % type(index))
    index = gae_search.Index(index)
    gae_docs = [_dict_to_search_document(d) for d in documents]
    try:
        logging.debug('adding the following docs to index %s: %s',
                      index.name, documents)
        results = index.put(gae_docs, deadline=5)
    except gae_search.PutError as e:
        logging.exception('PutError raised.')
        if retries > 1:
            for res in e.results:
                if res.code == gae_search.OperationResult.TRANSIENT_ERROR:
                    # Retry only for transient failures; recurse with one
                    # fewer attempt remaining.
                    # NOTE(review): the retry re-submits the FULL document
                    # list, not just the failed ones — confirm put() is
                    # idempotent for already-indexed documents.
                    new_retries = retries - 1
                    logging.debug('%d tries left, retrying.' % (new_retries))
                    return add_documents_to_index(
                        documents=documents,
                        index=index.name,
                        retries=new_retries)
        # At this point, either we don't have any tries left, or none of the
        # results has a transient error code.
        raise SearchFailureError(e)
    return [r.id for r in results]
def mask_to_bias(mask: Array, dtype: jnp.dtype) -> Array:
    """Converts a boolean mask into an additive, bias-like array.

    Arguments:
      mask: <bool> array of arbitrary shape
      dtype: jnp.dtype, desired dtype of the returned array

    Returns:
      bias: array of the same shape as `mask`, holding 0 where the mask is
        truthy and -1e10 where it is falsy.
    """
    zeros = jnp.full(mask.shape, 0).astype(dtype)
    negatives = jnp.full(mask.shape, -1e10).astype(dtype)
    return lax.select(mask, zeros, negatives)
def external_dependency(dirname, svnurl, revision):
    """Check out (if necessary) a given fixed revision of a svn url.

    Returns True when the checkout is present and at the pinned revision,
    False when the svn checkout command fails.  Exits the process when an
    existing checkout is at the wrong revision.

    NOTE: this is Python 2 code (print-chevron syntax).
    """
    dirpath = py.magic.autopath().dirpath().join(dirname)
    # Marker file recording which svn revision the checkout corresponds to.
    revtag = dirpath.join('-svn-rev-')
    if dirpath.check():
        # Directory already exists: refuse to proceed if it is missing the
        # marker or pinned to a different revision.
        if not revtag.check() or int(revtag.read()) != revision:
            print >> sys.stderr, ("Out-of-date benchmark checkout!"
                                  " I won't update it automatically.")
            print >> sys.stderr, ("To continue, move away or remove the "
                                  "%r directory." % (dirname,))
            sys.exit(1)
        return True
    # Pin both the operative revision (-r) and the peg revision (@).
    CMD = "svn co -r%d %s@%d %s" % (revision, svnurl, revision, dirpath)
    print >> sys.stderr, CMD
    err = os.system(CMD)
    if err != 0:
        print >> sys.stderr, "* checkout failed, skipping this benchmark"
        return False
    revtag.write(str(revision))
    return True
def optimize_clustering(
    data,
    algorithm_names: Union[Iterable, str] = variables_to_optimize.keys(),
    algorithm_parameters: Optional[Dict[str, dict]] = None,
    random_search: bool = True,
    random_search_fraction: float = 0.5,
    algorithm_param_weights: Optional[dict] = None,
    algorithm_clus_kwargs: Optional[dict] = None,
    evaluation_methods: Optional[list] = None,
    gold_standard: Optional[Iterable] = None,
    metric_kwargs: Optional[dict] = None,
) -> tuple:
    """
    Runs through many clusterers and parameters to get best clustering labels.
    Args:
        data: Dataframe with elements to cluster as index and examples as columns.
        algorithm_names: Which clusterers to try. Default is in variables_to_optimize.Can also
            put 'slow', 'fast' or 'fastest' for subset of clusterers. See hypercluster.constants.speeds.
        algorithm_parameters: Dictionary of str:dict, with parameters to optimize for each clusterer. Ex. structure:: {'clusterer1':{'param1':['opt1', 'opt2', 'opt3']}}.
        random_search: Whether to search a random selection of possible parameters or all possibilities. Default True.
        random_search_fraction: If random_search is True, what fraction of the possible parameters to search, applied to all clusterers. Default 0.5.
        algorithm_param_weights: Dictionary of str: dictionaries. Ex format - {'clusterer_name': {'parameter_name':{'param_option_1':0.5, 'param_option_2':0.5}}}.
        algorithm_clus_kwargs: Dictionary of additional kwargs per clusterer.
        evaluation_methods: Str name of evaluation metric to use. For options see
            hypercluster.categories.evaluations. Default silhouette.
        gold_standard: If using a evaluation needs ground truth, must provide ground truth labels. For options see hypercluster.constants.need_ground_truth.
        metric_kwargs: Additional evaluation metric kwargs.
    Returns:
        Best labels, dictionary of clustering evaluations, dictionary of all clustering labels
    """
    # Normalise all optional mapping arguments to empty dicts.
    if algorithm_param_weights is None:
        algorithm_param_weights = {}
    if algorithm_clus_kwargs is None:
        algorithm_clus_kwargs = {}
    if algorithm_parameters is None:
        algorithm_parameters = {}
    if metric_kwargs is None:
        metric_kwargs = {}
    if evaluation_methods is None:
        evaluation_methods = inherent_metrics
    # Allow a category alias (e.g. 'fast') in place of explicit clusterer names.
    if algorithm_names in list(categories.keys()):
        algorithm_names = categories[algorithm_names]
    clustering_labels = {}
    clustering_labels_df = pd.DataFrame()
    for clusterer_name in algorithm_names:
        # Fit one AutoClusterer per algorithm over its parameter grid.
        label_df = (
            AutoClusterer(
                clusterer_name=clusterer_name,
                params_to_optimize=algorithm_parameters.get(clusterer_name, None),
                random_search=random_search,
                random_search_fraction=random_search_fraction,
                param_weights=algorithm_param_weights.get(clusterer_name, None),
                clus_kwargs=algorithm_clus_kwargs.get(clusterer_name, None),
            )
            .fit(data)
            .labels_
        )
        label_df.index = pd.MultiIndex.from_tuples(label_df.index)
        clustering_labels[clusterer_name] = label_df
        # Put all parameter labels into 1 for a big df
        label_df = label_df.transpose()
        cols_for_labels = label_df.index.to_frame()
        # Encode "clusterer<delim>param<delim>value..." strings as column ids.
        inds = cols_for_labels.apply(
            lambda row: param_delim.join(
                [clusterer_name]
                + ["%s%s%s" % (k, val_delim, v) for k, v in row.to_dict().items()]
            ),
            axis=1,
        )
        label_df.index = inds
        label_df = label_df.transpose()
        clustering_labels_df = pd.concat(
            [clustering_labels_df, label_df], join="outer", axis=1
        )
    # Score every labelling with every requested evaluation metric.
    evaluation_results_df = pd.DataFrame({"methods": evaluation_methods})
    for col in clustering_labels_df.columns:
        evaluation_results_df[col] = evaluation_results_df.apply(
            lambda row: evaluate_results(
                clustering_labels_df[col],
                method=row["methods"],
                data=data,
                gold_standard=gold_standard,
                metric_kwargs=metric_kwargs.get(row["methods"], None),
            ),
            axis=1,
        )
    return evaluation_results_df, clustering_labels_df, clustering_labels
def bashhub():
    """Bashhub command line client"""
    # Intentionally empty: this is the root CLI group (presumably a click
    # group — confirm the decorator above this view); subcommands supply the
    # behaviour and the docstring doubles as the --help text.
    pass
def sql_fingerprint(query, hide_columns=True):
    """
    Simplify a query, taking away exact values and fields selected.
    Imperfect but better than super explicit, value-dependent queries.
    """
    # Simplify the first parsed statement in place, then re-render it.
    statement = parse(query)[0]
    sql_recursively_simplify(statement, hide_columns=hide_columns)
    return str(statement)
def contfrac(x, y):
    """
    Yield the continued-fraction coefficients of x/y
    (Euclidean-algorithm style; stops when the remainder chain hits 0).
    """
    while y:
        quotient, remainder = divmod(x, y)
        yield quotient
        x, y = y, remainder
def get_account():
    """Return one account and cache account key for future reuse if needed"""
    global _account_key
    if _account_key:
        # Fast path: fetch the entity via the cached key.
        return _account_key.get()
    account = Account.query().get()
    _account_key = account.key
    return account
def test_determine_reference_type_recursive() -> None:
    """Should return recursive reference type."""
    assert determine_reference_type("#/test") == RECURSIVE_REFERENCE
def resetGTFAttributes(infile, genome, gene_ids, outfile):
    """set GTF attributes in :term:`gtf` formatted file so that they are
    compatible with cufflinks.
    This method runs cuffcompare with `infile` against itself to add
    attributes such as p_id and tss_id.
    Arguments
    ---------
    infile : string
        Filename of :term:`gtf`-formatted input file
    genome : string
        Filename (without extension) of indexed genome file
        in :term:`fasta` format.
    gene_ids : dict
        Dictionary mapping transcript ids to gene ids.
    outfile : string
        Output filename in :term:`gtf` format
    """
    tmpfile1 = P.get_temp_filename(shared=True)
    tmpfile2 = P.get_temp_filename(shared=True)
    #################################################
    E.info("adding tss_id and p_id")
    # The p_id attribute is set if the fasta sequence is given.
    # However, there might be some errors in cuffdiff downstream:
    #
    # cuffdiff: bundles.cpp:479: static void HitBundle::combine(const std::
    # vector<HitBundle*, std::allocator<HitBundle*> >&, HitBundle&): Assertion
    # `in_bundles[i]->ref_id() == in_bundles[i-1]->ref_id()' failed.
    #
    # I was not able to resolve this, it was a complex
    # bug dependent on both the read libraries and the input reference gtf
    # files
    job_memory = "5G"
    # Transparently handle gzip-compressed input.
    if infile.endswith(".gz"):
        cat = "zcat"
    else:
        cat = "cat"
    # cuffcompare the file against itself (reference and two inputs are all
    # `infile`) so that tss_id/p_id attributes get added.
    statement = '''
    cuffcompare -r <( %(cat)s %(infile)s )
         -T
         -s %(genome)s.fa
         -o %(tmpfile1)s
         <( %(cat)s %(infile)s )
         <( %(cat)s %(infile)s )
    >& %(outfile)s.log
    '''
    P.run(statement)
    #################################################
    E.info("resetting gene_id and transcript_id")
    # reset gene_id and transcript_id to ENSEMBL ids
    # cufflinks patch:
    # make tss_id and p_id unique for each gene id
    outf = iotools.open_file(tmpfile2, "w")
    map_tss2gene, map_pid2gene = {}, {}
    inf = iotools.open_file(tmpfile1 + ".combined.gtf")
    # Helper: make `val` unique per gene by appending 'a' until it is either
    # unused or already mapped to this gene.  NOTE(review): reads `gene_id`
    # from the enclosing loop at call time (late binding) — it must only be
    # called from inside the GTF iteration below.
    def _map(gtf, key, val, m):
        if val in m:
            while gene_id != m[val]:
                val += "a"
                if val not in m:
                    break
        m[val] = gene_id
        gtf.setAttribute(key, val)
    for gtf in GTF.iterator(inf):
        transcript_id = gtf.oId
        gene_id = gene_ids[transcript_id]
        gtf.transcript_id = transcript_id
        gtf.gene_id = gene_id
        # set tss_id
        try:
            tss_id = gtf.tss_id
        except KeyError:
            tss_id = None
        try:
            p_id = gtf.p_id
        except KeyError:
            p_id = None
        if tss_id:
            _map(gtf, "tss_id", tss_id, map_tss2gene)
        if p_id:
            _map(gtf, "p_id", p_id, map_pid2gene)
        outf.write(str(gtf) + "\n")
    outf.close()
    # sort gtf file
    geneset.sortGTF(tmpfile2, outfile)
    # make sure tmpfile1 is NEVER empty
    # assert tmpfile1
    # for x in glob.glob(tmpfile1 + "*"):
    #     os.unlink(x)
    # os.unlink(tmpfile2)
def cli_args_exec(args):
    """
    potcar args_exec
    """
    if args.DEBUG:
        print(__file__)
    # Dispatch: test mode, list mode, or POTCAR generation (default).
    if args.test:
        test(args.test_dir)
        return
    if args.list:
        get_avail_pot(args.pp_names, args.ptype, True)
        return
    gen_potcar(args.pp_names, args.ptype, args.dirname, args.preview)
def is_seq_of(
    seq: Sequence, expected_type: type, seq_type: Optional[type] = None
) -> bool:
    """Check whether `seq` is a sequence whose items all have a given type.

    Args:
        seq (Sequence):
            Sequence to be checked.
        expected_type (type):
            Expected type of sequence items.
        seq_type (type, optional):
            Expected sequence type; defaults to `collections.abc.Sequence`.
    """
    if seq_type is None:
        container_type = abc.Sequence
    elif isinstance(seq_type, type):
        container_type = seq_type
    else:
        raise TypeError(f"`seq_type` must be a valid type. But got: {seq_type}.")
    if not isinstance(seq, container_type):
        return False
    return all(isinstance(item, expected_type) for item in seq)
def mutate_single_residue(atomgroup, new_residue_name):
    """
    Mutates the residue into new_residue_name. Only the backbone atoms are
    retained, plus CB unless the new residue is GLY. If the group spans more
    than one residue name, or already carries new_residue_name, it is
    returned untouched.
    """
    resnames = atomgroup.resnames()
    # Multiple residue names, or already the target residue: no-op.
    if len(resnames) != 1 or resnames[0] == new_residue_name:
        return atomgroup
    kept_names = ["C", "CA", "N", "O"]
    if new_residue_name != 'GLY':
        kept_names.append("CB")
    mutated = select_atoms_by_name(atomgroup, kept_names)
    for atom in mutated:
        atom.resname = new_residue_name
    return mutated
def test_initial_state():
    """Check the initial state of the class."""
    collector = StackOverflowCollector()
    assert collector is not None
def future_bi_end_f30_base(s: [Dict, OrderedDict]):
    """Futures 30-minute bi (stroke) ending factor.

    Falls back to Factors.Other when any of the required frequency levels
    (F30, F5, F1) is missing from the signal dict's level list.
    """
    v = Factors.Other.value
    for f_ in [Freq.F30.value, Freq.F5.value, Freq.F1.value]:
        if f_ not in s['级别列表']:
            warnings.warn(f"{f_} not in {s['级别列表']},默认返回 Other")
            return v
    # Factors for opening long positions
    # --------------------------------------------------------------------------------------------------------------
    long_opens = {
        Factors.L2A0.value: [
            [f"{Freq.F30.value}_倒1表里关系#{Signals.BD0.value}"],
        ]
    }
    for name, factors in long_opens.items():
        for factor in factors:
            if match_factor(s, factor):
                v = name
    # Factors for closing long positions
    # (checked after the open factors, so an exit match overrides an entry match)
    # --------------------------------------------------------------------------------------------------------------
    long_exits = {
        Factors.S2A0.value: [
            [f"{Freq.F30.value}_倒1表里关系#{Signals.BU0.value}"],
        ]
    }
    for name, factors in long_exits.items():
        for factor in factors:
            if match_factor(s, factor):
                v = name
    return v
def deleteFile(fileName):
    """
    Remove a file from disk.

    Propagates OSError (e.g. FileNotFoundError) from the OS call.
    """
    os.unlink(fileName)
def convert_pre_to_021(cfg):
    """Convert config standard 0.20 into 0.21
    Revision 0.20 is the original standard, which lacked a revision.
    Variables moved from top level to inside item 'variables'.
    Ocean Sites nomenclature moved to CF standard vocabulary:
    - TEMP -> sea_water_temperature
    - PSAL -> sea_water_salinity

    NOTE: the per-variable fix-up helpers below mutate `cfg` in place.
    """
    def label(v):
        """Convert Ocean Sites vocabulary to CF standard names
        """
        if v == 'PRES':
            return 'sea_water_pressure'
        if v == 'TEMP':
            return 'sea_water_temperature'
        elif v == 'PSAL':
            return 'sea_water_salinity'
        else:
            return v
    keys = list(cfg.keys())
    # OrderedDict keeps 'revision'/'inherit'/'common' ahead of 'variables'.
    output = OrderedDict()
    output['revision'] = '0.21'
    if 'inherit' in keys:
        output['inherit'] = cfg['inherit']
        keys.remove('inherit')
    # 'main' was renamed to 'common'; accept either spelling on input.
    if 'main' in cfg:
        output['common'] = cfg['main']
        keys.remove('main')
    elif 'common' in cfg:
        output['common'] = cfg['common']
        keys.remove('common')
    def fix_threshold(cfg):
        """Explicit threshold"""
        # Bare numbers become {"threshold": value} mappings.
        for t in cfg:
            if isinstance(cfg[t], (int, float)):
                cfg[t] = {"threshold": cfg[t]}
        return cfg
    def fix_regional_range(cfg):
        """Explicit regions
        """
        if "regional_range" in cfg:
            cfg["regional_range"] = {"regions": cfg["regional_range"]}
        return cfg
    def fix_profile_envelop(cfg):
        """Explicit layers
        Note
        ----
        Should I confirm that cfg['profile_envelop'] is a list?
        """
        if "profile_envelop" in cfg:
            cfg["profile_envelop"] = {"layers": cfg["profile_envelop"]}
        return cfg
    output['variables'] = OrderedDict()
    # Remaining keys are variable names: normalise their tests and re-key
    # them under CF standard names.
    for k in keys:
        cfg[k] = fix_threshold(cfg[k])
        cfg[k] = fix_regional_range(cfg[k])
        cfg[k] = fix_profile_envelop(cfg[k])
        output['variables'][label(k)] = cfg[k]
        # output[k] = cfg[k]
    return output
def test_no_timeout_locate_ns_existing(nsproxy):
    """
    Locating a NS that exists with no timeout should be OK.
    """
    address = nsproxy.addr()
    locate_ns(address, timeout=0.)
def get_alerts_alarms_object():
    """ Helper: query SystemEvent alerts/alarms and render them as json. """
    # Derive query/definition filters from the request arguments.
    event_filters, definition_filters = get_query_filters(request.args)
    if event_filters is None:
        alerts_alarms = db.session.query(SystemEvent).all()
    else:
        alerts_alarms = db.session.query(SystemEvent).filter_by(**event_filters)
    if alerts_alarms is None:
        return []
    # Render json, applying the definition filters; normalise None to [].
    result_json = get_alert_alarm_json(alerts_alarms, definition_filters)
    return result_json if result_json is not None else []
def data_cubes_combine_by_pixel(filepath, gal_name):
    """
    Grabs datacubes and combines them by pixel using addition, finding the mean
    and the median.  The combined results are also pickled to disk (side
    effect) next to the input files.
    Parameters
    ----------
    filepath : list of str
        the data cubes filepath strings to pass to glob.glob
    gal_name : str
        galaxy name/descriptor
    Returns
    -------
    lamdas : :obj:'~numpy.ndarray'
        the wavelength vector for the cubes
    cube_added : :obj:'~numpy.ndarray'
        all cubes added
    cube_mean : :obj:'~numpy.ndarray'
        the mean of all the cubes
    cube_median : :obj:'~numpy.ndarray'
        the median of all the cubes
    header : FITS header object
        the header from the fits file (of the last file read)
    """
    #create list to append datas to
    all_data = []
    all_var = []
    all_lamdas = []
    #iterate through the filenames
    #they should all be from fits files, so we can just use that loading function
    for file in glob.glob(filepath):
        # read_in_data_fits returns an extra variance array for some files.
        fits_stuff = read_in_data_fits(file)
        if len(fits_stuff) > 3:
            lamdas, data, var, header = fits_stuff
            all_var.append(var)
        else:
            lamdas, data, header = fits_stuff
        #apply corrections to lambdas
        lamdas = air_to_vac(lamdas)
        lamdas = barycentric_corrections(lamdas, header)
        all_lamdas.append(lamdas)
        #apply Milky Way extinction correction
        data = milky_way_extinction_correction(lamdas, data)
        #append the data
        all_data.append(data)
    #check if var has the same number of cubes as the data, and if it doesn't, delete it
    # (only unbinds the local name; a partial variance set is simply discarded)
    if len(all_data) > len(all_var):
        del all_var
    #because the exposures are so close together, the difference in lamda between
    #the first to the last is only around 0.001A. There's a difference in the
    #total length of about 0.0003A between the longest and shortest wavelength
    #vectors after the corrections. So I'm taking the median across the whole
    #collection. This does introduce some error, making the line spread function
    #of the averaged spectra larger.
    lamdas = np.median(all_lamdas, axis=0)
    #adding the data
    cube_added = np.zeros_like(all_data[0])
    for cube in all_data:
        cube_added += cube
    #finding the mean
    cube_mean = np.mean(all_data, axis=0)
    #finding the median
    cube_median = np.median(all_data, axis=0)
    #if all_var in locals():
    #adding the variances
    #pickle the results (filename embeds the galaxy name and today's date)
    with open(filepath.split('*')[0]+'_'+gal_name+'_combined_by_pixel_'+str(date.today()),'wb') as f:
        pickle.dump([lamdas, cube_added, cube_mean, cube_median], f)
    f.close()
    return lamdas, cube_added, cube_mean, cube_median, header
def usdm_bypoint_service(
    fmt: SupportedFormats,
):
    """Replaced above."""
    payload = handler(fmt)
    return Response(payload, media_type=MEDIATYPES[fmt])
def read_geonames(filename):
    """
    Parse a geonames dump file into a pandas.DataFrame. Files may be
    downloaded from http://download.geonames.org/export/dump/; they should
    be unzipped and in a "geonames table" format.
    """
    import pandas as pd
    # Module-level _GEONAMES_PANDAS_PARAMS supplies the column schema etc.
    frame = pd.read_csv(filename, **_GEONAMES_PANDAS_PARAMS)
    return frame
def create_property_map(cls, property_map=None):
    """ Helper function for creating property maps.

    A callable map is invoked with `cls`; a plain mapping is shallow-copied
    so the caller's dict is never mutated; a missing/falsy map yields {}.
    """
    if not property_map:
        return {}
    if callable(property_map):
        return property_map(cls)
    return property_map.copy()
def test_payload_with_address_invalid_chars(api_server_test_instance):
    """ Addresses cannot have invalid characters in it. """
    invalid_address = "0x61c808d82a3ac53231750dadc13c777b59310bdg"  # g at the end is invalid
    channel_data_obj = {
        "partner_address": invalid_address,
        "token_address": "0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8",
        "settle_timeout": 10,
    }
    url = api_url_for(api_server_test_instance, "channelsresource")
    response = grequests.put(url, json=channel_data_obj).send().response
    assert_response_with_error(response, HTTPStatus.BAD_REQUEST)
def readNetCDF(filename, varName='intensity'):
    """
    Read variable `varName` from a netCDF file and return it as a float array.

    :param filename: path to the netCDF file.
    :param varName: name of the variable to extract (default 'intensity').
    :return: the variable's data as a float array.
    """
    import Scientific.IO.NetCDF
    ncfile = Scientific.IO.NetCDF.NetCDFFile(filename, "r")
    try:
        var1 = ncfile.variables[varName]
        data = sp.array(var1.getValue(), dtype=float)
    finally:
        # Close the file even when the variable lookup/read fails; the
        # original leaked the handle on any exception before close().
        ncfile.close()
    return data
def show(ctx, name_only, cmds, under, fields, format, **kwargs):
    """Show the parameters of a command"""
    # Default to every known command; optionally restrict by prefix.
    cmds = cmds or sorted(config.parameters.readonly.keys())
    if under:
        cmds = [cmd for cmd in cmds if cmd.startswith(under)]
    with TablePrinter(fields, format) as tp, Colorer(kwargs) as colorer:
        for cmd_name in cmds:
            if name_only:
                click.echo(cmd_name)
            else:
                cmd = get_command_safe(cmd_name)
                # Re-created every iteration, so it captures the current
                # cmd_name; renders one profile's parameters shell-quoted.
                def get_line(profile_name):
                    return ' '.join(
                        [quote(p) for p in config.parameters.all_settings.get(profile_name, {}).get(cmd_name, [])])
                if config.parameters.readprofile == 'settings-file':
                    args = config.parameters.readonly.get(cmd_name, [])
                else:
                    # Colorize the per-profile values for display.
                    values = {profile.name: get_line(profile.name) for profile in config.all_enabled_profiles}
                    args = colorer.colorize(values, config.parameters.readprofile)
                if args == ['']:
                    # the command most likely has implicit settings and only
                    # explicit values are asked for. Skip it
                    continue
                if cmd is None:
                    LOGGER.warning('You should know that the command {} does not exist'.format(cmd_name))
                args = args or 'None'
                tp.echo(cmd_name, args)
def make_count_set(conds, r):
    """
    returns an r session with a new count data set loaded as cds
    """
    # Earlier variant kept for reference (explicit factor conversion).
    #r.assign('conds', vectors.StrVector.factor(vectors.StrVector(conds)))
    # Export the condition labels into the R session as `conds`.
    r.assign('conds', vectors.StrVector(conds))
    # Build the DSS count data set.
    # NOTE(review): assumes `count_matrix` was already assigned in this R
    # session by an earlier step — confirm upstream.
    r('''
    require('DSS')
    cds = newSeqCountSet(count_matrix, conds)
    ''')
    return r
def RunCommand(cmd, timeout_time=None, retry_count=3, return_output=True,
               stdin_input=None):
    """Spawn and retry a subprocess to run the given shell command.

    Retries up to `retry_count` additional times when the command times out
    waiting for a response; any other exception propagates immediately.

    Args:
      cmd: shell command to run
      timeout_time: time in seconds to wait for command to run before aborting.
      retry_count: number of times to retry command
      return_output: if True return output of command as string. Otherwise,
        direct output of command to stdout.
      stdin_input: data to feed to stdin
    Returns:
      output of command
    """
    while True:
        try:
            return RunOnce(cmd, timeout_time=timeout_time,
                           return_output=return_output,
                           stdin_input=stdin_input)
        except errors.WaitForResponseTimedOutError:
            if retry_count == 0:
                raise
            retry_count -= 1
            logger.Log("No response for %s, retrying" % cmd)
def get_clusters_low_z(min_mass=10**4, basepath='/lustre/scratch/mqezlou/TNG300-1/output'):
    """Write positions/masses of massive z ~ 0 halos to clusters_TNG300-1.hdf5.

    Args:
        min_mass: minimum GroupMass (catalogue mass units) for a halo to be kept.
        basepath: path to the simulation output tree.

    Returns:
        0 on success.
    """
    halos = il.groupcat.loadHalos(basepath, 98, fields=['GroupMass', 'GroupPos', 'Group_R_Crit200'])
    ind = np.where(halos['GroupMass'][:] > min_mass)
    with h5py.File('clusters_TNG300-1.hdf5', 'w') as f:
        f['Mass'] = halos['GroupMass'][ind]
        f['Group_R_Crit200'] = halos['Group_R_Crit200'][ind]
        f['x'], f['y'], f['z'] = (halos['GroupPos'][ind[0], 0],
                                  halos['GroupPos'][ind[0], 1],
                                  halos['GroupPos'][ind[0], 2])
    # No explicit close: the `with` block closes the file (the original
    # redundantly called f.close() inside the context manager).
    return 0
def test_wait_for_db(mocker):
    """Test waiting for db"""
    mocker.patch("time.sleep", return_value=True)
    ensure_conn = mocker.patch(
        "django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection",
        return_value=None,
    )
    # Fail five times, then succeed on the sixth attempt.
    ensure_conn.side_effect = [OperationalError] * 5 + [True]
    call_command("wait_for_db")
    assert ensure_conn.call_count == 6
def create_env(idf_revision):
    """
    Create ESP32 environment on home directory.

    Ensures the root directory exists, then creates a per-revision
    directory and downloads the matching ESP-IDF tree and Xtensa
    toolchain into it. If the directory already exists, nothing is
    downloaded.

    :param idf_revision: ESP-IDF revision identifier (used as dir name)
    :return: True in all cases
    """
    if not os.path.isdir(root_directory):
        create_root_dir()
    env_path = os.path.join(root_directory, idf_revision)
    if os.path.isdir(env_path):
        # Environment already set up; skip the downloads.
        print('Environment %s is already exists' % idf_revision)
        return True
    os.mkdir(env_path)
    download_idf(idf_revision, env_path)
    download_xtensa_toolchain(idf_revision, env_path)
    return True
def _serialize_key(key: rsa.RSAPrivateKeyWithSerialization) -> bytes:
    """Serialize an RSA private key as unencrypted, PEM-encoded bytes.

    Uses the TraditionalOpenSSL (PKCS#1) private-key format.
    """
    pem_encoding = serialization.Encoding.PEM
    pkcs1_format = serialization.PrivateFormat.TraditionalOpenSSL
    no_encryption = serialization.NoEncryption()
    return key.private_bytes(
        encoding=pem_encoding,
        format=pkcs1_format,
        encryption_algorithm=no_encryption,
    )
def test_decide_overwrite_var_full_path_resultdir():
    """options provided in cli get preference over the ones provided inside tests

    Builds a Namespace with only ``resultdir`` set and checks that
    decide_overwrite_var propagates it into ``ow_resultdir``.
    """
    # NOTE: the original also built an unused `temp_dir` pointing at
    # war_test3.xml; it was dead code and has been removed.
    resultdir = os.path.split(__file__)[0]
    namespace = Namespace(datafile=None, random_tc_execution=False,
                          wrapperfile=None, resultdir=resultdir, logdir=None,
                          outputdir=None, jobid=None, pythonpath=None,
                          genericdatafile=None, gen_select_rows=None,
                          gen_no_of_samples=None, gen_shuffle_columns=None,
                          gen_purge_db=None, gen_exec_tag=None, gen_report=None)
    result = warrior_cli_driver.decide_overwrite_var(namespace)
    assert result['ow_resultdir'] == resultdir
def main():
    """Pass command line arguments to the NCL time-series extraction script.

    Builds one ``ncl`` shell command per input file ('loop' mode) or a
    single command covering all files ('lump' mode), printing each command
    and executing it unless --dry-run is given.
    """
    config = conf.config(__doc__, sys.argv[1:])
    t0 = time.time()
    out_dir = '.' if config['out-dir'] is None else config['out-dir']
    # If no NCL script was given on the command line, fall back to the
    # module-level default.
    if config['ncl-script'] is None:
        ncl_script = NCL_SCRIPT
    else:
        # BUG FIX: this branch used to assign `ncl_code_dir`, leaving
        # `ncl_script` undefined and raising NameError when used below.
        ncl_script = config['ncl-script']
    cmd_files = config['<file>']
    # Add nc extension if needed
    nc_files = [f if f.endswith('.nc') else f + '.nc' for f in cmd_files]
    # Heights rendered as an NCL array literal, e.g. (/10,50,100/).
    hgts = config['height']
    hgts = '(/%s/)' % ','.join(map(str, hgts))
    mode = config['mode']
    dry_run = config['dry-run']
    loc = config['loc']
    opt = config['opt']
    print('\n*****************************************************')
    print('extract.py')
    if mode not in SUPPORTED_MODES:
        raise ConfigError("specified mode not supported")
    if mode == 'loop':
        # One NCL invocation per input file.
        for f in sorted(nc_files):
            path, name = os.path.split(f)
            out_file = out_dir + '/' + name.replace('wrfout', 'tseries')
            if os.path.exists(out_file):
                os.remove(out_file)
            in_file = f
            cmd = """NCL_OPT_FILE=%s ncl 'in_file="%s"' 'out_file="%s"' 'extract_heights=%s' 'loc_file="%s"' %s""" % (opt, in_file, out_file, hgts, loc, ncl_script)
            print(cmd)
            if not dry_run:
                subprocess.call(cmd, shell=True)
    elif mode == 'lump':
        # Single NCL invocation over all files; output name derives from
        # the first input file.
        f = nc_files[0]
        path, name = os.path.split(f)
        out_file = out_dir + '/' + name.replace('wrfout', 'tseries')
        if os.path.exists(out_file):
            # BUG FIX: os.rm() does not exist; os.remove() is the correct call.
            os.remove(out_file)
        # Build an NCL string-array literal: (/"a.nc","b.nc"/)
        files = '","'.join(sorted(nc_files))
        in_file = '(/"%s"/)' % files
        cmd = """NCL_OPT_FILE=%s ncl 'in_file=%s' 'out_file="%s"' 'extract_heights=%s' 'loc_file="%s"' %s""" % (opt, in_file, out_file, hgts, loc, ncl_script)
        print(cmd)
        if not dry_run:
            subprocess.call(cmd, shell=True)
    te = time.time() - t0
    print('elapsed time: %0.1f ' % te)
def ldns_key_algo_supported(*args):
    """Delegate to the low-level ``_ldns.ldns_key_algo_supported`` binding."""
    result = _ldns.ldns_key_algo_supported(*args)
    return result
def test_add_split():
    """Test add_split for time-, sample- and power-of-two-sized splits."""
    # db init: 60 s examples at 16 kHz (implied by the slice bounds below).
    EXAMPLE = get_dataset()
    db = EXAMPLE(paths={'data': os.path.join('data', 'data'),
                        'meta': os.path.join('data', 'data')})
    db.add('example_id', np.arange(len(db)), lazy=False)
    # Split into 5-second segments -> 12 segments per example.
    db_split = copy.deepcopy(db)
    db_split.add_split(5)
    assert len(db) * 60 / 5 == len(db_split)
    assert all(db_split['data'][0] == db['data'][0][0:16000 * 5])
    # BUG FIX: these two id comparisons were bare expressions (no-ops in the
    # original); they are now actual assertions that every segment of the
    # first example keeps the same example_id.
    assert db_split['example_id'][0] == db_split['example_id'][int(60 / 5 - 1)]
    # Split by a fixed number of samples (1 s at 16 kHz) -> 60 segments.
    db_split = copy.deepcopy(db)
    db_split.add_split(reference_key='data', split_size=16000, type='samples')
    assert len(db) * 60 == len(db_split)
    assert all(db_split['data'][0] == db['data'][0][0:16000])
    assert db_split['example_id'][0] == db_split['example_id'][59]
    # With the 'power2' constraint the segment length is rounded to 2**14.
    db_split = copy.deepcopy(db)
    db_split.add_split(reference_key='data', split_size=16000, type='samples',
                       constraint='power2')
    assert all(db_split['data'][0] == db['data'][0][0:2 ** 14])
def read_k_bytes(sock, remaining=0):
    """
    Read exactly `remaining` bytes from the socket.

    Blocks until the required bytes are available and returns the data
    read as raw bytes.

    Arguments
    ---------
    sock : Socket to inspect
    remaining : Number of bytes to read from socket.

    Raises
    ------
    ConnectionError : if the peer closes the connection before
        `remaining` bytes have been received.
    """
    chunks = []
    while remaining > 0:
        data = sock.recv(remaining)
        if not data:
            # BUG FIX: recv() returning b"" means the peer closed the
            # connection; the original looped forever in that case.
            raise ConnectionError(
                "socket closed with %d bytes still expected" % remaining)
        chunks.append(data)
        remaining -= len(data)
    # join() avoids the quadratic cost of repeated bytes concatenation.
    return b"".join(chunks)
def total_minutes(data):
    """Return the total number of minutes across all submitted talks.

    ``data`` maps a duration in minutes to the list of talks having that
    duration; the total is duration * number-of-talks, summed over all
    durations.
    """
    return sum(duration * len(talks) for duration, talks in data.items())
def test_repo_is_on_pypi_true():
    """'repo_is_on_pypi' reports True for a package known to exist on PyPI."""
    found = common_funcs.repo_is_on_pypi({"name": "pytest"})
    assert found
def grid_to_vector(grid, categories):
    """Transform a grid of active classes into a vector of labels.

    Each row of ``grid`` is mapped to the category of its highest-index
    active (nonzero) entry; for grids produced by
    :func:`ChildProject.metrics.segments_to_grid` this yields 'overlap'
    when several classes are active at the same time.

    :param grid: a NumPy array of shape ``(n, len(categories))``
    :type grid: numpy.array
    :param categories: the list of categories
    :type categories: list
    :return: the vector of labels of length ``n``
    :rtype: numpy.array
    """
    # argmax over the reversed rows finds the first maximum from the
    # right, i.e. the last active class in the original row order.
    last_active = grid.shape[1] - 1 - np.argmax(grid[:, ::-1], axis=1)
    return np.array([categories[idx] for idx in last_active])
def init_db():
    """Initialize the application database from ``schema.sql``.

    Opens the SQLite database configured at ``app.config['DATABASE']``,
    executes the schema DDL and commits. The connection is always closed,
    even if executing the schema fails.
    """
    db = sqlite3.connect(app.config['DATABASE'])
    try:
        db.row_factory = sqlite3.Row
        with open('schema.sql', 'r') as f:
            db.cursor().executescript(f.read())
        db.commit()
    finally:
        # BUG FIX: the original leaked the connection (never closed it).
        db.close()
def standard_script_options(usage, description):
    """Create option parser pre-populated with standard observation script options.
    Parameters
    ----------
    usage, description : string
        Usage and description strings to be used for script help
    Returns
    -------
    parser : :class:`optparse.OptionParser` object
        Parser populated with standard script options
    """
    parser = optparse.OptionParser(usage=usage, description=description)
    # --- Observation identification / bookkeeping options ---
    parser.add_option('--sb-id-code', type='string',
                      help='Schedule block id code for observation, '
                           'required in order to allocate correct resources')
    parser.add_option('-u', '--experiment-id',
                      help='Experiment ID used to link various parts of '
                           'experiment together (use sb-id-code by default, or random UUID)')
    parser.add_option('-o', '--observer',
                      help='Name of person doing the observation (**required**)')
    parser.add_option('-d', '--description', default='No description.',
                      help="Description of observation (default='%default')")
    # --- RF / data capture configuration ---
    parser.add_option('-f', '--centre-freq', type='float', default=1822.0,
                      help='Centre frequency, in MHz (default=%default)')
    parser.add_option('-r', '--dump-rate', type='float', default=1.0,
                      help='Dump rate, in Hz (default=%default)')
    # This option used to be in observe1, but did not make it to the
    # common set of options of observe1 / observe2
    # parser.add_option('-w', '--discard-slews', dest='record_slews', action='store_false', default=True,
    #                  help='Do not record all the time, i.e. pause while antennas are slewing to the next target')
    # Noise diode firing pattern: diode name plus on/off/period durations.
    parser.add_option('-n', '--nd-params', default='coupler,10,10,180',
                      help="Noise diode parameters as '<diode>,<on>,<off>,<period>', "
                           "in seconds or 'off' for no noise diode firing (default='%default')")
    # NOTE(review): `projections` and `default_proj` are module-level
    # globals — assumed to be defined elsewhere in this file.
    parser.add_option('-p', '--projection', type='choice',
                      choices=projections, default=default_proj,
                      help="Spherical projection in which to perform scans, "
                           "one of '%s' (default), '%s'" % (
                           projections[0], "', '".join(projections[1:])))
    # --- Execution behaviour flags ---
    parser.add_option('-y', '--dry-run', action='store_true', default=False,
                      help="Do not actually observe, but display script "
                           "actions at predicted times (default=%default)")
    parser.add_option('--stow-when-done', action='store_true', default=False,
                      help="Stow the antennas when the capture session ends")
    # --- DBE (correlator) configuration ---
    parser.add_option('--mode',
                      help="DBE mode to use for experiment, keeps current mode by default)")
    parser.add_option('--dbe-centre-freq', type='float', default=None,
                      help="DBE centre frequency in MHz, used to select coarse band for "
                           "narrowband modes (unchanged by default)")
    parser.add_option('--horizon', type='float', default=5.0,
                      help="Session horizon (elevation limit) in degrees (default=%default)")
    parser.add_option('--no-mask', action='store_true', default=False,
                      help="Keep all correlation products by not applying baseline/antenna mask")
    return parser | 33,798 |
def connect():
    """Liveness probe for the PostgreSQL database server.

    Attempts a connection to the service's in-cluster DNS name and runs a
    trivial query. Returns 1 on success and 0 on any failure; the
    connection is always closed.

    Relies on module-level settings: ``sv``, ``ns``, ``db``, ``port``,
    ``user`` and ``password``.
    """
    conn = None
    try:
        print('Connecting to the PostgreSQL database...')
        # Kubernetes service DNS name: <service>.<namespace>.svc.cluster.local
        url = f"{sv}.{ns}.svc.cluster.local"
        conn = psycopg2.connect(
            host=url,
            database=db,
            port=port,
            user=user,
            password=password
        )
        cur = conn.cursor()
        # A trivial query proves the server is actually responding,
        # not just accepting TCP connections.
        cur.execute('SELECT version()')
        cur.fetchone()
        cur.close()
        return 1
    except Exception:
        # Broad catch is deliberate: any failure means "not live".
        print('Liveness Failed')
        sys.stdout.flush()
        return 0
    finally:
        if conn is not None:
            conn.close()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.