content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def enforce_types(target):
    """Class decorator adding type checks to all member functions

    May be applied to a class (every plain function member is wrapped) or
    directly to a single function.  At call time, each explicitly passed
    argument is compared against the function's type annotation;
    un-annotated parameters are accepted unchecked.

    :param target: class or function to wrap
    :return: the same class (methods wrapped in place) or the wrapped function
    :raises TypeError: at call time, when an argument value does not match
        its annotation
    """
    def check_types(spec, *args, **kwargs):
        # Map positional args onto their parameter names, then overlay kwargs.
        # NOTE(review): defaults and *args/**kwargs are not bound here, so
        # only values explicitly passed by the caller are checked.
        parameters = dict(zip(spec.args, args))
        parameters.update(kwargs)
        for name, value in parameters.items():
            with suppress(KeyError):  # Assume un-annotated parameters can be any type
                type_hint = spec.annotations[name]
                if _is_unparameterized_special_typing(type_hint):
                    # bare special forms (e.g. typing.Any) — nothing to check
                    continue
                if hasattr(type_hint, "__args__") and type_hint.__args__ is not None:
                    # Parameterized generic (e.g. Union[...]): isinstance
                    # accepts a tuple, so check against the type arguments.
                    actual_type = type_hint.__args__
                else:
                    actual_type = type_hint
                if not isinstance(value, actual_type):
                    raise TypeError("Unexpected type for '{}' (expected {} but found {})"
                                    .format(name, type_hint, type(value)))
    def decorate(func):
        # capture the argspec once at decoration time, not per call
        spec = inspect.getfullargspec(func)
        @wraps(func)
        def wrapper(*args, **kwargs):
            check_types(spec, *args, **kwargs)
            return func(*args, **kwargs)
        return wrapper
    if inspect.isclass(target):
        # wrap every plain function member of the class in place
        members = inspect.getmembers(target, predicate=inspect.isfunction)
        for name, func in members:
            setattr(target, name, decorate(func))
        return target
    else:
        return decorate(target)
def get_article(URL):
    """
    Get an article from one of our trusted sources.

    Args:
        URL: URL string to parse, e.g., http://www.hello.com/world

    Returns:
        Article object if the URL was successfully requested and parsed.
        None if it fails to parse or the URL is from a source not
        in the trusted list.
    """
    try:
        output = urlparse(URL)
        # netloc like "www.hello.com" -> source name is the second label
        source = output.netloc.split('.')[1]
    except (ValueError, IndexError):  # malformed URL, or netloc without a dot
        print("Failed to parse URL.")
        return None
    if source not in TRUSTED_SOURCES:
        print("URL isn't in TRUSTED_SOURCES")
        return None
    article = Article(URL)
    article.download()
    article.parse()
    return article
def main():
    """
    Create a segmented array.
    Compute basic stats for each segment:
    (min, max, mean, standard deviation, total, area)
    Write the segmented image and the raster attribute table.
    """
    # data dimensions
    dims = (1000, 1000)
    # create some random data and segment via value > 5000
    seg_data = numpy.random.randint(0, 10001, dims).astype('uint32')
    seg_data, nlabels = ndimage.label(seg_data > 5000)
    # create some random data to calculate stats against
    data = numpy.random.ranf(dims)
    # create a segments class object (include_zero keeps the background segment)
    seg = Segments(seg_data, include_zero=True)
    # retrieve basic stats (min, max, mean, standard deviation, total, area)
    stats_table = seg.basic_statistics(data, dataframe=True)
    stats_table.set_index("Segment_IDs", inplace=True)
    # join via segment id, specifying 'outer' will account for empty segments
    df = pandas.DataFrame({"Histogram": seg.histogram})
    stats_table = df.join(stats_table, how='outer')
    nrows = stats_table.shape[0]
    # assign random colours to each segment (RGBA columns inserted after the id)
    stats_table.insert(1, "Red", numpy.random.randint(0, 256, (nrows)))
    stats_table.insert(2, "Green", numpy.random.randint(0, 256, (nrows)))
    stats_table.insert(3, "Blue", numpy.random.randint(0, 256, (nrows)))
    stats_table.insert(4, "Alpha", 255)
    # define 1 output band and add another band later
    kwargs = {'width': dims[1],
              'height': dims[0],
              'count': 1,
              'compression': 4,
              'chunks': (100, 100),
              'blocksize': 100,
              'dtype': seg_data.dtype.name}
    with kea.open('attribute-table-example.kea', 'w', **kwargs) as src:
        src.write(seg_data, 1)
        # define the layer type as thematic (labelled, classified etc)
        src.write_layer_type(1, kc.LayerType.thematic)
        # write the stats table as an attribute table; `usage` maps dataframe
        # column names onto attribute-table usage roles
        usage = {"Red": "Red",
                 "Green": "Green",
                 "Blue": "Blue",
                 "Alpha": "Alpha",
                 "Histogram": "PixelCount"}
        src.write_rat(stats_table, 1, usage=usage)
def crop_central_whiten_images(images=None, height=24, width=24):
    """Crop the central of image, and normalize it for test data.
    They are cropped to central of height * width pixels.
    Whiten (Normalize) the images.
    Parameters
    ----------
    images : 4D Tensor
        The tensor or placeholder of images
    height : int
        The height for central crop.
    width: int
        The width for central crop.
    Returns
    -------
    result : tuple Tensor
        (Tensor for distorted images, Tensor for while loop index)
    Examples
    --------
    >>> X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
    >>> sess = tf.InteractiveSession()
    >>> batch_size = 128
    >>> x = tf.placeholder(tf.float32, shape=[batch_size, 32, 32, 3])
    >>> central_images_op = tl.preprocess.crop_central_whiten_images(images=x, height=24, width=24)
    >>> sess.run(tf.initialize_all_variables())
    >>> feed_dict={x: X_train[0:batch_size,:,:,:]}
    >>> central_images, idx = sess.run(central_images_op, feed_dict=feed_dict)
    >>> tl.visualize.images2d(X_train[0:9,:,:,:], second=2, saveable=False, name='cifar10', dtype=np.uint8, fig_idx=20212)
    >>> tl.visualize.images2d(central_images[1:10,:,:,:], second=10, saveable=False, name='central_images', dtype=None, fig_idx=23012)
    Notes
    ------
    The first image in 'central_images' should be removed.
    Code References
    ----------------
    - ``tensorflow.models.image.cifar10.cifar10_input``
    """
    print(" [Warning] crop_central_whiten_images will be deprecated due to speed, see TFRecord tutorial for more info...")
    try:
        # the static batch dimension must be known to bound the while loop
        batch_size = int(images._shape[0])
    except (AttributeError, TypeError, ValueError) as exc:
        # was a bare `except:` re-raising a generic error without context
        raise Exception('unknown batch_size of images') from exc
    # seed batch (the dummy first image must be removed by the caller)
    central_x = tf.Variable(tf.constant(0.1, shape=[1, height, width, 3]))
    i = tf.Variable(tf.constant(0))
    c = lambda central_x, i: tf.less(i, batch_size)
    def body(central_x, i):
        # 1. Crop the central [height, width] of the image.
        image = tf.image.resize_image_with_crop_or_pad(tf.gather(images, i), height, width)
        # 2. Subtract off the mean and divide by the variance of the pixels.
        image = tf.image.per_image_whitening(image)
        # 3. Append the image to the batch.
        image = tf.expand_dims(image, 0)
        return tf.concat(0, [central_x, image]), tf.add(i, 1)
    result = tf.while_loop(cond=c, body=body, loop_vars=(central_x, i), parallel_iterations=16)
    return result
def get_post(_activePost):
    """Collect the post URL(s) selected for scraping.

    :return: a list of post links rewritten to the mobile ("m.") host
    """
    links = []
    if _activePost == "postLink1":
        # single URL typed into the form
        raw_link = request.form.get("singlePost")
        links.append(raw_link.replace("www", "m"))
    elif _activePost == "postLink2":
        # batch of URLs uploaded as a CSV file
        upload = request.files['csvfile']
        if upload and allowed_file(upload.filename):
            safe_name = secure_filename(upload.filename)
            upload.save(os.path.join(app.config['UPLOAD_FOLDER'], safe_name))
            urls = readCSV(os.path.join(app.config['UPLOAD_FOLDER'], upload.filename), "URL")
            links.extend(url.replace("www", "m") for url in urls)
    return links
def init_app():
    """init sdk app
    The appID & app Secret use the Android's application ID and Secret under the same project, next version you can use
    the web application's own appId & secret!
    """
    # TODO: replace these placeholder credentials with real values
    # (ideally loaded from configuration, not hard-coded) before use.
    app_id_at = "Your android application's app id"
    app_secret_at = "Your android application's app secret"
    app_id_push = "Your Web application' app id "
    push_admin.initialize_app(app_id_at, app_secret_at, app_id_push)
def start(event):
    """Return True when *event* is a key-down press of the ENTER key."""
    if event.type != KEYDOWN:
        return False
    return event.key == system["ENTER"]
def cost(theta, X, y):
    """Logistic-regression cost -l(theta) (mean negative log-likelihood) to minimize.

    :param theta: parameter vector, shape (n,)
    :param X: design matrix, shape (m, n)
    :param y: binary labels (0/1), shape (m,)
    :return: scalar mean cross-entropy cost
    """
    h = sigmoid(X @ theta)  # hoisted: the original evaluated this twice
    return np.mean(-y * np.log(h) - (1 - y) * np.log(1 - h))
def enrich_nodes(nodes, vindplaatsen, articles):
    """
    Add some attributes to the nodes.
    :param nodes: node collection to enrich
    :param vindplaatsen: occurrence data, forwarded to add_versions
    :param articles: article data, forwarded to add_articles
    :return: the enriched nodes
    """
    nodes = add_year(nodes)
    nodes = add_articles(nodes, articles)
    nodes = add_versions(nodes, vindplaatsen)
    return nodes
def assert_warns(warning_class: Type[RuntimeWarning], *args: Literal["v", "t"]):
    """
    usage.scipy: 6
    """
    # Appears to be an API-usage stub (signature record only; the docstring
    # notes 6 observed call sites in scipy) — intentionally has no body.
    ...
def has_string(match):
    """Matches if ``str(item)`` satisfies a given matcher.

    :param match: The matcher to satisfy, or an expected value for
        :py:func:`~hamcrest.core.core.isequal.equal_to` matching.

    This matcher invokes the :py:func:`str` function on the evaluated object to
    get its string representation, passing the result to a given matcher for
    evaluation. If the ``match`` argument is not a matcher, it is implicitly
    wrapped in an :py:func:`~hamcrest.core.core.isequal.equal_to` matcher to
    check for equality.

    Examples::

        has_string(starts_with('foo'))
        has_string('bar')
    """
    # docstring previously said "get its length" — copy-paste from has_length
    return HasString(wrap_matcher(match))
def get_land_sea_mask(gridded_geo_box,
                      ancillary_path='/g/data/v10/eoancillarydata/Land_Sea_Rasters'):
    """
    Return a land/sea 2D numpy array (dtype uint8; land pixels non-zero,
    sea pixels zero) for the supplied GriddedGeoBox, using the UTM projected
    data in the supplied ancillary_path.
    If the specified gridded_geo_box has a non-UTM CRS or a non-native
    sample frequency, the data will be reprojected/resampled into the
    gridded_geo_box.
    """
    # get lat/long of the geo_box origin to determine the UTM zone
    to_crs = osr.SpatialReference()
    to_crs.SetFromUserInput('EPSG:4326')
    origin_longlat = gridded_geo_box.transform_coordinates(gridded_geo_box.origin, to_crs)
    # locate the Land/Sea raster file for this UTM zone
    utm_zone = abs(get_utm_zone(origin_longlat))
    utm_data_path = '%s/WORLDzone%d.tif' % (ancillary_path, utm_zone)
    # read the land/sea data
    with rio.open(utm_data_path) as ds:
        # gridded box for the full dataset extent
        land_sea_ggb = GriddedGeoBox.from_dataset(ds)
        # read only the window that covers the requested geo box
        window = land_sea_ggb.window(gridded_geo_box)
        out = numpy.zeros(gridded_geo_box.shape, dtype=numpy.uint8)
        ds.read(1, window=window, out=out)
        return out
def qEI_brute(gp_, true_function, X_=np.linspace(0, 1, 200), q=3,
              niterations=10, nsim=1000):
    """
    q steps EI performed with brute force: Brute search on vector X_

    :param gp_: fitted GP regressor (shallow-copied; its training data is
        extended and refit each iteration)
    :param true_function: objective evaluated at each proposed batch
    :param X_: 1D candidate grid searched exhaustively
    :param q: batch size (points proposed per iteration)
    :param niterations: number of optimisation iterations
    :param nsim: Monte-Carlo simulations used by the qEI estimate
    :return: the GP refitted on all evaluated points
    """
    gp = copy.copy(gp_)
    i = 0
    nn = X_.shape[0]
    # qEI values are reshaped onto a q-dimensional grid of size nn^q
    rshape = q * [nn]
    # every q-tuple of candidates: cartesian product of X_ with itself
    qEI_to_evaluate = np.asarray([np.vstack(np.array(comb))
                                 for comb in itertools.product(X_, repeat=q)]).squeeze()
    while i < niterations:
        bplt.plot_gp(gp, X_, true_function=true_function, nsamples=5, show=False)
        # Monte-Carlo qEI for every candidate batch, then pick the argmax batch
        qEI_computed = acq.gp_qEI_computation_brute(gp, qEI_to_evaluate, nsim).reshape(rshape)
        next_to_evaluate = X_[np.asarray(np.unravel_index(qEI_computed.argmax(),
                                                          qEI_computed.shape))]
        value_evaluated = true_function(next_to_evaluate)
        # mark the chosen batch on the current plot
        [plt.axvline(nextpoint, ls = '--', color = 'red')
         for nextpoint in next_to_evaluate]
        # augment the training set with the new evaluations and refit
        X = np.append(gp.X_train_, next_to_evaluate)
        X = X[:, np.newaxis]
        y = np.append(gp.y_train_, value_evaluated)
        gp.fit(X, y)
        i += 1
        # reports the argmin of observed values (minimisation convention)
        print(' Best value yet ' + str(gp.X_train_[gp.y_train_.argmin()]))
    plt.show()
    return gp
def potential_energy_diff(e_in, e_out):
    """Return the difference in 'Potential' energy between two energy-group dicts.

    arguments:
    e_in - dictionary of energy groups from input file
    e_out - dictionary of energy groups from output file
    returns:
    potential energy difference in units of the input
    """
    reference = e_in['Potential']
    # convert the output energy into the input's unit before subtracting
    converted = e_out['Potential'].in_units_of(reference.unit)
    return (converted - reference)._value
def test_source(source, sourcer_params):
    """
    Paths Source - Test various source paths/urls supported by Sourcer.
    """
    try:
        # probe the stream with the static ffmpeg binary and log its metadata
        sourcer = Sourcer(
            source, custom_ffmpeg=return_static_ffmpeg(), verbose=True, **sourcer_params
        ).probe_stream()
        logger.debug("Found Metadata: `{}`".format(sourcer.retrieve_metadata()))
    except Exception as e:
        # ValueError/IOError are the expected failure modes for unsupported
        # sources, so they count as an expected failure; anything else fails.
        if isinstance(e, (ValueError, IOError)):
            pytest.xfail("Test Passed!")
        else:
            pytest.fail(str(e))
def CA_potential_profile(pot_init: float, pot_step: float, pot_rest: float,
                         pot_init_time: float, pot_step_time: float, pot_rest_time: float,
                         buffer_size: int = 1200, samp_rate: int = 3600) -> tuple:
    """
    Build (and plot) the chronoamperometry potential profile.

    :param pot_init: Initial potential in V
    :param pot_step: Step potential in V
    :param pot_rest: Rest potential in V
    :param pot_init_time: Time to hold the initial potential in s
        The profile is padded at the start (at pot_init) as needed to round
        the total sample count up to a multiple of ``buffer_size``.
    :param pot_step_time: Time to hold the step potential in s
    :param pot_rest_time: Time to hold the resting potential in s
    :param buffer_size: Samples stored in buffer before callback
    :param samp_rate: Sampling rate in samples/s; Use an integral multiple of 120/s and at least 3600 per volt
    :return: pot_profile, samp_num_tot: An array holding potentials for each sample and the total sample number
    """
    # number of samples for each section (explicit int: float hold times would
    # otherwise produce float counts, which np.repeat does not accept)
    samp_num_init = int(samp_rate * pot_init_time)
    samp_num_step = int(samp_rate * pot_step_time)
    samp_num_rest = int(samp_rate * pot_rest_time)
    # constant-potential profile for each section
    pot_profile_init = np.full(samp_num_init, pot_init, dtype=float)
    pot_profile_step = np.full(samp_num_step, pot_step, dtype=float)
    pot_profile_rest = np.full(samp_num_rest, pot_rest, dtype=float)
    '''Since the total sample number must be a multiple of the buffer_size,
    add additional samples to the initial potential step until it is.'''
    # modular arithmetic replaces the original one-sample-at-a-time loop
    base_samples = samp_num_init + samp_num_step + samp_num_rest
    additional_hold_sample = (-base_samples) % buffer_size
    # padding held at the initial potential
    h_profile = np.full(additional_hold_sample, pot_init, dtype=float)
    '''Construct the potential profile by combining each individual section'''
    pot_profile = np.concatenate((h_profile, pot_profile_init, pot_profile_step, pot_profile_rest))
    samp_num_tot = int(len(pot_profile))  # must be an integer
    '''Check potential profile to be set'''
    plt.title('CA Program Potential', fontsize=16)
    plt.xlabel('Time / s', fontsize=16)
    # raw string: '\m' in the original was an invalid (deprecated) escape
    plt.ylabel(r'$E_{\mathrm{in}}$ / V', fontsize=16)
    plt.tick_params(axis='both', which='both', direction='in', right=True, top=True)
    plt.plot(np.arange(0, len(pot_profile), 1) / samp_rate, pot_profile)
    return pot_profile, samp_num_tot
def generate_model_class(grid_dir, data_dir, Nlon=936, Nlat=1062, Nz=90):
    """Build an LLCRegion object for a model region and load its grid.

    The default grid dimensions correspond to the Samoan Passage box
    (Box 12 in Dimitris' notation).

    Parameters
    ----------
    grid_dir : str
        Path to grid files
    data_dir : str
        Path to data files
    Nlon : int
        Number of grid points in the zonal direction
    Nlat : int
        Number of grid points in the meridional
    Nz : int
        Number of grid points in the vertical

    Returns
    -------
    m : LLCRegion model class
    """
    region = LLCRegion(grid_dir=grid_dir, data_dir=data_dir,
                       Nlon=Nlon, Nlat=Nlat, Nz=Nz)
    print('loading grid...')
    region.load_grid()
    print(region.grid_size3d)
    return region
def discretize_integrate_2D(model, x_range, y_range):
    """
    Discretize a 2D model by integrating it over each pixel.

    Pixel (j, i) covers the unit square centred on the integer grid point
    (x_range[0] + i, y_range[0] + j).
    """
    from scipy.integrate import dblquad
    # pixel edges: each pixel spans +/- 0.5 around the integer grid
    x_edges = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
    y_edges = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
    values = np.empty((y_edges.size - 1, x_edges.size - 1))
    # integrate the model over every pixel
    for j, i in np.ndindex(values.shape):
        integral, _abserr = dblquad(lambda yy, xx: model(xx, yy),
                                    x_edges[i], x_edges[i + 1],
                                    lambda _x: y_edges[j], lambda _x: y_edges[j + 1])
        values[j, i] = integral
    return values
def get_saved_model_list(ckpt_dir):
    """Return a list of HDF5 models found in ckpt_dir (top level only)."""
    # first os.walk entry = the top directory; default covers a missing dir
    _root, _dirs, filenames = next(os.walk(ckpt_dir), (ckpt_dir, [], []))
    checkpoints = [name for name in filenames if name.endswith(('.h5', '.hdf5'))]
    checkpoints.sort(key=natural_keys)
    return checkpoints
def detail(request, question_id):
    """Render the HelloWorld question-detail page for the given question id.

    Responds with 404 when no Question with ``question_id`` exists.
    """
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'HelloWorld/question_detail.html', {'question': question})
def get_dataset_metadata(data_directory_path: str, output_directory_path: str = None) -> None:
    """ Get the metadata of all available datasets from the Open Reaction Database.

    :param data_directory_path: directory containing one sub-directory per dataset
    :param output_directory_path: when given, the metadata table is also written
        there as a timestamped CSV file
    :raises Exception: wrapping any error raised while reading the dataset files
    """
    try:
        ids, names, descriptions, row_counts = [], [], [], []
        directory_names = os.listdir(data_directory_path)  # hoisted: was listed twice
        for directory_name in tqdm(
            iterable=directory_names,
            total=len(directory_names),
            ascii=True,
            ncols=150,
            desc="Reading the Open Reaction Database dataset files"
        ):
            for file_name in os.listdir(os.path.join(data_directory_path, directory_name)):
                # each file holds a serialized Dataset message
                file_contents = message_helpers.load_message(
                    filename=os.path.join(data_directory_path, directory_name, file_name),
                    message_type=Dataset
                )
                ids.append(file_contents.dataset_id)
                names.append(file_contents.name)
                descriptions.append(file_contents.description)
                row_counts.append(len(file_contents.reactions))
        metadata_dataframe = pd.DataFrame(data={
            "dataset_id": ids,
            "dataset_name": names,
            "dataset_description": descriptions,
            "dataset_row_count": row_counts}
        )
        if output_directory_path is not None:
            metadata_dataframe.to_csv(
                os.path.join(output_directory_path, "{:%Y%m%d%H%M}_ord_metadata.csv".format(datetime.now())),
                index=False
            )
        print(f"\nThe metadata dataframe ({len(metadata_dataframe.index)}x{len(metadata_dataframe.columns)}) has been "
              "successfully generated!\n")
        print(metadata_dataframe.head(10))
    except Exception as exception_handle:
        # deliberately broad: any failure is re-raised with context for the CLI
        raise Exception("Exception occurred during the handling of the Open Reaction Database dataset file metadata. "
                        f"Detailed message:\n{exception_handle}") from exception_handle
def plot_peridogramm_from_timeseries(data: np.ndarray, kwargs: dict, add_smoothing: bool = False,
                                     f_list: List[Tuple[float, str]] = None,
                                     bg_model: List[np.ndarray] = None, plot_name: str = None):
    """
    Directly converts a timeseries and plots it in frequency space.

    :param data: Timeseries
    :param kwargs: Run configuration
    :param add_smoothing: Show smoothing
    :param f_list: List of frequency markers
    :param bg_model: Optional background model, forwarded to plot_f_space
    :param plot_name: Optional plot name, forwarded to plot_f_space
    """
    f_space = compute_periodogram(data, kwargs)
    plot_f_space(f_space, kwargs, add_smoothing, f_list, bg_model, plot_name)
def TDataStd_BooleanArray_GetID(*args):
    """
    * Static methods ============== Returns an ID for array.
    :rtype: Standard_GUID
    """
    # Thin wrapper delegating to the native _TDataStd extension module
    # (appears to be generated binding code — do not edit by hand).
    return _TDataStd.TDataStd_BooleanArray_GetID(*args)
def dt_fits_table():
    """
    ----------------------------------------------------------------------------------
    Demo and test the basic API for FITS tables:
    (The examples below execute as doctests; they require the demo file
    data/v8q14451j_idc.fits and the test_config/tables fixtures.)
    >>> old_state = test_config.setup(url="https://jwst-serverless-mode.stsci.edu")
    >>> FITS_FILE = "data/v8q14451j_idc.fits"
    >>> tables.ntables(FITS_FILE)
    1
    >>> for tab in tables.tables(FITS_FILE):
    ...     print(repr(tab))
    SimpleTable('v8q14451j_idc.fits', 1, colnames=('DETCHIP', 'DIRECTION', 'FILTER1', 'FILTER2', 'XSIZE', 'YSIZE', 'XREF', 'YREF', 'V2REF', 'V3REF', 'SCALE', 'CX10', 'CX11', 'CX20', 'CX21', 'CX22', 'CX30', 'CX31', 'CX32', 'CX33', 'CX40', 'CX41', 'CX42', 'CX43', 'CX44', 'CY10', 'CY11', 'CY20', 'CY21', 'CY22', 'CY30', 'CY31', 'CY32', 'CY33', 'CY40', 'CY41', 'CY42', 'CY43', 'CY44'), nrows=694)
    >>> tab.segment
    1
    >>> tab.rows[0]  # doctest: +ELLIPSIS
    (1, 'FORWARD', 'F475W', 'CLEAR2L', 4096, 2048, ...
    >>> len(tab.rows[0]) == len(tab.colnames)
    True
    >>> tab.colnames[0]
    'DETCHIP'
    >>> tab.columns['DETCHIP'][:1]
    (1,)
    >>> test_config.cleanup(old_state)
    """
    # Doctest-only function: the docstring above *is* the test; no body code.
def get_axis_order():
    """Get the axis_order set by any containing axis_order_scope.

    Returns:
      List of strings giving an order to use for axis names, or None, if no
      axis order is set.
    """
    # Stored in the graph collection so axis_order_scope stays thread-safe.
    collected = ops.get_collection(_AXIS_ORDER_KEY)
    if not collected:
        return None
    # exactly one entry is expected; unpacking enforces that invariant
    (axis_order,) = collected
    return axis_order
def check_jax_usage(enabled: bool = True) -> bool:
    """Toggle checking that raw JAX APIs are used correctly inside Haiku.

    Some Haiku functions (e.g. ``hk.get_parameter``) have side effects, so
    functions using them are only pure after ``hk.transform`` (et al).  When
    applying JAX transforms or control flow (``jax.vmap``, ``jax.lax.cond``,
    ...) before transforming, the Haiku-wrapped equivalents (``hk.vmap``,
    ``hk.cond``, ...) must be used instead, since they purify the callables
    first.  With checking enabled, using a raw JAX transform on an impure
    Haiku function raises an error instead of silently misbehaving.

    Args:
      enabled: Boolean indicating whether usage should be checked or not.

    Returns:
      Boolean with the previous value for this setting.
    """
    config = get_config()
    previous = config.check_jax_usage
    config.check_jax_usage = enabled
    return previous
def features_change(attrname, old, new):
    """Callback for the features checkbox group.

    Disables the train button exactly when no feature is selected.
    """
    train_button.disabled = (new == [])
def checkout(
    id: str,
    workspace=None,
    slug=None,
):
    """Checkout PR by ID.

    :param id: pull-request id
    :param workspace: workspace name; defaults to ``get_workspace()``,
        resolved at call time (the original evaluated it at import time,
        freezing the value and running lookup work on module import)
    :param slug: repository slug; defaults to ``get_slug()``, also resolved
        at call time
    """
    workspace = get_workspace() if workspace is None else workspace
    slug = get_slug() if slug is None else slug
    url = pr_url.format(workspace=workspace, slug=slug, id=id)
    console = Console()
    with console.status("[bold green]Loading..."):
        resp = get(url)
        branch_name = resp["source"]["branch"]["name"]
        run_cmd(["git", "checkout", "-b", branch_name])
        run_cmd(["git", "pull", "origin", branch_name])
def test_build_sql_with_map():
    """
    Test ``build_sql`` with a column map.
    """
    # four integer columns col0_..col3_ with one bound each
    columns = {f"col{i}_": Integer() for i in range(4)}
    bounds = {
        "col0_": Equal(1),
        "col1_": Range(start=0, end=1, include_start=True, include_end=False),
        "col2_": Range(start=None, end=1, include_start=False, include_end=True),
        "col3_": Range(start=0, end=None, include_start=False, include_end=True),
    }
    order = [("col0_", Order.ASCENDING), ("col1_", Order.DESCENDING)]
    # map the internal column names onto the letters A-D used in the SQL
    column_map = {f"col{i}_": letter for i, letter in enumerate("ABCD")}
    sql = build_sql(columns, bounds, order, None, column_map, None, 1)
    # bounds and ordering must be rendered against the mapped names
    assert sql == (
        "SELECT * WHERE A = 1 AND B >= 0 AND B < 1 AND "
        "C <= 1 AND D > 0 ORDER BY A, B DESC OFFSET 1"
    )
def score_numeric_deg_ssetype(omega_a, omega_b):
    """
    Return the tableau matching score between two Omega matrix entries
    omega_a and omega_b, as per Kamat et al (2008),
    with effectively negative infinity score for SSE type mismatch
    Parameters:
        omega_a - angle in (-pi, pi]
        omega_b - angle in (-pi, pi]
    Return value:
        score between omega_a and omega_b
    """
    sse_codes = (0, 1, 2, 3)
    if omega_a in sse_codes or omega_b in sse_codes:
        # at least one entry is an SSE type code: exact match scores 0,
        # any mismatch is effectively -infinity
        return 0 if omega_a == omega_b else -99999
    # both entries are angles: delegate to the numeric angle score
    return score_numeric_deg(omega_a, omega_b)
def top_menu(context, calling_page=None):
    """
    Checks to see if we're in the Play section in order to return pages with
    show_in_play_menu set to True, otherwise retrieves the top menu
    items - the immediate children of the site root. Also detects 404s in the
    Play section.

    :param context: template context; must contain 'request' and may carry a
        'play_404' flag
    :param calling_page: the page the menu is rendered on, if any
    :return: dict of template variables for the menu template
    """
    if (calling_page and in_play(calling_page)) or context.get('play_404', False):
        # Play section: gather live pages flagged for the Play menu from each
        # of these page models (they are excluded from the normal menus).
        play_models = [
            StandardPage,
            PersonIndexPage,
            WorkIndexPage,
            BlogIndexPage
        ]
        menuitems = chain(*[
            model.objects.filter(
                live=True,
                show_in_play_menu=True,
                show_in_menus=False
            ) for model in play_models
        ])
    else:
        # default: immediate live children of the site root that opt into menus
        menuitems = get_site_root(context).get_children().filter(
            live=True,
            show_in_menus=True
        )
    return {
        'calling_page': calling_page,
        'menuitems': menuitems,
        # required by the pageurl tag that we want to use within this template
        'request': context['request'],
        'play_404': context.get('play_404', False)
    }
def DefineJacobian(J, DN, x):
    """ This method defines a Jacobian
    Keyword arguments:
    J -- The Jacobian matrix (filled in place and also returned)
    DN -- The shape function derivatives
    x -- The variable to compute the gradient
    """
    # nnodes = number of nodes, dim = spatial dimension of the coordinates
    [nnodes, dim] = x.shape
    # the element itself is a (dim-1)-dimensional line/surface entity
    localdim = dim - 1
    if (dim == 2):
        if (nnodes == 2):
            # 2-node line in 2D: half the edge vector
            J[0,0] = 0.5 * (x[1,0] - x[0,0])
            J[1,0] = 0.5 * (x[1,1] - x[0,1])
        # NOTE(review): any other nnodes in 2D leaves J untouched —
        # confirm callers never reach that case.
    else:
        if (nnodes == 3):
            # 3-node triangle in 3D: hard-coded combination of coordinates.
            # NOTE(review): the usual triangle Jacobian uses coordinate
            # *differences* (x1-x0, x2-x0); the sums used here look
            # convention-specific — verify against the element definition.
            J[0,0] = - (x[0,0] + x[1,0])
            J[1,0] = - (x[0,1] + x[1,1])
            J[2,0] = - (x[0,2] + x[1,2])
            J[0,1] = - (x[0,0] + x[2,0])
            J[1,1] = - (x[0,1] + x[2,1])
            J[2,1] = - (x[0,2] + x[2,2])
        else:
            # general case: zero J, then accumulate J = x^T * DN entry-wise
            for i in range(dim):
                for j in range(localdim):
                    J[i, j] = 0
            for i in range(nnodes):
                for k in range(dim):
                    for m in range(localdim):
                        J[k,m] += x[i,k] * DN[i,m]
    return J
def wav_vs_gmm(filebasename, gmm_file, gender, custom_db_dir=None):
    """Match a wav file and a given gmm model file and produce a segmentation
    file containing the score obtained.
    :type filebasename: string
    :param filebasename: the basename of the wav file to process
    :type gmm_file: string
    :param gmm_file: the path of the gmm file containing the voice model
    :type gender: char
    :param gender: F, M or U, the gender of the voice model
    :type custom_db_dir: None or string
    :param custom_db_dir: the voice models database to use"""
    database = CONFIGURATION.DB_DIR
    if custom_db_dir is not None:
        database = custom_db_dir
    gmm_name = os.path.split(gmm_file)[1]
    # Platform-specific separator for the --tInputMask path; the original
    # duplicated the whole command for win32 vs. everything else.
    sep = '\\' if sys.platform == 'win32' else '/'
    utils.start_subprocess(JAVA_EXE + ' -Xmx256M -cp ' + CONFIGURATION.LIUM_JAR
        + ' fr.lium.spkDiarization.programs.MScore --sInputMask=%s.seg '
        + '--fInputMask=%s.wav --sOutputMask=%s.ident.' + gender + '.'
        + gmm_name + '.seg --sOutputFormat=seg,UTF8 '
        + '--fInputDesc=audio2sphinx,1:3:2:0:0:0,13,1:0:300:4 '
        + '--tInputMask=' + database + sep + gender + sep + gmm_file
        + ' --sTop=8,' + CONFIGURATION.UBM_PATH
        + ' --sSetLabel=add --sByCluster ' + filebasename)
    # fail loudly when the scorer did not produce its output segmentation
    utils.ensure_file_exists(filebasename + '.ident.'
                             + gender + '.' + gmm_name + '.seg')
def run_remove_background(args):
    """The full script for the command line tool to remove background RNA.
    Args:
        args: Inputs from the command line, already parsed using argparse.
    Note: Returns nothing, but writes output to a file(s) specified from
        command line.
    """
    # Load dataset, run inference, and write the output to a file.
    # Send logging messages to stdout as well as a log file.
    file_dir, file_base = os.path.split(args.output_file)
    file_name = os.path.splitext(os.path.basename(file_base))[0]
    log_file = os.path.join(file_dir, file_name + ".log")
    # file handler via basicConfig; a stream handler is added separately below
    logging.basicConfig(level=logging.INFO,
                        format="cellbender:remove-background: %(message)s",
                        filename=log_file,
                        filemode="w")
    console = logging.StreamHandler()
    formatter = logging.Formatter("cellbender:remove-background: "
                                  "%(message)s")
    console.setFormatter(formatter)  # Use the same format for stdout.
    logging.getLogger('').addHandler(console)  # Log to stdout and a file.
    # Log the command as typed by user.
    logging.info("Command:\n" + ' '.join(['cellbender', 'remove-background']
                                         + sys.argv[2:]))
    # Log the start time.
    logging.info(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    logging.info("Running remove-background")
    # Load data from file and choose barcodes and genes to analyze.
    try:
        dataset_obj = \
            SingleCellRNACountsDataset(input_file=args.input_file,
                                       expected_cell_count=args.expected_cell_count,
                                       total_droplet_barcodes=args.total_droplets,
                                       fraction_empties=args.fraction_empties,
                                       model_name=args.model,
                                       gene_blacklist=args.blacklisted_genes,
                                       exclude_antibodies=args.exclude_antibodies,
                                       low_count_threshold=args.low_count_threshold,
                                       fpr=args.fpr)
    except OSError:
        # unreadable/missing input file: report and exit non-zero
        logging.error(f"OSError: Unable to open file {args.input_file}.")
        sys.exit(1)
    # Instantiate latent variable model and run full inference procedure.
    inferred_model = run_inference(dataset_obj, args)
    # Write outputs to file.
    try:
        dataset_obj.save_to_output_file(args.output_file,
                                        inferred_model,
                                        posterior_batch_size=args.posterior_batch_size,
                                        cells_posterior_reg_calc=args.cells_posterior_reg_calc,
                                        save_plots=True)
        logging.info("Completed remove-background.")
        logging.info(datetime.now().strftime('%Y-%m-%d %H:%M:%S\n'))
    # The exception allows user to end inference prematurely with CTRL-C.
    except KeyboardInterrupt:
        # If partial output has been saved, delete it.
        full_file = args.output_file
        # Name of the filtered (cells only) file.
        file_dir, file_base = os.path.split(full_file)
        file_name = os.path.splitext(os.path.basename(file_base))[0]
        filtered_file = os.path.join(file_dir,
                                     file_name + "_filtered.h5")
        # remove whichever partial outputs exist so no corrupt files remain
        if os.path.exists(full_file):
            os.remove(full_file)
        if os.path.exists(filtered_file):
            os.remove(filtered_file)
        logging.info("Keyboard interrupt. Terminated without saving.\n")
def build_reg_text_tree(text, part):
    """Build up the whole tree from the plain text of a single regulation. This
    only builds the regulation text part, and does not include appendices or
    the supplement.

    :param text: full plain text of the regulation
    :param part: regulation part number (becomes the root label)
    :return: struct.Node tree rooted at the regulation part
    """
    title, body = utils.title_body(text)
    label = [str(part)]
    subparts_list = []
    subpart_locations = subparts(body)
    if subpart_locations:
        # text before the first subpart becomes an "empty part" node,
        # but only when it actually has content and children
        pre_subpart = body[:subpart_locations[0][0]]
        first_emptypart, children_text = build_subparts_tree(
            pre_subpart, part, build_empty_part)
        if pre_subpart.strip() and first_emptypart.children:
            subparts_list.append(first_emptypart)
        else:
            children_text = pre_subpart
        # build one subpart node per located (start, end) span of the body
        for start, end in subpart_locations:
            subpart_body = body[start:end]
            subpart, _ = build_subparts_tree(
                subpart_body, part, lambda p: build_subpart(subpart_body, p))
            subparts_list.append(subpart)
    else:
        # no subparts at all: wrap the whole body in a single empty part
        emptypart, children_text = build_subparts_tree(
            body, part, build_empty_part)
        if emptypart.children:
            subparts_list.append(emptypart)
        else:
            # nothing parsed into the empty part either; fall back to a
            # bare node holding the raw text
            return struct.Node(
                text, [build_empty_part(part)], label, title)
    return struct.Node(children_text, subparts_list, label, title)
def check_url_alive(url, accept_codes=(401,)):
    """Validate that the URL (e.g. a github repo) is reachable.

    A URL counts as alive when the response status is 2xx/3xx or listed in
    ``accept_codes`` (401 by default, so auth-protected repos still count).

    :param url: URL to probe
    :param accept_codes: extra status codes treated as success (was a
        mutable list default; now an immutable tuple)
    :return: True when reachable, False otherwise
    """
    try:
        logger.info("checking url is alive", extra={"url": url})
        # context manager closes the connection instead of leaking it
        with request.urlopen(url) as response:
            status_code = response.getcode()
        if status_code in accept_codes or status_code // 100 in (2, 3):
            return True
    except Exception as exc:
        # network failures are expected here — log and fall through to False
        logger.debug("Unable to reach url", extra={"exception": str(exc)})
    return False
def solve(banks):
    """Return the length of the redistribution loop for the memory banks.

    Repeatedly empties the largest bank and redistributes its blocks
    round-robin; once a configuration repeats, the loop length is the
    number of cycles between its first and second occurrence.

    :banks: list of blocks in each bank (not modified)
    :return: number of redistribution cycles in the loop

    >>> solve([0, 2, 7, 0])
    4
    """
    banks = list(banks)  # work on a copy; the original mutated the caller's list
    first_seen = {}
    for cycle in count(1):
        # find the value and index of the bank with the most blocks
        # (max/index both take the lowest index on ties)
        largest = max(banks)
        idx = banks.index(largest)
        # empty that bank and redistribute its blocks round-robin
        banks[idx] = 0
        q, r = divmod(largest, len(banks))
        banks = [b + q for b in banks]
        for offset in range(r):
            banks[(idx + offset + 1) % len(banks)] += 1
        # single-pass cycle detection: remember when each state was first
        # seen; on a repeat the loop length is the cycle distance (the
        # original needed a second pass with a reset `seen` set)
        state = tuple(banks)
        if state in first_seen:
            return cycle - first_seen[state]
        first_seen[state] = cycle
def evaluate_python_expression(body):
    """Evaluate the given python expression, returning its result. This is useful if the
    front end application needs to do real-time processing on task data. If for instance
    there is a hide expression that is based on a previous value in the same form.
    The response includes both the result, and a hash of the original query, subsequent calls
    of the same hash are unnecessary.

    SECURITY NOTE(review): this evaluates a caller-supplied expression via the
    script engine; confirm upstream that only trusted/authenticated users can
    reach this endpoint and that the engine sandboxes the expression.
    """
    try:
        script_engine = CustomBpmnScriptEngine()
        # evaluate the expression with the supplied data as local variables
        result = script_engine._evaluate(body['expression'], **body['data'])
        return {"result": result, "expression": body['expression'], "key": body['key']}
    except Exception as e:
        # deliberately broad: evaluation errors are reported back to the
        # caller in the payload rather than raised
        return {"result": False, "expression": body['expression'], "key": body['key'], "error": str(e)}
def DictFilter(alist, bits):
    """Translate bits from EDID into a list of strings.

    Args:
      alist: A list of tuples, with the first being a number and second a string.
      bits: The bits from EDID that indicate whether each string is supported by
        this EDID or not.

    Returns:
      A dict of strings and bools, preserving the order of ``alist``.
    """
    return collections.OrderedDict(
        (label, bool(bits & mask)) for mask, label in alist)
def test_extract_from_logfile(runpath):
    """Test extracting values from a logfile via regex matching.

    Echoes a known message into a logfile (via shell redirection) and checks
    that the named regex groups appear in ``app.extracts``.
    """
    logname = "file.log"
    a = "1"
    b = "23a"
    message = "Value a={a} b={b}".format(a=a, b=b)
    # Named groups ("a", "b") become keys of app.extracts once matched.
    log_regexps = [
        re.compile(r".*a=(?P<a>[a-zA-Z0-9]*) .*"),
        re.compile(r".*b=(?P<b>[a-zA-Z0-9]*).*"),
    ]
    app = CustomApp(
        name="App",
        binary="echo",
        args=[message, ">", logname],
        logname=logname,
        log_regexps=log_regexps,
        shell=True,  # shell mode so ">" actually redirects into the logfile
        runpath=runpath,
    )
    with app:
        assert app.extracts["a"] == a
        assert app.extracts["b"] == b
def mentions(request):
    """Render the static French "mentions legales" (legal notices) page."""
    return render(request, "mentions.html", {"site_title": "Mentions legales"})
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """activity assistant form a config entry
    is only called once the whole magic has to happen here

    Discovers the Activity Assistant service via zeroconf, wraps it in an
    ``ActAssist`` client using Home Assistant's shared aiohttp session,
    stores the client under this entry's id, then forwards setup to the
    platform components.
    """
    hass.data.setdefault(DOMAIN, {})
    zeroconf = await async_get_instance(hass)
    # Look up the advertised service record; host/port/webhook come from it.
    tmp = zeroconf.get_service_info(ZCNF_TYPE, ZCNF_NAME + '.' + ZCNF_TYPE)
    val_dict = zeroconf_Info2Values(tmp)
    act_assist = ActAssist(
        aiohttp_client.async_get_clientsession(hass),
        val_dict[KEY_HOSTNAME],
        val_dict['port'],
        val_dict[KEY_WEBHOOK]
    )
    # Keyed by entry_id so multiple config entries can coexist.
    hass.data[DOMAIN].update({entry.entry_id: act_assist})
    _LOGGER.warning("saved ActAssistApi in : " + str(entry.entry_id))
    # create binary state sensor
    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    return True
def delete(id):
    """ Used by the product page to delete a product. Doesn't actually delete it, just sets the quantity to 0. """
    db = get_db()
    # Scope the update to the logged-in business so one business cannot
    # zero out another's products.
    b_id = session.get("user_id")
    # Parameterized query: `id` and `b_id` are bound, not interpolated.
    query = "UPDATE product SET quantity = 0 WHERE product_id = ? AND for_business = ?"
    db.execute(query, (id, b_id,))
    db.commit()
    return redirect(url_for("main.products"))
def test_hasname(name, exists):
    """Check if a name exist in registry

    Parametrized test: `name` is the key to look up, `exists` the expected
    result of ``has_name`` against a registry seeded with 'foo' and 'bar'.
    """
    registry = RstConfigSite()
    registry.update({
        'foo': 42,
        'bar': True,
    })
    assert registry.has_name(name) == exists
def resume_training(model_to_resume: str,
                    dataset_oversampling: Dict[str, int],
                    checkpoint: Optional[str] = None,
                    epochs: Optional[int] = None
                    ):
    """Resume training on a partially trained model (or finetune an existing model)

    :param model_to_resume: path to the model directory of the model to resume training
    :param dataset_oversampling: dictionary mapping dataset names to integer counts of how much
    to oversample them
    :param checkpoint: optional string to specify which checkpoint to resume from. Uses the latest
    if not specified
    :param epochs: Optional int specifying how many epochs to train for. If not detailed, runs for 24
    """
    out = ModelDir(model_to_resume)
    train_params = out.get_last_train_params()
    # Reuse the evaluators and hyper-parameters recorded by the previous run.
    evaluators = train_params["evaluators"]
    params = train_params["train_params"]
    params.num_epochs = epochs if epochs is not None else 24
    model = out.get_model()
    notes = None
    dry_run = False
    data = prepare_data(model, TrainConfig(), dataset_oversampling)
    # Fall back to the newest checkpoint in the model's save directory.
    if checkpoint is None:
        checkpoint = tf.train.latest_checkpoint(out.save_dir)
    _train_async(
        model=model,
        data=data,
        checkpoint=checkpoint,
        parameter_checkpoint=None,
        save_start=False,
        train_params=params,
        evaluators=evaluators,
        out=out,
        notes=notes,
        dry_run=dry_run,
        start_eval=False
    )
def from_meshmaker(filename_or_dict, material="dfalt"):
    """
    Generate a mesh from a block MESHM.

    Parameters
    ----------
    filename_or_dict: str or dict
        Input file name or parameters dict with key "meshmaker".
    material : str, optional, default 'dfalt'
        Default material name.

    Returns
    -------
    The structured (XYZ) or cylindric (RZ2D/RZ2DL) grid described by the
    meshmaker block.

    Raises
    ------
    ValueError
        If no "meshmaker" block is present, no mesh "type" is given, or the
        type is not one of 'xyz', 'rz2d', 'rz2dl'.
    """

    def _expand(increments):
        """Expand a list of increment dicts into cell sizes (fallback: one unit cell)."""
        sizes = []
        for increment in increments:
            append(sizes, **increment)
        return sizes if sizes else [1.0]

    if isinstance(filename_or_dict, str):
        parameters = read(filename_or_dict, file_format="tough")
    else:
        parameters = filename_or_dict

    if "meshmaker" not in parameters:
        raise ValueError("no 'meshmaker' block found in parameters")
    if "type" not in parameters["meshmaker"]:
        raise ValueError("meshmaker 'type' is not specified")
    mesh_type = parameters["meshmaker"]["type"]
    if mesh_type not in {"xyz", "rz2d", "rz2dl"}:
        raise ValueError(
            "unsupported meshmaker type '{}' (expected 'xyz', 'rz2d' or 'rz2dl')".format(mesh_type)
        )

    # XYZ
    if mesh_type == "xyz":
        dx_, dy_, dz_ = parse_xyz(parameters["meshmaker"]["parameters"])
        dx, dy, dz = _expand(dx_), _expand(dy_), _expand(dz_)
        return structured_grid(dx, dy, dz, material=material)

    # RZ2D / RZ2DL
    dr_, dz_ = parse_rz2d(parameters["meshmaker"]["parameters"])
    dr, dz = _expand(dr_), _expand(dz_)
    return cylindric_grid(dr, dz, layer=mesh_type == "rz2dl", material=material)
def compute_qtys_new_halos_pk(mvir, rvir, redshift, age_yr):
    """
    Creates a new galaxy along with the new halo.
    Integrates since the start of the Universe.
    Updates the initiated quantities with the values of interest.
    :param mvir: list of mvir [Msun], length = n.
    :param rvir: list of rvir [kpc] , length = n.
    :param redshift: redshift of the snapshot replicated n times.
    :param age_yr: age of the Universe for the snapshot replicated n times.
    Typically inputs should be :
     * mvir=self.f1['/halo_properties/mvir'].value[self.mask_f1_new_halos],
     * rvir=self.f1['/halo_properties/rvir'].value[self.mask_f1_new_halos],
     * age_yr=self.f1.attrs['age_yr']
    returns
    mvir_dot, rvir_dot, dMdt, dmdt_star, star_formation_rate, stellar_mass
    (rvir_dot is computed inline as rvir / age_yr and the final stellar mass
    as star_formation_rate * age_yr).
    """
    f_b=model.f_b
    epsilon = model.epsilon(mvir, redshift )
    # fraction of stellar mass lost since formation, for a stellar population
    # of this age (equation numbers below refer to the paper this follows).
    f_lost = f_loss(age_yr)
    # evaluate equation (4)
    mvir_dot = mvir / age_yr
    # no pseudo evolution correction
    dMdt = mvir_dot
    # evaluate equation (1)
    dmdt_star = f_b * dMdt * epsilon
    # evaluate accretion: 0 in this first step
    # self.dmdt_star_accretion = n.zeros_like(self.dmdt_star)
    # evaluate equation (11)
    # equation (12)
    # evaluate stellar mass
    star_formation_rate = dmdt_star * (1. - f_lost)
    return mvir_dot, rvir / age_yr, dMdt, dmdt_star, star_formation_rate, star_formation_rate * age_yr
def rank():
    """A function which returns the Horovod rank of the calling process.

    Returns:
      An integer scalar with the Horovod rank of the calling process.

    Raises:
      ValueError: If Horovod has not been initialized yet.
    """
    rank = MPI_LIB_CTYPES.horovod_tensorflow_rank()
    # The C library signals "not initialized" with -1.
    if rank == -1:
        raise ValueError(
            'Horovod has not been initialized; use horovod.tensorflow.init().')
    return rank
def main():
    """
    Purpose:
        Test the function — smoke-tests get_traffic_calming with a sample
        Washington DC address and prints the result.
    Args:
        N/A
    Returns:
        N/A
    """
    # Use the test data we have
    # df = pd.read_csv("../../data/analysis_data_roadway_blocks.csv")
    # 1400 - 1413 BLOCK OF SPRING ROAD NW
    # sample address
    address = "600 Farragut St. NW"
    print(f"Getting traffic calming features for {address}")
    text = get_traffic_calming(address)
    print(text)
def getRAMSizeOSX() -> CmdOutput:
    """Returns the RAM size in bytes.

    Shells out to macOS ``sysctl -n hw.memsize``; the size therefore arrives
    as text in the ``stdout`` attribute of the returned value.

    Returns:
        CmdOutput: The output of the command, as a `CmdOutput` instance containing
        `stdout` and `stderr` as attributes.
    """
    return runCommand(exe_args=ExeArgs("sysctl", ["-n", "hw.memsize"]))
def apply_edge_filters(graph: networkx.MultiDiGraph, edge_filters: Dict[str, Union[str, Set]]) -> None:
    """
    Apply filters to graph and remove edges that do not pass given filters.

    Only the ``edge_label`` and ``relation`` filters are honoured; an edge
    passes a filter when its attribute value is contained in the filter's
    value set.  The graph is modified in place.

    Parameters
    ----------
    graph: networkx.MultiDiGraph
        The graph
    edge_filters: Dict[str, Union[str, Set]]
        Edge filters

    """
    edges_to_remove = []
    for subject_node, object_node, key, data in graph.edges(keys=True, data=True):
        for filter_name, allowed_values in edge_filters.items():
            # Both supported filters share the same membership test.
            if filter_name in ('edge_label', 'relation') and data[filter_name] not in allowed_values:
                edges_to_remove.append((subject_node, object_node, key))
                break  # one failed filter is enough; skip the remaining checks
    for subject_node, object_node, key in edges_to_remove:
        # removing edge that fails edge filters
        log.debug(f"Removing edge {(subject_node, object_node, key)}")
        graph.remove_edge(subject_node, object_node, key)
def load_data(filepath, columns=('title', 'abstract')):
    """Loads specified columns of csv/excel data.

    Arguments
    ---------
    filepath: str
        Path to file (e.g. 'data.csv').  The (case-insensitive) extension
        selects the parser: '.csv' (semicolon-separated) or '.xlsx'.
    columns: sequence of str
        Column names in the data to load.

    Returns
    -------
    pandas.DataFrame
        Pandas object containing the loaded tabular data. If labels are not
        loaded, a 'label_included' column is added (filled with -1).

    Raises
    ------
    ValueError
        If the file extension is neither '.csv' nor '.xlsx'.
    """
    # Lowercase so 'DATA.CSV' works too; list() guards against a tuple default.
    file_type = filepath.split('.')[-1].lower()
    if file_type == 'csv':
        df = pd.read_csv(filepath, delimiter=';',
                         encoding='utf-8',
                         usecols=list(columns))
    elif file_type == 'xlsx':
        df = pd.read_excel(filepath, usecols=list(columns))
    else:
        raise ValueError('Filetype not supported.')
    if 'label_included' not in df.columns:
        # -1 marks "label unknown" for downstream screening code.
        df['label_included'] = np.full(df.shape[0], -1, dtype=int)
    return df
def pre_process(image):
    """
    Invert pixel intensity of 'images' (to compensate the conversion into image with imwrite).

    NOTE(review): ``1 - image * 255`` scales *before* subtracting; if the
    intent was to invert a [0, 1] image into [0, 255], this should likely be
    ``(1 - image) * 255`` — confirm against the imwrite conversion used.
    """
    return 1 - image * 255
def is_source_path(path):
    """Check if path is source code path.

    Parameters
    ----------
    path : str
        A possible path

    Returns
    -------
    valid : bool
        Whether path is a possible source path
    """
    # An existing filesystem entry is always accepted as a path.
    if os.path.exists(path):
        return True
    # Multi-line strings are source text, not paths.
    if "\n" in path:
        return False
    # Otherwise require a "name.ext" shape whose extension carries no
    # surrounding whitespace.
    pieces = path.rsplit(".", 1)
    return len(pieces) == 2 and pieces[1] == pieces[1].strip()
def get_examples_to_execute(
    predictions: Sequence[inference.Prediction], inference_config: inference.Config
) -> List[official_evaluation.ExecutionInstructions]:
    """
    Converts predictions from a model into sqlite execution instructions. If abstract SQL was used, converts back to fully-specified SQL.
    """
    # Abstract-SQL predictions must be restored to executable SQL first.
    if FLAGS.using_abstract_sql:
        predictions = restore_asql_wrapper(predictions)
    # Load the database tables.
    schema_obj = inference.load_schema_obj(
        FLAGS.eval_dataset_name, FLAGS.original_data_directory
    )
    # Now match with the original data and save
    return inference.match_with_dataset(inference_config, predictions, schema_obj)
def verify_query(ctx):
    """
    Verify a LQL query.

    Validates the query currently held in *ctx* against the API and reports
    the outcome through the query-builder label widget.

    :return: True when the query validates, False otherwise.
    """
    label_widget = ctx.get_state(state="query_builder", key="query_label")
    lql_query = ctx.get("lql_query")
    evaluator_id = ctx.get("lql_evaluator")
    try:
        _ = ctx.client.queries.validate(
            lql_query, evaluator_id=evaluator_id)
    except http_session.ApiError as err:
        # Surface the API error in the UI instead of raising.
        label_widget.value = "Failure to verify: {0}".format(err)
        return False
    label_widget.value = "LQL Verified."
    return True
def data_feature_engineering(data):
    """
    Add features to the data for later use
    state_code, weekday, month, year

    The 'date' column is parsed once and reused for all derived columns
    (the previous version re-parsed it for weekday, month and year).
    Mutates and returns *data*.
    """
    data['state_code'] = data['state'].map(us_state_abbrev)
    # Parse dates a single time instead of once per derived column.
    dates = pd.to_datetime(data['date'])
    data['weekday'] = dates.dt.weekday.map(weekday_map)
    # month_abbr maps 1 -> 'Jan', ..., 12 -> 'Dec' (index 0 is '').
    month_dict = dict(enumerate(calendar.month_abbr))
    data['month'] = dates.dt.month.map(month_dict)
    data['year'] = dates.dt.year
    return data
def gauss_to_post(config, num_jobs):
    """
    Multiprocessing function that does Gaussian selection and posterior extraction

    See:

    - http://kaldi-asr.org/doc/gmm-global-get-post_8cc.html
    - http://kaldi-asr.org/doc/scale-post_8cc.html

    for more details
    on the Kaldi binary this runs.

    Also see https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/steps/online/nnet2/train_ivector_extractor.sh
    for the original bash script that this function was based on.

    Parameters
    ----------
    config : :class:`~aligner.config.iVectorExtractorConfig`
        Configuration object for training
    num_jobs : int
        The number of processes to use in calculation
    """
    # One (config, job_index) tuple per worker.
    jobs = [(config, x) for x in range(num_jobs)]
    with mp.Pool(processes=num_jobs, initializer=init, initargs=(os.environ.copy(),)) as pool:
        results = [pool.apply_async(gauss_to_post_func, args=i) for i in jobs]
        # Block until every worker finishes; the collected values are unused.
        output = [p.get() for p in results]
def test_bracketing():
    """The method getRegularlySampledBracketingPrfs() has some fairly complicated
    bookkeeping to find the locations of the 4 prfs that brack the input col,row
    in 2d space. It internally checks this bookkeeping is correct and raises
    an assert on failure. This test exercises all 4 paths in the code.
    """
    obj = prf.TessPrf(datapath)
    # NOTE(review): the comment below claims this call raises, yet there is
    # no pytest.raises guard around it — confirm whether that is stale.
    #This raises an assertion error
    obj.getPrfAtColRow(1587, 1710, 1, 1, 1)
    obj.getPrfAtColRow(1581, 1537, 1, 1, 1)   #A
    obj.getPrfAtColRow(1579, 1537, 1, 1, 1)   #S
    obj.getPrfAtColRow(1579, 1535, 1, 1, 1)   #T
    obj.getPrfAtColRow(1581, 1535, 1, 1, 1)
def rename_folder(name, path):
    """ Adapt the building block folder name.

    Renames ``<path>/src/bb`` to ``<path>/src/<name>``.

    Args:
        name (str): Name to personalize the folder name.
        path (str): Path to find the files.
    """
    src_dir = os.path.join(path, "src")
    os.rename(os.path.join(src_dir, "bb"), os.path.join(src_dir, name))
def train_ei_oc(emotion, model, algorithm, evaluation, finetune, baseline, preprocessor=None):
    """
    2. Task EI-oc: Detecting Emotion Intensity (ordinal classification)
    Given:
        a tweet
        an emotion E (anger, fear, joy, or sadness)
    Task: classify the tweet into one of four ordinal classes of intensity of E
    that best represents the mental state of the tweeter:
        0: no E can be inferred
        1: low amount of E can be inferred
        2: moderate amount of E can be inferred
        3: high amount of E can be inferred
    For each language: 4 training sets and 4 test sets: one for each emotion E.
    :param emotion: emotions = ["anger", "fear", "joy", "sadness"]
    :param model: sklearn-style pipeline exposing a ``clf`` step via set_params.
    :param algorithm: 'LoR' (logistic regression) or 'SVC'.
    :param evaluation: evaluation config/callable forwarded to fit_function.
    :param finetune: 'true' to grid-search hyper-parameters, anything else
        uses the per-emotion presets below.
    :param baseline: 'true' selects the baseline presets, else the tuned ones.
    :param preprocessor: optional tweet preprocessor; defaults to
        twitter_preprocess().
    :return: (res_dev, res_test) metrics of the best run by Pearson r.
    """
    if preprocessor is None:
        preprocessor = twitter_preprocess()
    model_config = TASK1_EIOC
    X_train, y_train = parse(task='EI-oc', emotion=emotion, dataset="train")
    X_dev, y_dev = parse(task='EI-oc', emotion=emotion, dataset="dev")
    X_test, y_test = parse(task='EI-oc', emotion=emotion, dataset="gold")
    # keep only scores
    y_train = [y[1] for y in y_train]
    y_dev = [y[1] for y in y_dev]
    y_test = [y[1] for y in y_test]
    name = model_config["name"] + "_" + emotion
    X_train = preprocessor("{}_{}".format(name, "train"), X_train)
    X_dev = preprocessor("{}_{}".format(name, "dev"), X_dev)
    X_test = preprocessor("{}_{}".format(name, "test"), X_test)
    # Accumulators shared across runs; fit_function appends into the lists.
    params = []
    params_list = []
    res_dev_list = []
    res_test_list = []
    if algorithm == 'LoR':
        if finetune == 'true':
            # Grid-search over the regularization strength C.
            for LoR_C in numpy.arange(10,1000,5)/100:
                params = (LoR_C)
                print("Now training with parameters: C: {}".format(LoR_C))
                model.set_params(clf__C=LoR_C,clf__solver='saga',clf__n_jobs=-1)
                fit = fit_function(model, evaluation, X_train, y_train, X_dev, y_dev, X_test, y_test, params, res_dev_list, res_test_list, params_list)
            print("Best result on gold set: ", max(res_test_list, key=lambda x:x["pearson"]))
            print("Best params: ", params_list[res_test_list.index(max(res_test_list, key=lambda x:x["pearson"]))])
        else:
            # Per-emotion presets found by an earlier grid search.
            if emotion == 'joy':
                if baseline == 'true':
                    LoR_C = 3.1
                else:
                    LoR_C = 3.5
            elif emotion == 'sadness':
                LoR_C = 1.085
            elif emotion == 'fear':
                LoR_C = 3.5
            elif emotion == 'anger':
                if baseline == 'true':
                    LoR_C = 2.25
                else:
                    LoR_C = 3.8
            params = (LoR_C)
            model.set_params(clf__C=LoR_C,clf__solver='saga',clf__n_jobs=-1)
            fit = fit_function(model, evaluation, X_train, y_train, X_dev, y_dev, X_test, y_test, params, res_dev_list, res_test_list, params_list)
    elif algorithm == 'SVC':
        if finetune == 'true':
            # Grid-search over C and the RBF gamma.
            for SVC_C in numpy.arange(10,50,1)/10:
                for SVC_gamma in numpy.arange(65,500,15)/100:
                    params = (SVC_C,SVC_gamma)
                    print("Now training with parameters: C: {}, Gamma: {}".format(SVC_C,SVC_gamma))
                    model.set_params(clf__C=SVC_C,clf__gamma=SVC_gamma)
                    fit = fit_function(model, evaluation, X_train, y_train, X_dev, y_dev, X_test, y_test, params, res_dev_list, res_test_list, params_list)
            print("Best result on gold set: ", max(fit[1], key=lambda x:x["pearson"]))
            print("Best params: ", fit[2][res_test_list.index(max(res_test_list, key=lambda x:x["pearson"]))])
        else:
            # Per-emotion presets found by an earlier grid search.
            if emotion == 'joy':
                if baseline == 'true':
                    SVC_C = 3.1
                    SVC_gamma = 0.95
                else:
                    SVC_C = 2.7
                    SVC_gamma = 3.35
            elif emotion == 'sadness':
                if baseline == 'true':
                    SVC_C = 2.5
                    SVC_gamma = 1.25
                else:
                    SVC_C = 2.2
                    SVC_gamma = 2.6
            elif emotion == 'fear':
                if baseline == 'true':
                    SVC_C = 2.1
                    SVC_gamma = 0.65
                else:
                    SVC_C = 4.9
                    SVC_gamma = 4.4
            elif emotion == 'anger':
                if baseline == 'true':
                    SVC_C = 1.8
                    SVC_gamma = 1.7
                else:
                    SVC_C = 2.6
                    SVC_gamma = 4.4
            params = (SVC_C,SVC_gamma)
            model.set_params(clf__C=SVC_C,clf__gamma=SVC_gamma)
            fit = fit_function(model, evaluation, X_train, y_train, X_dev, y_dev, X_test, y_test, params, res_dev_list, res_test_list, params_list)
    # Pick the run with the best Pearson r on the gold set; report its dev
    # metrics alongside.
    res_dev = fit[0][fit[1].index(max(fit[1], key=lambda x:x["pearson"]))]
    res_test = max(fit[1], key=lambda x:x["pearson"])
    return res_dev, res_test
def new_person(name='interviewer', filename='interviewer.jpeg'):
    """Add a new person into the known-people database.

    Reads *filename*, computes the face encoding of the first detected face
    and pickles the encodings list to ``knownpeeps/<name>.pickle``.

    :param name: identifier used for the pickle file (default preserves the
        previous hard-coded 'interviewer' behaviour).
    :param filename: image file to read the face from.
    """
    image = cv.imread(filename)
    encodings = []
    # OpenCV loads BGR; face_recognition expects RGB.
    conv_image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    # NOTE(review): locations are computed on the BGR image while encodings
    # use the RGB one — confirm whether both should use conv_image.
    encodings.append(fc.face_encodings(conv_image, fc.face_locations(image))[0])
    with open('knownpeeps/' + name + '.pickle', 'wb') as handle:
        pickle.dump(encodings, handle, protocol=pickle.HIGHEST_PROTOCOL)
def cross(genom1, genom2, mutation_rate, widths, bounds):
"""
Generates a child_genom by breeding 2 parent_genoms with
a mutation chance = mutation rate = [0, 1].
"""
child_genom = []
for i in range(len(genom1)):
if widths[i] == 0:
child_genom.append(genom1[i])
continue
if random() < mutation_rate:
# Mutation
child_genom.append(random() * widths[i] + bounds[i][0])
else:
# Breeding
rand = round(random())
child_genom.append(rand * genom1[i] + (1-rand) * genom2[i])
return child_genom | 27,162 |
def version_0_2(path_in, path_out_base, skip_if_exists = True):
    """
    * name is based on start time (not launch time)

    :param path_in: raw input file to read.
    :param path_out_base: directory (pathlib.Path-like, must support
        ``joinpath``) the generated netCDF is written into.
    :param skip_if_exists: when True, do nothing if the output already exists.
    :return: the created dataset, or None when skipped.
    """
    version = 'v0.2'
    content = raw.read_file(path_in)
    # Output name is derived from the file content (its start time).
    name_new = generate_name(content)
    path_out = path_out_base.joinpath(name_new)
    if path_out.is_file() and skip_if_exists:
        print('\t File exists')
        return None
    ds = create_dataset(content, version = version)
    # path_out
    ds.to_netcdf(path_out)
    return ds
def value_or_dash(value):
    """Converts the given value to a unicode en dash if the value does
    not exist and does not equal 0.

    Returns the dash as ``str``.  (The previous ``.encode('utf-8')`` was a
    Python 2 relic: under Python 3 it returned ``bytes``, which renders as
    ``b'\\xe2\\x80\\x93'`` when interpolated into text.)
    """
    if not value and value != 0:
        return u'\u2013'
    return value
def distroy_db(app):
    """
    wipe the database

    Currently a no-op placeholder.  (The 'distroy' typo in the name is kept
    for backward compatibility with existing callers.)
    """
    pass
def jaccard_index(box_a, box_b, indices=()):
    """
    Compute the Jaccard Index (Intersection over Union) of 2 boxes. Each box is (x1, y1, x2, y2).

    :param box_a: first box as (x1, y1, x2, y2)
    :param box_b: second box as (x1, y1, x2, y2)
    :param indices: The indices of box_a and box_b as [box_a_idx, box_b_idx].
        Helps in debugging (kept for interface compatibility).
    :return: IoU in [0, 1]; 0 when the boxes are disjoint or degenerate.
    """
    # area of bounding boxes
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])

    x_left = max(box_a[0], box_b[0])
    y_top = max(box_a[1], box_b[1])
    x_right = min(box_a[2], box_b[2])
    y_bottom = min(box_a[3], box_b[3])
    # Clamp each extent at 0: for disjoint boxes both extents can be
    # negative, and their product would otherwise be a spurious positive
    # "intersection".
    intersection = max(0, x_right - x_left) * max(0, y_bottom - y_top)
    union = area_a + area_b - intersection
    if union <= 0:
        # Degenerate (zero/negative area) boxes: define IoU as 0.  This also
        # makes the old division-by-zero path (which printed and exit(1)-ed)
        # impossible.
        return 0
    return intersection / union
def ComputePerSliceMetrics(  # pylint: disable=invalid-name
    slice_result: beam.pvalue.PCollection,
    eval_shared_model: types.EvalSharedModel,
    desired_batch_size: Optional[int] = None,
    compute_with_sampling: Optional[bool] = False,
    random_seed_for_testing: Optional[int] = None) -> beam.pvalue.PCollection:
  """PTransform for computing, aggregating and combining metrics.

  Args:
    slice_result: Incoming PCollection consisting of slice key and extracts.
    eval_shared_model: Shared model parameters for EvalSavedModel.
    desired_batch_size: Optional batch size for batching in Aggregate.
    compute_with_sampling: True to compute with sampling.
    random_seed_for_testing: Seed to use for unit testing.

  Returns:
    PCollection of (slice key, dict of metrics).
  """
  # TODO(b/123516222): Remove this workaround per discussions in CL/227944001
  # (erases the element type so downstream coders are not inferred here).
  slice_result.element_type = beam.typehints.Any

  return (
      slice_result
      # _ModelLoadingIdentityFn loads the EvalSavedModel into memory
      # under a shared handle that can be used by subsequent steps.
      # Combiner lifting and producer-consumer fusion should ensure
      # that these steps run in the same process and memory space.
      # TODO(b/69566045): Remove _ModelLoadingIdentityFn and move model
      # loading to CombineFn.setup after it is available in Beam.
      | 'LoadModel' >> beam.ParDo(
          _ModelLoadingIdentityFn(eval_shared_model=eval_shared_model))
      | 'CombinePerSlice' >> beam.CombinePerKey(
          _AggregateCombineFn(
              eval_shared_model=eval_shared_model,
              desired_batch_size=desired_batch_size,
              compute_with_sampling=compute_with_sampling,
              seed_for_testing=random_seed_for_testing))
      | 'InterpretOutput' >> beam.ParDo(
          _ExtractOutputDoFn(eval_shared_model=eval_shared_model)))
def null() -> ColumnExpr:
    """Equivalent to ``lit(None)``, the ``NULL`` value

    :return: ``lit(None)``
    :rtype: ColumnExpr

    .. admonition:: New Since
        :class: hint

        **0.6.0**
    """
    return lit(None)
def uCSIsThaana(code):
    """Check whether the character is part of Thaana UCS Block

    :param code: Unicode code point to test.
    :return: truthy when *code* falls within the Thaana block.
    """
    ret = libxml2mod.xmlUCSIsThaana(code)
    return ret
def geomprojlib_Curve2d(*args):
    """Project a 3d curve lying on a surface onto its 2d (u, v) curve.

    SWIG-generated dispatcher over the C++ ``GeomProjLib::Curve2d``
    overloads (which use ``GeomProjLib_ProjectedCurve`` internally).
    Accepted call signatures:

    * Curve2d(C, First, Last, S, UFirst, ULast, VFirst, VLast, Tolerance)
    * Curve2d(C, First, Last, S, Tolerance)
    * Curve2d(C, First, Last, S)
    * Curve2d(C, S)
    * Curve2d(C, S, UDeb, UFin, VDeb, VFin)
    * Curve2d(C, S, UDeb, UFin, VDeb, VFin, Tolerance)

    where:

    :param C: Handle_Geom_Curve & -- the 3d curve to project; when First/Last
        are given, only that parametric range is taken.
    :param S: Handle_Geom_Surface & -- the surface carrying the curve.
    :param First: float -- start of the parametric range of C.
    :param Last: float -- end of the parametric range of C.
    :param UFirst: float -- lower U bound of the surface (alias UDeb; the
        UDeb/UFin forms may expand the surface bounds a little).
    :param ULast: float -- upper U bound of the surface (alias UFin).
    :param VFirst: float -- lower V bound of the surface (alias VDeb).
    :param VLast: float -- upper V bound of the surface (alias VFin).
    :param Tolerance: float & -- used as input if the projection needs an
        approximation; the reached tolerance is set in it as output.  The
        forms without Tolerance use Precision::PApproximation().
    :rtype: Handle_Geom2d_Curve

    WARNING : if the projection has failed, this method returns a null
    Handle.
    """
    # Overload resolution happens inside the native extension module.
    return _GeomProjLib.geomprojlib_Curve2d(*args)
def histogram(ds, x, z=None, **plot_opts):
    """Dataset histogram.

    Parameters
    ----------
    ds : xarray.Dataset
        The dataset to plot.
    x : str, sequence of str
        The variable(s) to plot the probability density of. If sequence, plot a
        histogram of each instead of using a ``z`` coordinate.
    z : str, optional
        If given, range over this coordinate a plot a histogram for each.
    row : str, optional
        Dimension to vary over as a function of rows.
    col : str, optional
        Dimension to vary over as a function of columns.
    plot_opts
        See ``xyzpy.plot.core.PLOTTER_DEFAULTS``.

    Returns
    -------
    Histogram
        The constructed plot object.
    """
    return Histogram(ds, x, z=z, **plot_opts)
def get_full_file_path(filepath_parts, path, merge_point=None):
    """Typical path formats in json files are like:
        "parent": "block/cube",
        "textures": {
            "top": "block/top"
        }
    This checks in filepath_parts of the blender file,
    matches the base path, then merges the input path, e.g.
        path = "block/cube"
        merge_point = "models"
        filepath_parts = ["C:", "minecraft", "resources", "models", "block", "cobblestone.json"]
                                                |
                                        Matched merge point
        joined parts = ["C:", "minecraft", "resources", "models"] + ["block", "cube"]

    Returns the joined absolute path, or *path* unchanged when no merge
    point can be matched in ``filepath_parts``.
    """
    path_chunks = os.path.split(path)
    # match base path
    if merge_point is not None:
        idx_base_path = index_of(merge_point, filepath_parts)
    else:
        # no explicit merge point: match on the first component of `path`
        idx_base_path = index_of(path_chunks[0], filepath_parts)
    if idx_base_path != -1:
        # system agnostic path join
        joined_path = os.path.join(os.sep, filepath_parts[0] + os.sep, *filepath_parts[1:idx_base_path+1], *path.split("/"))
        return joined_path
    else:
        return path
def test_fst_send_oom(dev, apdev, test_params):
    """FST send action OOM

    Exercises FST-MANAGER commands while fst_session_send_action allocation
    is forced to fail, plus a set of malformed SESSION_SET arguments.
    """
    ap1, ap2, sta1, sta2 = fst_module_aux.start_two_ap_sta_pairs(apdev)
    try:
        fst_module_aux.connect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
        hapd = ap1.get_instance()
        sta = sta1.get_instance()
        dst = sta.own_addr()
        src = apdev[0]['bssid']
        # Create session
        initiator = ap1
        responder = sta1
        new_iface = ap2.ifname()
        new_peer_addr = ap2.get_actual_peer_addr()
        resp_newif = sta2.ifname()
        peeraddr = None
        initiator.add_peer(responder, peeraddr, new_peer_addr)
        sid = initiator.add_session()
        initiator.configure_session(sid, new_iface)
        # Under forced allocation failure the initiate must report FAIL.
        with alloc_fail(hapd, 1, "fst_session_send_action"):
            res = initiator.grequest("FST-MANAGER SESSION_INITIATE " + sid)
            if not res.startswith("FAIL"):
                raise Exception("Unexpected SESSION_INITIATE result")
        res = initiator.grequest("FST-MANAGER SESSION_INITIATE " + sid)
        if not res.startswith("OK"):
            raise Exception("SESSION_INITIATE failed")
        # Malformed SESSION_SET arguments must all be rejected.
        tests = [ "", "foo", sid, sid + " foo", sid + " foo=bar" ]
        for t in tests:
            res = initiator.grequest("FST-MANAGER SESSION_SET " + t)
            if not res.startswith("FAIL"):
                raise Exception("Invalid SESSION_SET accepted")
        with alloc_fail(hapd, 1, "fst_session_send_action"):
            res = initiator.grequest("FST-MANAGER SESSION_TEARDOWN " + sid)
            if not res.startswith("FAIL"):
                raise Exception("Unexpected SESSION_TEARDOWN result")
    finally:
        fst_module_aux.disconnect_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
        fst_module_aux.stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2)
def set_seed(seed: int) -> None:
    """
    Seed both the stdlib and NumPy PRNGs for reproducibility.

    :param int seed: seed.
    """
    for seeder in (random.seed, np.random.seed):
        seeder(seed)
def get_resource(name):
    """Convenience method for retrieving a package resource.

    Returns a binary file-like stream for *name*, resolved relative to this
    package.  NOTE(review): ``pkg_resources`` is deprecated upstream;
    consider ``importlib.resources`` when the minimum Python allows it.
    """
    return pkg_resources.resource_stream(__name__, name)
def get_team(args):
    """Return authenticated team token data.

    Looks the team up by the ``team_id`` entry in *args*; per SQLAlchemy
    ``Query.get`` semantics, returns None when no such team exists.
    """
    return Team.query.get(args['team_id'])
def test_et_stack_run_proba():
    """[EnsembleTransformer | Stack | Prep] retrieves fit predictions."""
    # Both boolean flags enable probability outputs for the run.
    run(EnsembleTransformer, 'stack', True, True, folds=3)
def make_unweight_time_optimal_supervisor(comp_names, req_names, evt_pairs,
                                          sup_name):
    """
    Compute a non weighted time optimal supervisor.

    @param comp_names: Available components (weighted automata).
    @type  comp_names: C{list} of L{str}

    @param req_names: Available requirements (unweighted automata).
    @type  req_names: C{list} of L{str}

    @param evt_pairs: Additional event pairs (eg "{(a, b), (c, e)}", "type1",
                      or "type2")
    @type  evt_pairs: C{str}

    @param sup_name: Name of the resulting supervisor.
    @type  sup_name: C{str}
    """
    common.print_line("Started time unweight optimal supervisor computations "
                      "(version %s)" % automata.version)
    coll = collection.Collection()
    comp_list = load_unweight_automata(coll, comp_names, False, True)
    req_list = frontend.load_automata(coll, req_names, False, True)
    evt_pairs = taskresource.process_event_pairs(coll, req_list, evt_pairs)
    result = compute_weight.compute_unweight_time_optimal_supervisor(
        comp_list, req_list,
        evt_pairs)
    if result is None:
        common.print_line('Could not compute the weighted supervisor')
        return
    wsup = result
    # Makespan reporting / automaton reduction kept disabled here:
    #one = maxplus.make_rowmat(0, heap_len)
    #one = maxplus.otimes_mat_mat(one, wmap[wsup.initial])
    #biggest = one.get_scalar()
    #common.print_line("Sub-optimal makespan is %s" % biggest)
    #wsup = weighted_supervisor.reduce_automaton(wsup, wmap, eventdata,
    #                                            heap_len)
    frontend.dump_stats("Computed unweighted supervisor", wsup)
    save_weighted_automaton(wsup, "Supervisor is saved in %s\n", sup_name)
def config_sanity_check(config: dict) -> dict:
    """
    Check if the given config satisfies the requirements.

    :param config: entire config.
    :return: the (possibly upgraded) config.
    :raises ValueError: when a conditional model is paired with unlabeled data.
    """
    # Upgrade v0.1.1-style configs before validating anything.
    config = parse_v011(config)

    # check model: a conditional model cannot train on unlabeled data.
    conditional = config["train"]["method"] == "conditional"
    unlabeled = config["dataset"]["train"]["labeled"] is False
    if conditional and unlabeled:
        raise ValueError(
            "For conditional model, data have to be labeled, got unlabeled data."
        )

    return config
def parse_yaml() -> Dataset:
    """Test that 'after' parameters are properly read"""
    # NOTE(review): reads the module-level file handle `f` (defined outside
    # this block) -- confirm it is (re)opened/rewound before each call.
    d = yaml.safe_load(f)
    # Only the first dataset entry in the YAML is exercised here.
    dataset = d.get("dataset")[0]
    d: FidesopsDataset = FidesopsDataset.parse_obj(dataset)
    return convert_dataset_to_graph(d, "ignore")
def run(
    desc_file, results_dir, start_date, walltime, n_days=1, no_submit=False, quiet=False
):
    """Create and populate a temporary run directory, and a run script,
    and submit the run to the queue manager.
    The run script is stored in :file:`SoGWW3.sh` in the temporary run directory.
    That script is submitted to the queue manager in a subprocess.
    :param desc_file: File path/name of the YAML run description file.
    :type desc_file: :py:class:`pathlib.Path`
    :param results_dir: Path of the directory in which to store the run results;
                        it will be created if it does not exist.
    :type results_dir: :py:class:`pathlib.Path`
    :param start_date: Date to start run execution on.
    :type :py:class:`arrow.Arrow`:
    :param str walltime: HPC batch job walltime to use for the run;
                         formatted as :kbd:`HH:MM:SS`.
    :param int n_days: Number of days of runs to execute in the batch job.
    :param boolean no_submit: Prepare the temporary run directory,
                              and the run script to execute the WaveWatch III® run,
                              but don't submit the run to the queue.
    :param boolean quiet: Don't show the run directory path message;
                          the default is to show the temporary run directory
                          path.
    :returns: Message generated by queue manager upon submission of the
              run script.
    :rtype: str
    """
    # Resolve the run id, grid, and forcing paths from the YAML run description.
    run_desc = nemo_cmd.prepare.load_run_desc(desc_file)
    run_id = nemo_cmd.prepare.get_run_desc_value(run_desc, ("run_id",))
    runs_dir = nemo_cmd.prepare.get_run_desc_value(
        run_desc, ("paths", "runs directory"), resolve_path=True
    )
    mod_def_ww3_path = nemo_cmd.prepare.get_run_desc_value(
        run_desc, ("grid", "mod_def.ww3 file"), resolve_path=True
    )
    current_forcing_dir = nemo_cmd.prepare.get_run_desc_value(
        run_desc, ("forcing", "current"), resolve_path=True
    )
    wind_forcing_dir = nemo_cmd.prepare.get_run_desc_value(
        run_desc, ("forcing", "wind"), resolve_path=True
    )
    # One batch day per item; single-day runs keep the plain results/tmp dir
    # names, multi-day runs get per-day ddmmmyy suffixes.
    days = list(arrow.Arrow.range("day", start_date, limit=n_days))
    run_start_dates_yyyymmdd = (
        [start_date.format("YYYYMMDD")]
        if n_days == 1
        else [day.format("YYYYMMDD") for day in days]
    )
    results_dirs = (
        [_resolve_results_dir(results_dir)]
        if n_days == 1
        else [
            _resolve_results_dir(results_dir) / (day.format("DDMMMYY").lower())
            for day in days
        ]
    )
    # Timestamp makes the temporary run dir names unique across invocations.
    tmp_run_dir_timestamp = arrow.now().format("YYYY-MM-DDTHHmmss.SSSSSSZ")
    tmp_run_dirs = (
        [runs_dir / f"{run_id}_{tmp_run_dir_timestamp}"]
        if n_days == 1
        else [
            runs_dir
            / f"{run_id}_{day.format('DDMMMYY').lower()}_{tmp_run_dir_timestamp}"
            for day in days
        ]
    )
    # Render one cookiecutter-templated run directory per day.
    for day, day_results_dir, tmp_run_dir in zip(days, results_dirs, tmp_run_dirs):
        day_run_id = run_id
        try:
            restart_path = nemo_cmd.prepare.get_run_desc_value(
                run_desc, ("restart", "restart.ww3"), resolve_path=True, fatal=False
            )
        except KeyError:
            # No restart file configured: the run starts from calm wave fields.
            restart_path = ""
        cookiecutter_context = {
            "tmp_run_dir": tmp_run_dir,
            "run_start_dates_yyyymmdd": "\n ".join(run_start_dates_yyyymmdd),
            "results_dirs": "\n ".join(map(os.fspath, results_dirs)),
            "work_dirs": "\n ".join(map(os.fspath, tmp_run_dirs)),
            "batch_directives": _sbatch_directives(run_desc, day_results_dir, walltime),
            "module_loads": "module load netcdf-fortran-mpi/4.4.4",
            "run_id": run_id,
            "runs_dir": runs_dir,
            "run_start_date_yyyymmdd": start_date.format("YYYYMMDD"),
            "run_end_date_yyyymmdd": start_date.shift(days=+1).format("YYYYMMDD"),
            "mod_def_ww3_path": mod_def_ww3_path,
            "current_forcing_dir": current_forcing_dir,
            "wind_forcing_dir": wind_forcing_dir,
            "restart_path": restart_path,
            "results_dir": day_results_dir,
        }
        if n_days > 1:
            # Multi-day: per-day run id, and each day restarts from the
            # previous day's results directory (same file name, daym1 dir).
            day_run_id = f"{run_id}_{day.format('DDMMMYY').lower()}"
            if restart_path:
                daym1_ddmmmyy = day.shift(days=-1).format("DDMMMYY").lower()
                restart_path = (
                    restart_path.parent.parent / daym1_ddmmmyy
                ) / restart_path.name
            else:
                logger.warning(
                    "You have requested a multi-day run with no restart file path. "
                    "Each day of the run will start from calm wave fields. "
                    "Is this really what you want?"
                )
            cookiecutter_context.update(
                {
                    "run_id": day_run_id,
                    "run_start_date_yyyymmdd": day.format("YYYYMMDD"),
                    "run_end_date_yyyymmdd": day.shift(days=+1).format("YYYYMMDD"),
                    "restart_path": restart_path,
                }
            )
        cookiecutter.main.cookiecutter(
            os.fspath(Path(__file__).parent.parent / "cookiecutter"),
            no_input=True,
            output_dir=runs_dir,
            extra_context=cookiecutter_context,
        )
        day_run_desc = deepcopy(run_desc)
        day_run_desc.update(
            {"run_id": day_run_id, "restart": {"restart.ww3": os.fspath(restart_path)}}
        )
        _write_tmp_run_dir_run_desc(day_run_desc, tmp_run_dir, desc_file, n_days)
        if not quiet:
            logger.info(f"Created temporary run directory {tmp_run_dir}")
        day_results_dir.mkdir(parents=True, exist_ok=True)
    # Only the first day's SoGWW3.sh is submitted; it presumably iterates over
    # the run_start_dates/work_dirs lists to chain the days — confirm in the
    # cookiecutter template. Remove the redundant per-day scripts.
    try:
        for tmp_run_dir in tmp_run_dirs[1:]:
            (tmp_run_dir / "SoGWW3.sh").unlink()
    except IndexError:
        # len(tmp_run_dirs) == 1 for n_days == 1
        pass
    run_script_file = tmp_run_dirs[0] / "SoGWW3.sh"
    if not quiet:
        logger.info(f"Wrote job run script to {run_script_file}")
    if no_submit:
        return
    # Submit to the Slurm queue manager and return its response message.
    sbatch_cmd = f"sbatch {run_script_file}"
    submit_job_msg = subprocess.run(
        shlex.split(sbatch_cmd),
        check=True,
        universal_newlines=True,
        stdout=subprocess.PIPE,
    ).stdout
    return submit_job_msg
def race_data_cleaning(race_ethnicity_path):
    """Load, clean, and relabel the race/ethnicity birth-data CSV.

    Missing cells (marked ``*`` in the source file) become 0, the source's
    default sort column is dropped, columns and category labels are renamed
    to short snake_case forms, 'Total' summary rows are removed, and the
    year column is converted to numbers.

    :param race_ethnicity_path: path to the race/ethnicity births CSV file.
    :returns: cleaned :class:`pandas.DataFrame`.
    """
    births = pd.read_csv(race_ethnicity_path, na_values='*', engine='python')
    # Suppressed counts become 0, and the source's sort key is not needed.
    births = births.fillna(value=0).drop(labels='sort', axis=1)
    # Short snake_case column names for ease of access.
    births = births.rename(columns={'birth count': 'birth_count',
                                    'birth count_pct': 'birth_percentage',
                                    'county name': 'county',
                                    'ethnicity desc': 'ethnicity',
                                    'low birth weight ind desc': 'weight_indicator',
                                    'race catg desc': 'race',
                                    'year desc': 'year'})
    # Short labels for a handful of verbose category values.
    relabels = {
        '2017 **': 2017,
        'Low birth weight (<2500g)': 'low',
        'Normal birth weight (2500g+)': 'normal',
        'African American (Black)': 'African American',
        'Pacific Islander/Hawaiian': 'Pacific Islander',
        'Unknown/Not Reported': 'Unknown',
    }
    births = births.replace(to_replace=list(relabels),
                            value=list(relabels.values()))
    # Drop the 'Total' summary rows, then make years numeric.
    births = births[(births.weight_indicator != 'Total')
                    & (births.year != 'Total')]
    births = births.assign(year=pd.to_numeric(births.year))
    return births
def validate_resource_policy(policy_document):
    """Validate a resource policy document.

    The document's serialized JSON form must be non-empty and at most
    5120 characters long.

    :param policy_document: a str, dict, or policy object.
    :returns: the validated ``policy_document`` unchanged.
    :raises ValueError: on type, JSON, or length violations.
    """
    if not isinstance(policy_document, policytypes):
        raise ValueError("PolicyDocument must be a valid policy document")
    if isinstance(policy_document, str) and not json_checker(policy_document):
        raise ValueError("PolicyDocument must be a valid JSON formated string")
    # Normalize to the serialized JSON text whose length is constrained.
    if isinstance(policy_document, str):
        serialized = policy_document
    elif isinstance(policy_document, dict):
        serialized = json.dumps(policy_document)
    else:
        serialized = policy_document.to_json()
    # NB: {} empty dict is 2 length
    if len(serialized) < 3:
        raise ValueError("PolicyDocument must not be empty")
    if len(serialized) > 5120:
        raise ValueError("PolicyDocument maximum length must not exceed 5120")
    return policy_document
def ResolveOrganizationSecurityPolicyId(org_security_policy, display_name,
                                        organization_id):
    """Returns the security policy id that matches the display_name in the org.

    Exits the program with an error message when no security policy in the
    organization has the given display name.

    Args:
    org_security_policy: the organization security policy.
    display_name: the display name of the security policy to be resolved.
    organization_id: the organization ID which the security policy belongs to.

    Returns:
    Security policy resource ID.
    """
    response = org_security_policy.List(
        parent_id=organization_id, only_generate_request=False)
    # First policy whose display name matches, or None if there is none.
    sp_id = next(
        (sp.name for sp in response[0].items if sp.displayName == display_name),
        None)
    if sp_id is None:
        log.error(
            'Invalid display name: {0}. No Security Policy with this display name exists.'
            .format(display_name))
        sys.exit()
    return sp_id
def dump_annotations_workflow(
        gc, slide_id, local, monitorPrefix='',
        save_json=True, save_sqlite=False, dbcon=None,
        callback=None, callback_kwargs=None):
    """Dump annotations for single slide into the local folder.
    Parameters
    -----------
    gc : girder_client.GirderClient
        authenticated girder client instance
    slide_id : str
        girder id of item (slide)
    monitorPrefix : str
        prefix to monitor string
    local : str
        local path to dump annotations
    save_json : bool
        whether to dump annotations as json file
    save_sqlite : bool
        whether to save the backup into an sqlite database
    dbcon : sqlalchemy.create_engine.connect() object
        IGNORE THIS PARAMETER!! This is used internally.
    callback : function
        function to call that takes in AT LEAST the following params
        - item: girder response with item information
        - annotations: loaded annotations
        - local: local directory
        - monitorPrefix: string
    callback_kwargs : dict
        kwargs to pass along to callback
    """
    callback_kwargs = callback_kwargs or {}
    # NOTE(review): best-effort — any failure below is printed and swallowed
    # so a batch dump can continue past a bad slide; confirm callers rely on
    # this rather than on an exception propagating.
    try:
        item = gc.get('/item/%s' % slide_id)
        # All output files for this slide share the item name as a prefix.
        savepath_base = os.path.join(local, item['name'])
        # dump item information json
        if save_json:
            print("%s: save item info" % monitorPrefix)
            with open(savepath_base + '.json', 'w') as fout:
                json.dump(item, fout)
        # save folder info to sqlite
        if save_sqlite:
            _add_item_to_sqlite(dbcon, item)
        # pull annotation
        print("%s: load annotations" % monitorPrefix)
        annotations = gc.get('/annotation/item/' + item['_id'])
        if annotations is not None:
            # dump annotations to JSON in local folder
            if save_json:
                print("%s: save annotations" % monitorPrefix)
                with open(savepath_base + '_annotations.json', 'w') as fout:
                    json.dump(annotations, fout)
            # run callback
            if callback is not None:
                print("%s: run callback" % monitorPrefix)
                callback(
                    item=item, annotations=annotations, local=local,
                    dbcon=dbcon, monitorPrefix=monitorPrefix,
                    **callback_kwargs)
    except Exception as e:
        print(str(e))
def load_dataset(filenames, batch_size, corruption_func, crop_size):
    """
    The following function generates a
    random data out of filenames,
    it keeps a cache dictionary of images,
    once it is called, it generates
    2 patches, out of the random image,
    one is sampled from image-0.5, the other
    is sampled from blurred_image-0.5.
    :param filenames: List of image files
    :param batch_size: The size of the batch that returned in each call
    :param corruption_func: corruption function (has to work on 2d numpy arrays)
    :param crop_size: A tuple (height, width) specifying the crop size of the patches to extract
    :return: the function returns two batches of size batch size, one is related to the blurred image (blurred by corruption func),
             the other one is related to the original one.
    """
    # Cache of already-loaded grayscale images, keyed by filename.
    image_dict = {}
    crop_height, crop_width = crop_size
    # Sample a 3x larger patch first so the corruption sees surrounding context.
    larger_crop = (crop_height * 3, crop_width * 3)
    while True:
        indexes = np.random.choice(range(len(filenames)), batch_size)
        source_batch = np.zeros((batch_size, crop_height, crop_width, 1))
        target_batch = np.zeros((batch_size, crop_height, crop_width, 1))
        for i, index in enumerate(indexes):
            image_name = filenames[index]
            # Load each image at most once (idiomatic membership test,
            # not `in image_dict.keys()`).
            if image_name not in image_dict:
                image_dict[image_name] = read_image(image_name, GRAY_SCALE)
            image = image_dict[image_name]
            larger_patch = sample_patch(image, larger_crop)
            larger_patch_corrupted = corruption_func(larger_patch)
            # Crop the same window from the clean and corrupted patches.
            regular_patch, corrupted_patch = sample_patch(
                larger_patch, crop_size, larger_patch_corrupted)
            # Center pixel values around 0 by subtracting 0.5.
            source_batch[i] = corrupted_patch[:, :, np.newaxis] - 0.5
            target_batch[i] = regular_patch[:, :, np.newaxis] - 0.5
        yield source_batch, target_batch
def upsample_filt(size):
    """
    Make a 2D bilinear kernel suitable for upsampling of the given (h, w) size.
    """
    factor = (size + 1) // 2
    # Peak of the bilinear tent: on a grid point for odd sizes,
    # between grid points for even sizes.
    center = factor - 1 if size % 2 == 1 else factor - 0.5
    # 1-D triangular ramp; the 2-D kernel is its outer product with itself.
    ramp = 1 - abs(np.arange(size) - center) / factor
    return np.outer(ramp, ramp)
def test_bossac_create_with_adafruit(cc, req, bcfg_ini, bcfg_check,
                                     bcfg_val, get_cod_par, sup,
                                     runner_config):
    """
    Test SAM-BA extended protocol with Adafruit UF2 variation
    Requirements:
        SDK >= 0.12.0
    Configuration:
        Extended bootloader
        CONFIG_USE_DT_CODE_PARTITION=y
        CONFIG_BOOTLOADER_BOSSA_ADAFRUIT_UF2=y
        with zephyr,code-partition
    Input:
        --bossac-port
    Output:
        --offset
    """
    # NOTE(review): the positional args (cc, req, bcfg_*, get_cod_par, sup)
    # appear to be pytest fixtures/mocks that patch toolchain and build-config
    # lookups — confirm against the module's conftest/fixture definitions.
    runner = BossacBinaryRunner(runner_config, port=TEST_BOSSAC_PORT)
    # Patch os.path.isfile so the runner "finds" the files it probes for.
    with patch('os.path.isfile', side_effect=os_path_isfile_patch):
        runner.run('flash')
    # cc records every spawned command; compare against the golden sequence.
    assert cc.call_args_list == [call(x) for x in EXPECTED_COMMANDS_WITH_EXTENDED]
def _MAC_hash(mac_str):
    """
    Returns MAC hash value in uppercase hexadecimal form and truncated to
    32 characters.
    """
    # An MD5 hexdigest is already exactly 32 hex characters, so the slice
    # is a defensive cap rather than a real truncation.
    digest = MD5.new(mac_str).hexdigest()
    return digest.upper()[:32]
def divide_rows(matrix, column, in_place=False):
    """Divide each row of `matrix` by the corresponding element in `column`.
    The result is as follows: out[i, j] = matrix[i, j] / column[i]
    Parameters
    ----------
    matrix : np.ndarray, scipy.sparse.csc_matrix or csr_matrix, shape (M, N)
        The input matrix.
    column : a 1D np.ndarray, shape (M,)
        The column dividing `matrix`.
    in_place : bool (optional, default False)
        Do the computation in-place.
    Returns
    -------
    out : same type as `matrix`
        The result of the row-wise division.
    """
    out = matrix if in_place else matrix.copy()
    # isinstance (not `type(...) ==` / `type(...) in [...]`) is the idiomatic
    # and subclass-safe type check.
    if isinstance(out, (sparse.csc_matrix, sparse.csr_matrix)):
        convert_to_csr = isinstance(out, sparse.csr_matrix)
        if convert_to_csr:
            # Work in CSC format, where `indices` holds row indices,
            # so each stored value can be matched with its row divisor.
            out = out.tocsc()
        column_repeated = np.take(column, out.indices)
        # Only divide stored non-zero entries; zeros stay zero.
        nz = out.data.nonzero()
        out.data[nz] /= column_repeated[nz]
        if convert_to_csr:
            out = out.tocsr()
    else:
        # Dense path: broadcast the divisor down the rows.
        out /= column[:, np.newaxis]
    return out
def multiSMC(nruns=10, nprocs=0, out_func=None, collect=None, **args):
    """Run SMC algorithms in parallel, for different combinations of parameters.

    `multiSMC` relies on the `multiplexer` utility, and obeys the same logic.
    A basic usage is::

        results = multiSMC(fk=my_fk_model, N=100, nruns=20, nprocs=0)

    This runs the same SMC algorithm 20 times, using all available CPU cores.
    ``results`` is a list of 20 dicts, one per run, with keys:

    + ``'run'``: a run identifier (a number between 0 and nruns-1)
    + ``'output'``: the corresponding SMC object (once method run was completed)

    Since a `SMC` object may take a lot of space in memory (especially with
    ``store_history=True``), option `out_func` lets you store only a chosen
    summary of each run, e.g. the log-likelihood estimate::

        of = lambda pf: pf.logLt
        results = multiSMC(fk=my_fk_model, N=100, nruns=20, out_func=of)

    Parameters may be varied by passing lists; the runs then amount to a
    *cartesian product* of the supplied values, each combination repeated
    `nruns` times::

        results = multiSMC(fk=my_fk_model, N=[100, 1000],
                           resampling=['multinomial', 'residual'], nruns=20)

    runs the algorithm 80 times (20 per combination), and each output dict
    additionally records the parameter values used for that run.

    Finally, passing a dict instead of a list, e.g.::

        results = multiSMC(fk={'bootstrap': fk_boot, 'guided': fk_guided}, N=100)

    makes the output dicts report the dict *keys* instead of the parameter
    values (e.g. ``{'fk': 'bootstrap'}``), which is convenient when the value
    is some non-standard object.

    Parameters
    ----------
    * nruns: int, optional
        number of runs (default is 10)
    * nprocs: int, optional
        number of processors to use (default 0, i.e. all available cores);
        if negative, number of cores not to use.
    * out_func: callable, optional
        function to transform the output of each SMC run. (If not given,
        output will be the complete SMC object).
    * collect: list of collectors, or 'off'
        this particular argument of class SMC may be a list, hence it is
        "protected" from Cartesianisation
    * args: dict
        arguments passed to SMC class (except collect)

    Returns
    -------
    A list of dicts

    See also
    --------
    `utils.multiplexer`: for more details on the syntax.
    """
    # Wrap user-supplied summary functions so they survive pickling
    # across worker processes; identity otherwise.
    if out_func is None:
        f = _identity
    else:
        f = _picklable_f(out_func)
    return utils.multiplexer(
        f=f, nruns=nruns, nprocs=nprocs, seeding=True,
        protected_args={'collect': collect}, **args)
def test_X_setter():
    """Assert that the X setter changes the feature set."""
    atom = ATOMClassifier(X_bin, y_bin, random_state=1)
    # Assigning through the X property should shrink the feature set
    # to the first 10 columns while keeping all rows.
    atom.X = atom.X.iloc[:, :10]
    assert atom.X.shape == (len(X_bin), 10)
def colon_event_second(colon_word, words, start):
    """The second <something>

    <something> can be:

    * <day-name> -- the second day of that name in a month
    """
    # Guard: exactly one word is expected, and it must be a day name.
    if len(words) != 1:
        raise GiveUp('Expected a day name, in {}'.format(
            colon_what(colon_word, words)))
    day_name = words[0].capitalize()
    if day_name not in DAYS:
        raise GiveUp('Expected a day name, not {!r}. in {}'.format(
            words[0], colon_what(colon_word, words)))
    # Second occurrence of that weekday in the month containing `start`.
    event = Event(calc_ordinal_day(start, 2, day_name))
    event.repeat_ordinal.add((2, day_name))
    event.colon_date = colon_what(colon_word, words)
    return event
def outfalls_to_dfs(model, model_id):
    """Read the outfall-node section of a model file into a Pandas DataFrame,
    stopping at the first blank line.

    :param model: path of the model file to parse.
    :param model_id: identifier stored in the resulting ``model_id`` column.
    :returns: DataFrame of outfall rows (optionally filtered to the nodes
        referenced by the model's links), with snake_case column names.
    """
    global RELEVANT_NODES
    RELEVANT_NODES = get_nodes_from_links(model, model_id)
    # build_groups_dicts scans the whole file; call it once and reuse the
    # result instead of three separate scans.
    outfalls_info = build_groups_dicts(model)['nodes_outfalls']
    start = outfalls_info['start']
    skip_rows = outfalls_info['line_to_skip']
    header = outfalls_info['header']
    contents = []
    with open(model, newline='') as f:
        r = csv.reader(f)
        for i, line in enumerate(r):
            # Only lines after the section header, excluding the marked line.
            if i > start + 1 and i != skip_rows:
                if not line:
                    # Blank line terminates the section.
                    break
                tokens = line[0].split()
                # Empty RELEVANT_NODES means "keep everything".
                if len(RELEVANT_NODES) == 0 or tokens[0] in RELEVANT_NODES:
                    contents.append(tokens)
    df = pd.DataFrame(
        data=contents,
        columns=[col.lower().replace("-", "_").replace("%", "").replace(" ", "_")
                 for col in header],
    )
    df.insert(0, 'model_id', model_id)
    print('outfalls', 'df created!')
    return df
def get_count(path, **kwargs):
    """
    Return the number of items in a dictionary or array.
    :param path: Path to the dictionary or array to count
    :param kwargs: Additional spec options forwarded to the spec builder
    This operation is only valid in :cb_bmeth:`lookup_in`
    .. versionadded:: 2.2.5
    """
    # Build a sub-document spec tuple for the GET_COUNT command at `path`.
    return _gen_3spec(_P.SDCMD_GET_COUNT, path, **kwargs)
def test_basic_circle_net():
    """Smoke-test a BasicCircleNet forward pass and its output shapes."""
    circle_net = BasicCircleNet(
        spatial_dims=2,
        in_channels=5,
        out_channels_heatmap=1,
        out_channels_radius=3,
    )
    # Deterministic float input: batch of 3, 5 channels, 32x32 spatial grid.
    batch = torch.arange(3 * 5 * 32 * 32, dtype=torch.float32)
    batch = batch.reshape(3, 5, 32, 32)
    outputs = circle_net(batch)
    # Spatial size is preserved; channel counts follow the constructor args.
    assert outputs['radius'].shape == (3, 3, 32, 32)
    assert outputs['heatmap'].shape == (3, 1, 32, 32)
def error_message() -> str:
    """Return the standard reply for unrecognized user input."""
    message = 'Invalid input. Use !help for a list of commands.'
    return message
def has_substring(string):
    """
    Validate that the given substring is part of the given string.
    >>> f = has_substring('foobarhamjam')
    >>> f('arham')
    True
    >>> f('barham')
    True
    >>> f('FOO')
    False
    >>> f('JAMHAM')
    False
    :param str string: Main string to compare against.
    :rtype: A validator function.
    """
    def contains(substring):
        # Case-sensitive containment check against the captured string.
        return substring in string
    return contains
def ReplyGump(gumpid: int, buttonid: int, switches: int, textentries: int, str6: str) -> None:
    """
    Sends a button reply to server gump, parameters are gumpID and buttonID.

    NOTE(review): this is a stub (the body is ``pass``) — presumably the real
    implementation is supplied by the host scripting engine; confirm before
    calling it standalone.

    :param gumpid: ItemID / Graphic such as 0x3db.
    :param buttonid: Gump button ID.
    :param switches: Integer value - See description for usage. (Optional)
    :param textentries: Not specified - See description for usage. (Optional)
    :param str6: not documented
    """
    pass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.