| content (string, length 22 to 815k) | id (int64, 0 to 4.91M) |
|---|---|
def decay_value(base_value, decay_rate, decay_steps, step):
""" decay base_value by decay_rate every decay_steps
:param base_value:
:param decay_rate:
:param decay_steps:
:param step:
:return: decayed value
"""
return base_value*decay_rate**(step/decay_steps)
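# Illustrative usage (added; not part of the original snippet): with
# decay_rate=0.5 and decay_steps=10, the value halves every 10 steps.
assert decay_value(1.0, 0.5, 10, 0) == 1.0
assert decay_value(1.0, 0.5, 10, 10) == 0.5
assert abs(decay_value(1.0, 0.5, 10, 20) - 0.25) < 1e-12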
| 17,900
|
def setup_flask_app(manager_ip='localhost',
driver='',
hash_salt=None,
secret_key=None):
"""Setup a functioning flask app, when working outside the rest-service
:param manager_ip: The IP of the manager
:param driver: SQLA driver for postgres (e.g. pg8000)
:param hash_salt: The salt to be used when creating user passwords
:param secret_key: Secret key used when hashing flask tokens
:return: A Flask app
"""
app = Flask(__name__)
manager_config.load_configuration(from_db=False)
with app.app_context():
app.config['SQLALCHEMY_DATABASE_URI'] = manager_config.db_url
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['ENV'] = 'production'
set_flask_security_config(app, hash_salt, secret_key)
Security(app=app, datastore=user_datastore)
Migrate(app=app, db=db)
db.init_app(app)
app.app_context().push()
return app
| 17,901
|
def download_file(file_id, unique_id, credentials):
"""Downloads a file from google drive if user has been authenticated using oauth2
Args:
file_id (str): [The google drive id of the file]
unique_id (str): [The name of the video that is to be used for stored file]
Returns:
bool: [whether the file has been successfully downloaded or not]
"""
http = credentials.authorize(httplib2.Http())
service = discovery.build("drive", "v3", http=http)
request = service.files().get_media(fileId=file_id)
fh = BytesIO()
# Initialise a downloader object to download the file
# Downloads in chunks of 2MB
downloader = MediaIoBaseDownload(fh, request, chunksize=2048000)
done = False
try:
# Download the data in chunks
while not done:
status, done = downloader.next_chunk()
fh.seek(0)
# Write the received data to the file
with open(f"./{videos_dir}/{unique_id}", "wb") as f:
shutil.copyfileobj(fh, f)
print("File Downloaded")
# Return True if file Downloaded successfully
return True
except Exception as e:
print(str(e))
# Return False if something went wrong
print("Something went wrong.")
return False
| 17,902
|
def scantree(path):
# type: (str) -> os.DirEntry
"""Recursively scan a directory tree
:param str path: path to scan
:rtype: DirEntry
:return: DirEntry via generator
"""
for entry in scandir(path):
if entry.is_dir(follow_symlinks=True):
# due to python2 compat, cannot use yield from here
for t in scantree(entry.path):
yield t
else:
yield entry
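# Usage sketch (added): assuming the module-level `scandir` is os.scandir,
# this prints every regular file beneath the current directory.
from os import scandir  # satisfies the snippet's `scandir` dependency
for file_entry in scantree("."):
    print(file_entry.path)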
| 17,903
|
def join_lines(new_lines, txt):
"""Joins lines, adding a trailing return if the original text had one."""
return add_ending('\n'.join(new_lines), txt)
| 17,904
|
def read_db_mysql(db_conn, tablename):
    """Read all rows from a MySQL table and return them as a list of tuples."""
    conn = db_conn
    cursor = conn.cursor()
    # Table names cannot be bound as query parameters, so `tablename` must come
    # from a trusted source to avoid SQL injection.
    cursor.execute('SELECT * FROM %s' % tablename)
    return cursor.fetchall()
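# Hypothetical usage (added): any DB-API connection works; mysql-connector-python
# is shown here, and the connection details and table name are purely illustrative.
import mysql.connector
conn = mysql.connector.connect(host="localhost", user="user",
                               password="secret", database="mydb")
rows = read_db_mysql(conn, "customers")
print(len(rows), "rows read")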
| 17,905
|
def get_components_with_metrics(config):
"""
:type: config mycroft_holmes.config.Config
"""
storage = MetricsStorage(config=config)
components = []
for feature_name, feature_spec in config.get_features().items():
feature_id = config.get_feature_id(feature_name)
metrics = config.get_metrics_for_feature(feature_name)
try:
score = storage.get(feature_id, feature_metric='score')
except MycroftMetricsStorageError:
score = None
component = {
'id': feature_id,
# feature's metadata
'name': feature_name,
'docs': feature_spec.get('url'),
'repo': feature_spec.get('repo'),
# fetch metrics and calculated score
'metrics': metrics,
            'score': score or 0,  # always an int, as we sort on it later
# link to a feature's dashboard
'url': url_for('dashboard.feature', feature_id=feature_id, _external=True),
}
components.append(component)
# sort components by score (descending)
components = sorted(components, key=lambda item: item['score'], reverse=True)
return components
| 17,906
|
def test_show_source_option(tmpdir, capsys):
"""Ensure that --show-source and --no-show-source work."""
with tmpdir.as_cwd():
tmpdir.join("tox.ini").write("[flake8]\nshow_source = true\n")
tmpdir.join("t.py").write("import os\n")
_call_main(["t.py"], retv=1)
expected = """\
t.py:1:1: F401 'os' imported but unused
import os
^
"""
out, err = capsys.readouterr()
assert out == expected
assert err == ""
with tmpdir.as_cwd():
_call_main(["t.py", "--no-show-source"], retv=1)
expected = """\
t.py:1:1: F401 'os' imported but unused
"""
out, err = capsys.readouterr()
assert out == expected
assert err == ""
| 17,907
|
def parse_container_args(field_type: type) -> types.Union[ParamType, types.Tuple[ParamType]]:
"""Parses the arguments inside a container type (lists, tuples and so on).
Args:
field_type (type): pydantic field type
Returns:
types.Union[ParamType, types.Tuple[ParamType]]: single click-compatible type or a tuple
"""
assert is_container(field_type), "Field type is not a container"
args = types.get_args(field_type)
# Early out for untyped containers: standard lists, tuples, List[Any]
# Use strings when the type is unknown, avoid click's type guessing
if len(args) == 0:
return str
# Early out for homogenous containers: Tuple[int], List[str]
if len(args) == 1:
return parse_single_arg(args[0])
# Early out for homogenous tuples of indefinite length: Tuple[int, ...]
if len(args) == 2 and args[1] is Ellipsis:
return parse_single_arg(args[0])
# Then deal with fixed-length containers: Tuple[str, int, int]
return tuple(parse_single_arg(arg) for arg in args)
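# Illustration (added): the branches above mirror what typing.get_args returns
# for container annotations (the snippet aliases the typing module as `types`).
from typing import List, Tuple, get_args
print(get_args(list))             # ()              -> untyped, falls back to str
print(get_args(List[str]))        # (str,)          -> homogeneous container
print(get_args(Tuple[int, ...]))  # (int, Ellipsis) -> variable-length tuple
print(get_args(Tuple[str, int]))  # (str, int)      -> fixed-length tuple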
| 17,908
|
def test_get_submitted_posts(scheduler, schedule):
"""Test getting submitted posts."""
scheduler.schedule = schedule
schedule.posts[0].submission_id = "cute_id_1"
schedule.posts[1].submission_id = "cute_id_2"
assert len(scheduler.get_submitted_posts()) == 2
posts = scheduler.get_submitted_posts(skip_post=schedule.posts[0])
assert posts[0] == schedule.posts[1]
| 17,909
|
def decrypt_from_base64(ciphertext_bs64, decrypt_key: str , iv : str) -> str:
"""From base64 ciphertext decrypt to string.
"""
aes: AES_Turbe = AES_Turbe(decrypt_key,iv)
content_str = aes.decrypt_from_base64(ciphertext_bs64)
return content_str
| 17,910
|
def download_wave_interim(path, N, W, S, E):
"""Download wave data."""
server = ECMWFDataServer()
server.retrieve({
"class": "e4",
"dataset": "interim",
"date": "2016-05-01/to/2016-08-01",
"levtype": "sfc",
"param": "229.140/230.140/232.140",
"step": "0",
"stream": "wave",
"time": "00:00:00/06:00:00/12:00:00/18:00:00",
'area': str(N) + "/" + str(W) + "/" + str(S) + "/" + str(E),
'format': "netcdf",
'target': path+"/data_dir/summer2016_wave_data.nc"
})
| 17,911
|
def _resize_data(image, mask):
"""Resizes images to smaller dimensions."""
image = tf.image.resize_images(image, [480, 640])
mask = tf.image.resize_images(mask, [480, 640])
return image, mask
| 17,912
|
def testopen():
"""Tests that the RDF parser is capable of loading an RDF file
successfully."""
r = RDFParser(open("tests/resources/rdf/pass.rdf"))
assert r.rdf
| 17,913
|
def dist_Mpc_to_geo(dist):
"""convert distance from Mpc to geometric units (i.e., metres)"""
return dist * Mpc
| 17,914
|
def setup_virtualenv():
"""
Setup a server virtualenv.
"""
require('settings', provided_by=[production, staging])
run('virtualenv -p %(python)s --no-site-packages %(virtualenv_path)s' % env)
run('source %(virtualenv_path)s/bin/activate' % env)
| 17,915
|
def upload_to_bucket(file_path, filename):
"""
Upload file to S3 bucket
"""
s3_client = boto3.client('s3')
success = False
try:
response = s3_client.upload_file(file_path, AWS_S3_BUCKET_NAME, filename)
success = True
except ClientError as e:
logger.error('Error at %s', 'boto3.exceptions.ClientError', exc_info=e)
return success
| 17,916
|
def osqueryd_log_parser(osqueryd_logdir=None,
backuplogdir=None,
maxlogfilesizethreshold=None,
logfilethresholdinbytes=None,
backuplogfilescount=None,
enablediskstatslogging=False,
topfile_for_mask=None,
mask_passwords=False):
"""
Parse osquery daemon logs and perform log rotation based on specified parameters
osqueryd_logdir
Directory path where osquery result and snapshot logs would be created
backuplogdir
Directory path where hubble should create log file backups post log rotation
    maxlogfilesizethreshold
        Log file size threshold in bytes. If the osquery log file size is greater than this value,
        then logs will only be rotated but not parsed
    logfilethresholdinbytes
        Log file size threshold in bytes. If the osquery log file is greater than this value,
        then log rotation will be done once logs have been processed
backuplogfilescount
Number of log file backups to keep
enablediskstatslogging
Enable logging of disk usage of /var/log partition. Default is False
topfile_for_mask
This is the location of the top file from which the masking information
will be extracted
mask_passwords
Defaults to False. If set to True, passwords mentioned in the
return object are masked
"""
ret = []
if not osqueryd_logdir:
osqueryd_logdir = __opts__.get('osquerylogpath')
result_logfile = os.path.normpath(os.path.join(osqueryd_logdir, 'osqueryd.results.log'))
snapshot_logfile = os.path.normpath(os.path.join(osqueryd_logdir, 'osqueryd.snapshots.log'))
log.debug("Result log file resolved to: %s", result_logfile)
log.debug("Snapshot log file resolved to: %s", snapshot_logfile)
backuplogdir = backuplogdir or __opts__.get('osquerylog_backupdir')
logfilethresholdinbytes = logfilethresholdinbytes or __opts__.get('osquery_logfile_maxbytes')
maxlogfilesizethreshold = maxlogfilesizethreshold or __opts__.get(
'osquery_logfile_maxbytes_toparse')
backuplogfilescount = backuplogfilescount or __opts__.get('osquery_backuplogs_count')
if os.path.exists(result_logfile):
logfile_offset = _get_file_offset(result_logfile)
event_data = _parse_log(result_logfile,
logfile_offset,
backuplogdir,
logfilethresholdinbytes,
maxlogfilesizethreshold,
backuplogfilescount,
enablediskstatslogging)
if event_data:
ret += event_data
else:
log.warn("Specified osquery result log file doesn't exist: %s", result_logfile)
if os.path.exists(snapshot_logfile):
logfile_offset = _get_file_offset(snapshot_logfile)
event_data = _parse_log(snapshot_logfile,
logfile_offset,
backuplogdir,
logfilethresholdinbytes,
maxlogfilesizethreshold,
backuplogfilescount,
enablediskstatslogging)
if event_data:
ret += event_data
else:
log.warn("Specified osquery snapshot log file doesn't exist: %s", snapshot_logfile)
ret = _update_event_data(ret)
if mask_passwords:
log.info("Perform masking")
_mask_object(ret, topfile_for_mask)
return ret
| 17,917
|
def generate_windows(image, windowsize, stride):
"""creates an generator of sliding window along the width of an image
Args:
image (2 or 3 dimensional numpy array): the image the sliding windows are created for
windowsize (int): width of the sliding window
stride (int): stepsize of the sliding
Returns:
generator for the sliding windows
"""
assert len(image.shape) > 1 and len(image.shape) < 4
number_windows = int(math.ceil(_width(image) / float(stride)))
for i in range(number_windows):
window = image[:,i*stride:i*stride+windowsize]
assert _height(window) == _height(image)
yield window
| 17,918
|
def validate_json(value):
    """Validates a JSON snippet."""
    try:
        json.loads(value)
    except (TypeError, ValueError):
        raise ValidationError(_('Invalid JSON syntax'))
| 17,919
|
def get_filenames(data_dir, mode, valid_id, pred_id, overlap_step, patch_size):
"""Returns a list of filenames."""
if mode == 'train':
train_files = [
os.path.join(data_dir, 'subject-%d.tfrecords' % i)
for i in range(1, 11)
if i != valid_id
]
for f in train_files:
assert os.path.isfile(f), \
('Run generate_tfrecord.py to generate training files.')
return train_files
elif mode == 'valid':
valid_file = os.path.join(data_dir,
'subject-%d-valid-%d-patch-%d.tfrecords' % (valid_id, overlap_step, patch_size))
assert os.path.isfile(valid_file), \
('Run generate_tfrecord.py to generate the validation file.')
return [valid_file]
elif mode == 'pred':
pred_file = os.path.join(data_dir,
'subject-%d-pred-%d-patch-%d.tfrecords' % (pred_id, overlap_step, patch_size))
assert os.path.isfile(pred_file), \
('Run generate_tfrecord.py to generate the prediction file.')
return [pred_file]
| 17,920
|
def downsample_data( data, factor, hdr ):
"""Resample data and update the header appropriately
If factor < 1, this is *upsampling*.
Use this function to just return the data and hdr parts in case you want to do further operations prior to saving.
order=3 appears to crash Python 64-bit on Windows when the image is very large (800x500x500) and the method is trilinear. Order=1 works.
"""
fraction = 1.0 / factor
# ds_data = ndimage.interpolation.zoom(data, zoom=fraction, order=1) # default order=3
# order=3 default
# order=1 for very high-resolution images (default crashes)
# order=0 for nearest neighbour
if len(data.shape) > 3:
print(" Data shape is {0}. Only the first three dimensions will be considered! (The output will be 3D: data[:,:,:,0])".format(data.shape))
ds_data = ndimage.interpolation.zoom(data[:,:,:,0], zoom=fraction, order=0)
else:
ds_data = ndimage.interpolation.zoom(data, zoom=fraction, order=0)
ds_hdr = copy.deepcopy(hdr)
ds_hdr.set_data_shape(ds_data.shape)
new_pixdims = hdr.structarr['pixdim'][1:4] * factor
print("Pixdims old: {0}, new: {1}.".format(hdr.structarr['pixdim'][1:4], new_pixdims))
ds_hdr.structarr['pixdim'][1:4] = new_pixdims
sform_old = hdr.get_sform()
    print(sform_old)
resampling_affine = create_affine(trans=[factor,factor,factor], scale=[factor, factor, factor])
# Create the new sform matrix
sform_new = sform_old.dot(resampling_affine)
# Keep the exact-same translation elements
sform_new[0:3,3] = sform_old[0:3,3]
    print(sform_new)
ds_hdr.set_sform(sform_new)
# hdr_new.set_sform(np.eye(4))
# hdr_new['srow_x'][0] = hdr_new['pixdim'][1]
# hdr_new['srow_y'][1] = hdr_new['pixdim'][2]
# hdr_new['srow_z'][2] = hdr_new['pixdim'][3]
# hdr_new.get_sform()
# hdr_new['srow_x'][3] = hdr_new['pixdim'][1]
# hdr_new['srow_y'][3] = hdr_new['pixdim'][2]
# hdr_new['srow_z'][3] = hdr_new['pixdim'][3]
return ds_data, ds_hdr
# End of downsample_data() definition
| 17,921
|
def plot_hexplot(star_pars, means, covs, chain, iter_count, prec=None,
save_dir='', file_stem='', title=''):
"""
Generates hex plot in the provided directory
    Parameters
    ----------
star_pars : dict
'xyzuvw'
'xyzuvw_cov'
'times'
'something else...'
means : dict
'fitted_now'
'fitted_then'
'origin_now' - optional (currently not in use)
'origin_then' - optional
covs : dict
'fitted_now'
'fitted_then'
'origin_now' - optional (currently not in use)
'origin_then' - optional
chain:
iter_count : integer
"""
logging.info("In plot_hexplot, iter {}".format(iter_count))
ngroups = covs['fitted_then'].shape[0]
# INITIALISE PLOT
plt.clf()
f, ((ax1, ax2, ax3), (ax4, ax5, ax6)) = plt.subplots(2, 3)
f.set_size_inches(30, 20)
f.suptitle(title)
# PLOT THE OVAL PLOTS
plot_fit(star_pars, means, covs, ngroups, iter_count, ax1, 0, 1)
plot_fit(star_pars, means, covs, ngroups, iter_count, ax2, 3, 4)
plot_fit(star_pars, means, covs, ngroups, iter_count, ax4, 0, 3)
plot_fit(star_pars, means, covs, ngroups, iter_count, ax5, 1, 4)
plot_fit(star_pars, means, covs, ngroups, iter_count, ax6, 2, 5)
# PLOT THE HISTOGRAMS
age_samples = get_age_samples(ngroups, chain)
if age_samples is not None:
plot_age_hist(age_samples, ax3)
f.savefig(save_dir+"hexplot_"+file_stem+"{:02}.pdf".format(iter_count),
bbox_inches='tight', format='pdf')
f.clear()
| 17,922
|
def check_if_present(driver: webdriver.Firefox, selector: str):
""" Checks if element is present on page by css selector """
return bool(driver.find_elements_by_css_selector(selector))
| 17,923
|
def test_matcher_end_zero_plus(en_vocab):
"""Test matcher works when patterns end with * operator. (issue 1450)"""
matcher = Matcher(en_vocab)
pattern = [{"ORTH": "a"}, {"ORTH": "b", "OP": "*"}]
matcher.add("TSTEND", [pattern])
nlp = lambda string: Doc(matcher.vocab, words=string.split())
assert len(matcher(nlp("a"))) == 1
assert len(matcher(nlp("a b"))) == 2
assert len(matcher(nlp("a c"))) == 1
assert len(matcher(nlp("a b c"))) == 2
assert len(matcher(nlp("a b b c"))) == 3
assert len(matcher(nlp("a b b"))) == 3
| 17,924
|
def save(ps, save_path):
"""
Save function saves the parameters as a side effect
"""
os.makedirs(save_path, exist_ok=True)
save_file = os.path.join(save_path, "saved")
with open(save_file, "wb") as file_to_save:
pickle.dump(ps, file_to_save)
| 17,925
|
def list_pdf_paths(pdf_folder):
"""
list of pdf paths in pdf folder
"""
return glob(os.path.join(pdf_folder, '*', '*', '*.pdf'))
| 17,926
|
def entities(hass):
"""Initialize the test switch."""
platform = getattr(hass.components, "test.switch")
platform.init()
yield platform.ENTITIES
| 17,927
|
def cmi(x, y, z, k=3, base=2):
"""Mutual information of x and y, conditioned on z
x,y,z should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert len(x)==len(y), 'Lists should have same length.'
assert k <= len(x) - 1, 'Set k smaller than num samples - 1.'
intens = 1e-10 # Small noise to break degeneracy, see doc.
x = [list(p + intens*nr.rand(len(x[0]))) for p in x]
y = [list(p + intens*nr.rand(len(y[0]))) for p in y]
z = [list(p + intens*nr.rand(len(z[0]))) for p in z]
points = zip2(x,y,z)
# Find nearest neighbors in joint space, p=inf means max-norm.
tree = ss.cKDTree(points)
dvec = [tree.query(point, k+1, p=float('inf'))[0][k] for point in points]
a = avgdigamma(zip2(x,z), dvec)
b = avgdigamma(zip2(y,z), dvec)
c = avgdigamma(z,dvec)
d = digamma(k)
return (-a-b+c+d) / log(base)
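# Usage sketch (added): estimates I(X;Y|Z) from synthetic samples. This assumes
# the module also provides the helpers the function relies on (zip2, avgdigamma,
# ss = scipy.spatial, nr = numpy.random, digamma, log), as in NPEET-style code.
import numpy as np
z_s = np.random.randn(500, 1)
x_s = z_s + 0.1 * np.random.randn(500, 1)
y_s = z_s + 0.1 * np.random.randn(500, 1)
print(cmi(x_s.tolist(), y_s.tolist(), z_s.tolist(), k=3))  # close to 0 given Z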
| 17,928
|
def test_question_shows_choices(browser: DriverAPI, registry, web_server, dbsession):
"""If question has active choices they are shown on Show screen, albeit not editable."""
from .tutorial import Question
from .tutorial import Choice
with transaction.manager:
q = Question(question_text="What is love")
dbsession.add(q)
dbsession.flush()
q_slug = uuid_to_slug(q.uuid)
c = Choice(choice_text="Baby don't hurt me", question=q)
dbsession.add(c)
dbsession.flush()
b = browser
create_logged_in_user(dbsession, registry, web_server, browser, admin=True)
b.visit("{}/admin/models/question/{}/show".format(web_server, q_slug))
assert b.is_text_present("Baby don't hurt me")
| 17,929
|
def connected_plate():
"""Detects which plate from the PMA is connected to the device.
Returns:
FirmwareDeviceID: device ID of the connected plate. None if not detected
"""
for plate_id in (
FirmwareDeviceID.pt4_foundation_plate,
FirmwareDeviceID.pt4_expansion_plate,
):
status = __get_fw_device_status(plate_id)
if status.get("connected") is True:
return plate_id
return None
| 17,930
|
def plot_image(dataset, choice=None):
"""
Visual image and label
"""
if choice:
idx = int(choice)
else:
        idx = random.randint(0, len(dataset) - 1)
image = dataset[idx][0]
face = int(dataset[idx][1])
mouth = int(dataset[idx][2])
eyebrow = int(dataset[idx][3])
eye = int(dataset[idx][4])
nose = int(dataset[idx][5])
jaw = int(dataset[idx][6])
plt.imshow(image)
plt.title(f"Face:{face} - Mouth:{mouth} - EyeBrow:{eyebrow} - Eye:{eye} - Nose:{nose} - Jaw:{jaw}")
plt.show()
| 17,931
|
def islogin(_a=None):
"""
是否已经登录,如果已经登录返回token,否则False
"""
if _a is None:
global a
else:
a = _a
x=a.get(DOMAIN+"/apps/files/desktop/own",o=True)
t=a.b.find("input",{"id":"request_token"})
if t is None:
t = a.b.find("input",{"id":"oc_requesttoken"})
if t is None:
return False
else:
return t["value"]
| 17,932
|
def discrete_coons_patch(ab, bc, dc, ad):
"""Creates a coons patch from a set of four or three boundary
polylines (ab, bc, dc, ad).
Parameters
----------
ab : list[[float, float, float] | :class:`~compas.geometry.Point`]
The XYZ coordinates of the vertices of the first polyline.
bc : list[[float, float, float] | :class:`~compas.geometry.Point`]
The XYZ coordinates of the vertices of the second polyline.
dc : list[[float, float, float] | :class:`~compas.geometry.Point`]
The XYZ coordinates of the vertices of the third polyline.
ad : list[[float, float, float] | :class:`~compas.geometry.Point`]
The XYZ coordinates of the vertices of the fourth polyline.
Returns
-------
list[[float, float, float]]
The points of the coons patch.
list[list[int]]
List of faces, with every face a list of indices into the point list.
Notes
-----
The vertices of the polylines are assumed to be in the following order::
        b -----> c
        ^        ^
        |        |
        |        |
        a -----> d
To create a triangular patch, one of the input polylines should be None.
(Warning! This will result in duplicate vertices.)
For more information see [1]_ and [2]_.
References
----------
.. [1] Wikipedia. *Coons patch*.
Available at: https://en.wikipedia.org/wiki/Coons_patch.
.. [2] Robert Ferreol. *Patch de Coons*.
Available at: https://www.mathcurve.com/surfaces/patchcoons/patchcoons.shtml
Examples
--------
>>>
"""
if not ab:
ab = [ad[0]] * len(dc)
if not bc:
bc = [ab[-1]] * len(ad)
if not dc:
dc = [bc[-1]] * len(ab)
if not ad:
ad = [dc[0]] * len(bc)
n = len(ab)
m = len(bc)
n_norm = normalize_values(range(n))
m_norm = normalize_values(range(m))
array = [[0] * m for i in range(n)]
for i, ki in enumerate(n_norm):
for j, kj in enumerate(m_norm):
# first function: linear interpolation of first two opposite curves
lin_interp_ab_dc = add_vectors(scale_vector(ab[i], (1 - kj)), scale_vector(dc[i], kj))
# second function: linear interpolation of other two opposite curves
lin_interp_bc_ad = add_vectors(scale_vector(ad[j], (1 - ki)), scale_vector(bc[j], ki))
# third function: linear interpolation of four corners resulting a hypar
a = scale_vector(ab[0], (1 - ki) * (1 - kj))
b = scale_vector(bc[0], ki * (1 - kj))
c = scale_vector(dc[-1], ki * kj)
d = scale_vector(ad[-1], (1 - ki) * kj)
lin_interp_a_b_c_d = sum_vectors([a, b, c, d])
# coons patch = first + second - third functions
array[i][j] = subtract_vectors(add_vectors(lin_interp_ab_dc, lin_interp_bc_ad), lin_interp_a_b_c_d)
# create vertex list
vertices = []
for i in range(n):
vertices += array[i]
# create face vertex list
faces = []
for i in range(n - 1):
for j in range(m - 1):
faces.append([i * m + j, i * m + j + 1, (i + 1) * m + j + 1, (i + 1) * m + j])
return vertices, faces
| 17,933
|
def w(shape, stddev=0.01):
"""
@return A weight layer with the given shape and standard deviation. Initialized with a
truncated normal distribution.
"""
return tf.Variable(tf.truncated_normal(shape, stddev=stddev))
| 17,934
|
def lr_mult(alpha):
"""Decreases the learning rate update by a factor of alpha."""
@tf.custom_gradient
def _lr_mult(x):
def grad(dy):
return dy * alpha * tf.ones_like(x)
return x, grad
return _lr_mult
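# Minimal check (added): the forward value is unchanged while the gradient
# flowing back to x is scaled by alpha.
import tensorflow as tf
x = tf.constant(2.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = 3.0 * lr_mult(0.1)(x)
print(y.numpy())                    # 6.0, the forward pass is the identity
print(tape.gradient(y, x).numpy())  # ~0.3 instead of 3.0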
| 17,935
|
def get_png_string(mask_array):
"""Builds PNG string from mask array.
Args:
mask_array (HxW): Mask array to generate PNG string from.
Returns: String of mask encoded as a PNG.
"""
# Convert the new mask back to an image.
image = PIL.Image.fromarray(mask_array.astype('uint8')).convert('RGB')
# Save the new image to a PNG byte string.
byte_buffer = io.BytesIO()
image.save(byte_buffer, format='png')
byte_buffer.seek(0)
return byte_buffer.read()
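# Quick check (added): an 8x8 binary mask encodes to bytes starting with the
# standard PNG signature.
import numpy as np
mask = np.zeros((8, 8), dtype=np.uint8)
mask[2:6, 2:6] = 255
png_bytes = get_png_string(mask)
print(png_bytes[:8] == b"\x89PNG\r\n\x1a\n")  # True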
| 17,936
|
def rotate90ccw(v):
"""Rotate 2d vector 90 degrees counter clockwise
"""
return (-(v[1]), v[0])
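# Example (added): (1, 0) maps to the +y axis, and four rotations are the identity.
v = (1.0, 0.0)
print(rotate90ccw(v))                                         # (-0.0, 1.0)
print(rotate90ccw(rotate90ccw(rotate90ccw(rotate90ccw(v)))))  # (1.0, 0.0)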
| 17,937
|
def GetListOfCellTestPointsNearestListOfPointsV5(inInputFilter, pointList):
"""for each point in the list, find the cell test point (e.g. center of
cell bounding box) which is nearest the test point. Use MPI to work
in parallel"""
thisProcessNearestCellPointList, thisProcDistSqrdList = \
GetCellsClosestToPointsOnThisProcessFromParaViewFilterV5(inInputFilter, pointList)
nearestCellList, distanceList = UseMpiToGetGlobalCellPointsClosestV5(
inInputFilter, thisProcessNearestCellPointList, thisProcDistSqrdList)
return nearestCellList
| 17,938
|
def main(to_dir, from_dir):
"""Copy CWL files."""
num = copy_cwl_files(from_dir=from_dir, to_dir=to_dir)
if num > 0:
click.echo('Copied {} CWL files to "{}".'.format(num, to_dir))
else:
msg = 'No CWL files found in "{}". Copied 0 files'.format(from_dir)
click.echo(msg)
| 17,939
|
def _BuildBaseMTTCmd(args, host):
"""Build base MTT cmd."""
remote_mtt_binary = _REMOTE_MTT_BINARY_FORMAT % host.context.user
remote_cmd = [remote_mtt_binary]
if args.very_verbose:
remote_cmd += ['-vv']
elif args.verbose:
remote_cmd += ['-v']
# We copy the mtt binary inside mtt_lab to remote host,
  # there is no need to update the mtt binary on the remote host.
remote_cmd += ['--no_check_update']
return remote_cmd
| 17,940
|
def get_service(credentials=get_credentials()):
"""Gets GMail service, given credentials"""
return apiclient.discovery.build("gmail", "v1", credentials=credentials)
| 17,941
|
def stageTweets(df_tweets, df_update):
"""This function appends downloaded tweets to the staging table."""
try:
engine = start_engine()
df_tweets.to_sql(name='tweetsstaging', con=engine, schema='tweets_db', if_exists='append', index=False)
df_update.to_sql(name='updates', con=engine, schema='tweets_db', if_exists = 'append', index=False)
except:
print('Failed to upload tweets to database.')
# pass
# finally:
# print('Exit stageTweets')
| 17,942
|
def test_write_config():
"""
Unit test of write_config.
"""
write_config(**config_params)
| 17,943
|
def assert_almost_equal(
actual: numpy.ndarray,
desired: numpy.ndarray,
decimal: int,
err_msg: Literal["zolotarev"],
):
"""
usage.scipy: 1
"""
...
| 17,944
|
def edb_client_server_info(edb: ElectrolyteDB) -> dict:
"""
Perform an operation that ensures that the `edb` fixture has a client that:
- Is able to connect to the server (non-mock), or
- Is a "convincing fake" (mock), so that test functions using `edb` can expect a realistic behavior
Additionally, if this fixture is dispatched before "real" users (here this is done by using a `test__` prefix with two underscores),
it avoids any first-use inconsistencies, such as e.g. the time spent to wait for a connection
being counted as part of the duration of the first test for which the `edb` fixture is instantiated.
"""
return edb._client.server_info()
| 17,945
|
def _localoffload(offline, docs, errordocs=None, debug=False):
"""
Setup the offloading of data into pickles.
Prepares data to be pickled and generates a message on how to correctly modify target data, should a misconfiguration have occurred.
Keyword arguments:
offline -- dictionary of offline ES clusters
docs -- ES documents to be sent to 'data index' for each cluster
errordocs -- ES documents to be sent to 'error index'
"""
basedir = './cfg/tmp/'
checkmakedir(basedir)
datadir = basedir + 'data/'
checkmakedir(datadir)
pickle = []
err_pickle = []
if docs:
docid = str(uuid())
_dumplist(docs, datadir + docid)
pickle.append(docid)
if errordocs:
errid = str(uuid())
_dumplist(errordocs, datadir + errid)
err_pickle.append(errid)
for cluster in offline:
# makes a json for each
clusterfile = basedir + cluster.pop('name_ts', 0) + '.json'
cluster['pickle'] = pickle
cluster['err_pickle'] = err_pickle
if os.path.exists(clusterfile):
f = open(clusterfile, mode='r+')
old = json.load(f)
cluster['pickle'] += old['pickle']
cluster['err_pickle'] = old['err_pickle']
f.seek(0)
else:
f = open(clusterfile, mode='w')
        cluster['instructions'] = "Keep the name the same, but correct any inaccurate information about the cluster if needed. Ignore the pickle fields, as they point to the data that will be sent. Do NOT touch the pickle or err_pickle fields."
cluster['path_to_data'] = datadir
json.dump(cluster, f, indent=4, sort_keys=True)
f.close()
| 17,946
|
def test_get_pylintrc_path(pylintrc_files, mocker):
"""Test that get_pylintrc_path finds the expected one in the hiearchy."""
search_paths, expected_path, __ = pylintrc_files
mocker.patch("pylint.config.os.path.expanduser",
return_value=search_paths[HOME_DIR])
actual_path = get_pylintrc_path(
search_paths=list(search_paths.values()),
home_path=search_paths[HOME_DIR],
)
assert actual_path == expected_path
| 17,947
|
def convert_to_int_list(dataframe: pd.Series) -> "list[list[int]]":
"""
Takes a dataframe with a string representation of a list of ints
and converts that into a list of lists of ints
"""
result_list = []
for row in dataframe:
result_list.append([int(x) for x in row[1:-1].split(", ")])
return result_list
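# Example (added): each row is expected to look like "[1, 2, 3]" (comma plus space separated).
import pandas as pd
s = pd.Series(["[1, 2, 3]", "[4, 5]"])
print(convert_to_int_list(s))  # [[1, 2, 3], [4, 5]]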
| 17,948
|
def test_data_no_missing_envvar_data():
"""Ensure the ENVVAR_DATA covers all entries"""
entry_names = [entry.name for entry in NavigatorConfiguration.entries]
data_names = [entry[0] for entry in ENVVAR_DATA]
assert entry_names == data_names
| 17,949
|
def get_prism_daily_single(variable,
date,
return_path=False,
**kwargs):
"""Download data for a single day
Parameters
----------
variable : str
Either tmean, tmax, tmin, or ppt
date : str
The date to download in the format YYYY-MM-DD
dest_path : str, optional
Folder to download to, defaults to the current working directory.
return_path : bool, optional
Returns the full path to the final bil file, default False
keep_zip : bool, optional
Keeps the originally downloaded zip file, default True
"""
daily = PrismDaily(variable=variable,
min_date=date,
max_date=date,
**kwargs)
daily.download()
daily.close()
if return_path:
return daily._local_bil_filename(daily.dates[0])
| 17,950
|
def handle_filestreams(list_of_contents, list_of_names):
"""
Args:
        list_of_contents: list of base64 data-URI content strings (only a single file is supported).
        list_of_names: list of the corresponding file names.
"""
if len(list_of_contents) == 1:
content = list_of_contents[0]
filename = list_of_names[0]
else:
raise Exception("Multiple files not supported") # TODO
content_type, content_string = content.split(',')
decoded = base64.b64decode(content_string)
if 'csv' in filename: # Assume that the user uploaded a CSV file
file = io.StringIO(decoded.decode('utf-8'))
elif 'xls' in filename: # Assume that the user uploaded an excel file
file = io.BytesIO(decoded)
elif 'tsv' in filename: # Assume that the user uploaded an tsv file
file = io.StringIO(decoded.decode('utf-8'))
elif 'txt' in filename: # Assume that the user uploaded either a tsv or csv file
file = io.StringIO(decoded.decode('utf-8'))
else:
raise IOError("Unable to read table file.")
return file
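# Usage sketch (added): upload contents arrive as base64 data-URI strings, e.g.
# from a Dash dcc.Upload component; the values below are purely illustrative.
import base64
encoded = base64.b64encode(b"a,b\n1,2\n").decode()
content = "data:text/csv;base64," + encoded
fh = handle_filestreams([content], ["table.csv"])
print(fh.read())  # "a,b\n1,2\n"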
| 17,951
|
def grid_count(grid, shape=None, interpolation='linear', bound='zero',
extrapolate=False):
"""Splatting weights with respect to a deformation field (pull adjoint).
Notes
-----
{interpolation}
{bound}
Parameters
----------
grid : ([batch], *inshape, dim) tensor
Transformation field.
shape : sequence[int], default=inshape
Output shape
interpolation : int or sequence[int], default=1
Interpolation order.
bound : BoundType, or sequence[BoundType], default='zero'
Boundary conditions.
    extrapolate : bool or int, default=False
Extrapolate out-of-bound data.
Returns
-------
output : ([batch], 1, *shape) tensor
Spatting weights.
"""
dim = grid.shape[-1]
grid_no_batch = grid.dim() == dim + 1
if grid_no_batch:
grid = grid[None]
if shape is None:
shape = tuple(grid.shape[1:-1])
out = GridCount.apply(grid, shape, interpolation, bound, extrapolate)
if grid_no_batch:
out = out[0]
return out
| 17,952
|
def check_spf_record(lookup, spf_record):
"""
Check that all parts of lookup appear somewhere in the given SPF record, resolving
include: directives recursively
"""
not_found_lookup_parts = set(lookup.split(" "))
_check_spf_record(not_found_lookup_parts, spf_record, 0)
return not not_found_lookup_parts
| 17,953
|
def local_tz2() -> pytz.BaseTzInfo:
"""
Second timezone for the second user
"""
return pytz.timezone("America/Los_Angeles")
| 17,954
|
def get_chemistry_info(sam_header, input_filenames, fail_on_missing=False):
"""Get chemistry triple information for movies referenced in a SAM
header.
Args:
sam_header: a pysam.Samfile.header, which is a multi-level dictionary.
Movie names are read from RG tags in this header.
input_filenames: a list of bas, bax, or fofn filenames.
fail_on_missing: if True, raise an exception if the chemistry
information for a movie in the header cannot be
found. If False, just log a warning.
Returns:
a list of strings that can be written as DS tags to RG entries in the
header of a new SAM or BAM file. For example,
['BINDINGKIT:xxxx;SEQUENCINGKIT:yyyy;SOFTWAREVERSION:2.0']
Raises:
ChemistryLoadingException if chemistry information cannot be found
for a movie in the header and fail_on_missing is True.
"""
# First get the full list of ba[sx] files, reading through any fofn or xml
# inputs
bas_filenames = []
for filename in input_filenames:
bas_filenames.extend(FofnIO.enumeratePulseFiles(filename))
# Then get the chemistry triple for each movie in the list of bas files
triple_dict = {}
for bas_filename in bas_filenames:
bas_file = BasH5IO.BasH5Reader(bas_filename)
movie_name = bas_file.movieName
chem_triple = bas_file.chemistryBarcodeTriple
triple_dict[movie_name] = chem_triple
# Finally, find the movie names that appear in the header and create CO
# lines with the chemistry triple
if 'RG' not in sam_header:
return []
rgds_entries = {}
for rg_entry in sam_header['RG']:
rg_id = rg_entry['ID']
rg_movie_name = rg_entry[MOVIENAME_TAG]
try:
rg_chem_triple = triple_dict[rg_movie_name]
rgds_entries[rg_id] = rg_chem_triple
except KeyError:
err_msg = ("Cannot find chemistry information for movie {m}."
.format(m=rg_movie_name))
if fail_on_missing:
raise ChemistryLoadingException(err_msg)
else:
log.warning(err_msg)
rgds_strings = format_rgds_entries(rgds_entries)
return rgds_strings
| 17,955
|
def json_catalog(request, domain='djangojs', packages=None):
"""
Return the selected language catalog as a JSON object.
Receives the same parameters as javascript_catalog(), but returns
a response with a JSON object of the following format:
{
"catalog": {
# Translations catalog
},
"formats": {
# Language formats for date, time, etc.
},
"plural": '...' # Expression for plural forms, or null.
}
"""
warnings.warn(
"The json_catalog() view is deprecated in favor of the "
"JSONCatalog view.", RemovedInDjango20Warning, stacklevel=2
)
locale = _get_locale(request)
packages = _parse_packages(packages)
catalog, plural = get_javascript_catalog(locale, domain, packages)
data = {
'catalog': catalog,
'formats': get_formats(),
'plural': plural,
}
return http.JsonResponse(data)
| 17,956
|
def init_scp_large_resource_from_kwargs(resource, uri, archive, scp_host, user_dict):
"""
Method initializes scp resource from resource informations and user
credentials.
Parameters
----------
resource : str
resource name, same as LargeResource.RESOURCE_NAME
uri : str
resource uri, same as LargeResource.URI
archive : str
archive type, see LargeResource.ARCHIVE
scp_host : str
        remote host address, see SCPLargeResource.SCP_HOST_KEY
user_dict : dict(str, str)
user dictionary that may contain scp_user that defines username,
scp_private_key that defines path to private key, scp_pass_key that defines user
password
"""
if SCPLargeResource.SCP_USER_KEY not in user_dict:
# if your username is same as the one on the server
scp_user = getpass.getuser()
else:
scp_user = user_dict[SCPLargeResource.SCP_USER_KEY]
scp_private_key = user_dict.get(SCPLargeResource.SCP_PRIVATE_KEY, None)
scp_pass_key = user_dict.get(SCPLargeResource.SCP_PASS_KEY, None)
config = {
LargeResource.URI: uri,
LargeResource.RESOURCE_NAME: resource,
LargeResource.ARCHIVE: archive,
SCPLargeResource.SCP_HOST_KEY: scp_host,
SCPLargeResource.SCP_USER_KEY: scp_user,
SCPLargeResource.SCP_PRIVATE_KEY: scp_private_key,
SCPLargeResource.SCP_PASS_KEY: scp_pass_key,
}
SCPLargeResource(**config)
| 17,957
|
def read_global_config() -> Dict[Text, Any]:
"""Read global Rasa configuration."""
# noinspection PyBroadException
try:
return rasa.utils.io.read_yaml_file(GLOBAL_USER_CONFIG_PATH)
except Exception:
# if things go south we pretend there is no config
return {}
| 17,958
|
def test_print_empty_tree():
"""
    Check that a tree containing no stored values is printed as an empty placeholder.
"""
tree = NamedTree()
assert str(tree) == '<NamedTree empty object>'
| 17,959
|
def test_pid_uparrow1():
"""
"""
d = bivariates['boom']
pid = PID_uparrow(d)
assert pid[((0,), (1,))] == pytest.approx(0.666666666666666667, abs=1e-4)
assert pid[((0,),)] == pytest.approx(0.0, abs=1e-4)
assert pid[((1,),)] == pytest.approx(0.0, abs=1e-4)
assert pid[((0, 1),)] == pytest.approx(0.45914791702724411, abs=1e-4)
| 17,960
|
def get_uuid_hex(digest_size: int = 10) -> str:
"""Generate hex of uuid4 with the defined size."""
return blake2b(uuid4().bytes, digest_size=digest_size).hexdigest()
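# Example (added): blake2b's hexdigest is two hex characters per digest byte.
print(len(get_uuid_hex()))                # 20
print(len(get_uuid_hex(digest_size=16)))  # 32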
| 17,961
|
def crc16(data):
"""CRC-16-CCITT computation with LSB-first and inversion."""
crc = 0xffff
for byte in data:
crc ^= byte
for bits in range(8):
if crc & 1:
crc = (crc >> 1) ^ 0x8408
else:
crc >>= 1
return crc ^ 0xffff
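# Check (added): these parameters (reflected poly 0x1021, i.e. 0x8408, init 0xFFFF,
# final XOR 0xFFFF) correspond to CRC-16/X-25, whose published check value for
# b"123456789" is 0x906E.
print(hex(crc16(b"123456789")))  # 0x906e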
| 17,962
|
def _unlink_f(filename):
""" Call os.unlink, but don't die if the file isn't there. This is the main
difference between "rm -f" and plain "rm". """
try:
os.unlink(filename)
return True
    except OSError as e:
if e.errno not in (errno.ENOENT, errno.EPERM, errno.EACCES,errno.EROFS):
raise
return False
| 17,963
|
def get_member_expr_fullname(expr: MemberExpr) -> str:
"""Return the qualified name representation of a member expression.
Return a string of form foo.bar, foo.bar.baz, or similar, or None if the
argument cannot be represented in this form.
"""
if isinstance(expr.expr, NameExpr):
initial = expr.expr.name
elif isinstance(expr.expr, MemberExpr):
initial = get_member_expr_fullname(expr.expr)
else:
return None
return '{}.{}'.format(initial, expr.name)
| 17,964
|
def _read_dino_waterlvl_metadata(f, line):
"""read dino waterlevel metadata
Parameters
----------
f : text wrapper
line : str
line with meta dictionary keys
Returns
-------
meta : dict
dictionary with metadata
"""
meta_keys = line.strip().split(",")
meta_values = f.readline().strip().split(",")
meta = {}
for key, value in zip(meta_keys, meta_values):
key = key.strip()
if key in ["X-coordinaat", "Y-coordinaat"]:
if key == "X-coordinaat":
meta["x"] = float(value)
elif key == "Y-coordinaat":
meta["y"] = float(value)
elif key == "Locatie":
meta["locatie"] = value
meta["name"] = value
return meta
| 17,965
|
def enumerated_endec(v, x):
"""Pass the value to Enumerated, construct a tag from the hex string,
and compare results of encode and decoding each other."""
if _debug: enumerated_endec._debug("enumerated_endec %r %r", v, x)
tag = enumerated_tag(x)
if _debug: enumerated_endec._debug(" - tag: %r, %r", tag, tag.tagData)
obj = Enumerated(v)
if _debug: enumerated_endec._debug(" - obj: %r, %r", obj, obj.value)
assert enumerated_encode(obj) == tag
assert enumerated_decode(tag) == obj
| 17,966
|
def walk_files( source, paths ):
"""
    Accept a list of files and/or directories and yield
    each file name, recursively dropping into each directory found
    looking for more files.
    """
paths = [ (p,None) for p in paths ]
while len(paths):
path,base = paths.pop(0)
if not base:
base,_ = source.split(path)
if source.isdir(path):
for item in source.listdir(path):
abspath = source.join(path,item)
#relpath = source.relpath(abspath,base)
paths.append( (abspath,base) )
else:
yield path,base
| 17,967
|
def async_get_url(
hass: HomeAssistant,
*,
require_ssl: bool = False,
require_standard_port: bool = False,
allow_internal: bool = True,
allow_external: bool = True,
allow_cloud: bool = True,
allow_ip: bool = True,
prefer_external: bool = False,
prefer_cloud: bool = False,
) -> str:
"""Get a URL to this instance."""
order = [TYPE_URL_INTERNAL, TYPE_URL_EXTERNAL]
if prefer_external:
order.reverse()
    # Try finding a URL in the order specified
for url_type in order:
if allow_internal and url_type == TYPE_URL_INTERNAL:
try:
return _async_get_internal_url(
hass,
allow_ip=allow_ip,
require_ssl=require_ssl,
require_standard_port=require_standard_port,
)
except NoURLAvailableError:
pass
if allow_external and url_type == TYPE_URL_EXTERNAL:
try:
return _async_get_external_url(
hass,
allow_cloud=allow_cloud,
allow_ip=allow_ip,
prefer_cloud=prefer_cloud,
require_ssl=require_ssl,
require_standard_port=require_standard_port,
)
except NoURLAvailableError:
pass
# We have to be honest now, we have no viable option available
raise NoURLAvailableError
| 17,968
|
def validate_local_model(models_path: str, model_id: str) -> bool:
"""Validate local model by id.
Args:
models_path: Path to the models folder.
model_id: Model id.
"""
model_correct = True
model_path = os.path.join(models_path, model_id)
if not os.path.exists(model_path):
model_correct = False
else:
try:
_ = get_model_type_name(models_path, model_id)
except FileNotFoundError:
model_correct = False
return model_correct
| 17,969
|
def strip(prefix: Seq, seq: Seq, partial=False, cmp=NOT_GIVEN) -> Iter:
"""
If seq starts with the same elements as in prefix, remove them from
result.
Args:
prefix:
Prefix sequence to possibly removed from seq.
seq:
Sequence of input elements.
partial:
If True, remove partial matches with prefix.
        cmp:
            If given, used as a comparison function between elements of prefix
            and the sequence; elements are stripped while cmp(x, y) returns True.
Examples:
>>> ''.join(strip("ab", "abcd"))
'cd'
>>> strip(sk.repeat(3), range(6), partial=True, cmp=(X > Y))
sk.iter([3, 4, 5])
"""
if partial:
cmp = NOT_GIVEN.resolve(cmp, op.eq)
return Iter(_strip_partial(iter(prefix), iter(seq), cmp=cmp))
elif cmp is NOT_GIVEN:
return Iter(_strip_full(tuple(prefix), iter(seq)))
else:
return Iter(_strip_full_cmp(tuple(prefix), iter(seq), cmp))
| 17,970
|
def test_table_model():
"""Get a table model instance for each test function."""
# Create the device under test (dut) and connect to the database.
dut = RAMSTKOpStressTable()
yield dut
# Unsubscribe from pypubsub topics.
pub.unsubscribe(dut.do_get_attributes, "request_get_opstress_attributes")
pub.unsubscribe(dut.do_set_attributes, "request_set_opstress_attributes")
pub.unsubscribe(dut.do_set_attributes, "wvw_editing_opstress")
pub.unsubscribe(dut.do_update, "request_update_opstress")
pub.unsubscribe(dut.do_select_all, "selected_revision")
pub.unsubscribe(dut.do_get_tree, "request_get_opstress_tree")
pub.unsubscribe(dut.do_delete, "request_delete_opstress")
pub.unsubscribe(dut.do_insert, "request_insert_opstress")
# Delete the device under test.
del dut
| 17,971
|
def _read_unicode_table(instream, separator, startseq, encoding):
"""Read the Unicode table in a PSF2 file."""
raw_table = instream.read()
entries = raw_table.split(separator)[:-1]
table = []
for point, entry in enumerate(entries):
split = entry.split(startseq)
code_points = [_seq.decode(encoding) for _seq in split]
# first entry is separate code points, following entries (if any) are sequences
table.append([_c for _c in code_points[0]] + code_points[1:])
return table
| 17,972
|
def runner():
"""Provides a command-line test runner."""
return CliRunner()
| 17,973
|
def matsubara_exponents(coup_strength, bath_broad, bath_freq, beta, N_exp):
"""
Calculates the exponentials for the correlation function for matsubara
terms. (t>=0)
Parameters
----------
coup_strength: float
The coupling strength parameter.
bath_broad: float
A parameter characterizing the FWHM of the spectral density, i.e.,
the cavity broadening.
bath_freq: float
The cavity frequency.
beta: float
The inverse temperature.
N_exp: int
The number of exponents to consider in the sum.
Returns
-------
ck: ndarray
A 1D array with the prefactors for the exponentials
vk: ndarray
A 1D array with the frequencies
"""
lam = coup_strength
gamma = bath_broad
w0 = bath_freq
N_exp = N_exp
omega = np.sqrt(w0 ** 2 - (gamma / 2) ** 2)
a = omega + 1j * gamma / 2.0
aa = np.conjugate(a)
coeff = (-4 * gamma * lam ** 2 / np.pi) * ((np.pi / beta) ** 2)
vk = np.array([-2 * np.pi * n / (beta) for n in range(1, N_exp)])
ck = np.array(
[
n
/ (
(a ** 2 + (2 * np.pi * n / beta) ** 2)
* (aa ** 2 + (2 * np.pi * n / beta) ** 2)
)
for n in range(1, N_exp)
]
)
return coeff * ck, vk
| 17,974
|
def reverse(array):
"""Return `array` in reverse order.
Args:
array (list|string): Object to process.
Returns:
list|string: Reverse of object.
Example:
>>> reverse([1, 2, 3, 4])
[4, 3, 2, 1]
.. versionadded:: 2.2.0
"""
# NOTE: Using this method to reverse object since it works for both lists
# and strings.
return array[::-1]
| 17,975
|
def compute_segregation_profile(gdf,
groups=None,
distances=None,
network=None,
decay='linear',
function='triangular',
precompute=True):
"""Compute multiscalar segregation profile.
This function calculates several Spatial Information Theory indices with
increasing distance parameters.
Parameters
----------
gdf : geopandas.GeoDataFrame
geodataframe with rows as observations and columns as population
variables. Note that if using a network distance, the coordinate
        system for this gdf should be 4326. If using euclidean distance,
this must be projected into planar coordinates like state plane or UTM.
groups : list
list of variables .
distances : list
list of floats representing bandwidth distances that define a local
environment.
network : pandana.Network (optional)
A pandana.Network likely created with
`segregation.network.get_osm_network`.
decay : str (optional)
decay type to be used in pandana accessibility calculation (the
default is 'linear').
function: 'str' (optional)
which weighting function should be passed to pysal.lib.weights.Kernel
must be one of: 'triangular','uniform','quadratic','quartic','gaussian'
precompute: bool
Whether the pandana.Network instance should precompute the range
queries.This is true by default, but if you plan to calculate several
segregation profiles using the same network, then you can set this
parameter to `False` to avoid precomputing repeatedly inside the
function
Returns
-------
dict
dictionary with distances as keys and SIT statistics as values
Notes
-----
Based on Sean F. Reardon, Stephen A. Matthews, David O’Sullivan, Barrett A. Lee, Glenn Firebaugh, Chad R. Farrell, & Kendra Bischoff. (2008). The Geographic Scale of Metropolitan Racial Segregation. Demography, 45(3), 489–514. https://doi.org/10.1353/dem.0.0019.
Reference: :cite:`Reardon2008`.
"""
gdf = gdf.copy()
gdf[groups] = gdf[groups].astype(float)
indices = {}
indices[0] = MultiInformationTheory(gdf, groups).statistic
if network:
if not gdf.crs['init'] == 'epsg:4326':
gdf = gdf.to_crs(epsg=4326)
groups2 = ['acc_' + group for group in groups]
if precompute:
maxdist = max(distances)
network.precompute(maxdist)
for distance in distances:
            distance = float(distance)
access = calc_access(gdf,
network,
decay=decay,
variables=groups,
distance=distance,
precompute=False)
sit = MultiInformationTheory(access, groups2)
indices[distance] = sit.statistic
else:
for distance in distances:
w = Kernel.from_dataframe(gdf,
bandwidth=distance,
function=function)
sit = SpatialInformationTheory(gdf, groups, w=w)
indices[distance] = sit.statistic
return indices
| 17,976
|
def calculate_lbp_pixel(image, x, y):
"""Perform the LBP operator on a given pixel.
Order and format:
         32 |  64 | 128
        ----+-----+-----
         16 |   0 |   1
        ----+-----+-----
          8 |   4 |   2
:param image: Input image
:type: numpy.ndarray
:param x: Column pixel of interest
:type: int
    :param y: Row pixel of interest
:type: int
:return: LBP value
:rtype: numpy.ndarray
"""
center = image[x][y]
binary_code = np.empty(8)
binary_code[0] = threshold_pixel(image, center, x, y + 1) # Right
binary_code[1] = threshold_pixel(image, center, x + 1, y + 1) # Bottom Right
binary_code[2] = threshold_pixel(image, center, x + 1, y) # Bottom
binary_code[3] = threshold_pixel(image, center, x + 1, y - 1) # Bottom Left
binary_code[4] = threshold_pixel(image, center, x, y - 1) # Left
binary_code[5] = threshold_pixel(image, center, x - 1, y - 1) # Top Left
binary_code[6] = threshold_pixel(image, center, x - 1, y) # Top
binary_code[7] = threshold_pixel(image, center, x - 1, y + 1) # Top Right
weights = np.array([1, 2, 4, 8, 16, 32, 64, 128])
lbp_value = np.dot(binary_code, weights).astype(np.uint8)
return lbp_value
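# The snippet depends on an external threshold_pixel helper. A plausible minimal
# version (an assumption, not the original implementation) returns 1 when the
# neighbour is at least as bright as the centre, treating out-of-range
# neighbours as 0:
def threshold_pixel(image, center, x, y):
    # Out-of-range neighbours (including negative indices) count as 0.
    if x < 0 or y < 0 or x >= image.shape[0] or y >= image.shape[1]:
        return 0
    return 1 if image[x][y] >= center else 0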
| 17,977
|
def test_gradient_sparse_var():
"""
https://www.tensorflow.org/beta/guide/effective_tf2
"""
target = tf.constant([[1., 0., 0.], [1., 0., 0.]])
v = tf.Variable([0.5, 0.5])
x = tx.Lambda([],
fn=lambda _: tf.SparseTensor([[0, 0], [1, 1]], v, [2, 3]),
n_units=3,
var_list=v)
assert isinstance(x(), tf.SparseTensor)
assert len(x.trainable_variables) == 1
y = tx.Linear(x, n_units=3)
# a graph without inputs needs to have missing inputs declared
# otherwise it will try to add the inputs detected to inputs
graph = tx.Graph.build(inputs=None,
outputs=y)
fn = graph.as_function()
@tf.function
def loss(labels):
return tf.reduce_mean(tf.pow(labels - fn(), 2))
with tf.GradientTape() as tape:
loss_val = loss(target)
assert tx.same_shape(tape.gradient(loss_val, v), v.value())
| 17,978
|
def get_setup_and_moves(sgf_game, board=None):
"""Return the initial setup and the following moves from an Sgf_game.
Returns a pair (board, plays)
board -- boards.Board
plays -- list of pairs (colour, move)
moves are (row, col), or None for a pass.
The board represents the position described by AB and/or AW properties
in the root node.
The moves are from the game's 'leftmost' variation.
Raises ValueError if this position isn't legal.
Raises ValueError if there are any AB/AW/AE properties after the root
node.
Doesn't check whether the moves are legal.
If the optional 'board' parameter is provided, it must be an empty board of
the right size; the same object will be returned.
"""
size = sgf_game.get_size()
if board is None:
board = boards.Board(size)
else:
if board.side != size:
raise ValueError("wrong board size, must be %d" % size)
if not board.is_empty():
raise ValueError("board not empty")
root = sgf_game.get_root()
nodes = sgf_game.main_sequence_iter()
ab, aw, ae = root.get_setup_stones()
if ab or aw:
is_legal = board.apply_setup(ab, aw, ae)
if not is_legal:
raise ValueError("setup position not legal")
colour, raw = root.get_raw_move()
if colour is not None:
raise ValueError("mixed setup and moves in root node")
nodes.next()
moves = []
for node in nodes:
if node.has_setup_stones():
raise ValueError("setup properties after the root node")
colour, raw = node.get_raw_move()
if colour is not None:
moves.append((colour, sgf_properties.interpret_go_point(raw, size)))
return board, moves
| 17,979
|
def add_timeseries(
platform: Platform, server: str, dataset: str, constraints
): # pylint: disable=too-many-locals
"""Add datatypes for a new dataset to a platform.
See instructions in Readme.md"""
e = ERDDAP(server)
info = pd.read_csv(e.get_info_url(dataset, response="csv"))
info_vars = info[info["Row Type"] == "variable"]
print(f"Opened dataset from ERDDAP and found variables: {''.join(info_vars)}")
variables = [
var
for var in info_vars["Variable Name"]
if var
not in [
"time",
"station",
"mooring_site_desc",
"longitude",
"latitude",
"depth",
]
and "_qc" not in var
]
# extract times
start_time = convert_time(
info[info["Attribute Name"] == "time_coverage_start"]["Value"].to_numpy()[0]
)
end_time = convert_time(
info[info["Attribute Name"] == "time_coverage_end"]["Value"].to_numpy()[0]
)
yesterday = datetime.utcnow() - timedelta(hours=24)
if end_time > yesterday:
end_time = None
# get depths
e.dataset_id = dataset
e.response = "nc"
e.variables = variables
e.protocol = "tabledap"
e.constraints = constraints.copy()
e.constraints["time>="] = yesterday
try:
ds = e.to_xarray()
except HTTPError:
logger.error(
"Either the dataset was invalid, the server was down, or the dataset has not been updated in the last day"
)
return
try:
buffer = BufferType.objects.get(name=ds.buffer_type)
except BufferType.DoesNotExist:
logger.info(f"Searched for buffer type does not exist: {ds.buffer_type}")
return
except AttributeError:
logger.info(f"{dataset} does not have a defined buffer_type")
buffer = False
erddap_server = ErddapServer.objects.get(base_url=server)
erddap_dataset, _ = ErddapDataset.objects.get_or_create(
name=dataset, server=erddap_server
)
for var in ds.variables:
if var not in [
"time",
"depth",
"time_modified",
"time_created",
"water_depth",
"longitude",
"latitude",
"mooring_side_desc",
]:
data_array = ds[var]
try:
try:
data_type = DataType.objects.get(
standard_name=data_array.standard_name
)
except AttributeError:
try:
data_type = DataType.objects.get(long_name=data_array.long_name)
except AttributeError:
data_type = DataType.objects.filter(
short_name=data_array.short_name
).first()
except DataType.DoesNotExist:
if all(
attr in dir(data_array)
for attr in ["standard_name", "long_name", "units"]
):
data_type = DataType(
standard_name=data_array.standard_name,
long_name=data_array.long_name,
# short_name=data_array.short_name,
units=data_array.units,
)
try:
data_type.short_name = data_array.short_name
except AttributeError:
logger.info(f"{var} does not have a short name")
data_type.save()
else:
logger.warning(f"Unable to load or create datatype for {var}")
finally:
try:
if data_type:
time_series = TimeSeries(
platform=platform,
variable=var,
data_type=data_type,
start_time=start_time,
end_time=end_time,
constraints=constraints,
dataset=erddap_dataset,
)
if buffer:
time_series.buffer_type = buffer
time_series.save()
except UnboundLocalError:
logger.warning(f"No datatype for {var}")
data_type = None
| 17,980
|
def help_text_metadata(label=None, description=None, example=None):
"""
Standard interface to help specify the required metadata fields for helptext to
work correctly for a model.
:param str label: Alternative name for the model.
:param str description: Long description of the model.
:param example: A concrete example usage of the model.
:return dict: Dictionary of the help text metadata
"""
return {
'label': label,
'description': description,
'example': example
}
| 17,981
|
def conv1d_stack(sequences, filters, activations, name=None):
"""Convolve a jagged batch of sequences with a stack of filters.
This is equivalent to running several `conv1d`s on each `sequences[i]` and
reassembling the results as a `Jagged`. The padding is always 'SAME'.
Args:
sequences: 4-D `Jagged` tensor.
filters: List of 3-D filters (one filter per layer). Must have odd width.
activations: List of activation functions to apply after each layer, or
None to indicate no activation.
name: Optional name for this operation.
Returns:
`Jagged` convolution results.
Raises:
TypeError: If sequences is not Jagged.
ValueError: If the filters or activations are invalid.
"""
if not isinstance(sequences, Jagged):
raise TypeError('Expected Jagged sequences, got %s' % type(Jagged))
if len(filters) != len(activations):
raise ValueError('Got %d filters != %d activations' %
(len(filters), len(activations)))
if not filters:
return sequences
with tf.name_scope(name, 'jagged_conv1d_stack') as name:
# Compute maximum filter width
filters = [tf.convert_to_tensor(f, name='filter') for f in filters]
width = 0
for filt in filters:
shape = filt.get_shape()
if shape.ndims != 3 or shape[0] is None or shape[0].value % 2 == 0:
raise ValueError('Expected known odd filter width, got shape %s' %
shape)
width = max(width, shape[0].value)
between = width // 2 # Rounds down since width is odd
# Add 'between' zeros between each sequence
flat = sequences.flat
sizes = flatten(sequences.sizes)
size = tf.size(sizes)
flat_shape = tf.shape(flat)
flat_len = flat_shape[0]
indices = (tf.range(flat_len) + repeats(between * tf.range(size), sizes))
padded_len = between * tf.nn.relu(size - 1) + flat_len
flat = tf.unsorted_segment_sum(flat, indices, padded_len)[None]
# Make a mask to reset between portions to zero
if len(filters) > 1:
mask = tf.unsorted_segment_sum(
tf.ones(flat_shape[:1], dtype=flat.dtype), indices, padded_len)
mask = mask[:, None]
# Do each convolution
for i, (filt, activation) in enumerate(zip(filters, activations)):
if i:
flat *= mask
flat = tf.nn.conv1d(flat, filt, stride=1, padding='SAME')
if activation is not None:
flat = activation(flat)
# Extract results and repackage as a Jagged
flat = tf.squeeze(flat, [0])
flat = tf.gather(flat, indices, name=name)
return Jagged(sequences.sizes, flat)
| 17,982
|
def dh_mnthOfYear(value, pattern):
"""
Helper for decoding a single integer value.
The value should be >=1000, no conversion,
no rounding (used in month of the year)
"""
return dh_noConv(value, pattern, _formatLimit_MonthOfYear[0])
| 17,983
|
def build_team(datafile, salary_col, position_col, prediction_col, cap=60000, legal_teams=None):
"""
Construct teams from a set of prediction data
:param str datafile: saved prediction data (pickle file)
:param str salary_col: name of salary column
:param str position_col: name of position column
:param str prediction_col: name of prediction column to use
:param list[str] legal_teams: an optional list of legal NBA teams for the game
:return pd.DataFrame: prediction data for chosen team
"""
player_data = pd.read_pickle(datafile)
# Load real names for later use
player_data['name'] = player_data['bref_id'].apply(id2name)
if legal_teams:
player_data = player_data[player_data['Tm'].isin(legal_teams)]
# Ditch any undefined rows for salary / position / prediction as they will break the solver
player_data.dropna(subset=[salary_col, position_col, prediction_col], inplace=True)
# Cast player cost column to integers; this will also break the solver! :)
player_data[salary_col] = player_data[salary_col].astype(int)
# an optimization: speed up computation by only keeping the best-projected two players at each (position, salary).
# this should mean we only keep players we could potentially use
# it is hypothetically true that this could burn us if we get hit by the "too many players from team X" consideration
#grouped_player_data = player_data.groupby([salary_col, position_col], sort=False)
# this actually figures out how many players we need at the given position and keeps only that many at each salary level
#candidates = grouped_player_data.apply(lambda group: group.sort(prediction_col).tail(positions[group[position_col].iloc[0]]))
#
    # A more aggressive (and sketchier) optimization: remove all players who are strictly dominated
    # (players for whom two others are better and at least as cheap -- or one other, for centers; hard-coded to save time).
    # This could burn us pretty hard if we run into a team constraint in the end.
def dominators(row):
return len(player_data[(player_data['predicted'] > row['predicted'])
& (player_data['salary'] <= row['salary'])
& (player_data['pos'] == row['pos'])])
player_data['dominators'] = player_data.apply(dominators, axis=1)
candidates = player_data[(player_data['dominators'] == 0) |
((player_data['pos'] != 'C') & (player_data['dominators'] <= 1))]
candidates.set_index('bref_id', inplace=True)
while True: # because python doesn't have do... while
best_team = best_vorp(data=candidates,
cost_column=salary_col,
value_column=prediction_col,
type_column=position_col,
required_types=positions,
cap=cap,
debug_print_fn=print)
# Implement an additional constraint -- we can't have more than 4 players from the same team.
# We'll actually be a little stricter and try to restrict it at 3 (see MAX_PLAYERS_PER_TEAM).
teams_of_selection = Counter(candidates.loc[best_team, 'Tm'].values)
        most_common_team, count = teams_of_selection.most_common(1)[0]
if count <= MAX_PLAYERS_PER_TEAM:
return candidates.loc[best_team]
else:
# Nope, this is an illegal team. Try to help us generate a real one by dropping the lowest-valued player
# on the team from the list of possible candidates.
players_on_most_common_team = [c for c in best_team if candidates.loc[c, 'Tm'] == most_common_team]
team_players = candidates.loc[players_on_most_common_team].copy()
team_players['value'] = team_players[prediction_col].divide(team_players[salary_col])
            team_players.sort_values('value', inplace=True)
worst_player = team_players.iloc[0].name
print('Ideal team had %d players from %s. Banning player: %s' % (count, most_common_team, worst_player))
candidates = candidates.drop([worst_player])
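# Hedged usage sketch (not from the original source): 'predictions.pkl' is a
# hypothetical file; the column names mirror the ones hard-coded in dominators() above.
#
#   chosen = build_team('predictions.pkl', salary_col='salary', position_col='pos',
#                       prediction_col='predicted', cap=60000)
#   print(chosen[['name', 'pos', 'salary', 'predicted']])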
| 17,984
|
def serialize_remote_exception(failure_info):
"""Prepares exception data to be sent over rpc.
Failure_info should be a sys.exc_info() tuple.
"""
tb = traceback.format_exception(*failure_info)
failure = failure_info[1]
kwargs = {}
if hasattr(failure, 'kwargs'):
kwargs = failure.kwargs
    # NOTE(matiu): With cells, it's possible to re-raise remote, remote
    # exceptions. Let's turn it back into the original exception type.
cls_name = six.text_type(failure.__class__.__name__)
mod_name = six.text_type(failure.__class__.__module__)
if (cls_name.endswith(_REMOTE_POSTFIX) and
mod_name.endswith(_REMOTE_POSTFIX)):
cls_name = cls_name[:-len(_REMOTE_POSTFIX)]
mod_name = mod_name[:-len(_REMOTE_POSTFIX)]
data = {
'class': cls_name,
'module': mod_name,
'message': six.text_type(failure),
'tb': tb,
'args': failure.args,
'kwargs': kwargs
}
json_data = jsonutils.dumps(data)
return json_data
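# Hedged usage sketch (not from the original source): serializing a locally raised
# exception exactly as the RPC layer would before putting it on the wire.
import sys

def _demo_serialize_remote_exception():
    try:
        raise ValueError('boom')
    except ValueError:
        payload = serialize_remote_exception(sys.exc_info())
    # payload is a JSON string with class, module, message, tb, args and kwargs
    return payload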
| 17,985
|
def ai_turn(board: Board, n: int, player: int) -> None:
"""Process ai's turn."""
print("(AI's turn.)")
non_sym_enemy_pawns = get_non_sym_pawns(board, n, player)
# if AI plays the very first turn, put pawn at center of board
if board == new_board(n):
x = (n-1) // 2
y = x
# otherwise, play symmetrically
    elif non_sym_enemy_pawns:
x = n-1 - non_sym_enemy_pawns[0][0]
y = n-1 - non_sym_enemy_pawns[0][1]
# or play random if no symmetrical move is possible
else:
x, y = select_random_square(board, n, player)
print("Putting a pawn at [" + str(x) + ";" + str(y) + "].")
put_pawn_at(board, player, x, y)
| 17,986
|
def build_eval_graph(features, model):
"""
builds evaluation graph
"""
_params = {}
logger.debug("building evaluation graph: %s.", _params)
with tf.variable_scope("loss"):
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=tf.expand_dims(features["sentiment"], axis=1),
logits=model["logits"])
model = {"loss": loss, "eval_args": _params}
return model
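# Hedged usage sketch (not from the original source): the layouts of `features` and
# `model` are inferred from the function body (a "sentiment" label vector and a
# [batch, 1] "logits" tensor), assuming TF1 placeholders.
#
#   features = {"sentiment": tf.placeholder(tf.float32, [None])}
#   model = {"logits": tf.placeholder(tf.float32, [None, 1])}
#   eval_model = build_eval_graph(features, model)
#   # eval_model["loss"] is a scalar sigmoid cross-entropy tensor.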
| 17,987
|
def in_common(routes):
"""routes is a list of lists, each containing a route to a peer."""
r = []
branch = False
for n in izip_any(*routes): #itertools.izip(*routes):
# strip dead nodes
f = [i for i in n if i != '*']
# ignore all dead nodes
if len(f) == 0:
continue
c = [ (f.count(x), x) for x in f ]
c.sort()
if debug:
pprint(c)
top = c[-1][0]
# majority wins
if top > 2 and top > (len(f) * 0.50):
f = [c[-1][1]]
if len(set(f)) == 1:
r.append(f[0])
else:
# more than one unique node, the tree has branched
branch = True
break
return (branch, r)
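# Hedged usage sketch (not from the original source): illustrates the majority-vote
# behaviour, assuming izip_any() walks the routes hop by hop like zip().
#
#   routes = [['a', 'b', 'c'],
#             ['a', 'b', 'c'],
#             ['a', 'b', 'c'],
#             ['a', 'b', 'd'],
#             ['a', 'b', '*']]   # '*' marks a dead node and is ignored
#   branch, common = in_common(routes)
#   # common == ['a', 'b', 'c'] (3 of 4 live votes agree on 'c'); branch == False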
| 17,988
|
def process_polygon(coordinates):
"""Pass list of co-ordinates to Shapely Polygon function and get polygon object"""
return Polygon(coordinates)
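# Hedged usage sketch (not from the original source): a unit square, assuming Polygon
# above is shapely.geometry.Polygon.
square = process_polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
print(square.area, square.is_valid)  # 1.0 True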
| 17,989
|
def split_function(vector, column, value):
"""
Split function
"""
return vector[column] >= value
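# Hedged usage sketch (not from the original source): `vector` is any indexable row,
# e.g. a list of feature values.
row = [5.1, 3.5, 1.4]
print(split_function(row, column=2, value=1.0))  # True, because row[2] (1.4) >= 1.0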
| 17,990
|
def merge_ribo_files(destination_file, source_file_list):
"""
Merges the experiments in the given source files and writes
the result in the destination file.
    The ribo files need to be compatible
    (same left / right span, same metagene radius, same reference).
    Because of this compatibility, the parameters (attributes), reference,
    etc. of the new file are the same as those of the merged files.
    The source files are not allowed to contain experiments of the same name,
    as this would create ambiguity.
Parameters
----------
destination_file : Destination ribo file path
source_file_list : List of ribo file paths to be merged
"""
if len(source_file_list) < 2:
print("Please provide at least two input ribo files")
exit(1)
source_handle_list = [ (h5py.File(f , "r"), f )
for f in source_file_list ]
destination_ribo_handle = h5py.File( destination_file, "w" )
merge_ribos( destination_ribo_handle, source_handle_list )
    for handle, _ in source_handle_list:
        handle.close()
destination_ribo_handle.close()
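# Hedged usage sketch (not from the original source): the .ribo paths are hypothetical
# and must point to compatible files with non-overlapping experiment names.
#
#   merge_ribo_files("merged.ribo", ["sample_1.ribo", "sample_2.ribo"])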
| 17,991
|
def blackwell(Sv, theta, phi, r,
r0=10, r1=1000,
tSv=-75, ttheta=702, tphi=282,
wtheta=28 , wphi=52):
"""
Detects and mask seabed using the split-beam angle and Sv, based in
"Blackwell et al (2019), Aliased seabed detection in fisheries acoustic
data". Complete article here: https://arxiv.org/abs/1904.10736
Args:
Sv (float): 2D numpy array with Sv data (dB)
theta (float): 2D numpy array with the along-ship angle (degrees)
phi (float): 2D numpy array with the athwart-ship angle (degrees)
r (float): 1D range array (m)
        r0 (int): minimum range above which the search will be performed (m)
        r1 (int): maximum range below which the search will be performed (m)
        tSv (float): Sv threshold above which seabed is pre-selected (dB)
        ttheta (int): theta threshold above which seabed is pre-selected
        tphi (int): phi threshold above which seabed is pre-selected
wtheta (int): window's size for mean square operation in Theta field
wphi (int): window's size for mean square operation in Phi field
Returns:
        bool: 2D array with seabed mask
        bool: 2D array with the angle mask (temporary, see TODO in the code)
"""
# delimit the analysis within user-defined range limits
r0 = np.nanargmin(abs(r - r0))
r1 = np.nanargmin(abs(r - r1)) + 1
Svchunk = Sv[r0:r1, :]
thetachunk = theta[r0:r1, :]
phichunk = phi[r0:r1, :]
# get blur kernels with theta & phi width dimensions
ktheta = np.ones((wtheta, wtheta))/wtheta**2
kphi = np.ones((wphi , wphi ))/wphi **2
# perform mean square convolution and mask if above theta & phi thresholds
thetamaskchunk = convolve2d(thetachunk, ktheta, 'same',
boundary='symm')**2 > ttheta
phimaskchunk = convolve2d(phichunk, kphi, 'same',
boundary='symm')**2 > tphi
anglemaskchunk = thetamaskchunk | phimaskchunk
# if aliased seabed, mask Sv above the Sv median of angle-masked regions
if anglemaskchunk.any():
Svmedian_anglemasked = log(np.nanmedian(lin(Svchunk[anglemaskchunk])))
if np.isnan(Svmedian_anglemasked):
Svmedian_anglemasked = np.inf
if Svmedian_anglemasked < tSv:
Svmedian_anglemasked = tSv
Svmaskchunk = Svchunk > Svmedian_anglemasked
# label connected items in Sv mask
items = nd.label(Svmaskchunk, nd.generate_binary_structure(2,2))[0]
# get items intercepted by angle mask (likely, the seabed)
intercepted = list(set(items[anglemaskchunk]))
        if 0 in intercepted:
            intercepted.remove(0)
# combine angle-intercepted items in a single mask
maskchunk = np.zeros(Svchunk.shape, dtype=bool)
for i in intercepted:
maskchunk = maskchunk | (items==i)
# add data above r0 and below r1 (removed in first step)
above = np.zeros((r0, maskchunk.shape[1]), dtype=bool)
below = np.zeros((len(r) - r1, maskchunk.shape[1]), dtype=bool)
mask = np.r_[above, maskchunk, below]
anglemask = np.r_[above, anglemaskchunk, below] # TODO remove
    # return empty masks if aliased seabed was not detected in Theta & Phi
    else:
        mask = np.zeros_like(Sv, dtype=bool)
        anglemask = np.zeros_like(Sv, dtype=bool)
return mask, anglemask
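# Hedged usage sketch (not from the original source): random data only illustrates the
# expected shapes -- Sv, theta and phi share one (range x ping) grid and r gives the
# range of every row in metres.
#
#   nsamples, npings = 1000, 60
#   Sv    = np.random.uniform(-90, -30, (nsamples, npings))
#   theta = np.random.uniform(-10, 10, (nsamples, npings))
#   phi   = np.random.uniform(-10, 10, (nsamples, npings))
#   r     = np.linspace(0, 250, nsamples)
#   seabed_mask, angle_mask = blackwell(Sv, theta, phi, r, r0=10, r1=240)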
| 17,992
|
def test_1D_rows_in_simplex_invariant():
"""Test 1D rows in simplex unchanged."""
n_features = 1
n_samples = 15
X = np.ones((n_samples, n_features))
projection = simplex_project_rows(X) # pylint: disable=no-value-for-parameter
assert np.all(projection == X)
| 17,993
|
def setWindowRectangle(x, y=None, w=None, h=None, mngr=None, be=None):
""" Position the current Matplotlib figure at the specified position
"""
if y is None:
y = x[1]
w = x[2]
h = x[3]
x = x[0]
if mngr is None:
mngr = plt.get_current_fig_manager()
be = matplotlib.get_backend()
if be == 'WXAgg':
mngr.canvas.manager.window.SetPosition((x, y))
mngr.canvas.manager.window.SetSize((w, h))
elif be == 'agg':
mngr.canvas.manager.window.SetPosition((x, y))
mngr.canvas.manager.window.resize(w, h)
elif be == 'module://IPython.kernel.zmq.pylab.backend_inline':
pass
else:
# assume Qt canvas
mngr.canvas.manager.window.move(x, y)
mngr.canvas.manager.window.resize(w, h)
mngr.canvas.manager.window.setGeometry(x, y, w, h)
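# Hedged usage sketch (not from the original source): moves/resizes the active figure,
# either with four scalars or a single [x, y, w, h] sequence; the effect depends on the
# Matplotlib backend in use.
#
#   import matplotlib.pyplot as plt
#   plt.figure(); plt.plot([0, 1], [0, 1])
#   setWindowRectangle(100, 100, 800, 600)   # or setWindowRectangle([100, 100, 800, 600])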
| 17,994
|
def _excitation_operator( # pylint: disable=invalid-name
edge_list: np.ndarray, p: int, q: int, h1_pq: float
) -> SparsePauliOp:
"""Map an excitation operator to a Pauli operator.
Args:
edge_list: representation of graph specifying neighboring qubits.
p: First Fermionic-mode index.
q: Second Fermionic-mode index. You must ensure that p < q.
h1_pq: Numerical coefficient of the term.
Returns:
The result of the Fermionic to Pauli operator mapping.
""" # pylint: disable=missing-raises-doc
if p >= q:
raise ValueError("Expected p < q, got p = ", p, ", q = ", q)
b_a = _edge_operator_bi(edge_list, p)
b_b = _edge_operator_bi(edge_list, q)
a_ab = _edge_operator_aij(edge_list, p, q)
return (-1j * 0.5 * h1_pq) * ((b_b & a_ab) + (a_ab & b_a))
| 17,995
|
def quota():
"""Show current youtube calculated quota usage."""
limit = ConfigManager.get(Provider.youtube).data["quota_limit"]
usage = YouService.get_quota_usage()
pt_date = YouService.quota_date(obj=True)
next_reset = timedelta(
hours=23 - pt_date.hour,
minutes=59 - pt_date.minute,
seconds=60 - pt_date.second,
)
values = [
(magenta("Provider:"), Provider.youtube),
(magenta("Limit:"), limit),
(magenta("Usage:"), usage),
(magenta("Next reset:"), str(next_reset)),
]
click.secho(
tabulate(values, tablefmt="plain", colalign=("right", "left")) # type: ignore
)
| 17,996
|
def send_message( message, node, username, password, resource, max_attempts=1 ):
""" broadcast this message thru lvalert """
tmpfilename = "tmpfile.json"
tmpfile = open(tmpfilename, "w")
tmpfile.write( message )
tmpfile.close()
cmd = "lvalert_send -a %s -b %s -r %s -n %s -m %d --file %s"%(username, password, resource, node, max_attempts, tmpfilename)
return sp.Popen(cmd.split()).wait()
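# Hedged usage sketch (not from the original source): the node name and credentials are
# placeholders for a real LVAlert account.
#
#   import json
#   payload = json.dumps({"alert_type": "new", "uid": "G123456"})
#   exit_code = send_message(payload, node="cbc_gstlal", username="user",
#                            password="secret", resource="listener")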
| 17,997
|
def test_default_contribution():
"""
    verify DEFAULT_CONTRIBUTION is an InitialContribution
    with a starting_value of 10000
"""
assert DEFAULT_CONTRIBUTION.get_contribution_for_year(0) == 10000
assert DEFAULT_CONTRIBUTION.get_contribution_for_year(1) == 0
| 17,998
|
def create_bcs(field_to_subspace, Lx, Ly, solutes,
V_boundary,
enable_NS, enable_PF, enable_EC,
**namespace):
""" The boundary conditions are defined in terms of field. """
boundaries = dict(wall=[Wall()])
bcs = dict(
wall=dict()
)
bcs_pointwise = dict()
noslip = Fixed((0., 0.))
# Navier-Stokes
if enable_NS:
bcs["wall"]["u"] = noslip
bcs_pointwise["p"] = (0., "x[0] < DOLFIN_EPS && x[1] < DOLFIN_EPS")
# Electrochemistry
if enable_EC:
bcs["wall"]["V"] = Fixed(V_boundary)
return boundaries, bcs, bcs_pointwise
| 17,999
|