content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def read_molecules(filename):
    """Read a file into an OpenEye molecule (or list of molecules).

    Parameters
    ----------
    filename : str
        The name of the file to read (e.g. mol2, sdf)

    Returns
    -------
    molecule : openeye.oechem.OEMol
        The OEMol read; a list of OEMol when the file holds several
        molecules; None when the file holds no molecules at all.
    """
    input_stream = oechem.oemolistream(filename)
    # Copy each molecule out of the stream; the stream reuses its buffer.
    loaded = [oechem.OEMol(mol) for mol in input_stream.GetOEMols()]
    input_stream.close()
    if not loaded:
        return None
    if len(loaded) == 1:
        return loaded[0]
    return loaded
def euler(derivative):
    """Build an explicit Euler stepper for the given derivative function.

    The returned callable maps ``(t, x, dt)`` to the advanced state
    ``(t + dt, x + derivative(t, x) * dt)``.
    """
    def step(t, x, dt):
        return t + dt, x + derivative(t, x) * dt
    return step
def detect_encoding_type(input_geom):
    """
    Detect geometry encoding type:
    - ENC_WKB: b'\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00H\x93@\x00\x00\x00\x00\x00\x9d\xb6@'
    - ENC_EWKB: b'\x01\x01\x00\x00 \xe6\x10\x00\x00\x00\x00\x00\x00\x00H\x93@\x00\x00\x00\x00\x00\x9d\xb6@'
    - ENC_WKB_HEX: '0101000000000000000048934000000000009DB640'
    - ENC_EWKB_HEX: '0101000020E6100000000000000048934000000000009DB640'
    - ENC_WKB_BHEX: b'0101000000000000000048934000000000009DB640'
    - ENC_EWKB_BHEX: b'0101000020E6100000000000000048934000000000009DB640'
    - ENC_WKT: 'POINT (1234 5789)'
    - ENC_EWKT: 'SRID=4326;POINT (1234 5789)'

    Returns None when the input is not a recognized geometry encoding.
    """
    # Already-constructed shapely geometry needs no decoding.
    if isinstance(input_geom, shapely.geometry.base.BaseGeometry):
        return ENC_SHAPELY
    if isinstance(input_geom, str):
        if _is_hex(input_geom):
            return ENC_WKB_HEX
        srid, geom = _extract_srid(input_geom)
        if not geom:
            return None
        return ENC_EWKT if srid else ENC_WKT
    if isinstance(input_geom, bytes):
        # Bytes that unhexlify cleanly are hex-encoded WKB; otherwise raw WKB.
        try:
            ba.unhexlify(input_geom)
        except Exception:
            return ENC_WKB
        return ENC_WKB_BHEX
    return None
def GetFilesToConcatenate(input_directory):
    """Get list of files to concatenate.

    Args:
        input_directory: Directory to search for files.
    Returns:
        A list of all files that we would like to concatenate, as paths
        relative to the input directory.
    """
    relative_paths = []
    for dirpath, _, filenames in os.walk(input_directory):
        relative_paths.extend(
            os.path.relpath(os.path.join(dirpath, name), input_directory)
            for name in filenames)
    return relative_paths
def square_spiral(turn=5, size=75):
    """
    Draws a "spiral" of squares.
    Works best if `turn` is a factor of 360.

    :param turn: rotation between consecutive squares -- presumably in
        degrees; TODO confirm against rect_spiral's contract.
    :param size: side length of each square.
    """
    # A square is just a rectangle with equal width and height.
    rect_spiral(turn, size, size)
def unet_weights(input_size=(256, 256, 1), learning_rate=1e-4, weight_decay=5e-7):
    """
    Weighted U-net architecture.

    The tuple 'input_size' corresponds to the size of the input images and
    labels; default (256, 256, 1) means 256x256 single-channel images.
    The float 'learning_rate' is the Adam learning rate (default 1e-4).
    The float 'weight_decay' is the Adam decay value (default 5e-7).

    Returns a compiled Keras Model taking [image, weight-map] inputs and
    producing a single-channel sigmoid segmentation map.
    """
    def conv(filters, kernel, tensor):
        # Every convolution in the network shares these settings.
        return Conv2D(filters, kernel, activation='relu', padding='same',
                      kernel_initializer='he_normal')(tensor)

    input_img = Input(input_size)
    # The weight map is a second input, consumed only by the loss function.
    weights = Input(input_size)

    # Contracting path (encoder).
    conv1 = conv(64, 3, conv(64, 3, input_img))
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = conv(128, 3, conv(128, 3, pool1))
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = conv(256, 3, conv(256, 3, pool2))
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    drop4 = Dropout(0.5)(conv(512, 3, conv(512, 3, pool3)))
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottleneck.
    drop5 = Dropout(0.5)(conv(1024, 3, conv(1024, 3, pool4)))

    # Expanding path (decoder) with skip connections from the encoder.
    up6 = conv(512, 2, UpSampling2D(size=(2, 2))(drop5))
    conv6 = conv(512, 3, conv(512, 3, concatenate([drop4, up6], axis=3)))
    up7 = conv(256, 2, UpSampling2D(size=(2, 2))(conv6))
    conv7 = conv(256, 3, conv(256, 3, concatenate([conv3, up7], axis=3)))
    up8 = conv(128, 2, UpSampling2D(size=(2, 2))(conv7))
    conv8 = conv(128, 3, conv(128, 3, concatenate([conv2, up8], axis=3)))
    up9 = conv(64, 2, UpSampling2D(size=(2, 2))(conv8))
    conv9 = conv(2, 3, conv(64, 3, conv(64, 3, concatenate([conv1, up9], axis=3))))

    # Final 1x1 convolution producing the per-pixel probability map.
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)

    # The model takes both image and weights; the weights feed the loss.
    model = Model(inputs=[input_img, weights], outputs=conv10)
    model.compile(optimizer=Adam(lr=learning_rate, decay=weight_decay),
                  loss=binary_crossentropy_weighted(weights),
                  metrics=['accuracy'])
    return model
def std(x, axis=None, keepdims=False):
    """Standard deviation of a tensor, alongside the specified axis.

    Thin backend wrapper delegating directly to Theano's `T.std`.
    """
    return T.std(x, axis=axis, keepdims=keepdims)
def validate_email_address(
    value=_undefined,
    allow_unnormalized=False,
    allow_smtputf8=True,
    required=True,
):
    """
    Checks that a string represents a valid email address.

    By default, only email addresses in fully normalized unicode form are
    accepted.  Validation logic is based on the well written and thoroughly
    researched [email-validator](https://pypi.org/project/email-validator/)
    library; despite the conflict with this library's naming convention, we
    recommend `email-validator` for validation and sanitisation of untrusted
    input.

    :param str value:
        The value to be validated.  When omitted, a reusable validator
        callable is returned instead of validating immediately.
    :param bool allow_unnormalized:
        Whether or not to accept addresses that are not completely
        normalized.  Defaults to False, as in most cases you will want
        equivalent email addresses to compare equal.
    :param bool allow_smtputf8:
        Whether or not to accept email addresses with local parts that can't
        be encoded as plain ascii.  Defaults to True, as such addresses are
        now common and very few current email servers reject them.
    :param bool required:
        Whether the value can be `None`.  Defaults to `True`.
    :raises TypeError:
        If the value is not a unicode string.
    :raises ValueError:
        If the value is not an email address, or is not normalized.
    """
    checker = _email_address_validator(
        allow_unnormalized=allow_unnormalized,
        allow_smtputf8=allow_smtputf8,
        required=required,
    )
    # No value given: hand back the configured validator for later use.
    if value is _undefined:
        return checker
    checker(value)
def help():
    """Help: print a fixed greeting to standard output."""
    # NOTE: this intentionally shadows the builtin `help` in this module.
    message = "hello, world!"
    print(message)
def rotation_matrix_from_quaternion(quaternion):
"""Return homogeneous rotation matrix from quaternion."""
q = numpy.array(quaternion, dtype=numpy.float64)[0:4]
nq = numpy.dot(q, q)
if nq == 0.0:
return numpy.identity(4, dtype=numpy.float64)
q *= math.sqrt(2.0 / nq)
q = numpy.outer(q, q)
return numpy.array((
(1.0-q[1,1]-q[2,2], q[0,1]-q[2,3], q[0,2]+q[1,3], 0.0),
( q[0,1]+q[2,3], 1.0-q[0,0]-q[2,2], q[1,2]-q[0,3], 0.0),
( q[0,2]-q[1,3], q[1,2]+q[0,3], 1.0-q[0,0]-q[1,1], 0.0),
( 0.0, 0.0, 0.0, 1.0)
), dtype=numpy.float64) | 5,332,009 |
def get_woosh_dir(url, whoosh_base_dir):
    """
    Based on the bigbed url and base whoosh directory from settings,
    generate the path of the whoosh index directory for this bed file.
    """
    # The directory is named after the last path component of the URL
    # (query strings and fragments are excluded by urlparse).
    basename = urlparse(url).path.rsplit('/', 1)[-1]
    return os.path.join(whoosh_base_dir, basename)
def get_metadata(doi):
    """Extract additional metadata of a paper based on its DOI.

    Resolves the DOI through dx.doi.org asking for a BibTeX rendering,
    then extracts the title, year and journal fields from it.

    :param doi: the DOI string, e.g. ``'10.1000/xyz123'``.
    :return: tuple ``(title, year, journal)``; empty strings for any
        field that could not be retrieved.
    """
    headers = {"accept": "application/x-bibtex"}
    title, year, journal = '', '', ''
    session = requests.Session()
    # Retry transient connection failures with exponential backoff.
    retry = Retry(connect=3, backoff_factor=0.5)
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    try:
        # Bug fix: use the session (which carries the retry adapters)
        # instead of the bare requests.get that ignored them.
        response = session.get("http://dx.doi.org/" + doi, headers=headers)
    except requests.exceptions.ConnectionError:
        print("ConnectionError")
        return title, year, journal
    if response.status_code != 200:
        print('Did not find ' + doi + ' article, error code ' + str(response.status_code))
        return title, year, journal
    # BibTeX fields are separated by "\n\t"; the first element is the
    # entry header, so it is skipped.  (The previous .encode() produced
    # bytes that broke str.split under Python 3.)
    fields = response.text.split('\n\t')[1:]
    for field in fields:
        if len(field) >= 8 and field.startswith("year ="):
            year = field[7:-1]
        if len(field) >= 9 and field.startswith("title ="):
            title = field[9:-2]
        if len(field) >= 11 and field.startswith("journal ="):
            journal = field[11:-3]
    return title, year, journal
def format_channel(channel):
    """Return the canonical 'chN' string representation of <channel>.

    Accepts an int (0 -> 'ch0'), a 'chN'-style string (returned as-is),
    a single letter 'r'/'g'/'b', or a full color name 'red'/'green'/'blue'
    (case-insensitive).

    :return: the 'chN' string, or None for None / empty-string input.
    :raises ValueError: for non-int/non-str input or unrecognized strings.
    """
    if channel is None or channel == '':
        return None
    # type() rather than isinstance() so bools are not treated as ints.
    elif type(channel) == int:
        return 'ch{:d}'.format(channel)
    elif type(channel) != str:
        raise ValueError('Channel must be specified in string format.')
    elif 'ch' in channel:
        return channel
    elif channel.lower() in ('r', 'g', 'b'):
        # Bug fix: tuple membership instead of `in 'rgb'`, which was a
        # substring test and silently accepted inputs like 'rg' or 'gb'.
        return format_channel('rgb'.index(channel.lower()))
    elif channel.lower() in ('red', 'green', 'blue'):
        return format_channel('rgb'.index(channel.lower()[0]))
    else:
        raise ValueError('Channel string not recognized.')
def make_mask(img_dataset,mask_parms,storage_parms):
    """
    .. todo::
        This function is not yet implemented

    Make a region to identify a mask for use in deconvolution.
    One or more of the following options are allowed
    - Supply a mask in the form of a cngi.image.region
    - Run an auto-masking algorithm to detect structure and define a cngi.image.region
    - Apply a pblimit based mask
    An existing deconvolution mask from img_dataset may either be included in the above, or ignored.
    The output is a region (array?) in the img_dataset containing the intersection of all above regions

    Returns
    -------
    img_dataset : xarray.core.dataset.Dataset

    Notes
    -----
    Stub only: currently performs no work and implicitly returns None.
    """
def parse_standards_from_spreadsheeet(
    cre_file: List[Dict[str, Any]], result: db.Standard_collection
) -> None:
    """Given a yaml with standards, build a list of standards in the db.

    The header keys of the first row determine which parser version the
    file was exported with.
    """
    high_level_cres = {}
    parsed_cres = {}
    header_keys = cre_file[0].keys()
    if "CRE Group 1" in header_keys:
        high_level_cres, parsed_cres = parsers.parse_v1_standards(cre_file)
    elif "CRE:name" in header_keys:
        parsed_cres = parsers.parse_export_format(cre_file)
    elif any(key.startswith("CRE hierarchy") for key in header_keys):
        parsed_cres = parsers.parse_hierarchical_export_format(cre_file)
    else:
        parsed_cres = parsers.parse_v0_standards(cre_file)
    # Register groupless CREs first.
    for cre in parsed_cres.values():
        register_cre(cre, result)
    # Then the groups and their links.
    # TODO :(spyros) merge with register_cre above
    for doc in high_level_cres.values():
        dbgroup = result.add_cre(doc)
        for link in doc.links:
            if type(link.document).__name__ == defs.CRE.__name__:
                dbcre = register_cre(link.document, result)
                result.add_internal_link(group=dbgroup, cre=dbcre, type=link.ltype)
            elif type(link.document).__name__ == defs.Standard.__name__:
                dbstandard = register_standard(link.document, result)
                result.add_link(cre=dbgroup, standard=dbstandard, type=link.ltype)
def download_emoji_texture(load=True):  # pragma: no cover
    """Download emoji texture.

    Parameters
    ----------
    load : bool, optional
        Load the dataset after downloading it when ``True``. Set this
        to ``False`` and only the filename will be returned.

    Returns
    -------
    pyvista.Texture or str
        DataSet or filename depending on ``load``.

    Examples
    --------
    >>> from pyvista import examples
    >>> dataset = examples.download_emoji_texture()
    >>> dataset.plot(cpos="xy")
    """
    # Delegates download/caching and reading to the shared helper.
    return _download_and_read('emote.jpg', texture=True, load=load)
def create_tables_in_database(
        db_configuration: DatabaseConnectionConfig, model_base: Type[declarative_base], schema_name: str):
    """Creates the tables for the models in the database.

    Ensures the target schema exists first, then creates all tables
    registered on `model_base`.
    """
    with DatabaseConnection.get_db_connection(
            db_connection_config=db_configuration) as connection:
        engine = connection.connection_engine
        try:
            engine.execute(orm.schema.CreateSchema(schema_name))
        except (sqlite3.OperationalError, orm.exc.OperationalError):
            # SQLite has no CREATE SCHEMA; emulate it with ATTACH.
            engine.execute(f"ATTACH ':memory:' AS {schema_name}")
        except orm.exc.ProgrammingError:
            # Schema already exists -- nothing to do.
            pass
        model_base.metadata.create_all(bind=engine)
def the_task_is_created(step):
    """ Assertions to check if TASK is created with the expected data.

    Reads the shared lettuce `world` state populated by earlier steps and
    stores the extracted task id back into `world.task_id`.
    """
    assert_true(world.response.ok, 'RESPONSE BODY: {}'.format(world.response.content))
    # Content type of the response must match what the request accepted.
    response_headers = world.response.headers
    assert_equals(response_headers[CONTENT_TYPE], world.headers[ACCEPT_HEADER],
                  'RESPONSE HEADERS: {}'.format(world.response.headers))
    response_body = response_body_to_dict(world.response, world.headers[ACCEPT_HEADER], with_attributes=True,
                                          xml_root_element_name=TASK)
    assert_in(world.node_name, response_body[DESCRIPTION])
    assert_equals(world.tenant_id, response_body[TASK_VDC])
    # Extract the trailing task id from the href.
    # NOTE(review): re.search may return None if the href has no /task/
    # segment, which would raise AttributeError below -- confirm intended.
    m = re.search('/task/(.*)$', response_body[TASK_HREF])
    world.task_id = m.group(1)
def shuffle_sequence(sequence: str) -> str:
    """Shuffle the given sequence.

    Randomly shuffle a sequence, maintaining the same composition.

    Args:
        sequence: input sequence to shuffle

    Returns:
        A shuffled sequence with identical character counts.
    """
    # random.sample draws every index without replacement, producing a
    # uniform shuffle in O(n) instead of the previous O(n^2) loop that
    # re-sliced the string on every iteration.
    return "".join(random.sample(sequence, len(sequence)))
def hibernate(debug=False):
    """
    Shortcut for hibernate command (need sudo).

    :param debug: flag for using debug mode
    :type debug:bool
    :return: None
    """
    # Delegates to the shared power_control helper with the pm-hibernate
    # command name.
    power_control("pm-hibernate", debug)
def bayesian_twosample(countsX, countsY, prior=None):
    """
    Calculates a Bayesian-like two-sample test between `countsX` and `countsY`.

    The idea is taken from [1]_. We assume the counts are generated IID and
    use a Dirichlet prior to infer the underlying discrete distribution.
    Under the null hypothesis H_0, `countsX` and `countsY` come from the same
    distribution; under H_1 they come from different distributions. The log
    Bayes factor is

        \\chi = log P(X, Y | H_1) - log P(X, Y | H_0)

    where P(X, Y | H_1) is the product of the two independent evidences and
    P(X, Y | H_0) is the evidence of the pooled counts. The null hypothesis
    is rejected when \\chi > 0.

    Parameters
    ----------
    countsX : array-like, shape (n,)
        The counts for X.
    countsY : array-like, shape (n,)
        The counts for Y.
    prior : array-like, shape (n,)
        The Dirichlet hyper-parameters to use during inference. If `None`,
        Jeffrey's prior is used.

    Returns
    -------
    reject : bool
        `True` when the null hypothesis is rejected (counts should be
        considered as generated from different distributions).
    chi : float
        The base-2 logarithm of the evidence ratio; reject when > 0.

    Examples
    --------
    >>> bayesian_twosample([1,10], [2,3])
    (True, 0.11798407303051839)
    >>> bayesian_twosample([1,30], [20,30])
    (True, 9.4347501426274931)

    References
    ----------
    .. [1] Karsten M. Borgwardt and Zoubin Ghahramani, "Bayesian two-sample
       tests". http://arxiv.org/abs/0906.4032
    """
    if prior is None:
        # Jeffrey's prior for Dirichlet distributions.
        prior = 0.5 * np.ones(len(countsX))
    countsX = np.asarray(countsX)
    countsY = np.asarray(countsY)
    # Independent evidences (H_1) minus pooled evidence (H_0).
    log_bayes_factor = (log_evidence(countsX, prior)
                        + log_evidence(countsY, prior)
                        - log_evidence(countsX + countsY, prior))
    return log_bayes_factor > 0, log_bayes_factor
def load_image(image_path):
    """
    Load an image from the specified path and prepare it as a model input.

    :param image_path: path of the image file to load
    :return: the preprocessed image tensor (with a leading batch axis)
        moved to the configured device as float.
    """
    # Open, apply the module-level preprocessing `loader`, then add the
    # batch dimension expected by the network.
    tensor = loader(Image.open(image_path))
    return tensor.unsqueeze(0).to(device, torch.float)
def feed_reader(url):
    """Returns a JSON string built from the feed at *url*."""
    # Fetch raw feed content, parse it, then serialize keeping non-ASCII
    # characters intact.
    parsed = feed_parser(retrieve_feed(url))
    return json.dumps(parsed, ensure_ascii=False)
def create_app(env_name):
    """
    Create and configure the Flask application.

    :param env_name: key into ``app_config`` selecting the configuration
        profile (e.g. development, production).
    :return: the configured :class:`flask.Flask` instance.
    """
    # App initialization.
    app = Flask(__name__)
    app.config.from_object(app_config[env_name])
    # CORS registers itself on the app; the previous unused `cors`
    # variable binding was dropped.
    CORS(app)
    # Initializing bcrypt and db.
    bcrypt.init_app(app)
    db.init_app(app)
    app.register_blueprint(book_blueprint, url_prefix='/api/v1/books')

    @app.route('/', methods=['GET'])
    def index():
        """
        example endpoint
        """
        return 'Congratulations! Your part 2 endpoint is working'

    return app
def test_md020_bad_multiple_within_paragraph_separated_shortcut_image_multi():
    """
    Test to make sure we get the expected behavior after scanning a good file from the
    test/resources/rules/md020 directory that has a closed atx heading with bad spacing
    inside of the start hashes, multiple times in the same paragraph.
    """
    # Arrange
    scanner = MarkdownScanner()
    supplied_arguments = [
        "scan",
        "test/resources/rules/md020/multiple_within_paragraph_separated_shortcut_image_multi.md",
    ]
    # Return code 1 signals that at least one rule was triggered.
    expected_return_code = 1
    expected_output = (
        "test/resources/rules/md020/multiple_within_paragraph_separated_shortcut_image_multi.md:4:3: "
        + "MD020: No space present inside of the hashes on a possible Atx Closed Heading. (no-missing-space-closed-atx)\n"
    )
    expected_error = ""
    # Act
    execute_results = scanner.invoke_main(arguments=supplied_arguments)
    # Assert
    execute_results.assert_results(
        expected_output, expected_error, expected_return_code
    )
def test_update_rate():
    """
    Testing that the update methods get a correct timedelta.

    The node's `update` method is replaced with an instrumented version that
    measures the real elapsed time between calls and compares it with the
    `timedelta` argument the node reports; if they ever differ by more than
    5 ms the test fails.
    """
    # TODO : investigate if node multiprocessing plugin would help simplify this
    # Lists are used so the nested function can mutate shared state by reference.
    testing_last_update = [time.time()]
    testing_time_delta = []
    acceptable_timedelta = []
    def testing_update(self, timedelta, last_update, time_delta, ok_timedelta):
        # Record the actual wall-clock gap since the previous update call.
        time_delta.append(time.time() - last_update[-1])
        last_update.append(time.time())
        # if the time delta measured in test and the one passed as argument differ
        # too much, one time, test is failed
        if abs(time_delta[-1] - timedelta) > 0.005:
            ok_timedelta.append(False)
        else:
            ok_timedelta.append(True)
        # spin like crazy, loads CPU for a bit, and eventually exits.
        # We re here trying to disturb the update rate
        while True:
            if randint(0, 10000) == 42:
                break
    # hack to dynamically change the update method
    testing_update_onearg = functools.partial(testing_update,
                                              last_update=testing_last_update,
                                              time_delta=testing_time_delta,
                                              ok_timedelta=acceptable_timedelta)
    n1 = pyzmp.Node()
    n1.update = types.MethodType(testing_update_onearg, n1)
    assert not n1.is_alive()
    # Starting the node in the same thread, to be able to test simply by shared memory.
    # TODO : A Node that can choose process or thread run ( on start() instead of init() maybe ? )
    runthread = threading.Thread(target=n1.run)
    runthread.daemon = True  # to kill this when test is finished
    runthread.start()
    # n1.start()
    # Let the node run and accumulate samples for a while.
    time.sleep(10)
    # removing init time only used for delta computation
    testing_last_update.pop(0)
    # Check time vars modified by update
    for i in range(0, len(testing_last_update)):
        print("update : {u} | delta: {d} | accept : {a}".format(
            u=testing_last_update[i],
            d=testing_time_delta[i],
            a=acceptable_timedelta[i])
        )
        assert acceptable_timedelta[i]
def get_shed_tool_conf_dict( app, shed_tool_conf ):
    """Return the in-memory version of the shed_tool_conf file, which is
    stored in the config_elems entry in the shed_tool_conf_dict associated
    with the file.

    Matches either the full config filename or just its basename; returns
    None implicitly when nothing matches.
    """
    for index, conf_dict in enumerate(app.toolbox.shed_tool_confs):
        config_filename = conf_dict['config_filename']
        if shed_tool_conf == config_filename:
            return index, conf_dict
        # Fall back to comparing against the bare file name.
        if shed_tool_conf == strip_path(config_filename):
            return index, conf_dict
def _improve_attribute_docs(obj, name, lines):
    """Improve the documentation of various attributes.

    This improves the navigation between related objects.

    :param obj: the instance of the object to document.
    :param name: full dotted path to the object.
    :param lines: expected documentation lines (mutated in place).
    """
    if obj is None:
        # Happens with form attributes.
        return
    if isinstance(obj, DeferredAttribute):
        # This only points to a field name, not a field.
        # Get the field by importing the name.
        cls_path, field_name = name.rsplit(".", 1)
        model = import_string(cls_path)
        field = model._meta.get_field(obj.field_name)
        del lines[:]  # lines.clear() is Python 3 only
        lines.append("**Model field:** {label}".format(label=field.verbose_name))
    elif isinstance(obj, _FIELD_DESCRIPTORS):
        # These descriptors wrap a concrete model field directly.
        del lines[:]
        lines.append("**Model field:** {label}".format(label=obj.field.verbose_name))
        if isinstance(obj, FileDescriptor):
            lines.append(
                "**Return type:** :class:`~django.db.models.fields.files.FieldFile`"
            )
        elif PhoneNumberDescriptor is not None and isinstance(
            obj, PhoneNumberDescriptor
        ):
            lines.append(
                "**Return type:** :class:`~phonenumber_field.phonenumber.PhoneNumber`"
            )
    elif isinstance(obj, related_descriptors.ForwardManyToOneDescriptor):
        # Display a reasonable output for forward descriptors.
        related_model = obj.field.remote_field.model
        # NOTE(review): `unicode` is a Python 2 builtin; under Python 3 this
        # raises NameError unless a compat alias is defined elsewhere in the
        # module -- confirm.
        if isinstance(related_model, str) or isinstance(related_model, unicode):
            cls_path = related_model
        else:
            cls_path = "{}.{}".format(related_model.__module__, related_model.__name__)
        del lines[:]
        lines.append(
            "**Model field:** {label}, "
            "accesses the :class:`~{cls_path}` model.".format(
                label=obj.field.verbose_name, cls_path=cls_path
            )
        )
    elif isinstance(obj, related_descriptors.ReverseOneToOneDescriptor):
        related_model = obj.related.related_model
        if isinstance(related_model, str) or isinstance(related_model, unicode):
            cls_path = related_model
        else:
            cls_path = "{}.{}".format(related_model.__module__, related_model.__name__)
        del lines[:]
        lines.append(
            "**Model field:** {label}, "
            "accesses the :class:`~{cls_path}` model.".format(
                label=obj.related.field.verbose_name, cls_path=cls_path
            )
        )
    elif isinstance(obj, related_descriptors.ReverseManyToOneDescriptor):
        related_model = obj.rel.related_model
        if isinstance(related_model, str) or isinstance(related_model, unicode):
            cls_path = related_model
        else:
            cls_path = "{}.{}".format(related_model.__module__, related_model.__name__)
        del lines[:]
        lines.append(
            "**Model field:** {label}, "
            "accesses the M2M :class:`~{cls_path}` model.".format(
                label=obj.field.verbose_name, cls_path=cls_path
            )
        )
    elif isinstance(obj, (models.Manager, ManagerDescriptor)):
        # Somehow the 'objects' manager doesn't pass through the docstrings.
        module, cls_name, field_name = name.rsplit(".", 2)
        lines.append("Django manager to access the ORM")
        tpl = "Use ``{cls_name}.objects.all()`` to fetch all objects."
        lines.append(tpl.format(cls_name=cls_name))
def validate_environment(args):
    """
    Validate an environment description for JSSPP OSP.

    Exits the process with status 1 when the file does not match the
    environment schema.

    :param args: The command line arguments passed to this command.
    """
    logging.info('Processing file %s', args.file.name)
    logging.info('Validating structural requirements')
    schema = load_environment_schema()
    instance = parse_environment(args.file)
    error = validate(schema, instance)
    if error is None:
        logging.info('Format OK')
        return
    # Build a JSON-pointer-like location string for the failing element.
    location = '/'.join(str(part) for part in error.absolute_path)
    logging.error('File does not match schema at %s: %s', location, error)
    sys.exit(1)
def write(data, filename):
    """Write a dictionary of FASTA sequences to file.

    Sequences are wrapped at 80 characters per line.

    Arguments:
    ---------
    data: dict Dictionary of {sid: sequence}
    filename: str Filename to write
    """
    entries = []
    for title, sequence in data.items():
        # Slice the sequence into 80-character lines.  The previous manual
        # while-loop appended a spurious empty chunk (a blank line) whenever
        # the length was an exact multiple of 80; range-stepping avoids that.
        chunks = [sequence[i:i + 80] for i in range(0, len(sequence), 80)]
        entries.append('>' + title + '\n' + '\n'.join(chunks))
    # 'with' guarantees the handle is closed even if writing fails.
    with open(filename, 'w') as handle:
        handle.write('\n'.join(entries))
def get_column_interpolation_dust_raw(key, h_in_gp1, h_in_gp2, index,
                                      mask1, mask2, step1_a, step2_a,
                                      target_a, dust_factors,
                                      kdtree_index=None,
                                      luminosity_factors=None, cache=False,
                                      snapshot=False):
    """This function returns the interpolated quantity between two
    timesteps, from step1 to step2. Some galaxies are masked out: Any
    galaxy that doesn't pass the mask in step1 (mask1), any galaxy
    that doesn't a decendent in step2, or any galaxy that whose
    descendent doesn't pass the step2 mask (mask2).

    NOTE(review): assumes `h_in_gp1`/`h_in_gp2` are h5py-group-like (uses
    `.value`, deprecated in h5py >= 2.9) -- confirm h5py version in use.
    """
    print("\tLoading key: {}".format(key))
    # if luminosity_factors is None:
    #     print("\t\tluminosity factors is none")
    #print("dust_factors: ", dust_factors)
    t1 = time.time()
    # Scale-factor spans used to linearly interpolate between the steps.
    step_del_a = step2_a - step1_a
    target_del_a = target_a - step1_a
    ##=========DEBUG==========
    # print("step del_a {:.3f} - {:.3f} = {:.3f}".format(step2_a, step1_a, step_del_a))
    # print("target_del_a {:.3f} - {:.3f} = {:.3f}".format(target_a, step1_a, target_del_a))
    ##=========DEBUG==========
    # The masking all galaxies that fail galmatcher's requirements at
    # step1, galaxies that don't have a descndent, or if the
    # descendent galaxy at step2 doesn't pass galmatcher requirements.
    # If index is set None, we aren't doing interpolation at all. We
    # are just using
    if not snapshot:
        mask_tot = mask1 & (index != -1) & mask2[index]
        if (key in no_slope_var) or any(ptrn in key for ptrn in no_slope_ptrn):
            # Quantities that must not be interpolated: take step1's value.
            #print('\t\tno interpolation')
            data = h_in_gp1[key].value[mask_tot]
            if kdtree_index is None:
                val_out = data
            else:
                val_out = data[kdtree_index]
        elif ":dustAtlas" in key:
            # Interpolate the dust-free quantity, then re-apply the dust
            # attenuation ratio measured at step1.
            #print('\t\tinterpolation with dust')
            key_no_dust = key.replace(":dustAtlas","")
            val1_no_dust = h_in_gp1[key_no_dust].value[mask_tot]
            val1_dust = h_in_gp1[key].value[mask_tot]
            if index is not None:
                val2_no_dust = h_in_gp2[key].value[index][mask_tot]
            else:
                val2_no_dust = val1_no_dust
            # val1_no_dust_lg = np.log(val1_no_dust)
            # val1_dust_lg = np.log(val1_dust)
            # val2_no_dust_lg = np.log(val2_no_dust)
            # Ratio of attenuated to unattenuated flux; 1 where the
            # unattenuated flux is zero (avoids division artifacts).
            dust_effect = val1_dust/val1_no_dust
            dust_effect[val1_no_dust == 0] = 1
            slope = (val2_no_dust - val1_no_dust)/step_del_a
            slope[step_del_a ==0] =0
            # slope_mag = (val2_no_dust_lg - val1_no_dust_lg)/step_del_a
            # slope_mag[step_del_a == 0] = 0
            ##=======DEBUG=======
            # def print_vals(label, data):
            #     print("\t\t{} below/zero/size: {}/{}/{}".format(label, np.sum(data<0), np.sum(data==0), data.size))
            # print_vals("val1_no_dust", val1_no_dust)
            # print_vals("val2_no_dust", val2_no_dust)
            # print_vals("val1_dust", val1_dust)
            ##=======DEBUG=======
            if kdtree_index is None:
                # Amplify the dust effect by dust_factors, but never weaken
                # an attenuation ratio that is already above 1.
                tot_dust_effect = dust_effect**dust_factors
                slct = dust_effect > 1.0
                tot_dust_effect[slct] = dust_effect[slct]
                val_out = (val1_no_dust + slope*target_del_a)*tot_dust_effect
                #val_out = np.exp((val1_no_dust_lg + slope_mag*target_del_a)*tot_dust_effect)
            else:
                tot_dust_effect = (dust_effect[kdtree_index]**dust_factors)
                slct = dust_effect[kdtree_index] > 1.0
                tot_dust_effect[slct] = dust_effect[kdtree_index][slct]
                val_out = (val1_no_dust[kdtree_index] + slope[kdtree_index]*target_del_a)*tot_dust_effect
                #val_out = np.exp((val1_no_dust_lg[kdtree_index] + slope_mag[kdtree_index]*target_del_a)*tot_dust_effect)
        else:
            # Plain linear interpolation between the two steps.
            #print('\t\tinerpolation without dust')
            val1_data = h_in_gp1[key].value[mask_tot]
            val2_data = h_in_gp2[key].value[index][mask_tot]
            # val1_data_lg = np.log(val1_data)
            # val2_data_lg = np.log(val2_data)
            slope = (val2_data - val1_data)/step_del_a
            slope[step_del_a==0]=0
            # slope_mag = (val1_data_lg-val2_data_lg)/step_del_a
            # slope_mag[step_del_a==0]=0
            if kdtree_index is None:
                val_out = val1_data + slope*target_del_a
                #val_out = np.log(val1_data_lg + slope_mag*target_del_a)
            else:
                val_out = val1_data[kdtree_index] + slope[kdtree_index]*target_del_a
                #val_out = np.log(val1_data_lg[kdtree_index] + slope_mag[kdtree_index]*target_del_a)
            #print('\t\t',val_out.dtype)
    # If running on snapshot, we don't need to interpolate
    else:
        mask_tot = mask1
        val1_data = h_in_gp1[key].value[mask_tot]
        # reorder the data if it's a post-matchup index
        if kdtree_index is None:
            val_out = val1_data
        else:
            val_out = val1_data[kdtree_index]
    # Optional per-galaxy luminosity rescaling for luminosity-like columns.
    if not(luminosity_factors is None):
        if(any(l in key for l in luminosity_factors_keys)):
            #print("\t\tluminosity adjusted")
            val_out = val_out*luminosity_factors
        elif('Luminosities' in key or 'Luminosity' in key):
            #print("\t\tluminosity adjusted 2")
            val_out = val_out*luminosity_factors
        else:
            pass
            #print("\t\tluminosity untouched")
    # Diagnostics and handling for non-finite interpolation results.
    if np.sum(~np.isfinite(val_out))!=0:
        print(key, "has a non-fininte value")
        print("{:.2e} {:.2e}".format(np.sum(~np.isfinite(val_out)), val_out.size))
        if ":dustAtlas" in key:
            print(np.sum(~np.isfinite(val1_no_dust)))
            print(np.sum(~np.isfinite(dust_effect)))
            slct = ~np.isfinite(dust_effect)
            print(val1_no_dust[slct])
            print(val1_dust[slct])
            # NOTE(review): `slope_mag` is only defined in commented-out
            # code above -- this print raises NameError if reached.
            print(np.sum(~np.isfinite(slope_mag)))
            print(np.sum(~np.isfinite(target_del_a)))
        if "emissionLines" in key:
            print("overwriting non-finite values with 0")
            val_out[~np.isfinite(val_out)]=0.0
        else:
            # NOTE(review): bare `raise` outside an except block raises
            # "RuntimeError: No active exception to re-raise" -- probably
            # intended to be an explicit ValueError; confirm.
            raise
    #print("\t\toutput size: {:.2e}".format(val_out.size))
    print("\t\t mask size:{:.1e}/{:.1e} data size:{:.1e} read + format time: {}".format(np.sum(mask_tot), mask_tot.size, val_out.size, time.time()-t1))
    ##=======DEBUG======
    # print("\t\t non-finite: {}/{}/{}".format(np.sum(~np.isfinite(val_out)), np.sum(val_out<0), val_out.size))
    # print("\t\t below/zero/size: {}/{}/{}".format(np.sum(val_out<0), np.sum(val_out==0), val_out.size))
    ##=======DEBUG======
    return val_out
def configure_bgp_additional_paths(device, bgp_as):
    """ Configure additional_paths on bgp router

        Args:
            device ('obj'): device to use
            bgp_as ('int'): bgp router to configure
        Returns:
            N/A
        Raises:
            SubCommandFailure: Failed executing configure commands
    """
    log.info("Configuring bgp router {} with additional paths".format(bgp_as))
    commands = [
        "router bgp {}".format(bgp_as),
        "bgp additional-paths select all",
        "bgp additional-paths send receive",
        "bgp additional-paths install",
    ]
    try:
        device.configure(commands)
    except SubCommandFailure:
        # Re-raise with a message that identifies the failing router.
        raise SubCommandFailure(
            "Could not configure additional paths on bgp "
            "router {}".format(bgp_as)
        )
def _get_next_foldername_index(name_to_check,dir_path):
"""Finds folders with name_to_check in them in dir_path and extracts which one has the hgihest index.
Parameters
----------
name_to_check : str
The name of the network folder that we want to look repetitions for.
dir_path : str
The folder where we want to look for network model repetitions.
Returns
-------
str
If there are no name matches, it returns the string '1'. Otherwise, it returns str(highest index found + 1)
"""
dir_content = os.listdir(dir_path)
dir_name_indexes = [int(item.split('.')[-1]) for item in dir_content if os.path.isdir(item) and name_to_check in item] #extracting the counter in the folder name and then we find the maximum
if len(dir_name_indexes) == 0:
return '1'
else:
highest_idx = max(dir_name_indexes)
return str(highest_idx + 1)
#find all folders that have name_to_check in them: | 5,332,032 |
def index_hydrate(params, container, cli_type, key, value):
    """
    Hydrate an index-field option value to construct something like::

        {
            'IndexField': {
                'DoubleOptions': {
                    'DefaultValue': 0.0
                }
            }
        }
    """
    index_field = params.setdefault('IndexField', {})
    if 'IndexFieldType' not in index_field:
        raise RuntimeError('You must pass the --type option.')
    # Derive the options key from the field type, e.g. int-array -> IntArray.
    type_name = ''.join(
        part.capitalize()
        for part in index_field['IndexFieldType'].split('-')
    )
    # 'latlon' capitalizes to 'Latlon', but the service model spells it
    # 'LatLon', so patch that one special case.
    if type_name == 'Latlon':
        type_name = 'LatLon'
    option_name = key.split(SEP)[-1]
    # Default values must be coerced to the option type (e.g. float for
    # DoubleOptions); unknown types pass through unchanged.
    if option_name == 'DefaultValue':
        value = DEFAULT_VALUE_TYPE_MAP.get(type_name, lambda x: x)(value)
    options = index_field.setdefault(type_name + 'Options', {})
    options[option_name] = value
def test_state_apply_aborts_on_pillar_error(
    salt_cli,
    salt_minion,
    base_env_pillar_tree_root_dir,
):
    """
    Test state.apply with error in pillar.

    Renders a pillar top file that targets the test minion with a 'basic'
    pillar SLS whose body does not render to a dictionary; ``state.apply``
    must then exit with code 1 and report the pillar rendering failure
    instead of applying any state.
    """
    # Top file wiring the 'basic' pillar SLS to this minion's id.
    pillar_top_file = textwrap.dedent(
        """
        base:
          '{}':
            - basic
        """
    ).format(salt_minion.id)
    # A bare word is valid YAML but renders to a string, not a dict — the
    # failure mode under test.
    basic_pillar_file = textwrap.dedent(
        """
        syntax_error
        """
    )
    # Both pillar files only exist for the duration of the test run.
    with pytest.helpers.temp_file(
        "top.sls", pillar_top_file, base_env_pillar_tree_root_dir
    ), pytest.helpers.temp_file(
        "basic.sls", basic_pillar_file, base_env_pillar_tree_root_dir
    ):
        expected_comment = [
            "Pillar failed to render with the following messages:",
            "SLS 'basic' does not render to a dictionary",
        ]
        shell_result = salt_cli.run(
            "state.apply", "sls-id-test", minion_tgt=salt_minion.id
        )
        # Exit code 1 signals the aborted run; the JSON payload carries the
        # pillar error messages verbatim.
        assert shell_result.exitcode == 1
        assert shell_result.json == expected_comment
def oio_make_subrequest(env, method=None, path=None, body=None, headers=None,
                        agent='Swift', swift_source=None,
                        make_env=oio_make_env):
    """
    Same as swift's make_subrequest, but let some more headers pass through.
    """
    # Pure delegation: the only difference from upstream is the default
    # environment builder (oio_make_env), which preserves extra headers.
    return orig_make_subrequest(
        env,
        method=method,
        path=path,
        body=body,
        headers=headers,
        agent=agent,
        swift_source=swift_source,
        make_env=make_env,
    )
def offline_data_fetcher(cfg: EasyDict, dataset: Dataset) -> Callable:
    """
    Overview:
        Wrap a Pytorch `Dataset` in a `DataLoader` and return a generator \
        middleware that assigns one batch per iteration to the context. \
        See https://pytorch.org/tutorials/beginner/basics/data_tutorial.html \
        and https://pytorch.org/docs/stable/data.html for background.
    Arguments:
        - cfg (:obj:`EasyDict`): Config which should contain the following keys: `cfg.policy.learn.batch_size`.
        - dataset (:obj:`Dataset`): The dataset of type `torch.utils.data.Dataset` which stores the data.
    """
    # collate_fn is executed in policy now
    loader = DataLoader(
        dataset,
        batch_size=cfg.policy.learn.batch_size,
        shuffle=True,
        collate_fn=lambda x: x,
    )

    def _fetch(ctx: "OfflineRLContext"):
        """
        Overview:
            Each iteration assigns the next fetched batch to ``ctx.train_data``. \
            Whenever the dataloader is exhausted, ``ctx.train_epoch`` is \
            incremented by 1 and iteration restarts from a fresh epoch.
        Input of ctx:
            - train_epoch (:obj:`int`): Number of `train_epoch`.
        Output of ctx:
            - train_data (:obj:`List[Tensor]`): The fetched data batch.
        """
        while True:
            for batch in loader:
                ctx.train_data = batch
                yield
            ctx.train_epoch += 1
            # TODO apply data update (e.g. priority) in offline setting when necessary

    return _fetch
def graph_methods_for_all_datasets(trunc, series, output_dir):
    """Graph the given statistics for every (model, dataset) combination.

    Args:
        trunc : truncation setting forwarded to ``graph_methods``.
        series (str) : Which statistics to graph (e.g., 'TestError',
            'TestCost', 'TrainError', 'TrainCost').
        output_dir (str) : Directory to save the graphs to.
    """
    # Per-dataset y-axis limits keep the plots comparable across runs.
    dataset_ylims = [
        ('MNIST', (0.4, 1.)),
        ('notMNIST', (1.5, 4.)),
        ('CIFAR10', (20., 30.)),
    ]
    for model_name in ('ST', 'M'):
        for dataset_name, ylim in dataset_ylims:
            graph_methods(dataset_name, model_name, trunc, ylim, series, output_dir)
def get_channel(id: Optional[str] = None,
                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetChannelResult:
    """
    Resource schema for AWS::MediaPackage::Channel


    :param str id: The ID of the Channel.
    """
    invoke_args = {'id': id}
    # Ensure we always have invoke options carrying the provider version.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke('aws-native:mediapackage:getChannel', invoke_args, opts=opts, typ=GetChannelResult).value
    return AwaitableGetChannelResult(
        arn=result.arn,
        description=result.description,
        egress_access_logs=result.egress_access_logs,
        hls_ingest=result.hls_ingest,
        ingress_access_logs=result.ingress_access_logs)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return torch.nn.functional.relu
if activation == "gelu":
return torch.nn.functional.gelu
if activation == "glu":
return torch.nn.functional.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.") | 5,332,039 |
def make_05dd():
    """End of movement lock (end of event); emits no command text."""
    return ""
def lambda_handler(request, context):
    """Main Lambda handler.

    Single entry point kept so that both v2 and v3 directives can be routed
    here during the migration and transition of existing users. Logs the
    incoming directive and outgoing response as pretty-printed JSON and
    dispatches Discover directives separately from all other v3 directives.
    """
    try:
        logger.info("Directive:")
        logger.info(json.dumps(request, indent=4, sort_keys=True))
        logger.info("Received v3 directive!")
        is_discovery = request["directive"]["header"]["name"] == "Discover"
        handler = handle_discovery_v3 if is_discovery else handle_non_discovery_v3
        response = handler(request)
        logger.info("Response:")
        logger.info(json.dumps(response, indent=4, sort_keys=True))
        #if version == "3":
            #logger.info("Validate v3 response")
            #validate_message(request, response)
        return response
    except ValueError as error:
        # Log and re-raise so Lambda reports the invocation as failed.
        logger.error(error)
        raise
def test_lfa_tested_nodes_make_more_contacts_if_risky(
    simple_model_risky_behaviour_2_infections: simple_model_risky_behaviour_2_infections
):
    """Two initial infections under LFA testing: one engages in risky
    behaviour while being tested, one does not.

    Household sizes are forced to 1 with no susceptibles so that any contact
    must happen outside the household. After simulating, the non-risky node
    must have made no outside-household contacts while the risky node must
    have made some.
    """
    model = simple_model_risky_behaviour_2_infections
    # Node 1 opts out of risky behaviour while under LFA testing.
    model.network.node(1).propensity_risky_behaviour_lfa_testing = False
    # Rule out any within-household infections (not strictly necessary, but
    # keeps the contact counts unambiguous).
    for household_id in (1, 2):
        model.network.household(household_id).size = 1
        model.network.household(household_id).susceptibles = 0
    # Both infected nodes are being lateral-flow tested.
    for node_id in (1, 2):
        model.network.node(node_id).being_lateral_flow_tested = True
    for _ in range(5):
        model.simulate_one_step()
    # Non-risky node makes no global contacts; the risky one does.
    assert model.network.node(1).outside_house_contacts_made == 0
    assert model.network.node(2).outside_house_contacts_made != 0
def batch_flatten(x):
    """Collapse all trailing dimensions of an n-D tensor into one,
    preserving the first (batch) dimension.
    """
    flattened = T.reshape(x, (x.shape[0], T.prod(x.shape[1:])))
    # Propagate Keras's static shape metadata when present.
    if hasattr(x, '_keras_shape'):
        trailing = x._keras_shape[1:]
        if None in trailing:
            # Any unknown trailing dim makes the flattened dim unknown too.
            flattened._keras_shape = (x._keras_shape[0], None)
        else:
            flattened._keras_shape = (x._keras_shape[0], np.prod(trailing))
    return flattened
def msg_constant_to_behaviour_type(value: int) -> typing.Any:
    """
    Convert one of the behaviour type constants in a
    :class:`py_trees_ros_interfaces.msg.Behaviour` message to
    a type.

    Args:
        value: see the message definition for details

    Returns:
        a behaviour class type (e.g. :class:`py_trees.composites.Sequence`)

    Raises:
        TypeError: if the message type is unrecognised
    """
    msg = py_trees_ros_interfaces.msg.Behaviour
    constant_to_type = {
        msg.SEQUENCE: py_trees.composites.Sequence,
        msg.SELECTOR: py_trees.composites.Selector,
        msg.PARALLEL: py_trees.composites.Parallel,
        msg.DECORATOR: py_trees.decorators.Decorator,
        msg.BEHAVIOUR: py_trees.behaviour.Behaviour,
    }
    if value in constant_to_type:
        return constant_to_type[value]
    raise TypeError("invalid type specified in message [{}]".format(value))
def calc_skewness(sig, axis=0):
    """Compute skewness along the specified axis.

    Parameters
    ----------
    sig: ndarray
        input from which skewness is computed.
    axis: int, optional
        Axis along which skewness is computed. Defaults to 0, which is
        also ``scipy.stats.skew``'s own default, so existing callers are
        unaffected. (The original docstring promised "the specified axes"
        but offered no way to specify one.)

    Returns
    -------
    s: float or ndarray
        skewness result.
    """
    return skew(sig, axis=axis)
def im2vid(img_path, name):
    """
    Creates a video from the frames matched by a glob pattern.

    :param img_path: glob pattern locating the images. End of path must be
        /*.{image extension}, e.g. /*.jpg
    :param name: name of the output video file (mp4)
    :raises ValueError: if the pattern matches no images
    :return: None
    """
    img_array = []
    # Sort the glob results: glob.glob returns files in arbitrary
    # OS-dependent order, which would otherwise scramble the frame sequence.
    for filename in tqdm(sorted(glob.glob(img_path)), desc='Loading images'):
        img = cv2.imread(filename)
        height, width, layers = img.shape
        size = (width, height)
        img_array.append(img)
    if not img_array:
        # Without this guard, `size` below would be an undefined name.
        raise ValueError("No images found at {!r}".format(img_path))
    out = cv2.VideoWriter(name, cv2.VideoWriter_fourcc(*'mp4v'), 24, size)  # Save video as mp4 format
    # out = cv2.VideoWriter(name, cv2.VideoWriter_fourcc(*'DIVX'), 24, size)
    for frame in tqdm(img_array, desc='Converting to video'):
        out.write(frame)
    out.release()
def upload_usp_family(folder,
                      group_label,
                      group_description,
                      stop_if_existing=True):
    """
    Upload a set of usp/recpot pseudopotential files into a given group.

    :param folder: a path containing all pseudopotential files to be added.
        Only files ending in .usp/.recpot/.uspcc (case-insensitive) are
        considered.

    :param group_label: the name of the group to create or reuse.

    :param group_description: a string to be set as the group description.
        Overwrites previous descriptions, if the group was existing.

    :param stop_if_existing: if True, check for the md5 of the files and,
        if the file already exists in the DB, raise a ValueError.
        If False, simply adds the existing UspData node to the group.

    :return: tuple ``(nfiles, n_uploaded)`` — number of candidate files
        found and number of new nodes actually stored.

    :raises UniquenessError: if the resulting group would contain more than
        one pseudopotential for the same element.
    """
    import os
    import aiida.common
    #from aiida.common import aiidalogger
    from aiida.common import UniquenessError, NotExistent
    from aiida.orm.querybuilder import QueryBuilder
    from .otfg import OTFGGroup
    # Collect candidate files by (case-insensitive) extension.
    files = [
        os.path.realpath(os.path.join(folder, i)) for i in os.listdir(folder)
        if os.path.isfile(os.path.join(folder, i)) and (
            i.lower().endswith('.usp') or i.lower().endswith('recpot')
            or i.lower().endswith('.uspcc'))
    ]
    nfiles = len(files)
    # Reuse the group when it already exists; otherwise create a new one.
    try:
        group = OTFGGroup.get(label=group_label)
        group_created = False
    except NotExistent:
        group = OTFGGroup(label=group_label, )
        group_created = True
    # Update the description even if the group already existed
    group.description = group_description
    pseudo_and_created = []  # A list of records (UspData, created)
    for f in files:
        md5sum = md5_file(f)
        # Look for an existing node with the same content hash.
        qb = QueryBuilder()
        qb.append(UspData, filters={'attributes.md5': {'==': md5sum}})
        existing_usp = qb.first()
        # Create a new (unstored) node only when the file is not in the DB yet
        if existing_usp is None:
            pseudo, created = UspData.get_or_create(f,
                                                    use_first=True,
                                                    store_usp=False)
            pseudo_and_created.append((pseudo, created))
        # The same file is there already
        else:
            if stop_if_existing:
                raise ValueError("A usp/recpot with identical MD5 to"
                                 " {} cannot be added with stop_if_existing"
                                 "".format(f))
            existing_usp = existing_usp[0]
            pseudo_and_created.append((existing_usp, False))
    # Check for unique per element
    elements = [(i[0].element, i[0].md5sum) for i in pseudo_and_created]
    # Check if we will duplicate after insertion
    if not group_created:
        for aiida_n in group.nodes:
            # Skip any non-pseudopotential members of the existing group.
            if not isinstance(aiida_n, UspData):
                continue
            elements.append((aiida_n.element, aiida_n.md5sum))
    # Discard duplicated pairs
    elements = set(elements)
    elements_names = [e[0] for e in elements]
    # Check the uniqueness of the complete group
    if not len(elements_names) == len(set(elements_names)):
        duplicates = set(
            [x for x in elements_names if elements_names.count(x) > 1])
        dup_string = ", ".join(duplicates)
        raise UniquenessError(
            "More than one usp/recpot found for the elements: " + dup_string +
            ".")
    if group_created:
        group.store()
    # Save the usp in the database if necessary and add them to the group
    # (NOTE: 'pseduo' below is a long-standing typo for 'pseudo'; renaming
    # it would be a code change, so it is left untouched here.)
    for pseudo, created in pseudo_and_created:
        if created:
            pseudo.store()
            #aiidalogger.debug("New node {} created for file {}".format(
            #    pseudo.uuid, pseudo.filename))
        else:
            #aiidalogger.debug("Reusing node {} for file {}".format(
            #    pseudo.uuid, pseudo.filename))
            pass
    nodes_new = [
        pseduo for pseduo, created in pseudo_and_created if created is True
    ]
    nodes_add = [pseduo for pseduo, created in pseudo_and_created]
    group.add_nodes(nodes_add)
    return nfiles, len(nodes_new)
def check_is_pandas_dataframe(log):
    """
    Checks if a log object is a dataframe

    Parameters
    -------------
    log
        Log object

    Returns
    -------------
    boolean
        Is dataframe?
    """
    import importlib.util

    # importlib.util.find_spec replaces pkgutil.find_loader, which is
    # deprecated since Python 3.12 and removed in 3.14; the behaviour is the
    # same: only attempt the check when pandas is importable.
    if importlib.util.find_spec("pandas") is not None:
        import pandas as pd
        # Exact type check (not isinstance) kept deliberately: subclasses of
        # DataFrame are not treated as plain dataframes by the callers.
        return type(log) is pd.DataFrame
    return False
def mk_creoson_post_sessionId(monkeypatch):
    """Mock _creoson_post to return a fixed session id string."""
    def stub_post(client, command, function, data=None, key_data=None):
        # Always hand back the canned session id, whatever the request.
        return "123456"

    monkeypatch.setattr(
        creopyson.connection.Client, '_creoson_post', stub_post)
def test_make_student_folder(clean_dir):
    """make_student_folder creates exactly one folder containing main.py."""
    student_dir = make_student_folder(clean_dir, "jtd111")
    # The returned path is the student's folder inside clean_dir, on disk.
    assert student_dir == os.path.join(clean_dir, "jtd111")
    assert os.path.isdir(student_dir)
    # clean_dir contains nothing but the new folder...
    assert os.listdir(clean_dir) == ["jtd111"]
    # ...and that folder holds only the starter file.
    assert os.listdir(student_dir) == ["main.py"]
def isUsernameFree(name):
    """Checks to see if the username name is free for use."""
    global username_array
    global username
    # NOTE(review): as in the original, when username_array is empty the
    # check against our own `username` never runs — confirm that is intended.
    return not any(
        name == taken or name == username
        for taken in username_array.values()
    )
def test_transform_bitmap_dark():
    """
    Darkening the bitmap at BMP_FILE_PATH must never increase any
    colour-table entry: every transformed value is <= its original.
    """
    source = Bitmap.read_file(BMP_FILE_PATH)
    darkened = Bitmap(source.transform_bitmap_dark())
    # Element-wise comparison of the two colour tables.
    for idx in range(len(darkened.color_table)):
        assert source.color_table[idx] >= darkened.color_table[idx]
def get_quadrangle_dimensions(vertices):
    """
    :param vertices:
        A 3D numpy array which contains the coordinates of a quadrangle, it should look like this:
        D---C
        |   |
        A---B
        [ [[Dx, Dy]], [[Cx, Cy]], [[Bx, By]], [[Ax, Ay]] ].
    :return:
        width, height (which are integers)

    Width is the longer of the two horizontal edges (D-C, B-A); height is
    the longer of the two vertical edges (C-B, A-D). The original paired
    the edges incorrectly (it counted C-B as a width and B-A as a height).
    """
    corners = np.zeros((4, 2), dtype=int)
    for i in range(4):
        corners[i] = vertices[i, 0]

    def _edge_length(a, b):
        """Integer Euclidean distance between corners `a` and `b`."""
        delta_x = corners[a, 0] - corners[b, 0]
        delta_y = corners[a, 1] - corners[b, 1]
        return int((delta_x ** 2 + delta_y ** 2) ** 0.5)

    width = max(_edge_length(0, 1), _edge_length(2, 3))
    height = max(_edge_length(1, 2), _edge_length(3, 0))
    return width, height
def optimal_r(points, range_min, range_max):
    """
    Computes the optimal Vietoris-Rips parameter r for the given list of points.
    The parameter must be as small as possible while the VR complex still has
    exactly one connected component.

    :param points: list of tuples
    :return: the optimal r parameter, the VR complex at that r, and a list of
        (r, n_components) tuples recording the search history
    """
    step = (range_max - range_min) / 100
    history = []
    complex_vr = defaultdict(list)
    r = range_min
    # Sweep r upward in 1% increments until one component remains.
    while r < range_max:
        complex_vr = vietoris(points, r)
        comps = findComponents([s[0] for s in complex_vr[0]], complex_vr[1])
        print("\rr=",r,"components=",len(comps), end="")
        history.append((r, len(comps)))
        if len(comps) == 1:
            # Smallest r producing a single component — done.
            print("\rDone, r=", r, "n components=", len(comps))
            return r, complex_vr, history
        r += step
    # The sweep never reached a single component; return the last complex.
    print("\rNo ideal r found, returning the last one")
    return r, complex_vr, history
def get_root_path():
    """Return the project root: two directory levels above this file.

    :return: root path as a string
    """
    here = Path(__file__)
    return str(here.parent.parent)
def usage(msg = None):
    """
    Print a usage message to stderr and exit the process with status 2.

    :param msg: optional message printed before the usage text
    """
    # Deliberately redirect stdout to stderr for the remainder of this
    # (about-to-exit) process so every print below lands on stderr.
    sys.stdout = sys.stderr
    if msg is not None:
        print(msg)
    usage_lines = (
        "Usage:",
        "dcae_admin_db.py [options] configurationChanged json-file",
        "dcae_admin_db.py [options] suspend",
        "dcae_admin_db.py [options] resume",
        "dcae_admin_db.py [options] test",
        "dcae_admin_db.py [options] newdb dbname admin-pswd user-pswd viewer-pswd",
        "",
        "options:",
        "-H / --dbhost= - host name, defaults to CFG['dcae_admin_db_hostname']",
        "-d / --dbdir= - database directory path, defaults to CFG['db_directory']",
        "-c / --dbconf= - database directory path, defaults to CFG['db_configuration']",
        "-D / --dbname= - database name, defaults to CFG['dcae_admin_db_databasename']",
        "-n / --nocreate - do not create the databases / users",
        "-I / --ignoredb - ignore current state of database",
        "-U / --user= - user to login as, defaults to CFG['dcae_admin_db_username']",
        "-P / --password= - password for user, defaults to CFG['dcae_admin_db_password']",
        "-B / --bindir= - postgresql bin directory, defaults to CFG['pg_bin_directory']",
        "-i / --ignorefile= - skip configuration if this file is present, defaults to CFG['skip_configuration_file']",
        "-R / --remove - remove old databases / users",
        "-J / --jsontop= - top of json tree, as in \"['pgaas']\"",
        "-l / --logcfg= - ECOMP DCAE Common Logging configuration file",
        "-e / --errors= - where to redirect error output, defaults to CFG['dcae_admin_db_errors_file'] then stderr",
        "-t / --trace= - where to redirect trace output, defaults to CFG['dcae_admin_db_trace_file'] then stderr",
        "-v - verbose",
    )
    for line in usage_lines:
        print(line)
    sys.exit(2)
def _handle_requirements(hass: core.HomeAssistant, component,
                         name: str) -> bool:
    """Install the requirements for a component.

    Returns True when installation is skipped (skip_pip set, or the
    component declares no REQUIREMENTS) or when every requirement installs;
    False as soon as any single install fails.
    """
    if hass.config.skip_pip or not hasattr(component, 'REQUIREMENTS'):
        return True
    for requirement in component.REQUIREMENTS:
        if pkg_util.install_package(requirement,
                                    target=hass.config.path('deps')):
            continue
        _LOGGER.error('Not initializing %s because could not install '
                      'dependency %s', name, requirement)
        return False
    return True
def check_image(filename):
    """Return True when `filename` can be identified as an image by PIL.

    Any OSError (file missing, unreadable, or not a recognisable image
    format — PIL's UnidentifiedImageError is an OSError subclass) yields
    False.
    """
    try:
        # Context manager fixes the original's leaked file handle:
        # Image.open keeps the file open and verify() does not close it.
        with Image.open(filename) as im:
            im.verify()  # is it an image?
        return True
    except OSError:
        return False
def encode_save(sig=np.random.random([1, nfeat, nsensors]), name='simpleModel_ini', dir_path="../src/vne/models"):
    """
    This function will create a model, test it, then save a persistent version (a file)

    Parameters
    __________
    sig:
        a numpy array with shape (nsamples, nfeatures, nsensors). in general a single neural signal may contain multiple channels.
        the multi-channel nature of the neural activations is a feature of the vne.
        typically shaped with size (1,1,S) where S is number os sensors
        the vne will map all the different channels to a single scalar encoded signal.
        NOTE(review): this default is evaluated once at import time, so every
        call without `sig` reuses the same random array — confirm intended.
    name:
        string with the filename to save the model under
    dir:
        dir_path, the local directory to save the model
    Returns
    --------
    model:
        A copy of the encoder model generated by the function
    """
    # Build the model in eval mode and give it its initial weights.
    model = simpleModel().eval()
    model.apply(init_weights_simple)
    # Run a smoke-test forward pass on the CPU copy of the signal.
    sig = torch.tensor(sig.astype(np.float32)).to('cpu')
    enc = model(sig)
    print("signal={}".format(sig))
    print("encoded={}".format(enc))
    # save the model
    # NOTE(review): the weights are re-initialized here AFTER the forward
    # pass above, so the saved model differs from the one just printed; also
    # `vne.init_weights_simple` is used here vs the bare `init_weights_simple`
    # earlier — confirm both are intentional.
    model.apply(vne.init_weights_simple)
    save_model(encoder=model, name=name, dir_path=dir_path)
    return model
def _imports_to_canonical_import(
    split_imports: Set[Tuple[str, ...]],
    parent_prefix=(),
) -> Tuple[str, ...]:
    """Extract the canonical import name from a list of imports

    Two rules apply:
    1. With at least 4 imports structured like 'a', 'a.b', 'a.b.c', 'a.b.d'
       the set is treated as a namespace package with canonical import `a.b`.
    2. With fewer imports, the same treatment applies when the prefix is
       listed in KNOWN_NAMESPACE_PACKAGES.
    3. Otherwise the common prefix of all imports is returned.
    """
    prefix: Tuple[str, ...] = commonprefix(list(split_imports))
    length_counts = Counter(len(imp) for imp in split_imports)
    # Namespace-package heuristic: a single-segment prefix that appears
    # exactly once as a bare top-level import, plus rule 1 or rule 2.
    looks_namespaced = (
        len(prefix) == 1
        and length_counts.get(1) == 1
        and (
            len(split_imports) > 3
            or parent_prefix + prefix in KNOWN_NAMESPACE_PACKAGES
        )
    )
    if looks_namespaced:
        # Recurse on the imports with the leading segment stripped.
        tail = _imports_to_canonical_import(
            split_imports={imp[1:] for imp in split_imports if len(imp) > 1},
            parent_prefix=parent_prefix + prefix,
        )
        if prefix and tail:
            return prefix + tail
    return prefix
def _bin_file(script):
"""Return the absolute path to scipt in the bin directory"""
return os.path.abspath(os.path.join(__file__, "../../../bin", script)) | 5,332,061 |
def r12writer(stream: Union[TextIO, str], fixed_tables: bool=False) -> 'R12FastStreamWriter':
    """
    Context manager for writing DXF entities to a stream/file. `stream` can be any file like object
    with a :func:`write` method or just a string for writing DXF entities to the file system.

    If `fixed_tables` is ``True``, a standard TABLES section is written in front of the ENTITIES
    section and some predefined text styles and line types can be used.

    NOTE(review): this generator is evidently intended for use as a context
    manager — presumably a ``@contextmanager`` decorator is applied where it
    is defined/exported; confirm at the definition site.
    """
    if hasattr(stream, 'write'):
        # Caller supplied an already-open stream: write to it and close only
        # the DXF writer, leaving the caller's stream open.
        writer = R12FastStreamWriter(stream, fixed_tables)
        try:
            yield writer
        finally:
            # Guarantee the DXF trailer is written even on error.
            writer.close()
    else:
        # Caller supplied a filename: open (and later close) the file here.
        with open(stream, 'wt') as stream:
            writer = R12FastStreamWriter(stream, fixed_tables)
            try:
                yield writer
            finally:
                writer.close()
def test_get_time_step(initialized_bmi):
    """Test that there is a time step."""
    # The BMI contract requires get_time_step() to return a float.
    assert isinstance(initialized_bmi.get_time_step(), float)
def score_hmm_logprob(bst, hmm, normalize=False):
    """Score events in a BinnedSpikeTrainArray by computing the log
    probability under the model.

    Parameters
    ----------
    bst : BinnedSpikeTrainArray
    hmm : PoissonHMM
    normalize : bool, optional. Default is False.
        When True, each log probability is divided by its sequence length.

    Returns
    -------
    logprob : array of size (n_events,)
        Log probabilities, one for each event in bst.
    """
    scores = np.atleast_1d(hmm.score(bst))
    if normalize:
        # Per-event normalization by sequence length.
        scores = scores / bst.lengths
    return scores
def rotate_2d_list(squares_list):
    """Rotate a 2D list 90 degrees clockwise.

    Reverses the row order, then transposes with ``zip``. The rotated rows
    come back as tuples (zip's native output), matching the original
    behaviour.

    http://stackoverflow.com/questions/8421337/rotating-a-two-dimensional-array-in-python
    """
    # list(zip(...)) replaces the redundant `[x for x in zip(...)]`.
    return list(zip(*squares_list[::-1]))
def next_ticket(ticket):
    """Return the next ticket for the given ticket.

    Args:
        ticket (Ticket): an arbitrary ticket

    Returns:
        the next ticket in the chain of tickets, having the next
        pseudorandom ticket number, the same ticket id, and a
        generation number that is one larger.

    Example:

        >>> next_ticket(Ticket(ticket_number='0.26299714122838008416507544297546663599715395525154425586041245287750224561854', id='AB-130', generation=1))
        Ticket(ticket_number='0.8232357229934205790595761924514048157652891124687533667363938813600770093316', id='AB-130', generation=2)
    """
    new_number = next_fraction(ticket.ticket_number)
    return Ticket(new_number, ticket.id, ticket.generation + 1)
def icecreamParlor4(m, arr):
    """Find two distinct items whose prices sum to m.

    Scans every ordered pair of (price, index) records and returns the
    1-based positions of the first pair whose PRICES sum to m, or None if
    no such pair exists. Quadratic, so intended for small inputs.

    Bug fixed: the original compared ``i[0] + j[1]`` — a price plus an
    *index* — instead of summing the two prices.
    """
    # Pair each price with its index so the positions survive the scan.
    priced = list(enumerate(arr))
    for idx_a, price_a in priced:
        for idx_b, price_b in priced:
            # Both prices must sum to m and the two picks must be
            # different items (distinct indices).
            if price_a + price_b == m and idx_a != idx_b:
                return [idx_a + 1, idx_b + 1]
def test_player_stats_bad(start_date='2020-02-28', end_date='2019-10-02'):
    """
    player_stats must reject an inverted date range and a wrongly-typed
    start_date with the documented error messages.

    Keyword Arguments:
        start_date {str} -- start_date to query (default: {'2020-02-28'})
        end_date {str} -- end_date to query (default: {'2019-10-02'})
    """
    # end_date precedes start_date: must raise with the range message.
    with pytest.raises(Exception) as err:
        assert pypuck.player_stats(start_date, end_date)
    assert str(err.value) == ("Invalid date range - "
                              "end_date earlier than start_date")
    # Wrong type for start_date: must raise with the type message.
    bad_start = 2019
    with pytest.raises(Exception) as err:
        assert pypuck.player_stats(bad_start, end_date)
    assert str(err.value) == ("Expecting <class 'str'> got "
                              "<class 'int'> for start_date")
def insert_train_val_data(article_list, table_name):
    """
    Inserts Train and Val data in the table name specified

    Args:
        article_list (list[dict]): Data to be stored in the database
        table_name (str): name of the database table

    Returns:
        None
    """
    # The statement text is loop-invariant, so build it once.
    insert_sql = 'INSERT INTO ' + table_name + ' (caption, base_url, article_url, img_local_path ) VALUES (%s, %s, %s, %s)'
    for article in article_list:
        local_path = article['img_local_path']
        rows = []
        for entry in article['articles']:
            # Bare domain: netloc of the article URL with any "www." removed.
            domain = "{0.netloc}".format(urlsplit(entry['article_url'])).replace("www.", "")
            rows.append((entry['caption'], domain, entry['article_url'], local_path))
        # One batched insert + commit per article, as before.
        my_cursor.executemany(insert_sql, rows)
        mydb.commit()
        print(my_cursor.rowcount, 'inserted')
def get_data(filepath, transform=None, rgb=False):
    """
    Read in data from the given folder.

    Parameters
    ----------
    filepath: str
        Path for the file e.g.: F'string/containing/filepath'
    transform: callable
        A function which tranforms the data to the required format
    rgb: bool
        Image type for different channel types.
        NOTE(review): both branches below issue an identical ImageFolder
        call, so this flag currently has no effect on the loaded data —
        confirm whether a channel-conversion transform was intended.

    Returns
    -------
    torchvision.datasets.folder.ImageFolder
        Required data after read in
    """
    # Default transform: just convert images to tensors.
    if transform is None:
        transform = transforms.Compose([transforms.ToTensor()])
    if rgb:
        # will read the data into 3 channels,
        # majority of images are 1 channel only however
        xray_data = datasets.ImageFolder(root=filepath,
                                         transform=transform)
    else:
        # read in all images as one channel
        xray_data = datasets.ImageFolder(root=filepath, transform=transform)
    return xray_data
def setup(hass, config):
    """Set up the Ecovacs component.

    Logs into the Ecovacs cloud API, discovers the vacuums on the account,
    opens one monitored XMPP connection per device, registers a shutdown
    hook, and loads the vacuum platform when any device was found.
    """
    _LOGGER.debug("Creating new Ecovacs component")
    hass.data[ECOVACS_DEVICES] = []
    hass.data[ECOVACS_CONFIG] = []
    from ozmo import EcoVacsAPI, VacBot
    # The Ecovacs API expects the password as an md5 hash.
    ecovacs_api = EcoVacsAPI(
        ECOVACS_API_DEVICEID,
        config[DOMAIN].get(CONF_USERNAME),
        EcoVacsAPI.md5(config[DOMAIN].get(CONF_PASSWORD)),
        config[DOMAIN].get(CONF_COUNTRY),
        config[DOMAIN].get(CONF_CONTINENT),
    )
    devices = ecovacs_api.devices()
    _LOGGER.debug("Ecobot devices: %s", devices)
    for device in devices:
        _LOGGER.info(
            "Discovered Ecovacs device on account: %s with nickname %s",
            device["did"],
            device["nick"],
        )
        # monitor=True keeps the bot's state updated over XMPP.
        vacbot = VacBot(
            ecovacs_api.uid,
            ecovacs_api.REALM,
            ecovacs_api.resource,
            ecovacs_api.user_access_token,
            device,
            config[DOMAIN].get(CONF_CONTINENT).lower(),
            monitor=True,
        )
        hass.data[ECOVACS_DEVICES].append(vacbot)

    def stop(event: object) -> None:
        """Shut down open connections to Ecovacs XMPP server."""
        for device in hass.data[ECOVACS_DEVICES]:
            _LOGGER.info(
                "Shutting down connection to Ecovacs device %s", device.vacuum["did"]
            )
            device.disconnect()

    # Listen for HA stop to disconnect.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop)
    if hass.data[ECOVACS_DEVICES]:
        _LOGGER.debug("Starting vacuum components")
        dconfig = config[DOMAIN]
        # An empty supported-features list means "support everything".
        if len(dconfig.get(CONF_SUPPORTED_FEATURES)) == 0:
            dconfig[CONF_SUPPORTED_FEATURES] = STRING_TO_SERVICE.keys()
        # Explicitly unsupported features are filtered back out.
        if CONF_UNSUPPORTED_FEATURES in dconfig:
            filtered_features = []
            for supported_feature in dconfig.get(CONF_SUPPORTED_FEATURES):
                if supported_feature not in dconfig.get(CONF_UNSUPPORTED_FEATURES):
                    filtered_features.append(supported_feature)
            dconfig[CONF_SUPPORTED_FEATURES] = filtered_features
        _LOGGER.debug("SUPPORTED FEATURES")
        _LOGGER.debug(dconfig.get(CONF_SUPPORTED_FEATURES))
        deebot_config = {
            CONF_SUPPORTED_FEATURES: strings_to_services(dconfig.get(CONF_SUPPORTED_FEATURES), STRING_TO_SERVICE)
        }
        hass.data[ECOVACS_CONFIG].append(deebot_config)
        _LOGGER.debug(hass.data[ECOVACS_CONFIG])
        discovery.load_platform(hass, "vacuum", DOMAIN, {}, config)
    return True
def read_input_files(input_file: str) -> frozenset[IntTuple]:
    """
    Extracts an initial pocket dimension: the set of active cell
    2D coordinates (x, y), one per '#' character in the grid file
    (x = column, y = row).
    """
    with open(input_file) as input_fobj:
        pocket = frozenset(
            (x, y)
            for y, line in enumerate(input_fobj)
            for x, char in enumerate(line.strip())
            if char == '#'
        )
    return pocket
def _set_span_yields(span_yields: Optional[SpanYields]):
    """Sets the current parent span list."""
    task = _current_task()
    if task is not None:
        # Inside an event loop: stash the spans on the running task.
        setattr(task, 'trace_span_yields', span_yields)
    else:
        # No current task → not running in an async context; use the
        # per-thread storage instead.
        _non_async_span_yields.set(span_yields)
def resnet101(pretrained=False, root='./pretrain_models', **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        root (str): NOTE(review): currently unused — pretrained weights are
            fetched via ``model_zoo.load_url`` into torch's default cache,
            not into ``root``; kept for API compatibility.
        **kwargs: forwarded to the ``ResNet`` constructor.
    """
    # [3, 4, 23, 3] are the Bottleneck block counts of ResNet-101's stages.
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
    return model
def insert_left_side(left_side, board_string):
    """
    Replace the left side of the Sudoku board 'board_string' with 'left_side'.
    """
    # Corner sanity checks: the inputs must already agree in the upper-left
    # and lower-left corners of the board.
    assert left_side[0] == board_string[0]
    assert left_side[8] == low_left_digit(board_string)
    board = list(board_string)
    # Column 0 of row r lives at flat index r*9.
    for row in range(9):
        board[row * 9] = left_side[row]
    return "".join(board)
def collect_story_predictions(story_file, policy_model_path, nlu_model_path,
                              max_stories=None, shuffle_stories=True):
    """Test the stories from a file, running them through the stored model.

    Returns a pair ``(actual, preds)`` of equal-length action-name lists,
    aligned per story turn; mismatched turn lengths are padded with the
    string "None" so the lists can be compared element-wise.
    """
    def actions_since_last_utterance(tracker):
        # Walk the tracker events backwards until the last user message and
        # collect the actions executed since then (restored to forward order).
        actions = []
        for e in reversed(tracker.events):
            if isinstance(e, UserUttered):
                break
            elif isinstance(e, ActionExecuted):
                actions.append(e.action_name)
        actions.reverse()
        return actions

    # Fall back to the regex interpreter when no NLU model is given.
    if nlu_model_path is not None:
        interpreter = RasaNLUInterpreter(model_directory=nlu_model_path)
    else:
        interpreter = RegexInterpreter()
    agent = Agent.load(policy_model_path, interpreter=interpreter)
    stories = _get_stories(story_file, agent.domain,
                           max_stories=max_stories,
                           shuffle_stories=shuffle_stories)
    preds = []
    actual = []
    logger.info("Evaluating {} stories\nProgress:".format(len(stories)))
    for s in tqdm(stories):
        # Fresh sender id per story so trackers don't interfere.
        sender = "default-" + uuid.uuid4().hex
        dialogue = s.as_dialogue(sender, agent.domain)
        actions_between_utterances = []
        last_prediction = []
        for i, event in enumerate(dialogue.events[1:]):
            if isinstance(event, UserUttered):
                # Close out the previous turn: align predicted vs actual
                # actions before feeding the new user message to the agent.
                p, a = _min_list_distance(last_prediction,
                                          actions_between_utterances)
                preds.extend(p)
                actual.extend(a)
                actions_between_utterances = []
                agent.handle_message(event.text, sender=sender)
                tracker = agent.tracker_store.retrieve(sender)
                last_prediction = actions_since_last_utterance(tracker)
            elif isinstance(event, ActionExecuted):
                actions_between_utterances.append(event.action_name)
        if last_prediction:
            # Flush the final turn, padding the shorter list with "None".
            preds.extend(last_prediction)
            preds_padding = len(actions_between_utterances) - \
                len(last_prediction)
            preds.extend(["None"] * preds_padding)
            actual.extend(actions_between_utterances)
            actual_padding = len(last_prediction) - \
                len(actions_between_utterances)
            actual.extend(["None"] * actual_padding)
    return actual, preds
def get_plaintext_help_text(testcase, config):
    """Get the help text for this testcase for display in issue descriptions."""
    # A HELP_FORMAT message, when available, takes precedence over the URL.
    formatted = get_formatted_reproduction_help(testcase)
    if formatted:
        return formatted
    # Otherwise fall back to a generic pointer at HELP_URL, if supplied.
    url = get_reproduction_help_url(testcase, config)
    if not url:
        return ''
    return 'See %s for instructions to reproduce this bug locally.' % url
def test_encode_erc721_asset_data_type_error_on_token_id():
    """A non-int token_id argument must raise a TypeError."""
    non_int_token_id = "asdf"
    with pytest.raises(TypeError):
        encode_erc721_asset_data("asdf", non_int_token_id)
def xml_out(db):
    """XML output of basic stats.

    Prints an ``<idp-audit>`` document to stdout with one ``<rp>``
    element per relying party recorded in ``db['rp']``.
    """
    from xml.sax.saxutils import escape

    stats = basic_stats(db)
    print('<?xml version="1.0"?>')
    print('<idp-audit rps="%d" logins="%d" users="%d">'
          % (stats['rps'], stats['logins'], stats['users']))
    for rp, i in list(db['rp'].items()):
        # Escape the RP name: entity IDs are typically URLs and may
        # contain '&' or '<', which would produce malformed XML if
        # interpolated verbatim.
        print(' <rp count="%d">%s</rp>' % (i, escape(rp)))
    print("</idp-audit>")
def sniff(store=False, prn=None, lfilter=None,
          count=0,
          stop_event=None, refresh=.1, *args, **kwargs):
    """Sniff packets
sniff([count=0,] [prn=None,] [store=1,] [offline=None,] [lfilter=None,] + L2ListenSocket args)
  store: whether to store sniffed packets or discard them
    prn: function to apply to each packet. If something is returned,
         it is displayed. Ex:
         ex: prn = lambda x: x.summary()
lfilter: python function applied to each packet to determine
         if further action may be done
         ex: lfilter = lambda x: x.haslayer(Padding)
stop_event: Event that stops the function when set
refresh: check stop_event.set() every refresh seconds
    """
    # Open a layer-2 listening socket; extra args are passed through.
    s = conf.L2listen(type=ETH_P_ALL, *args, **kwargs)
    n = 0
    lst = []
    try:
        while True:
            # Honour the external stop signal between reads.
            if stop_event and stop_event.is_set():
                break
            # Block for at most `refresh` seconds so the stop_event
            # check above runs even when no traffic arrives.
            sel = select([s], [], [], refresh)
            if s in sel[0]:
                p = s.recv(MTU)
                if p is None:
                    # Socket closed / end of capture.
                    break
                if lfilter and not lfilter(p):
                    continue
                if store:
                    lst.append(p)
                if prn:
                    r = prn(p)
                    if r is not None:
                        print(r)
                n += 1
                if count and n == count:
                    break
    except KeyboardInterrupt:
        pass
    finally:
        # Always release the capture socket, even on interrupt.
        s.close()
    return plist.PacketList(lst, "Sniffed")
def generate_margined_binary_data ( num_samples, count, limits, rng ):
    """
    Draw random samples from a linearly-separable binary model
    with some non-negligible margin between classes.  The model is a
    random hyperplane through the centre of the feature domain; labels
    are 1 on the positive side, 0 on the negative side, and samples
    falling within 5% of the feature range of the plane are rejected
    so the two classes are separated by a clear margin.
    # Arguments
        num_samples: number of samples to generate
            (ie, the number of rows in the returned X
            and the length of the returned y)
        count: the number of feature dimensions
        limits: a tuple (low, high) specifying the value
            range of all the features x_i
        rng: an instance of numpy.random.Generator
            from which to draw random numbers
    # Returns
        X: a matrix of sample vectors, where
            the samples are the rows and the
            features are the columns
            ie, its size should be:
              num_samples x count
        y: a vector of num_samples binary labels
    """
    import numpy as np

    low, high = limits
    # Random unit normal defines the separating hyperplane; it passes
    # through the centre of the feature domain.
    normal = rng.standard_normal(count)
    normal /= np.linalg.norm(normal)
    offset = -0.5 * (low + high) * float(normal.sum())
    # Reject samples closer to the plane than 5% of the feature range.
    margin = 0.05 * (high - low)
    X = np.empty((num_samples, count))
    y = np.empty(num_samples, dtype=int)
    filled = 0
    while filled < num_samples:
        # Rejection sampling: draw a batch of candidates uniformly over
        # the domain and keep those outside the margin band.
        candidates = rng.uniform(low, high, size=(num_samples, count))
        scores = candidates @ normal + offset
        kept = candidates[np.abs(scores) >= margin]
        take = min(len(kept), num_samples - filled)
        X[filled:filled + take] = kept[:take]
        y[filled:filled + take] = (kept[:take] @ normal + offset > 0).astype(int)
        filled += take
    return X, y
def import_timeseries(context, file, firstyear, lastyear):
    """Import time series data."""
    # Delegate the actual file parsing to the scenario object held in
    # the shared context.
    scenario = context["scen"]
    scenario.read_file(Path(file), firstyear, lastyear)
def _brighten_images(images: np.ndarray, brightness: int = BRIGHTNESS) -> np.ndarray:
    """
    Adjust the brightness of all input images.

    Works on a deep copy so the originals are left untouched.

    :params images: The original images of shape [H, W, D].
    :params brightness: The amount the brightness should be raised or lowered
    :return: Images with adjusted brightness
    """
    adjusted = deepcopy(images)
    for single_image in adjusted:
        # _brighten_image mutates the copy in place.
        _brighten_image(single_image, brightness)
    return adjusted
def findBaseDir(basename, max_depth=5, verbose=False):
    """
    Get relative path to a BASEDIR.

    Walks up from this file's directory until a directory named
    ``basename`` is found, or exits after ``max_depth`` levels.

    :param basename: Name of the basedir to path to
    :type basename: str
    :param max_depth: Maximum number of parent levels to climb
    :type max_depth: int
    :param verbose: If True, print progress for each level examined
    :type verbose: bool
    :return: Relative path to base directory.
    :rtype: str
    """
    basedir = os.path.abspath(os.path.dirname(__file__))
    # Progress output is gated behind `verbose`: previously the starting
    # location was printed unconditionally, polluting stdout for callers.
    if verbose:
        print("STARTING AT: %s\n Looking for: %s" % (basedir, basename))
    for level in range(max_depth):
        if verbose:
            print('LEVEL %d: %s\n Current basename: %s' %
                  (level, basedir, os.path.basename(basedir)))
        if os.path.basename(basedir) == basename:
            break
        basedir = os.path.abspath(os.path.dirname(basedir))
    else:
        # Climbed max_depth levels without finding a match.
        sys.exit('Could not find correct basedir\n Currently at %s' % basedir)
    return os.path.relpath(basedir)
def CreateMovie(job_name, input_parameter, view_plane, plot_type):
    """Encode the rendered PNG frames into an mp4 movie with ffmpeg.

    Frames are expected to be named
    ``<input_parameter>_<view_plane>_<plot_type>_image_<n>.png`` in the
    current directory.

    :return: the generated movie file name
    """
    # NOTE(review): a "which ffmpeg" availability check existed here but
    # was disabled; ffmpeg is assumed to be on PATH.
    prefix = input_parameter + '_' + view_plane + '_' + plot_type
    movie_name = job_name + '_' + prefix + "_movie.mp4"
    cmd = "ffmpeg -qscale 1 -r 3 -b 3000k -i " + prefix + "_image_%01d.png " + movie_name
    # Python 3 print (the original used Python 2 `print cmd` syntax,
    # a SyntaxError under Python 3).
    print(cmd)
    os.system(cmd)
    return movie_name
def _increment(i):
    """Yield consecutive integers forever, beginning with (and including) i."""
    current = i
    while True:
        yield current
        current += 1
def describe_recurrence(recur, recurrence_dict, connective="and"):
    """Create a textual description of the recur set.

    Arguments:
        recur (Set): recurrence pattern as set of day indices eg Set(["1","3"])
        recurrence_dict (Dict): map of strings to recurrence patterns
        connective (Str): word to connect list of days, default "and"
    Returns:
        Str: List of days as a human understandable string
    """
    days = " ".join(sorted(recur))
    # A named pattern matching the whole set wins outright.
    for name, pattern in recurrence_dict.items():
        if pattern == days:
            return name
    # Otherwise name each individual day and join them into a phrase,
    # e.g. "Monday and Wednesday".
    day_names = []
    for day in days.split(" "):
        for name, pattern in recurrence_dict.items():
            if pattern == day:
                day_names.append(name)
                break
    return join_list(day_names, connective)
def route_image_zoom_in():
    """Zoom the image viewer in one step and report the result as JSON."""
    return jsonify({'zoom-in': image_viewer.zoom_in()})
def evaluate_flow_file(gt_file, pred_file):
    """
    evaluate the estimated optical flow end point error according to ground truth provided
    :param gt_file: ground truth file path
    :param pred_file: estimated optical flow file path
    :return: end point error, float32
    """
    gt = read_flow(gt_file)
    pred = read_flow(pred_file)
    # Compare the u (x-displacement) and v (y-displacement) channels.
    return flow_error(gt[:, :, 0], gt[:, :, 1],
                      pred[:, :, 0], pred[:, :, 1])
def get_heavy_load_rses(threshold, session=None):
    """
    Retrieve heavy load rses.

    :param threshold: Threshold as an int.
    :param session: Database session to use.
    :returns: List of {'rse_id', 'load'} dicts with load >= threshold,
              or None when no sources are in use.
    """
    try:
        rows = session.query(models.Source.rse_id,
                             func.count(models.Source.rse_id).label('load'))\
                      .filter(models.Source.is_using == true())\
                      .group_by(models.Source.rse_id)\
                      .all()
        if not rows:
            return
        return [{'rse_id': rse_id, 'load': load}
                for rse_id, load in rows
                if load >= threshold]
    except IntegrityError as error:
        raise RucioException(error.args)
def facetcolumns(table, key, missing=None):
    """
    Like :func:`petl.util.materialise.columns` but stratified by values of the
    given key field. E.g.::
        >>> import petl as etl
        >>> table = [['foo', 'bar', 'baz'],
        ...          ['a', 1, True],
        ...          ['b', 2, True],
        ...          ['b', 3]]
        >>> fc = etl.facetcolumns(table, 'foo')
        >>> fc['a']
        {'foo': ['a'], 'bar': [1], 'baz': [True]}
        >>> fc['b']
        {'foo': ['b', 'b'], 'bar': [2, 3], 'baz': [True, None]}
    """
    facets = dict()
    rows = iter(table)
    hdr = next(rows)
    flds = [text_type(f) for f in hdr]
    indices = asindices(hdr, key)
    assert len(indices) > 0, 'no key field selected'
    getkey = operator.itemgetter(*indices)
    for row in rows:
        kv = getkey(row)
        if kv not in facets:
            # First row for this key value: start a fresh column store.
            facets[kv] = {f: list() for f in flds}
        cols = facets[kv]
        # Short rows are padded with `missing` via the fillvalue.
        for f, v in izip_longest(flds, row, fillvalue=missing):
            if f in cols:
                cols[f].append(v)
    return facets
def unzip_file(filename, target_directory):
    """
    Given a filename, unzip it into the given target_directory.
    """
    archive = zipfile.ZipFile(filename)
    try:
        archive.extractall(target_directory)
    finally:
        # Always release the archive handle, even if extraction fails.
        archive.close()
def divide_set(vectors, labels, column, value):
    """
    Divide the sets into two different sets along a specific dimension and value.

    :param vectors: sequence of feature vectors
    :param labels: sequence of labels, parallel to `vectors`
    :param column: dimension index passed to `split_function`
    :param value: split value passed to `split_function`
    :return: tuple ``(vectors_set_1, label_set_1, vectors_set_2,
             label_set_2)`` where set 1 satisfies the split predicate
             and set 2 does not
    """
    vectors_set_1, label_set_1 = [], []
    vectors_set_2, label_set_2 = [], []
    # Single pass: the original evaluated split_function twice per
    # element across four comprehensions and two throwaway pair lists.
    for vector, label in zip(vectors, labels):
        if split_function(vector, column, value):
            vectors_set_1.append(vector)
            label_set_1.append(label)
        else:
            vectors_set_2.append(vector)
            label_set_2.append(label)
    return vectors_set_1, label_set_1, vectors_set_2, label_set_2
def execute_freezerc(dict, must_fail=False, merge_stderr=False):
    """
    Run the freezer client with arguments built from the given mapping.

    :param dict: mapping of option names to values
    :type dict: dict[str, str]
    :param must_fail: whether a non-zero exit status is expected
    :param merge_stderr: whether stderr should be merged into stdout
    :return: the command output
    """
    args = [FREEZERC] + dict_to_args(dict)
    return execute(args, must_fail=must_fail, merge_stderr=merge_stderr)
def train_model(model, device, data_train, data_dev, x_dev, y_dev, optimizer, criterion,
                num_epochs, model_save_path, window_len, stride_len, valid_period):
    """Training function.

    Trains `model` on `data_train` with L2 regularization, logs losses to
    TensorBoard and text files, and every `valid_period` epochs evaluates
    on `data_dev` and exports a TorchScript trace of the model.

    NOTE(review): `x_dev`, `y_dev` and `stride_len` are accepted but never
    used in this body — confirm whether they can be dropped upstream.
    NOTE(review): `running_loss/i` and `running_loss3/i_v` divide by the
    last loop index; with a single-batch loader this is a ZeroDivisionError.
    """
    print('Start training the model')
    writer = SummaryWriter(comment='__' + 'Overtesting')
    # weight decay: halve the learning rate every 100 epochs
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
    for epoch in range(num_epochs):
        start = time.time()
        running_loss = 0.0   # loss including the L2 penalty
        running_loss2 = 0.0  # raw criterion loss (accumulated but unreported)
        count = 0
        for i, data in enumerate(data_train):
            count += 1
            # get the inputs; data is a list of [inputs, labels]
            inputs = data['features'].float().to(device)
            labels = data['labels'].type(torch.cuda.FloatTensor).squeeze(1).to(device)
            #zero the parameter gradients
            optimizer.zero_grad()
            # forward propagation
            output = model(inputs)
            loss = criterion(output, labels)
            running_loss2 += loss.item()
            # add L2 regularization over all model parameters
            add_L2 = True
            if add_L2:
                l2_reg = 0.0
                for W in model.parameters():
                    l2_reg = l2_reg + W.norm(2)
                # penalty scaled by batch size and a fixed 1e-3 weight
                loss = loss + 1.0/(2*output.size(0))*l2_reg * 0.001
            running_loss += loss.item()
            loss.backward()
            optimizer.step()
        print('Epoch: {}, Iter: {}, Loss: {}, Elapsed: {}'.format(epoch+1, i+1,
              running_loss/i, time.time()-start))
        # NOTE(review): add_scalar is called without a global step, so
        # TensorBoard points all land on step 0 — confirm intended.
        writer.add_scalar('Train_L2_loss: ', running_loss/i)
        with open(model_save_path + '_train_L2_loss.txt', "a") as myfile:
            myfile.write(str(running_loss / count))
            myfile.write("\n")
        scheduler.step()
        if epoch % valid_period == 0:
            # Periodic validation + TorchScript export of the model.
            st_time = time.time()
            model.eval()
            #torch.save(model.state_dict(), model_save_path + '_ep'+ str(epoch+1) + '.pth')
            sample_input = torch.from_numpy(np.ones((1,window_len,6))).float().to(device)
            traced_script_module = torch.jit.trace(model, sample_input)
            traced_script_module.save(model_save_path + '.pt')
            running_loss3 = 0.0
            count = 0
            for i_v, data_v in enumerate(data_dev):
                count += 1
                inputs = data_v['features'].float().to(device)
                labels = data_v['labels'].type(torch.cuda.FloatTensor).squeeze(1).to(device)
                output = model(inputs)
                loss = criterion(output, labels)
                running_loss3 += loss.item()
            with open(model_save_path + '_valid_loss.txt', "a") as myfile:
                myfile.write(str(running_loss3 / count))
                myfile.write("\n")
            writer.add_scalar('Val_loss', running_loss3 / i_v)
            print('Eval >>> : ', running_loss3 / i_v, 'Time>>> ', time.time() - st_time)
            # restore training mode after validation
            model.train()
def corField2D_vector(field):
    """
    2D correlation field of a vector field, computed per component with
    use of Fast Fourier Transform.

    Parameters
    ----------
    field : (n, n, 2) shaped array like
        Vector field to extract correlations from.
        Points are supposed to be uniformly distributed.

    Returns
    -------
    C : 2D numpy array
        Unnormalised correlation field (sum of the per-component
        correlation fields). C[0, 0] is the origin, points are
        uniformly distributed.
    xCL : float
        Unnormalised longitudinal correlation of the first component at
        distance equal to the field grid spacing.
    yCL : float
        Unnormalised longitudinal correlation of the second component at
        distance equal to the field grid spacing.
    xCT : float
        Unnormalised transversal correlation of the first component at
        distance equal to the field grid spacing.
    yCT : float
        Unnormalised transversal correlation of the second component at
        distance equal to the field grid spacing.
    Norm : float
        Norm of correlation field (sum of the per-component norms).
    """
    # Correlate each spatial component separately.
    Cx, xNorm = corField2D_scalar(field[:, :, 0])
    Cy, yNorm = corField2D_scalar(field[:, :, 1])
    # Longitudinal = along the component's own axis; transversal = across it.
    xCL, yCL = Cx[0, 1], Cy[1, 0]
    xCT, yCT = Cx[1, 0], Cy[0, 1]
    return Cx + Cy, xCL, yCL, xCT, yCT, xNorm + yNorm
def load_json_fixture(filename: str):
    """Load stored JSON data."""
    fixture_path = TEST_EXAMPLES_PATH / filename
    return json.loads(fixture_path.read_text())
def stat_list_card(
        box: str,
        title: str,
        items: List[StatListItem],
        name: Optional[str] = None,
        subtitle: Optional[str] = None,
        commands: Optional[List[Command]] = None,
) -> StatListCard:
    """Render a card displaying a list of stats.

    Args:
        box: A string indicating how to place this component on the page.
        title: The title.
        items: The individual stats to be displayed.
        name: An optional name for this item.
        subtitle: The subtitle, displayed below the title.
        commands: Contextual menu commands for this component.
    Returns:
        A `h2o_wave.types.StatListCard` instance.
    """
    # Thin pass-through constructor; keyword arguments for clarity.
    return StatListCard(
        box=box,
        title=title,
        items=items,
        name=name,
        subtitle=subtitle,
        commands=commands,
    )
def test_DataGeneratorAllSpectrums_fixed_set():
    """
    Test whether use_fixed_set=True toggles generating the same dataset on each epoch.

    Three generators are compared:
      * a normal one, expected to produce different batches on each pass;
      * a fixed-set one, expected to reproduce identical batches;
      * a second fixed-set one built from the same data, expected to match
        the first fixed-set generator exactly.
    """
    # Get test data
    binned_spectrums, tanimoto_scores_df = create_test_data()
    # Define other parameters
    batch_size = 4
    dimension = 88
    # Create normal generator
    normal_generator = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
                                                 reference_scores_df=tanimoto_scores_df,
                                                 dim=dimension, batch_size=batch_size,
                                                 use_fixed_set=False)
    # Create generator that generates a fixed set every epoch
    fixed_generator = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
                                                reference_scores_df=tanimoto_scores_df,
                                                dim=dimension, batch_size=batch_size,
                                                num_turns=5, use_fixed_set=True)

    def collect_results(generator):
        # Exhaust one pass of the generator into dense arrays so two
        # passes can be compared element-wise.
        n_batches = len(generator)
        X = np.zeros((batch_size, dimension, 2, n_batches))
        y = np.zeros((batch_size, n_batches))
        for i, batch in enumerate(generator):
            X[:, :, 0, i] = batch[0][0]
            X[:, :, 1, i] = batch[0][1]
            y[:, i] = batch[1]
        return X, y

    # Two passes over the normal generator must differ.
    first_X, first_y = collect_results(normal_generator)
    second_X, second_y = collect_results(normal_generator)
    assert not np.array_equal(first_X, second_X)
    assert first_y.shape == (4, 2), "Expected different number of labels"
    # Two passes over the fixed generator must be identical.
    first_X, first_y = collect_results(fixed_generator)
    second_X, second_y = collect_results(fixed_generator)
    assert np.array_equal(first_X, second_X)
    assert first_y.shape == (4, 10), "Expected different number of labels"
    # Create another fixed generator based on the same dataset that should generate the same
    # fixed set
    fixed_generator2 = DataGeneratorAllSpectrums(binned_spectrums=binned_spectrums[:8],
                                                 reference_scores_df=tanimoto_scores_df,
                                                 dim=dimension, batch_size=batch_size,
                                                 num_turns=5, use_fixed_set=True)
    first_X, first_y = collect_results(fixed_generator)
    second_X, second_y = collect_results(fixed_generator2)
    assert np.array_equal(first_X, second_X)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.