content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def lock_file(filename):
    """
    Locks a file so that the exodus reader can safely read
    a file without something else writing to it while we do it.

    Acquires a shared (read) flock on *filename*, yields while holding it,
    and always releases the lock -- even if the consumer raises into the
    generator.

    NOTE(review): this generator looks intended for use as a context
    manager; confirm the caller wraps it with
    ``contextlib.contextmanager`` -- adding the decorator here would change
    the call contract for any caller driving the raw generator, so it is
    left undecorated.
    """
    # "a+" creates the file if needed without truncating existing content.
    with open(filename, "a+") as f:
        fcntl.flock(f, fcntl.LOCK_SH)
        try:
            yield
        finally:
            # Guarantee the lock is dropped even when the body raises.
            fcntl.flock(f, fcntl.LOCK_UN)
def get_install_path():
    """Use registry and asking the user to better determine the install directory.

    Aborts the process when the registry-derived path and the user-chosen
    path disagree; otherwise validates and returns the chosen path.
    """
    registry_guess = get_registry_path()
    # Seed the file dialog with the registry hint when we have one.
    if registry_guess:
        chosen_path = get_user_path(initial_dir=registry_guess.as_posix())
    else:
        chosen_path = get_user_path(initial_dir=r'C:/Program Files (x86)/')
    if registry_guess != chosen_path:
        log.error(
            'Registry path and user defined path do not match. '
            'reg_likely_path: {} != user_path: {}'.format(registry_guess, chosen_path)
        )
        sys.exit('Exiting...')
    check_selected_base_path(chosen_path)
    return chosen_path
def constant(t, length):
    """ezgal.sfhs.constant( ages, length )

    Burst of constant star formation from t=0 to t=length.

    Parameters
    ----------
    t : float or numpy.ndarray
        Age(s) at which to evaluate the star-formation rate.
    length : float
        Duration of the burst.

    Returns
    -------
    float or numpy.ndarray
        1.0 where ``t <= length``, 0.0 elsewhere; an array input yields a
        flat array of size ``t.size`` (matching the original behavior).
    """
    # isinstance replaces the fragile `type(t) == type(np.array([]))`
    # comparison and also accepts ndarray subclasses.
    if isinstance(t, np.ndarray):
        sfr = np.zeros(t.size)
        # Assigning through an all-False mask is a no-op, so the previous
        # `if m.sum()` guard was unnecessary.
        sfr[t <= length] = 1.0
        return sfr
    # Scalar case; kept in the original `t > length` orientation so NaN
    # inputs behave exactly as before.
    return 0.0 if t > length else 1.0
def export_json(data, output=None):
    """Write *data* as JSON to ``<output>.json``, or print it to stdout.

    Parameters
    ----------
    data : object
        Any JSON-serializable object.
    output : str or None
        Output path; a ``.json`` suffix is appended when missing.
        When None, *data* is printed instead of written.
    """
    if output is None:
        print(data)
        return
    if not output.endswith('.json'):
        output += '.json'
    # The original wrapped `output` in a redundant f-string; open it directly.
    with open(output, 'w') as f:
        json.dump(data, f)
def write_values(event_number, cube, index, oasis_file):
    """
    Write one OASIS-compatible CSV file of wind-gust histogram bin indices.

    For every grid point the output holds: event id, area-peril id,
    intensity (histogram) bin index, and probability (confidence).
    """
    # This routine only works on 2-D cubes.
    assert cube.ndim == 2, 'Cube should have 2 dimension coordinates only ie. be 2-D '
    # Every grid-point bin index is written with full confidence.
    confidence = np.ones(cube.data.size)
    # Mask invalid points, then bin each value into BINWIDTH-wide histogram
    # bins (e.g. 0.1 m/s wide -> 900 bins covering 0..90 m/s).
    cube = np.ma.masked_invalid(cube.data)
    if np.any(cube.data > RANGEMAX):
        raise ValueError(f'Cube data range [{np.max(cube.data)}] exceeds max specified data range [{RANGEMAX}]!\n'
                         f'Adjust RANGEMAX and re-run...')
    gustbins = np.floor(cube.data / BINWIDTH)
    columns = ['EVENT_ID', 'AREAPERIL_ID', 'INTENSITY_BIN_INDEX', 'PROBABILITY']
    frame = write_df(event_number, index, gustbins, confidence, columns, 'sfp')
    write_csvfile(frame, oasis_file)
def model_setup_fn(attrs):
    """Generate the setup function for models.

    The model is loaded once, eagerly, and captured by the returned
    closure, which copies the attribute dict onto the test instance.
    """
    loaded_model = load_model(attrs['type'], attrs['data'])

    def setup(self):
        # Bind the preloaded model plus the raw attributes onto `self`.
        self.model = loaded_model
        self.type = attrs['type']
        self.data = attrs['data']
        self.network_type = attrs['network_type']
        self.dto = attrs.get('dto')
        self.catbuffer = attrs.get('catbuffer')
        self.extras = attrs.get('extras', {})

    return setup
def run_throughput_inner(query_root, data_dir, generated_query_dir,
                         host, port, database, user, password,
                         stream, num_streams, queue, verbose):
    """
    Run one throughput-test query stream and publish its Result.

    :param query_root:
    :param data_dir: subdirectory with data to be loaded
    :param generated_query_dir: subdirectory with generated queries
    :param host: hostname where the Postgres database is running
    :param port: port number where the Postgres database is listening
    :param database: database name, where the benchmark will be run
    :param user: username of the Postgres user with full access to the benchmark DB
    :param password: password for the Postgres user
    :param stream: stream number
    :param num_streams: number of streams
    :param queue: process queue the Result is put on
    :param verbose: True if more verbose output is required
    :return: none, uses exit(1) to abort on errors
    """
    try:
        connection = pgdb.PGDB(host, port, database, user, password)
        stream_result = r.Result("ThroughputQueryStream%s" % stream)
        failed = run_query_stream(connection, query_root, generated_query_dir,
                                  stream, num_streams, stream_result, verbose)
        if failed:
            print("unable to finish query in stream #%s" % stream)
            exit(1)
        queue.put(stream_result)
    except Exception as e:
        print("unable to connect to DB for query in stream #%s: %s" % (stream, e))
        exit(1)
def _mkdir(space, dirname, mode=0777, recursive=False, w_ctx=None):
    """ mkdir - Makes directory

    PHP-style mkdir() built on the interpreter's object space: returns
    space.w_True on success and space.w_False on any failure, reporting
    problems through space.ec.warn() rather than raising.
    (Python 2 / RPython syntax: octal literal `0777`, `except OSError, e`.)
    """
    # Clamp the mode to a non-negative 31-bit value, as PHP does.
    mode = 0x7FFFFFFF & mode
    if not _valid_fname(dirname):
        space.ec.warn("mkdir() expects parameter 1 to "
                      "be a valid path, string given")
        return space.w_False
    # open_basedir-style containment check on the resolved (real) path.
    if not is_in_basedir(space, 'mkdir', rpath.realpath(dirname)):
        return space.w_False
    try:
        if not os.path.isdir(dirname):
            if recursive:
                _recursive_mkdir(dirname, mode)
            else:
                os.mkdir(dirname, mode)
            return space.w_True
        else:
            # Mirrors PHP's (misleading) message when the path already exists.
            space.ec.warn("mkdir(): No such file or directory")
            return space.w_False
    except OSError, e:
        space.ec.warn("mkdir(): %s" % os.strerror(e.errno))
        return space.w_False
    except TypeError:
        # Bad mode/argument types are reported as a silent failure.
        return space.w_False
def test_error_y_is_None():
    """Assert that an error is raised when y is None for some strategies."""
    selector = FeatureSelector(strategy="univariate", solver=f_regression, n_features=9)
    # Univariate selection needs a target, so fitting without y must fail.
    with pytest.raises(ValueError):
        selector.fit(X_reg)
def test_colorizer(event=None):
    """Run all unit tests for leoColorizer.py.

    `event` is accepted so this function can be bound as a Leo command,
    but it is otherwise unused.
    """
    # Delegate to Leo's unit-test runner for the colorizer test class.
    g.run_unit_tests('leo.unittests.core.test_leoColorizer.TestColorizer')
def prepare_fixed_decimal(data, schema):
    """Converts decimal.Decimal to fixed length bytes array.

    Encodes the decimal's unscaled integer value as a big-endian
    two's-complement number padded to ``schema['size']`` bytes (Avro
    "fixed" decimal logical type). Non-Decimal inputs pass through
    unchanged.

    Raises ValueError when the decimal has more fractional digits than
    the schema's scale allows.
    """
    if not isinstance(data, decimal.Decimal):
        return data
    scale = schema.get('scale', 0)
    size = schema['size']
    # based on https://github.com/apache/avro/pull/82/
    sign, digits, exp = data.as_tuple()
    if -exp > scale:
        raise ValueError(
            'Scale provided in schema does not match the decimal')
    # Pad with trailing zero digits so the value is expressed at exactly
    # the schema scale.
    delta = exp + scale
    if delta > 0:
        digits = digits + (0,) * delta
    # Fold the digit tuple into a single unscaled integer.
    unscaled_datum = 0
    for digit in digits:
        unscaled_datum = (unscaled_datum * 10) + digit
    # +1 bit for the sign; the mask below keeps the high bits that must be
    # set when sign-extending a negative value to the full fixed width.
    bits_req = unscaled_datum.bit_length() + 1
    size_in_bits = size * 8
    offset_bits = size_in_bits - bits_req
    mask = 2 ** size_in_bits - 1
    bit = 1
    # Clear the low `bits_req` bits of the mask, leaving only the
    # sign-extension bits set.
    for i in range(bits_req):
        mask ^= bit
        bit <<= 1
    # Round the required bit count up to whole bytes (minimum one byte).
    if bits_req < 8:
        bytes_req = 1
    else:
        bytes_req = bits_req // 8
        if bits_req % 8 != 0:
            bytes_req += 1
    tmp = MemoryIO()
    if sign:
        # Negative value: two's complement within bits_req bits, then OR in
        # the sign-extension mask and emit all `size` bytes big-endian.
        unscaled_datum = (1 << bits_req) - unscaled_datum
        unscaled_datum = mask | unscaled_datum
        for index in range(size - 1, -1, -1):
            bits_to_write = unscaled_datum >> (8 * index)
            tmp.write(mk_bits(bits_to_write & 0xff))
    else:
        # Non-negative: zero-pad the leading bytes, then write the value
        # bytes big-endian.
        for i in range(offset_bits // 8):
            tmp.write(mk_bits(0))
        for index in range(bytes_req - 1, -1, -1):
            bits_to_write = unscaled_datum >> (8 * index)
            tmp.write(mk_bits(bits_to_write & 0xff))
    return tmp.getvalue()
def hello_fmt(values: Dict[str, List[int]]):
    """Print the sum per category.

    Args:
        values: Dict containing lists of int per category.
    """
    # Build one "key: total" fragment per category, then join them.
    fragments = [f"{category}: {sum(numbers)}" for category, numbers in values.items()]
    print(", ".join(fragments))
def _create_profile_d_file(prefix):
    """
    Create profile.d file with Java environment variables set.

    Installs /etc/profile.d/java.sh (via sudo) exporting JAVA_HOME under
    `prefix` and prepending its bin directory to PATH.
    """
    from fabtools.require.files import file as require_file

    script = dedent("""\
        export JAVA_HOME="%s/jdk"
        export PATH="$JAVA_HOME/bin:$PATH"
        """ % prefix)
    require_file(
        '/etc/profile.d/java.sh',
        contents=script,
        mode='0755',
        use_sudo=True,
    )
def test_add_method():
    """Tests the __add__ method"""
    # Shared constructor options for two identical tables.
    table_options = {
        "column_names": ["Frequency", "b", "c"],
        "column_names_delimiter": ",",
        "data": [[0.1 * 10 ** 10, 1, 2], [2 * 10 ** 10, 3, 4]],
        "data_delimiter": '\t',
        "header": ['Hello There', "My Darling"],
        "column_names_begin_token": '#',
        "comment_begin": '!',
        "comment_end": "\n",
        "directory": TESTS_DIRECTORY,
        "column_units": ["Hz", None, None],
        "column_descriptions": ["Frequency in Hz", None, None],
        "column_types": ['float', 'float', 'float'],
        "row_formatter_string": "{0:.2e}{delimiter}{1}{delimiter}{2}",
        "treat_header_as_comment": True,
    }
    first_table = AsciiDataTable(None, **table_options)
    second_table = AsciiDataTable(None, **table_options)
    second_table.add_row([0.3 * 10 ** 10, 1, 2])
    # Adding two tables should concatenate them without mutating `first_table`.
    combined = first_table + second_table
    print(combined)
    print(first_table)
def test_arrayer_thread():
    """Tests that the arrayer monitor thread can be restarted after exit."""
    j1 = Job(array_task(1))
    j1.task = array_task
    sched = mock_scheduler()
    executor = mock_executor(sched)  # renamed from `exec`, which shadows the builtin
    arr = job_array.JobArrayer(executor, submit_interval=10000.0, stale_time=0.05, min_array_size=5)
    # BUG FIX: `args=(1)` is just the int 1 -- a one-element tuple needs a
    # trailing comma.
    arr.add_job(j1, args=(1,), kwargs={})
    assert arr._monitor_thread.is_alive()
    # Stop the monitoring thread.
    arr.stop()
    assert not arr._monitor_thread.is_alive()
    # Submitting an additional job should restart the thread.
    arr.add_job(j1, args=(2,), kwargs={})
    assert arr._monitor_thread.is_alive()
    arr.stop()
def test_feature_flexiblerollout_stickiness_100(unleash_client):
    """
    Feature.flexible.rollout.custom.stickiness_100 should be enabled without field defined for 100%
    """
    # Stub the Unleash server endpoints the client talks to.
    responses.add(responses.POST, URL + REGISTER_URL, json={}, status=202)
    responses.add(responses.POST, URL + METRICS_URL, json={}, status=202)
    responses.add(responses.GET, URL + FEATURES_URL, json=json.loads(MOCK_JSON), status=200)
    # At a 100% rollout the flag is on regardless of the custom field value.
    unleash_client.initialize_client()
    assert unleash_client.is_enabled("Feature.flexible.rollout.custom.stickiness_100", {'customField': 'any_value'})
def wait_ele_disappear(context, selector=None):
    """
    The specified selector element string disappears from the page within
    a specified period of time

    :param context: step context
    :param selector: locator string for selector element (or None).
    """
    # Thin delegate: the global step helper implements the actual wait.
    g_Context.step.wait_ele_disappear(context, selector)
def device_traits() -> dict[str, Any]:
    """Fixture that sets default traits used for devices."""
    default_traits: dict[str, Any] = {
        "sdm.devices.traits.Info": {"customName": "My Sensor"},
    }
    return default_traits
def writeOutput(ipData, outfilename):
    """Write the HU results as a plain-text table.

    Parameters
    ----------
    ipData : numpy structured array
        Records with fields 'iname', 'label', 'ipnum' and 'HUval'.
    outfilename : str
        Base name of the output file; '.txt' is appended and the file is
        written to the current working directory.

    Returns
    -------
    int
        0 on success.
    """
    # Get the current working directory so we can write the results file there
    outfilename = os.path.join(os.getcwd(), outfilename + '.txt')
    # `with` guarantees the file is closed even on error, and `range`
    # replaces the Python-2-only `xrange`.
    with open(outfilename, 'w') as outfile:
        for i in range(ipData.size):
            ip = ipData[i]
            outfile.write('%s %7d %2d %8.1f\n' %
                          (ip['iname'], ip['label'], ip['ipnum'], ip['HUval']))
    print('HU results written to file: %s' % (outfilename))
    return 0
async def setup_bga_account(message, bga_username, bga_password):
    """Save and verify login info.

    Verifies the credentials against BGA, stores them on success, and
    notifies the user either way.
    """
    discord_id = message.author.id
    # Credentials were posted in a message; remove it if it was on a
    # public (guild) channel.
    if message.guild:
        await message.delete()
    account = BGAAccount()
    login_ok = account.login(bga_username, bga_password)
    player_id = account.get_player_id(bga_username)
    account.logout()
    account.close_connection()
    if not login_ok:
        await message.author.send(
            'Unable to setup account because of bad username or password. Try putting quotes (") around either if there are spaces or special characters.',
        )
        return
    save_data(discord_id, bga_userid=player_id, username=bga_username, password=bga_password)
    await message.channel.send(
        f"Account {bga_username} setup successfully. This bot will store your username and password so that you can make tables with !play. To play chess with pocc, use `!play chess pocc`",
    )
def calculate_equivalent_diameter(areas):
    """Calculate the equivalent diameters of a list or numpy array of areas.

    The equivalent diameter is the diameter of a circle with the same area:
    d = sqrt(4 * A / pi).

    :param areas: List or numpy array of areas.
    :return: List of equivalent diameters (Python floats).
    """
    # `.tolist()` converts numpy scalars back to plain Python floats.
    return np.sqrt(4 * np.asarray(areas) / np.pi).tolist()
def test_audit_log(s3_stubber):
    """Test that reversion revisions are created.

    A revision must be created only for the company whose profile status
    actually changes; the unchanged company must gain no versions.
    """
    # This company's status already matches the CSV, so no change expected.
    company_without_change = CompanyFactory(
        great_profile_status=Company.GREAT_PROFILE_STATUSES.published,
    )
    # This company's status differs, so the command should update it.
    company_with_change = CompanyFactory(
        great_profile_status=Company.GREAT_PROFILE_STATUSES.unpublished,
    )
    bucket = 'test_bucket'
    object_key = 'test_key'
    csv_content = f"""datahub_company_id,is_published_find_a_supplier,has_find_a_supplier_profile
{company_without_change.pk},t,t
{company_with_change.pk},t,t
"""
    # Stub the S3 download the management command performs.
    s3_stubber.add_response(
        'get_object',
        {
            'Body': BytesIO(csv_content.encode(encoding='utf-8')),
        },
        expected_params={
            'Bucket': bucket,
            'Key': object_key,
        },
    )
    call_command('update_company_great_profile_status', bucket, object_key)
    # Unchanged company: same status, no reversion versions.
    company_without_change.refresh_from_db()
    assert company_without_change.great_profile_status == Company.GREAT_PROFILE_STATUSES.published
    versions = Version.objects.get_for_object(company_without_change)
    assert versions.count() == 0
    # Changed company: updated status, exactly one revision with a comment.
    company_with_change.refresh_from_db()
    assert company_with_change.great_profile_status == Company.GREAT_PROFILE_STATUSES.published
    versions = Version.objects.get_for_object(company_with_change)
    assert versions.count() == 1
    assert versions[0].revision.get_comment() == 'GREAT profile status updated.'
def truncation_error(stencil: list, deriv: int, interval: str = DEFAULT_INTERVAL):
    """
    derive the leading-order of error term
    in the finite difference equation based on the given stencil.

    Args:
        stencil (list of int): relative point numbers
            used for discretization.
        deriv (int): order of derivative.
        interval (str, optional): an interval symbol like `dx`.
            Defaults to DEFAULT_INTERVAL.

    Returns:
        sympy Expr: the leading-order of error term

    Examples:
        >>> from dictos import finite_difference as fd
        >>> fd.truncation_error([-1, 0, 1], deriv=1)
        -f^(3)*h**2/6
        >>> fd.truncation_error([-1, 0, 1], deriv=2)
        -f^(4)*h**2/12
        >>> fd.truncation_error([-2, -1, 0, 1, 2], deriv=1)
        f^(5)*h**4/30
        >>> fd.truncation_error([-2, -1, 0, 1, 2], deriv=2)
        f^(6)*h**4/90
    """
    coef = coefficients(stencil, deriv)
    # derive finite difference coefficients based on given stencil
    x_set = create_coordinate_symbols(stencil, interval=interval)
    # create set of coordinate symbols from stencil.
    # [-2, -1, 0, 1, 2] -> [-2*h, -h, 0, h, 2*h]
    # Enough Taylor terms to expose the first non-cancelling error term.
    num_term = len(x_set) + deriv
    f_ts = [taylor_series(x, num_term) for x in x_set]
    # calculate Taylor series around points in x_set.
    fd_eq = dot_product(coef, f_ts)
    # calculate weighted sum of Taylor series.
    # for instance, 2nd-order 3-point central finite difference
    # for 1st derivative is
    # fd_eq [= f(h)/2 - f(-h)/2)] = f^(1)*h + f^(3)*h**3/6 + ...
    h = sp.symbols(interval)
    # nsimplify keeps coefficients rational so the subtraction cancels
    # exactly; as_leading_term picks the lowest power of h that survives.
    return sp.expand(
        sp.simplify(
            derivative_symbol(DEFAULT_DIFFERENTIAND, deriv)
            - sp.nsimplify(fd_eq / h ** deriv, rational=True, tolerance=1e-10)
        )
    ).as_leading_term(h)
    # extract the leading-order of errer term.
    # A finite difference formulation with error term is, for instance,
    # f^(1) = (f(h) - f(-h))/(2*h) - f^(3)*h**3/6 - ...
    # to extract error terms, reformulate fd_eq as
    # f^(1) - fd_eq/h**1 = - f^(3)*h**3/6 - ...
def _get_parameter_defaults(fpm, metadata, readout_mode, subarray, frame_time,
                            temperature, cosmic_ray_mode, verbose=2,
                            logger=LOGGER):
    """
    Helper function to obtain appropriate defaults for parameters
    that have not been explicitly set.
    (Saves duplication of code between simulate_sca and
    simulate_sca_fromdata.)

    For each of readout_mode, subarray, frame_time, temperature and
    cosmic_ray_mode the resolution order is: explicit argument, then
    FITS metadata, then the detector/cosmic-ray properties default
    (frame_time has no properties default and stays None if unset).

    :param fpm: focal-plane module properties; 'TARGET_TEMPERATURE' is
        read as the temperature default.
    :param metadata: FITS metadata mapping (or None).
    :param verbose: verbosity level controlling how much is logged.
    :param logger: logger used for the informational messages.
    :return: tuple (readout_mode, subarray, frame_time, temperature,
        cosmic_ray_mode) with defaults filled in.
    """
    # If the readout mode is not specified, obtained a default from
    # the FITS metadata of the input file. Failing that, obtain a
    # default value from the detector properties.
    if readout_mode is None:
        if metadata is not None and 'READPATT' in metadata:
            readout_mode = metadata['READPATT']
            if verbose > 2:
                logger.info( "Readout mode %s obtained from FITS metadata." % \
                    readout_mode )
        else:
            readout_mode = detector_properties['DEFAULT_READOUT_MODE']
            if verbose > 2:
                logger.info( "Readout mode defaulted to " + \
                    "%s from detector properties." % readout_mode )
    else:
        if verbose > 2:
            logger.info( "Readout mode explicitly set to %s." % readout_mode )
    # If the output subarray mode is not specified, obtained a default
    # from the FITS metadata of the input file, as long as this is a
    # known subarray mode. Failing that, obtain a default value from
    # the detector properties.
    if subarray is None:
        if metadata is not None and 'SUBARRAY' in metadata:
            subarray = metadata['SUBARRAY']
            # A metadata subarray is only honoured when it is a known mode.
            if subarray in detector_properties['SUBARRAY']:
                if verbose > 2:
                    logger.info( "Subarray mode %s obtained from FITS metadata." % \
                        subarray )
            else:
                nonstandard = subarray
                subarray = detector_properties['DEFAULT_SUBARRAY']
                if verbose > 2:
                    strg = "Subarray mode %s obtained from FITS metadata " % \
                        nonstandard
                    strg += "is non-standard, so output subarray mode "
                    strg += "defaulted to %s from detector properties." % \
                        subarray
                    logger.info( strg )
        else:
            subarray = detector_properties['DEFAULT_SUBARRAY']
            if verbose > 2:
                logger.info( "Subarray mode defaulted to " + \
                    "%s from detector properties." % subarray )
    else:
        if verbose > 2:
            logger.info( "Subarray mode explicitly set to %s." % subarray )
    # An explicit or metadata-supplied frame time overrides whatever the
    # readout mode and subarray would imply; otherwise it is left as None.
    if frame_time is None:
        if metadata is not None and 'TFRAME' in metadata:
            frame_time = metadata['TFRAME']
            if verbose > 2:
                strg = "Frame time of %f seconds obtained " % \
                    frame_time
                strg += "from FITS metadata "
                strg += "(overriding the readout mode and subarray)."
                logger.info( strg )
    else:
        strg = "Frame time of %f seconds specified explicitly " % frame_time
        strg += "(overriding the readout mode and subarray)."
        logger.info( strg )
    # If the detector temperature is not specified, use the target
    # temperature from the detector properties.
    if temperature is None:
        temperature = fpm['TARGET_TEMPERATURE']
        if verbose > 3:
            logger.debug( "Temperature defaulted to %fK from detector properties." % \
                temperature )
    else:
        if verbose > 2:
            logger.info( "Temperature explicitly set to %fK." % temperature )
    # If the cosmic ray mode is not specified, obtained a default from
    # the FITS metadata of the input file. Failing that, obtain a default
    # value from the cosmic ray properties.
    if cosmic_ray_mode is None:
        if metadata is not None and 'CRMODE' in metadata:
            cosmic_ray_mode = metadata['CRMODE']
            if verbose > 3:
                logger.debug( "Cosmic ray mode %s obtained from FITS metadata." % \
                    cosmic_ray_mode )
        else:
            cosmic_ray_mode = cosmic_ray_properties['DEFAULT_CR_MODE']
            if verbose > 3:
                logger.debug( "Cosmic ray mode defaulted to " + \
                    "%s from cosmic ray properties." % cosmic_ray_mode )
    else:
        if verbose > 2:
            logger.info( "Cosmic ray mode explicitly set to %s." % cosmic_ray_mode )
    return (readout_mode, subarray, frame_time, temperature, cosmic_ray_mode)
def cmd_renderurl(cfg, command, argv):
    """Renders a single url of your blog to stdout.

    With no url arguments the option help is printed instead.
    Always returns 0.
    """
    parser = build_parser('%prog renderurl [options] <url> [<url>...]')
    parser.add_option('--headers',
                      action='store_true', dest='headers', default=False,
                      help='Option that causes headers to be displayed '
                           'when rendering a single url.')
    options, args = parser.parse_args(argv)
    if not args:
        parser.print_help()
        return 0
    for url in args:
        renderer = build_douglas(cfg)
        # Render relative to the configured base url when it matches.
        base = cfg['base_url']
        if url.startswith(base):
            url = url[len(base):]
        renderer.run_render_one(url, options.headers)
    return 0
def combine_expressions(expressions, relation='AND', licensing=None):
    """
    Return a combined license expression string with relation, given a list of
    license expressions strings.

    For example:
    >>> a = 'mit'
    >>> b = 'gpl'
    >>> combine_expressions([a, b])
    'mit AND gpl'
    >>> assert 'mit' == combine_expressions([a])
    >>> combine_expressions([])
    >>> combine_expressions(None)
    >>> combine_expressions(('gpl', 'mit', 'apache',))
    'gpl AND mit AND apache'
    """
    if not expressions:
        return
    if not isinstance(expressions, (list, tuple)):
        raise TypeError(
            'expressions should be a list or tuple and not: {}'.format(
                type(expressions)))
    # The default used to be `licensing=Licensing()`, which is evaluated once
    # at definition time and shared by every call; build it lazily instead.
    if licensing is None:
        licensing = Licensing()
    # Remove duplicate elements while preserving order.
    expressions = list(dict.fromkeys(expressions))
    if len(expressions) == 1:
        return expressions[0]
    parsed = [licensing.parse(le, simple=True) for le in expressions]
    combined = licensing.OR(*parsed) if relation == 'OR' else licensing.AND(*parsed)
    return str(combined)
def parrallelize(model: nn.Module) -> nn.Module:
    """ Make use of all available GPU using nn.DataParallel
    NOTE: ensure to be using different random seeds for each process if you use techniques like data-augmentation or any other techniques which needs random numbers different for each steps. TODO: make sure this isn't already done by Pytorch?
    """
    gpu_count = torch.cuda.device_count()
    # Single-GPU / CPU setups get the model back untouched.
    if gpu_count <= 1:
        return model
    print(f'> Using "nn.DataParallel(model)" on {gpu_count} GPUs.')
    return nn.DataParallel(model)
def get_prof_details(prof_id):
    """
    Returns the details of the professor in same order as DB.

    Looks up the `professor` row by primary key in ./db.sqlite3 and
    returns the row tuple, or None when no such professor exists.

    :param prof_id: primary key of the professor to look up.
    """
    # BUG FIX: the parameters argument of execute() must be a sequence;
    # `(prof_id)` is just `prof_id` itself, which breaks parameter binding.
    # Also close the connection instead of leaking it.
    connection = sqlite3.connect('./db.sqlite3')
    try:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM professor WHERE prof_id = ?;", (prof_id,))
        return cursor.fetchone()
    finally:
        connection.close()
def test_can_understand_Td_symmetry():
    """Ensure values match regression logfiles for Td symmetry.

    Two tetrahedral molecules (methane and tetrahedrane) are checked for
    the full Td signature: a spherical top, four C3 and three C2 proper
    axes, three S4 improper axes, six sigma-v mirror planes, no inversion
    center, and symmetry number 12.
    """
    # --- methane -------------------------------------------------------
    data = datasets.logfiles["tanaka1996"][
        "methane@UMP2/6-311G(2df,2pd)"
    ]  # tetrahedron
    # Spherical top: all three principal moments equal, principal axes
    # orthonormal and aligned with the lab frame.
    moments, axes, atomcoords = coords.inertia(data.atommasses, data.atomcoords)
    assert moments == pytest.approx([3.182947905, 3.182947905, 3.182947905], 1e-2)
    assert axes.T @ axes == pytest.approx(np.eye(3))
    assert axes == pytest.approx(np.eye(3))
    # Atoms split into one carbon and four equivalent hydrogens.
    groups = coords._equivalent_atoms(data.atommasses, atomcoords)
    assert len(groups) == 2
    assert len(groups[0]) == 1
    assert len(groups[1]) == 4
    rotor_class = coords._classify_rotor(moments)
    assert rotor_class == ("spheric", "nonplanar")
    # Proper axes: four C3 (through each C-H bond) plus three C2.
    proper_axes = coords._get_proper_axes(atomcoords, groups, axes, rotor_class)
    assert len(proper_axes) == 7
    assert proper_axes[0][0] == 3
    assert proper_axes[1][0] == 3
    assert proper_axes[2][0] == 3
    assert proper_axes[3][0] == 3
    assert proper_axes[4][0] == 2
    assert proper_axes[5][0] == 2
    assert proper_axes[6][0] == 2
    assert proper_axes[0][1] == pytest.approx(
        [0.5773502691896257, 0.5773502691896257, 0.5773502691896257]
    )
    assert proper_axes[1][1] == pytest.approx(
        [0.5773502691896257, -0.5773502691896257, -0.5773502691896257]
    )
    assert proper_axes[2][1] == pytest.approx(
        [-0.5773502691896257, 0.5773502691896257, -0.5773502691896257]
    )
    assert proper_axes[3][1] == pytest.approx(
        [-0.5773502691896257, -0.5773502691896257, 0.5773502691896257]
    )
    assert proper_axes[4][1] == pytest.approx([0.0, 0.0, -1.0])
    assert proper_axes[5][1] == pytest.approx([0.0, -1.0, 0.0])
    assert proper_axes[6][1] == pytest.approx([-1.0, 0.0, 0.0])
    # Improper axes: three S4, collinear with the C2 axes.
    improper_axes = coords._get_improper_axes(
        atomcoords, groups, axes, rotor_class, proper_axes
    )
    assert len(improper_axes) == 3
    assert improper_axes[0][0] == 4
    assert improper_axes[1][0] == 4
    assert improper_axes[2][0] == 4
    assert improper_axes[0][1] == proper_axes[4][1]
    assert improper_axes[1][1] == proper_axes[5][1]
    assert improper_axes[2][1] == proper_axes[6][1]
    # Mirror planes: six sigma-v.
    mirror_axes = coords._get_mirror_planes(
        atomcoords, groups, axes, rotor_class, proper_axes
    )
    assert len(mirror_axes) == 6
    assert mirror_axes[0][0] == "v"
    assert mirror_axes[1][0] == "v"
    assert mirror_axes[2][0] == "v"
    assert mirror_axes[3][0] == "v"
    assert mirror_axes[4][0] == "v"
    assert mirror_axes[5][0] == "v"
    assert mirror_axes[0][1] == pytest.approx(
        [0.7071067811865476, -0.7071067811865476, 0.0]
    )
    assert mirror_axes[1][1] == pytest.approx(
        [0.0, -0.7071067811865476, 0.7071067811865476]
    )
    assert mirror_axes[2][1] == pytest.approx(
        [0.0, -0.7071067811865476, -0.7071067811865476]
    )
    assert mirror_axes[3][1] == pytest.approx(
        [-0.7071067811865476, 0.0, 0.7071067811865476]
    )
    assert mirror_axes[4][1] == pytest.approx(
        [-0.7071067811865476, 0.0, -0.7071067811865476]
    )
    assert mirror_axes[5][1] == pytest.approx(
        [-0.7071067811865476, -0.7071067811865476, 0.0]
    )
    # Td has no inversion center (that would be Oh).
    assert not coords._has_inversion_center(atomcoords, groups)
    point_group = coords.find_point_group(data.atommasses, atomcoords, proper_axes)
    assert point_group == "Td"
    assert coords.symmetry_number(point_group) == 12
    # --- tetrahedrane: same Td signature, different geometry -----------
    data = datasets.logfiles["symmetries"]["tetrahedrane"]  # tetrahedron
    moments, axes, atomcoords = coords.inertia(data.atommasses, data.atomcoords)
    assert moments == pytest.approx([37.54433184, 37.54433184, 37.54433184])
    assert axes.T @ axes == pytest.approx(np.eye(3))
    assert axes == pytest.approx(np.eye(3))
    # Four equivalent carbons and four equivalent hydrogens.
    groups = coords._equivalent_atoms(data.atommasses, atomcoords)
    assert len(groups) == 2
    assert len(groups[0]) == 4
    assert len(groups[1]) == 4
    rotor_class = coords._classify_rotor(moments)
    assert rotor_class == ("spheric", "nonplanar")
    proper_axes = coords._get_proper_axes(atomcoords, groups, axes, rotor_class)
    assert len(proper_axes) == 7
    assert proper_axes[0][0] == 3
    assert proper_axes[1][0] == 3
    assert proper_axes[2][0] == 3
    assert proper_axes[3][0] == 3
    assert proper_axes[4][0] == 2
    assert proper_axes[5][0] == 2
    assert proper_axes[6][0] == 2
    assert proper_axes[0][1] == pytest.approx(
        [0.5773502691896257, 0.5773502691896257, 0.5773502691896257]
    )
    assert proper_axes[1][1] == pytest.approx(
        [0.5773502691896257, -0.5773502691896257, -0.5773502691896257]
    )
    assert proper_axes[2][1] == pytest.approx(
        [-0.5773502691896257, 0.5773502691896257, -0.5773502691896257]
    )
    assert proper_axes[3][1] == pytest.approx(
        [-0.5773502691896257, -0.5773502691896257, 0.5773502691896257]
    )
    assert proper_axes[4][1] == pytest.approx([1.0, 0.0, 0.0])
    assert proper_axes[5][1] == pytest.approx([0.0, 0.0, 1.0])
    assert proper_axes[6][1] == pytest.approx([0.0, -1.0, 0.0])
    improper_axes = coords._get_improper_axes(
        atomcoords, groups, axes, rotor_class, proper_axes
    )
    assert len(improper_axes) == 3
    assert improper_axes[0][0] == 4
    assert improper_axes[1][0] == 4
    assert improper_axes[2][0] == 4
    assert improper_axes[0][1] == proper_axes[4][1]
    assert improper_axes[1][1] == proper_axes[5][1]
    assert improper_axes[2][1] == proper_axes[6][1]
    mirror_axes = coords._get_mirror_planes(
        atomcoords, groups, axes, rotor_class, proper_axes
    )
    assert len(mirror_axes) == 6
    assert mirror_axes[0][0] == "v"
    assert mirror_axes[1][0] == "v"
    assert mirror_axes[2][0] == "v"
    assert mirror_axes[3][0] == "v"
    assert mirror_axes[4][0] == "v"
    assert mirror_axes[5][0] == "v"
    assert mirror_axes[0][1] == pytest.approx(
        [0.7071067811865476, 0.0, 0.7071067811865476]
    )
    assert mirror_axes[1][1] == pytest.approx(
        [0.7071067811865476, 0.0, -0.7071067811865476]
    )
    assert mirror_axes[2][1] == pytest.approx(
        [0.7071067811865476, -0.7071067811865476, 0.0]
    )
    assert mirror_axes[3][1] == pytest.approx(
        [0.0, -0.7071067811865476, 0.7071067811865476]
    )
    assert mirror_axes[4][1] == pytest.approx(
        [0.0, -0.7071067811865476, -0.7071067811865476]
    )
    assert mirror_axes[5][1] == pytest.approx(
        [-0.7071067811865476, -0.7071067811865476, 0.0]
    )
    assert not coords._has_inversion_center(atomcoords, groups)
    point_group = coords.find_point_group(data.atommasses, atomcoords, proper_axes)
    assert point_group == "Td"
    assert coords.symmetry_number(point_group) == 12
def get_process_path(tshark_path=None, process_name='tshark'):
    """
    Finds the path of the tshark executable. If the user has provided a path
    or specified a location in config.ini it will be used. Otherwise default
    locations will be searched.

    :param tshark_path: Path of the tshark binary
    :raises TSharkNotFoundException in case TShark is not found in any location.
    """
    config = get_config()
    candidates = [config.get('tshark', 'tshark_path')]
    # A user-provided path takes priority over the configured one.
    if tshark_path is not None:
        candidates.insert(0, tshark_path)
    if sys.platform.startswith('win'):
        # Windows: also look under both Program Files trees.
        for env in ('ProgramFiles(x86)', 'ProgramFiles'):
            program_files = os.getenv(env)
            if program_files is not None:
                candidates.append(
                    os.path.join(program_files, 'Wireshark', '%s.exe' % process_name)
                )
    else:
        # Linux, etc.: append every directory on the (defaulted) PATH.
        os_path = os.getenv(
            'PATH',
            '/usr/bin:/usr/sbin:/usr/lib/tshark:/usr/local/bin'
        )
        candidates.extend(
            os.path.join(directory, process_name) for directory in os_path.split(':')
        )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    raise TSharkNotFoundException(
        'TShark not found. Try adding its location to the configuration file. '
        'Search these paths: {}'.format(candidates)
    )
def test_set_simplify():
    """This tests our ability to simplify set objects.

    This test is pretty simple since sets just serialize to
    lists, with a tuple wrapper with the correct ID (3)
    for sets so that the detailer knows how to interpret it."""
    value = set(["hello", "world"])
    set_detail_index = serde.detailers.index(native_serde._detail_collection_set)
    str_detail_index = serde.detailers.index(native_serde._detail_str)
    expected = (set_detail_index, [(str_detail_index, (b"hello",)), (str_detail_index, (b"world",))])
    simplified = serde._simplify(value)
    assert simplified[0] == expected[0]
    # Element order inside a set is not guaranteed, so compare as sets.
    assert set(simplified[1]) == set(expected[1])
def test_add_order_error_validate_bool_product_code(client):
    """Must be error on validate data"""
    payload = {"user_id": 1, "product_code": False}
    response = client.post("/orders", headers=HEADERS, json=payload)
    detail = response.json().get("detail")[0]
    # A boolean product code must be rejected with a 422 validation error.
    assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
    assert detail.get("msg") == "__init__() takes exactly 3 positional arguments (2 given)"
    assert detail.get("type") == "type_error"
    assert detail.get("loc") == ["body", "product_code"]
def _gop_column_stats(df, columns, suffix=""):
    """Distribution statistics for each named column of one GOP frame.

    Returns a flat dict of mean/median/std/skew/kurtosis/iqr plus the
    0.0..1.0 deciles, with `suffix` appended to every key.
    """
    stats = {}
    for x in columns:
        series = df[x]
        stats["mean_" + x + suffix] = series.mean()
        stats["median_" + x + suffix] = series.median()
        stats["std_" + x + suffix] = series.std()
        stats["skew_" + x + suffix] = float(scipy.stats.skew(series))
        stats["kurtosis_" + x + suffix] = float(scipy.stats.kurtosis(series))
        stats["iqr_" + x + suffix] = float(scipy.stats.iqr(series))
        for i in range(11):
            quantile = round(0.1 * i, 1)
            stats["{}_quantil_{}{}".format(quantile, x, suffix)] = float(series.quantile(quantile))
    return stats


def stats_per_gop(processed_video_sequence, needed=None):
    """
    general helper to extract statistics on a per gop basis

    For each GOP the statistics of every column in `needed` are computed
    twice: over all frames, and over non-I frames only (suffix `_non-i`);
    the final result is the mean of each statistic over all GOPs.

    :param processed_video_sequence: sequence of per-frame records,
        grouped via `by_gop`.
    :param needed: list of column names to compute statistics for.
        Defaults to no columns (previously a mutable default `[]`, which
        is a shared-state pitfall).
    :return: dict mapping statistic name to its mean over all GOPs.
    """
    if needed is None:
        needed = []
    logging.debug(f"calculate {needed} gop based for {processed_video_sequence}")
    results = []
    for gop in by_gop(processed_video_sequence, columns=needed + ["FrameType"]):
        df = pd.DataFrame(gop)
        gop_res = _gop_column_stats(df, needed)
        # Repeat the statistics over non-I frames only (FrameType == 1
        # marks I-frames).
        gop_res.update(_gop_column_stats(df[df["FrameType"] != 1], needed, suffix="_non-i"))
        results.append(gop_res)
    result = pd.DataFrame(results).mean().to_dict()
    logging.debug(f"estimated {needed} feature values: {result}")
    return result
def test_TileServer_get_tiles_url():
    """Should work as expected (create TileServer object and get tiles endpoint)."""
    raster = RasterTiles(raster_path)
    server = TileServer(raster)
    # The server must expose the templated XYZ tiles endpoint.
    assert server.get_tiles_url() == "http://127.0.0.1:8080/tiles/{z}/{x}/{y}.png"
def available_help(mod, ending="_command"):
    """Returns the dochelp from all functions in this module that have _command
    at the end.

    :param mod: module (or any object with ``__dict__``) to scan.
    :param ending: suffix identifying command functions; it is stripped
        from the displayed name.
    :return: list of ``"<name>:\\n<docstring>"`` strings.
    """
    help_text = []
    for key in mod.__dict__:
        if key.endswith(ending):
            # Strip the suffix from the END of the name; str.split() would
            # truncate at the first occurrence (e.g. "run_command_v2_command"
            # became "run").  Guard against ending == "" since key[:-0] == "".
            name = key[: -len(ending)] if ending else key
            # A missing docstring used to raise TypeError; show it as empty.
            doc = mod.__dict__[key].__doc__ or ''
            help_text.append(name + ":\n" + doc)
    return help_text
def __s_polynomial(g, h):
    """
    Computes the S-polynomial of g, h. The S-polynomial is a polynomial built explicitly so that the leading terms
    cancel when combining g and h linearly.

    NOTE(review): relies on Python 2 / Sage semantics -- bare `reduce`,
    `map` results consumed as sequences, and `g.lt()` as the Sage
    leading-term method. Verify before running under Python 3.
    """
    deg_g = __multidegree(g)
    deg_h = __multidegree(h)
    # Exponent-wise maximum: the multidegree of lcm(LM(g), LM(h)).
    max_deg = map(max, zip(deg_g, deg_h))
    R = g.parent()
    # Builds a polynomial with the variables raised to max_deg, in order
    vars = map(R, R.variable_names())
    x_pow_max_deg = reduce(operator.mul, [x ** d for (d, x) in zip(max_deg, vars)], R(1))
    # Divide the lcm monomial by each leading term; the remainders are zero
    # by construction of max_deg.
    quo_g, _ = x_pow_max_deg.quo_rem(g.lt())
    quo_h, _ = x_pow_max_deg.quo_rem(h.lt())
    # The weighted difference cancels the leading terms exactly.
    return quo_g * g - quo_h * h
def notification_reminder(paci_list, supervisor, operator, type):
    """
    This method sends first, second, reminders and then send third one and cc supervisor in the email

    Each PACI record is classified by its reminder flags; only the most
    escalated non-empty group triggers an email, and the corresponding
    flag is then set so the next run escalates further.
    """
    first_reminders = []
    second_reminders = []
    penalty_reminders = []
    if paci_list and len(paci_list) > 0:
        for paci in paci_list:
            # Classification: both flags set -> third reminder; only the
            # first flag set -> second reminder; neither -> first reminder.
            if paci.reminder_grd_operator_again:
                penalty_reminders.append(paci)
            elif paci.reminder_grd_operator and not paci.reminder_grd_operator_again:
                second_reminders.append(paci)
            elif not paci.reminder_grd_operator:
                first_reminders.append(paci)
    if penalty_reminders and len(penalty_reminders) > 0:
        # Third reminder CCs the supervisor.
        email_notification_reminder(operator, penalty_reminders, "Third Reminder", "Apply for", type, supervisor)
    elif second_reminders and len(second_reminders) > 0:
        email_notification_reminder(operator, second_reminders, "Second Reminder", "Apply for", type)
        for paci in second_reminders:
            frappe.db.set_value('PACI', paci.name, 'reminder_grd_operator_again', 1)
    elif first_reminders and len(first_reminders) > 0:
        email_notification_reminder(operator, first_reminders, "First Reminder", "Apply for", type)
        for paci in first_reminders:
            frappe.db.set_value('PACI', paci.name, 'reminder_grd_operator', 1)
def generate_map_chunk(size_x: int, size_y: int, biome_type: str, x_offset: int = 0, y_offset: int = 0):
    """
    Generate a rectangular map chunk in the given (or a random) biome.

    The chunk is a nested list representing a 2d-array whose integer
    fields are the elevation of each point.  Elevations come from an
    OpenSimplex coherent-noise generator seeded randomly, summed over
    several octaves, then biased per biome.

    Args:
        size_x (int): horizontal size of chunk in map pixels
        size_y (int): vertical size of chunk in map pixels
        biome_type (str): which biome type to use ('random' picks one)
        x_offset (int): horizontal offset fed to the noise generator
        y_offset (int): vertical offset fed to the noise generator

    Returns:
        list[list[int]]: elevation value for each (x, y) coordinate
    """
    # Start from a flat mid-range elevation (127 of 0..255).
    map_array = [[127] * size_y for _ in range(size_x)]
    noise_maker = OpenSimplex(randint(-10000, 10000))
    for x in range(size_x):
        for y in range(size_y):
            for octave in range(OCTAVES):
                # Finer octaves only refine terrain already above water level.
                if map_array[x][y] > LEVELS.water or octave < 1:
                    map_array[x][y] = int_median_cutter(
                        0, 255,
                        map_array[x][y] + OCTAVE_AMPLITUDE[octave] *
                        noise_maker.noise2d((x + x_offset) / OCTAVE_WAVELENGTH[octave],
                                            (y + y_offset) / OCTAVE_WAVELENGTH[octave]))
    if biome_type == 'random':
        biome_type = ['ocean_islands',
                      'ocean',
                      'high_mountains',
                      'default'][randint(0, 3)]
    if biome_type == 'ocean_islands':
        # Sink everything; clamp to a minimum sea floor of 20.
        map_array = [[max(value - 100, 20) for value in column] for column in map_array]
    elif biome_type == 'ocean':
        # Scale elevations down heavily; clamp to a minimum of 20.
        map_array = [[max(int(value * 0.3125), 20) for value in column] for column in map_array]
    elif biome_type == 'high_mountains':
        for x in range(size_x):
            for y in range(size_y):
                # Raise terrain with an extra low-frequency ridge, capped at 250.
                map_array[x][y] = min(map_array[x][y] + 100 + 10 *
                                      noise_maker.noise2d(x / OCTAVE_WAVELENGTH[1],
                                                          y / OCTAVE_WAVELENGTH[1]), 250)
    return map_array
def initialize(g, app):
    """
    If postgresql url is defined in configuration params a
    scoped session will be created.

    :param g: Flask application-global object; the connection pool is
        attached to it as ``g.postgresql_pool`` on every request.
    :param app: Flask application whose config may contain
        ``DATABASES['POSTGRESQL']`` (a mapping of connection name -> url).
    """
    if 'DATABASES' in app.config and 'POSTGRESQL' in app.config['DATABASES']:
        # Database connection established for console commands
        for k, v in app.config['DATABASES']['POSTGRESQL'].items():
            init_db_conn(k, v)
        if 'test' not in sys.argv:
            # Establish a new connection every request
            @app.before_request
            def before_request():
                """
                Assign postgresql connection pool to the global
                flask object at the beginning of every request
                """
                # inject stack context if not testing
                from flask import _app_ctx_stack
                for k, v in app.config['DATABASES']['POSTGRESQL'].items():
                    init_db_conn(k, v, scopefunc=_app_ctx_stack)
                # NOTE(review): `pool` is presumably a module-level pool that
                # init_db_conn populates — confirm; it is not defined here.
                g.postgresql_pool = pool

            # avoid to close connections if testing
            @app.teardown_request
            def teardown_request(exception):
                """
                Releasing connection after finish request, not required in unit
                testing
                """
                pool = getattr(g, 'postgresql_pool', None)
                if pool is not None:
                    # Remove each connection's scoped session.
                    for k, v in pool.connections.items():
                        v.session.remove()
        else:
            @app.before_request
            def before_request():
                """
                Assign postgresql connection pool to the global
                flask object at the beginning of every request
                """
                for k, v in app.config['DATABASES']['POSTGRESQL'].items():
                    init_db_conn(k, v)
                g.postgresql_pool = pool
def register(workflow_id, workflow_version):
    """Register an (empty) workflow definition in the database.

    The definition is stored under the key
    ``workflow_definitions:<id>:<version>`` with an empty stage list.
    """
    key = "workflow_definitions:{}:{}".format(workflow_id, workflow_version)
    definition = dict(id=workflow_id, version=workflow_version, stages=[])
    DB.save_dict(key, definition, hierarchical=False)
def newid(length=16):
    """
    Generate a new random string ID.

    The generated ID is uniformly distributed and cryptographically strong
    (it is derived from ``os.urandom``). It is hence usable for things like
    secret keys and access tokens.

    :param length: The length (in chars) of the ID to generate.
    :type length: int

    :returns: A random string ID.
    :rtype: str
    """
    # Each base64 character encodes 6 bits, so ceil(length * 6 / 8) random
    # bytes yield at least `length` base64 characters before truncation.
    num_bytes = math.ceil(length * 6 / 8)
    return base64.b64encode(os.urandom(num_bytes))[:length].decode('ascii')
def test_process_cycle(zs2_file_name, verbose=True):
    """Round-trip check for the zs2 utilities.

    A zs2 file is decoded to a data stream, converted to XML, and
    converted back; the data fingerprints before and after must match,
    otherwise a ValueError is raised.
    """
    if verbose:
        print('Decoding %s...' % zs2_file_name)
    decoded_stream = _parser.load(zs2_file_name)
    fp_in = fingerprint(decoded_stream)
    if verbose:
        print('  Data fingerprint %s' % fp_in)
    xml_data = data_stream_to_xml(decoded_stream)
    if verbose:
        print('  Length of XML: %.0f kB' % (len(xml_data)/1024.))
        print('Encoding XML to zs2...')
    re_encoded_stream = xml_to_data_stream(xml_data)
    fp_out = fingerprint(re_encoded_stream)
    if verbose:
        print('  Data fingerprint: %s' % fp_out)
    if fp_in != fp_out:
        raise ValueError('Decode/Encode cycle of %s is unsuccessful.' % zs2_file_name)
    return fp_in == fp_out
def test_dataset_isel(
        dask_client,  # pylint: disable=redefined-outer-name,unused-argument
):
    """Test dataset selection."""
    test_ds = create_test_dataset()
    # Slice along x: dimensions, attributes and values must follow.
    subset = test_ds.isel(slices=dict(x=slice(0, 2)))
    assert subset.dimensions == dict(x=2, y=2)
    assert subset.attrs == (dataset.Attribute(name="attr", value=1), )
    assert numpy.all(
        subset.variables["var1"].values == numpy.arange(4).reshape(2, 2))
    # Slice along y.
    subset = test_ds.isel(slices=dict(y=slice(0, 1)))
    assert subset.dimensions == dict(x=5, y=1)
    assert numpy.all(
        subset.variables["var1"].values == numpy.arange(0, 10, 2).reshape(5, 1))
    # Cannot slice on something which is not a dimension
    with pytest.raises(ValueError, match="invalid dimension"):
        test_ds.isel(slices=dict(z=slice(0, 1)))
    with pytest.raises(ValueError, match="invalid dimension"):
        test_ds.isel(slices=dict(var1=slice(0, 1)))
def test_interpolation_dx():
    """
    Test interpolation of a SparseFunction from a Derivative of
    a Function.
    """
    u = unit_box(shape=(11, 11))
    sparse = SparseFunction(name='s', grid=u.grid, npoint=1)
    sparse.coordinates.data[0, :] = (0.5, 0.5)
    op = Operator(sparse.interpolate(u.dx))
    assert sparse.data.shape == (1,)
    # A single spike of 4 flanked by two spikes of 2 along x.
    u.data[:] = 0.0
    u.data[5, 5] = 4.0
    u.data[4, 5] = 2.0
    u.data[6, 5] = 2.0
    op.apply()
    # Exactly in the middle of 4 points, only 1 nonzero is 4
    assert sparse.data[0] == pytest.approx(-20.0)
def test_gas_concentration(value):
    """
    Test if the Stc3xGasConcentration() type works as expected for different
    values.
    """
    ticks = value.get('ticks')
    concentration = Stc3xGasConcentration(ticks)
    # Exact-type checks: the constructor must return this class, with an
    # int tick count and a float volume percentage.
    assert type(concentration) is Stc3xGasConcentration
    assert type(concentration.ticks) is int
    assert concentration.ticks == ticks
    assert type(concentration.vol_percent) is float
    assert concentration.vol_percent == pytest.approx(value.get('vol_percent'), 0.01)
def get_dev_risk(weight, error):
    """
    Control-variate risk estimate (Deep Embedded Validation).

    :param weight: shape [N, 1], the importance weight for N source samples
        in the validation set
    :param error: shape [N, 1], the error value for each source sample in
        the validation set (typically 0 for a correct classification and 1
        for a wrong one)
    :return: scalar risk estimate
    """
    n_w, d_w = weight.shape
    n_e, d_e = error.shape
    assert n_w == n_e and d_w == d_e, "dimension mismatch!"
    weighted_error = weight * error
    # Sample covariance between weighted error and weight (columns of the
    # stacked [N, 2] matrix), and unbiased variance of the weights.
    stacked = np.concatenate((weighted_error, weight), axis=1)
    cov = np.cov(stacked, rowvar=False)[0][1]
    eta = -cov / np.var(weight, ddof=1)
    # Control-variate corrected mean (E[weight] is 1 in expectation).
    return np.mean(weighted_error) + eta * np.mean(weight) - eta
def skewness_fn(x, dim=1):
    """Calculates skewness of data "x" along dimension "dim"."""
    std, mean = torch.std_mean(x, dim)
    n = torch.Tensor([x.shape[dim]]).to(x.device)
    eps = 1e-6  # guards the division when std is ~0
    # Adjustment factor sqrt(n(n-1))/(n-2) removes the sample bias.
    bias_adjustment = torch.sqrt(n * (n - 1)) / (n - 2)
    # Transpose trick broadcasts the mean over `dim` for arbitrary ranks.
    centered = (x.T - mean.unsqueeze(dim).T).T
    third_moment = torch.sum(centered.pow(3), dim) / n
    return bias_adjustment * (third_moment / std.pow(3).clamp(min=eps))
def encrypt(message):
    """ Self-developed encryption method that uses base conversion """
    base = random.randint(3, 9)
    # 1-based position of each character in the key alphabet.
    numbers = [keys.index(ch) + 1 for ch in message]
    # Re-express each position in the randomly chosen base.
    converted = [convert(num, base) for num in numbers]
    # Substitute every digit with its character from `chars`.
    encoded_words = [''.join(chars[int(digit)] for digit in str(num))
                     for num in converted]
    # Encode the base itself via convert(123, base) and a symbol table;
    # digits with no symbol are silently skipped (as before).
    symbol_table = {'0': '?', '1': '{', '2': '[', '3': '/',
                    '4': '$', '6': '@', '7': '>'}
    encrypted_base_list = [symbol_table[digit]
                           for digit in str(convert(123, base))
                           if digit in symbol_table]
    return insert(list('|'.join(encoded_words)), encrypted_base_list)
def run_shell(cmd: list[str], cwd: str = "", silent: bool = False) -> None:
    """Run a command, optionally discarding its output.

    :param cmd: command and arguments, e.g. ``["git", "status"]``.
    :param cwd: working directory; an empty string means "current
        directory" (passing ``""`` straight to subprocess would raise).
    :param silent: when True, the command's stdout/stderr are discarded.
    """
    # subprocess rejects cwd="" — map it to None (current directory).
    workdir = cwd or None
    if silent:
        # DEVNULL instead of PIPE: with subprocess.call the pipes are
        # never read, so a chatty child could fill the pipe buffer and
        # deadlock; DEVNULL discards output safely.
        subprocess.call(
            cmd,
            cwd=workdir,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    else:
        subprocess.call(cmd, cwd=workdir)
def drop_it(arr=None, func=None):
    """
    Given the array arr, iterate through and remove each element starting
    from the first element (the 0 index) until the function func returns
    true when the iterated element is passed through it.
    Then return the rest of the array once the condition is satisfied,
    otherwise, arr should be returned as an empty array.

    Example:
    drop_it([1, 2, 3, 4], lambda x: x >= 3) should return [3, 4]
    """
    # Preserve the historical zero-argument behavior (returned None).
    if arr is None or func is None:
        return
    # Everything before the first element satisfying func is dropped.
    for index, element in enumerate(arr):
        if func(element):
            return arr[index:]
    # No element ever satisfied func: the whole array is dropped.
    return []
def get_signature_algorithm(algorithm_type_string):
    """convert a string into a key_type (TFTF_SIGNATURE_TYPE_xxx)

    returns a numeric key_type, or raises ValueError if invalid
    """
    try:
        return TFTF_SIGNATURE_ALGORITHMS[algorithm_type_string]
    except (KeyError, TypeError):
        # KeyError: unknown algorithm name; TypeError: unhashable input.
        # (Was a bare `except:`, which also swallowed SystemExit etc.)
        raise ValueError("Unknown algorithm type: '{0:s}'".
                         format(algorithm_type_string))
def print_wer_vs_length():
    """Print the average word error rate for each length sentence."""
    # `wer_bins` is a module-level list of per-length WER samples; `mean`
    # collapses each bin to its average (Python 2: map returns a list).
    avg_wers = map(mean, wer_bins)
    for i in range(len(avg_wers)):
        # Sentence length, then its average WER.
        print "%5d %f"%(i, avg_wers[i])
    print ""
def pytest_sessionstart():
    """ Download sct_testing_data prior to test collection. """
    logger.info("Downloading sct test data")
    download_args = ['-d', 'sct_testing_data', '-o', sct_test_path()]
    downloader.main(download_args)
def bending_without_n_iteration(model, values, concrete_type, exp):
    """Calculate the necessery longitudial reinforcment of a
    beam that is loaded by a torque load without normal forces.

    Strains are in per-mille: eps_c_2 is the concrete edge strain
    (limited to -3.5), eps_s_1 the steel strain (limited to 25).  The
    resisting moment is matched to the design moment by bisection,
    halving the strain step (sigma_c / sigma_s) each iteration.

    Parameters
    ----------
    model : class
        class method that contains the Finite Element Analysis
    values : object
        provides concrete design values (`concrete`) and the static
        usable height (`static_usable_height`)
    concrete_type : str
        key used to look up the concrete design strength fcd
    exp : object
        exposure parameter forwarded to `static_usable_height`

    Returns
    -------
    erf_As : list
        necessary longitudial reinforcement, one entry per beam element
    """
    m = []
    erf_As = []
    fcd = values.concrete(concrete_type)['fcd']
    for i, ele in enumerate(model.elements):
        if type(ele)==BeamColumnElement:
            # End moments of the beam element (sign flipped at node 2).
            m.append(ele.local_internal_forces[2])
            m.append(ele.local_internal_forces[5]*-1)
            # Design moment: the larger absolute end moment.
            m_ed = max(abs(m[0]), abs(m[1]))
            # initial values for eps
            eps_c_2 = -3.5
            eps_s_1 = 25
            sigma_c = 3.4
            sigma_s = 24
            # first iteration step
            alpha_r = (3*abs(eps_c_2)-2)/(3*abs(eps_c_2))
            k_a = (abs(eps_c_2)*(3*abs(eps_c_2)-4)+2)/(2*abs(eps_c_2)*(3*abs(eps_c_2)-2))
            x_c = abs(eps_c_2)/(abs(eps_c_2)+abs(eps_s_1))*values.static_usable_height(ele.h, exp)
            F_cd = -1*alpha_r*ele.b*x_c*fcd
            z = values.static_usable_height(ele.h, exp)-k_a*x_c
            m_rds = -1*F_cd*z*1000
            diff = abs(m_rds-m_ed)
            if m_rds > m_ed:
                # Resisting moment too large: reduce the concrete strain
                # eps_c_2 by bisection until m_rds matches m_ed.
                eps_s_1 = 25
                while diff > 0.001:
                    x_c = abs(eps_c_2)/(abs(eps_c_2)+abs(eps_s_1))*values.static_usable_height(ele.h, exp)
                    # Stress-block coefficients depend on the strain range.
                    if abs(eps_c_2)<= 2:
                        alpha_r = 1/12*abs(eps_c_2)*(6-abs(eps_c_2))
                        k_a = (8-abs(eps_c_2))/(4*(6-abs(eps_c_2)))
                    elif abs(eps_c_2)>2 or abs(eps_c_2)<=3.5:
                        alpha_r = (3*abs(eps_c_2)-2)/(3*abs(eps_c_2))
                        k_a = (abs(eps_c_2)*(3*abs(eps_c_2)-4)+2)/(2*abs(eps_c_2)*(3*abs(eps_c_2)-2))
                    F_cd = -1*alpha_r*ele.b*x_c*fcd
                    z = values.static_usable_height(ele.h, exp)-k_a*x_c
                    m_rds = -1*F_cd*z*1000
                    diff = abs(m_rds-m_ed)
                    if m_rds > m_ed:
                        eps_c_2 = eps_c_2 + sigma_c
                    else:
                        eps_c_2 = eps_c_2 - sigma_c
                    if eps_c_2 < -3.5:
                        eps_c_2 = -3.5
                    # Halve the step each pass (bisection).
                    sigma_c = sigma_c/2
            elif m_rds < m_ed:
                # Concrete compression zone too small: iterate on the
                # steel strain eps_s_1 instead.
                print('Betondruckzone zu gering')
                eps_c_2 = -3.5
                while diff > 0.001:
                    x_c = abs(eps_c_2)/(abs(eps_c_2)+abs(eps_s_1))*values.static_usable_height(ele.h, exp)
                    alpha_r = (3*abs(eps_c_2)-2)/(3*abs(eps_c_2))
                    k_a = (abs(eps_c_2)*(3*abs(eps_c_2)-4)+2)/(2*abs(eps_c_2)*(3*abs(eps_c_2)-2))
                    F_cd = -1*alpha_r*ele.b*x_c*fcd
                    z = values.static_usable_height(ele.h, exp)-k_a*x_c
                    m_rds = -1*F_cd*z*1000
                    diff = abs(m_rds-m_ed)
                    if m_rds < m_ed:
                        eps_s_1 = eps_s_1 - sigma_s
                    else:
                        eps_s_1 = eps_s_1 + sigma_s
                    if eps_s_1 > 25:
                        eps_s_1 = 25
                    # Halve the step each pass (bisection).
                    sigma_s = sigma_s/2
            # Steel stress (was: Stahlspannung).
            sigma_s1d = 435+(525/1.15-500/1.15)/(25-2.175)*(eps_s_1-2.175)
            # Required reinforcement (was: Benoetigte Bewehrung).
            s = eps_c_2 /(eps_c_2-eps_s_1)
            mue2 = m_ed*0.001*fcd/(ele.b*values.static_usable_height(ele.h, exp)**2*fcd*sigma_s1d*(1-k_a*s))
            As=mue2*ele.b*values.static_usable_height(ele.h, exp)*10000
            # "a" is correct, see Leonhardt page 171.
            erf_As.append(As)
            #TODO: add the check for hyperjet
            #diff_erf_As.append(erf_As[i].g)
            #debug('As')
            # Drop this element's two end moments before the next element.
            del m[0]
            del m[0]
    return erf_As
def corrgroups60(display=False):
    """ A simulated dataset with tight correlations among distinct groups of features.

    Returns
    -------
    (pandas.DataFrame, numpy.ndarray)
        X of shape (1000, 60) and a noisy linear response y.
    """
    # Save the global RNG state so it can be restored afterwards.
    # BUG FIX: np.random.seed() returns None, so the original
    # `old_seed = np.random.seed()` / `np.random.seed(old_seed)` pair
    # re-seeded from OS entropy instead of restoring the caller's state.
    rng_state = np.random.get_state()
    # set a constant seed
    np.random.seed(0)

    # generate dataset with known correlation
    N = 1000
    M = 60

    # set one coefficent from each group of 3 to 1
    beta = np.zeros(M)
    beta[0:30:3] = 1

    # build a correlation matrix with groups of 3 tightly correlated features
    C = np.eye(M)
    for i in range(0, 30, 3):
        C[i, i+1] = C[i+1, i] = 0.99
        C[i, i+2] = C[i+2, i] = 0.99
        C[i+1, i+2] = C[i+2, i+1] = 0.99

    f = lambda X: np.matmul(X, beta)

    # Make sure the sample correlation is a perfect match: whiten the raw
    # draws, then color them with the Cholesky factor of C.
    X_start = np.random.randn(N, M)
    X_centered = X_start - X_start.mean(0)
    Sigma = np.matmul(X_centered.T, X_centered) / X_centered.shape[0]
    W = np.linalg.cholesky(np.linalg.inv(Sigma)).T
    X_white = np.matmul(X_centered, W.T)
    # ensure the whitening really decorrelates the data
    assert np.linalg.norm(np.corrcoef(np.matmul(X_centered, W.T).T) - np.eye(M)) < 1e-6

    # create the final data
    X = np.matmul(X_white, np.linalg.cholesky(C).T)
    y = f(X) + np.random.randn(N) * 1e-2

    # restore the previous numpy random state
    np.random.set_state(rng_state)

    return pd.DataFrame(X), y
def test_string_indexer():
    """Test String indexers."""
    ob = Test.StringIndexerTest()
    # Missing keys read back as None, via str and unicode keys alike.
    assert ob["spam"] is None
    assert ob[u"spam"] is None

    ob["spam"] = "spam"
    for key in ("spam", u"spam"):
        assert ob[key] == "spam"
        assert ob[key] == u"spam"

    ob[u"eggs"] = u"eggs"
    for key in ("eggs", u"eggs"):
        assert ob[key] == "eggs"
        assert ob[key] == u"eggs"

    # Non-string keys must be rejected for both reads and writes.
    with pytest.raises(TypeError):
        ob = Test.StringIndexerTest()
        ob[1]

    with pytest.raises(TypeError):
        ob = Test.StringIndexerTest()
        ob[1] = "wrong"
def alpha_nu_gao08(profile, **kwargs):
    """log normal distribution of alpha about the
    alpha--peak height relation from Gao+2008

    Required kwargs: ``z`` (redshift) and ``alpha``; optional
    ``sigma_alpha`` (scatter in dex, defaults to the Dutton & Maccio 2014
    relation).  Returns the Gaussian log-likelihood, or -inf when the
    peak height cannot be computed.
    """
    z = kwargs["z"]
    alpha = kwargs["alpha"]
    # scatter in dex
    if "sigma_alpha" in kwargs:
        sigma_alpha = kwargs["sigma_alpha"]
    else:
        # take scatter from Dutton & Maccio 2014
        sigma_alpha = 0.16 + 0.03 * z
    try:
        M = profile.MDelta(z, "vir")
        nu = peakHeight(M, z)
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit).
        # can't find peak height, reject model
        return -np.inf
    alpha_model = 0.155 + 0.0095 * nu**2
    return lnlike_gauss(np.log10(alpha_model), np.log10(alpha), sigma_alpha)
def testRedirect():
    """Redirect URL"""
    text = 'http://rediretinmyurl.com/http://dest.url.org/1/2/3/4?434 http://secondurl.com ftp://1.2.3.4/adsfasdf'
    expected = ['http://rediretinmyurl.com/http://dest.url.org/1/2/3/4?434',
                'http://secondurl.com',
                'ftp://1.2.3.4/adsfasdf']
    # A URL embedded inside another URL must survive extraction intact.
    assert expected == grab(text, needScheme)
def get_feed_entries(helper, name, stats):
    """Pulls the indicators from the minemeld feed.

    :param helper: add-on helper providing `get_arg` and
        `send_http_request`.
    :param name: feed name forwarded to `normalized`.
    :param stats: unused here; kept for interface compatibility.
    """
    feed_url = helper.get_arg('feed_url')
    feed_creds = helper.get_arg('credentials')
    feed_headers = {}
    # If auth is specified, add it as a header.
    if feed_creds is not None:
        auth = '{0}:{1}'.format(feed_creds['username'], feed_creds['password'])
        # base64.encodestring() was removed in Python 3.9 and required
        # bytes anyway; b64encode emits no trailing newline, so the old
        # .replace('\n', '') is unnecessary.
        auth = base64.b64encode(auth.encode('utf-8')).decode('ascii')
        feed_headers['Authorization'] = 'Basic {0}'.format(auth)
    # Pull events as json.
    resp = helper.send_http_request(
        url=feed_url,
        method='GET',
        parameters={'v': 'json', 'tr': 1},
        headers=feed_headers)
    # Raise exceptions on problems.
    resp.raise_for_status()
    feed_entries = resp.json()
    # Return the normalized events to be saved to the kv store.
    return normalized(name, feed_entries)
def test_eager(
        celery_worker,  # pylint: disable=redefined-outer-name,unused-argument
        task_app_request, dbsession, demo_user):
    """When in eager mode, transactions are executed properly.."""
    celery = get_celery(task_app_request.registry)
    # Force tasks to run synchronously in-process for this test.
    celery._conf["task_always_eager"] = True
    try:
        # Try RetryableTransactionTask in eager mode
        with transaction.manager:
            # Do a dummy database write
            u = dbsession.query(User).first()
            demotasks.modify_username.apply_async(args=[u.id], tm=transaction.manager)
            # Task should not execute until TM commits
            assert u.username != "set by celery"
            # TM commits the new result should be instantly available

        # Task has now fired after transaction was committed
        with transaction.manager:
            u = dbsession.query(User).first()
            assert u.username == "set by celery"

        # Let's test ScheduleOnCommitTask with manually managed transaction
        with transaction.manager:
            u = dbsession.query(User).first()
            u.username = "foobar"
            demotasks.modify_username_manual_transaction.apply_async(args=[u.id], tm=transaction.manager)

        # ScheduledOnCommitTask should have finished now
        with transaction.manager:
            u = dbsession.query(User).first()
            assert u.username == "set by celery"
    finally:
        # Restore asynchronous dispatch so other tests are unaffected.
        celery._conf["task_always_eager"] = False
def get_skeleton_definition(character):
    """
    Returns skeleton definition of the given character
    :param character: str, HIK character name
    :return: dict mapping HIK node name -> {'bone': ..., 'hik_id': ...}
    """
    skeleton = {}
    for hik_id in range(maya.cmds.hikGetNodeCount()):
        bone = get_skeleton_node(character, hik_id)
        # Skip HIK slots that have no bone assigned.
        if bone:
            hik_name = maya.cmds.GetHIKNodeName(hik_id)
            skeleton[hik_name] = {'bone': bone, 'hik_id': hik_id}
    return skeleton
def get_f_a_st(
        fuel="C3H8",
        oxidizer="O2:1 N2:3.76",
        mech="gri30.cti"
):
    """
    Calculate the stoichiometric fuel/air ratio of an undiluted mixture using
    Cantera. Calculates using only x_fuel to allow for compound oxidizer
    (e.g. air)

    Parameters
    ----------
    fuel : str
    oxidizer : str
    mech : str
        mechanism file to use

    Returns
    -------
    float
        stoichiometric fuel/air ratio
    """
    # "air" is shorthand for the standard O2/N2 mixture.
    if oxidizer.lower() == "air":
        oxidizer = "O2:1 N2:3.76"
    gas = ct.Solution(mech)
    gas.set_equivalence_ratio(1, fuel, oxidizer)
    x_fuel = gas.mole_fraction_dict()[fuel]
    # Mole-fraction ratio of fuel to everything else.
    return x_fuel / (1 - x_fuel)
def get_parser_args(args=None):
    """
    Transform args (``None``, ``str``, ``list``, ``dict``) to parser-compatible (list of strings) args.

    Parameters
    ----------
    args : string, list, dict, default=None
        Arguments. If dict, '--' are added in front and there should not be positional arguments.

    Returns
    -------
    args : None, list of strings.
        Parser arguments.

    Notes
    -----
    All non-strings are converted to strings with :func:`str`.
    """
    if isinstance(args, str):
        return args.split()
    if isinstance(args, list):
        return [str(arg) for arg in args]
    if isinstance(args, dict):
        parsed = []
        for key, value in args.items():
            parsed.append('--%s' % key)
            if isinstance(value, list):
                parsed.extend(str(item) for item in value)
            else:
                text = str(value)
                # Empty values become bare flags (no argument appended).
                if text:
                    parsed.append(text)
        return parsed
    return args
def parse_time_to_min(time):
    """Convert a duration to an integer in minutes.

    Example
    -------
    >>> parse_time_to_min("2m 30s")
    2.5
    """
    # Space-separated components are parsed recursively and summed.
    if " " in time:
        return sum(parse_time_to_min(part) for part in time.split(" "))
    time = time.strip()
    for unit, factor in time_units.items():
        if time.endswith(unit):
            amount = float(time.replace(unit, ""))
            # Normalise to minutes.
            return amount * factor / time_units["m"]
def relate_stream_island(stream_layer, island_layer):
    """
    Return the streams inside or delimiting islands.
    The topology is defined by DE-9IM matrices.

    :param stream_layer: the layer of the river network
    :stream_layer type: QgisVectorLayer object (lines)
    :param island_layer: the layer of the islands
    :island_layer type: QgisVectorLayer object (polygons)

    :return: list of lists of all the streams that make up the islands
    :rtype: list of lists of QgisFeatures objects
    """
    streams = list(stream_layer.dataProvider().getFeatures())
    islands = list(island_layer.dataProvider().getFeatures())
    # DE-9IM patterns matching streams inside or on an island's boundary.
    patterns = ('F1FF0F212', '1FF00F212', '1FF0FF212', '1FFF0F212')
    streams_in_island_list = []
    for island in islands:
        island_geom = island.geometry().constGet()
        matching_streams = []
        for stream in streams:
            stream_geom = stream.geometry().constGet()
            engine = QgsGeometry.createGeometryEngine(stream_geom)
            # Prepare the geometry so repeated relation tests are faster.
            engine.prepareGeometry()
            if any(engine.relatePattern(island_geom, pattern) for pattern in patterns):
                matching_streams.append(stream)
        streams_in_island_list.append(matching_streams)
    return streams_in_island_list
def stringify_array(v, maxDepth=None, maxItems=-1, maxStrlen=-1):
    """
    Convert nested data to a string representation.

    Parameters:
        v : the data to convert
        maxDepth (int|None): if > 0, then ellipsise structures deeper than this
        maxItems (int|-1): if > 0, then ellipsise lists longer than this or dicts with more than this many items
        maxStrlen (int|-1): if > 0, then ellipsise strings longer than this

    Returns:
        tuple(depth:int, str): the depth (explored) of the structure and the string representation of the data
    """
    # Thin public wrapper over the private implementation.
    return _stringify_array(v, maxDepth=maxDepth, maxItems=maxItems, maxStrlen=maxStrlen)
def rpickle(picke_file, state=None):
    """
    Load the previously saved state of the treated GPS file.

    Returns a list (empty when the file does not exist).  NOTE: pickle
    deserialization can execute arbitrary code — only load trusted files.
    """
    logger.warning('Running rpickle ...')
    if not picke_file.isfile():
        return []
    with open(picke_file, 'rb') as read_pickle:
        return [] + pickle.load(read_pickle)
def navigation_task(MAX_LOOP, direction):
    """ Moving Alphabot """
    Ab = alphabot.AlphaBot2()
    # Dispatch table replaces the per-iteration if-chain; an unknown
    # direction still sleeps through all iterations without moving.
    moves = {
        'forward': Ab.forward,
        'backward': Ab.backward,
        'left': Ab.left,
        'right': Ab.right,
    }
    move = moves.get(direction.lower())
    try:
        for _ in range(MAX_LOOP):
            time.sleep(0.300)
            if move is not None:
                move()
    except KeyboardInterrupt:
        GPIO.cleanup()
    GPIO.cleanup()
def test_pydist():
    """Make sure pydist.json exists and validates against our schema."""
    # XXX this test may need manual cleanup of older wheels
    import jsonschema

    def open_json(filename):
        # Close the file deterministically instead of leaking the
        # descriptor (the original relied on GC to close it).
        with open(filename, 'rb') as handle:
            return json.loads(handle.read().decode('utf-8'))

    pymeta_schema = open_json(resource_filename('wheel.test',
                                                'pydist-schema.json'))
    valid = 0
    for dist in ("simple.dist", "complex-dist"):
        basedir = pkg_resources.resource_filename('wheel.test', dist)
        for (dirname, subdirs, filenames) in os.walk(basedir):
            for filename in filenames:
                if filename.endswith('.whl'):
                    # Close each wheel archive when done with it.
                    with ZipFile(os.path.join(dirname, filename)) as whl:
                        for entry in whl.infolist():
                            if entry.filename.endswith('/metadata.json'):
                                pymeta = json.loads(whl.read(entry).decode('utf-8'))
                                jsonschema.validate(pymeta, pymeta_schema)
                                valid += 1
    assert valid > 0, "No metadata.json found"
def should_print(test_function):
    """should_print is a helper for testing code that uses print

    For example, if you had a function like this:

    ```python
    def hello(name):
        print('Hello,', name)
    ```

    You might want to test that it prints "Hello, Nate" if you give it the
    name "Nate". To do that, you could write the following test.

    ```python
    @should_print
    def test_hello_nate(output):
        hello("Nate")

        assert output == "Hello, Nate"
    ```

    There are a couple pieces of this:
    - Put `@should_print` directly above the test function.
    - Add an `output` parameter to the test function.
    - Assert against `output`
    """
    # Patch sys.stdout with a FakeStringIO for the duration of the test;
    # mock.patch injects the fake as the extra `output` argument.
    return mock.patch("sys.stdout", new_callable=FakeStringIO)(test_function)
def clusters_to_annotations(image):
    """
    <gui>
        <item name="image" type="Image" label="Image" role="output"/>
    </gui>
    """
    itk_image = medipy.itk.medipy_image_to_itk_image(image, False)
    calculator = itk.ClustersToAnnotationsCalculator[itk_image].New(
        Image=itk_image)
    calculator.Compute()
    # Replace any existing annotations in place.
    image.annotations[:] = []
    for label in calculator.GetAnnotationsLabels():
        # Reverse the calculator's index order, then map to physical space.
        index = [coordinate for coordinate in
                 reversed(calculator.GetAnnotationPosition(label))]
        position = image.index_to_physical(index)
        size = calculator.GetAnnotationSize(label) * min(image.spacing)
        # Random hue at full saturation/value.
        color = colorsys.hsv_to_rgb(random.random(), 1, 1)
        image.annotations.append(medipy.base.ImageAnnotation(
            position, str(label),
            medipy.base.ImageAnnotation.Shape.sphere, size, color))
def fista(y, A, At, reg_weight, noise_eng, max_iter=100, update_reg=False, **kwargs):
    """
    The FISTA algorithm for the ell1 minimisation problem:

        min_x |y - Ax|^2 + reg * |x|_1

    :param y: the given measurements (here it is the Fourier transform at certain frequencies)
    :param A: the mapping from the sparse signal x to the measurements y
    :param At: the mapping from the measurements y to the sparse signal x
    :param reg_weight: regularisation weight for the ell1-norm of x
    :param noise_eng: noise energy, i.e., |y - Ax|^2
    :param max_iter: maximum number of FISTA iterations
    :param update_reg: whether to update the regularisation weight or not
    :param max_iter_reg: maximum number of iterations used to update the regularisation weight
        (read from **kwargs; required only when update_reg is True)
    :return: tuple (x, reg_weight) — the recovered signal and the
        (possibly rescaled) regularisation weight
    """
    if not update_reg:
        max_iter_reg = 1
    else:
        max_iter_reg = kwargs['max_iter_reg']
    # initialise
    x = At(y)
    AtA = lambda input_arg: np.real(At(A(input_arg)))
    # Lipschitz constant for 2 * A^H Ax (estimated via the power method,
    # with a 1% safety margin)
    L = 1.01 * 2. * np.real(power_method(AtA, x.shape, 100))
    # print repr(L)  # for debug purposes
    for reg_loop in range(max_iter_reg):
        # restart FISTA from scratch for each regularisation weight
        x = At(y)
        beta = x
        t_new = 1.
        for fista_loop in range(max_iter):
            x_old = x
            t_old = t_new
            # gradient step
            beta = beta - 2. / L * At(A(beta) - y)
            # soft-thresholding (proximal step for the ell1 norm)
            x = soft(beta, reg_weight / L)
            # update t and beta (Nesterov momentum)
            t_new = (1. + np.sqrt(1. + 4. * t_old ** 2)) / 2.
            beta = x + (t_old - 1.) / t_new * (x - x_old)
        # rescale the weight so the residual energy approaches noise_eng
        reg_weight *= (noise_eng / linalg.norm(y - A(x)) ** 2)
    return x, reg_weight
def produce_segmentation(indices: list[list[int]], wav_name: str) -> list[dict]:
    """produces the segmentation yaml content from the indices of the probabilistic_dac

    Args:
        indices (list[list[int]]): output of the probabilistic_dac function
        wav_name (str): the name of the wav file (with the .wav suffix)

    Returns:
        list[dict]: the content of the segmentation yaml
    """
    talk_segments = []
    for segment_indices in indices:
        duration = len(segment_indices) / TARGET_SAMPLE_RATE
        # Discard segments shorter than the noise threshold.
        if duration < NOISE_THRESHOLD:
            continue
        offset = segment_indices[0] / TARGET_SAMPLE_RATE
        talk_segments.append({
            "duration": round(duration, 6),
            "offset": round(offset, 6),
            "rW": 0,
            "uW": 0,
            "speaker_id": "NA",
            "wav": wav_name,
        })
    return talk_segments
def create_table_persons():
    """
    Table: fp.persons
    Partition key: name (string)
    Attributes: polls (number), friends (number)
    RCU: 2
    WCU: 2
    """
    try:
        print('Creating persons table...')
        dynamodb.create_table(
            TableName='fp.persons',
            # Partition key: name
            KeySchema=[{'AttributeName': 'name', 'KeyType': 'HASH'}],
            AttributeDefinitions=[{'AttributeName': 'name', 'AttributeType': 'S'}],
            ProvisionedThroughput={
                'ReadCapacityUnits': 2,
                'WriteCapacityUnits': 2,
            },
        )
    except Exception as e:
        print('persons table is not created!')
        print(e)
    else:
        print('persons table is successfully created!')
        print()
def trac_get_tracs_for_object(obj, user=None, trac_type=None):
    """
    Returns tracs for a specific object, optionally narrowed down
    to one user and/or one trac type.
    """
    content_type = ContentType.objects.get_for_model(type(obj))
    tracs = Trac.objects.filter(content_type=content_type, object_id=obj.pk)
    if user:
        tracs = tracs.filter(user=user)
    if trac_type:
        tracs = tracs.filter(trac_type=trac_type)
    return tracs
def open_in_browser(url : str):
    """Open the link in (default) browser.

    Args:
        url (string): URL to be opened

    Raises:
        NotURLError: If the url is NOT a url.
    """
    # NOTE(review): comparison kept as `== True` on purpose — url_validate
    # presumably returns True or a falsy failure object; confirm before
    # switching to a plain truthiness check.
    val = url_validate(url)
    if val == True:
        webbrowser.open_new_tab(url)  # Use built-in module to open url in new tab.
    else:
        errors = "[ERROR]:: " + url + ":: is not a valid URL."
        raise NotURLError(errors)
def display_help(parser, sub_command_parsers, perf_args, adb_device, verbose):
    """Display help for command referenced by perf_args.

    Args:
      parser: argparse.ArgumentParser instance which is used to print the
        global help string.
      sub_command_parsers: Dictionary of argparse.ArgumentParser instances
        indexed by subcommand, for each subcommand this script provides.
      perf_args: PerfArgs instance which is used to determine which perf command
        help to display.
      adb_device: Device used to determine which perf binary should be used.
      verbose: Whether verbose output is enabled.
    """
    parser.print_help()
    command = perf_args.command
    if command:
        command_name = command.name
        # Header names either the real perf binary or this wrapper script.
        command_header = '%s help%s' % (
            'perf' if command.real_command else os.path.basename(sys.argv[0]),
            ' %s' % command_name if command_name != 'help' else '')
        # Underline the header to 80 columns (Python 2 print statement).
        print os.linesep.join(
            ('', command_header + ('-' * (80 - len(command_header)))))
        sub_command_parser = sub_command_parsers.get(command_name)
        if sub_command_parser:
            sub_command_parser.print_help()
        elif perf_args.command.real_command:
            # No local parser: delegate to the perf host binary for help text.
            out, err, _ = execute_command(
                find_host_binary(PERFHOST_BINARY, adb_device), perf_args.args,
                'Unable to get %s help' % PERFHOST_BINARY, verbose=verbose,
                ignore_error=True)
            print out + err
def db_upgrade(c, target="head"):
    """
    Upgrade the db to the target alembic revision.
    """
    command = f"poetry run alembic upgrade {target}"
    c.run(command, pty=True, env=env)
def size_from_ftp(ftp, url):
    """Get size of a file on an FTP server.

    Parameters
    ----------
    ftp : FTP
        An open ftplib FTP session.
    url : str
        File URL.

    Returns
    -------
    int
        Size in bytes.
    """
    # Only the path component of the URL is passed to SIZE.
    parsed = urlparse(url)
    return ftp.size(parsed.path)
def set_name_line(hole_lines, name):
    """Define the label of each line of the hole

    Parameters
    ----------
    hole_lines: list
        a list of line object of the slot
    name: str
        the name to give to the line

    Returns
    -------
    hole_lines: list
        List of line object with label
    """
    # Labels are "<name>_<index>" in list order.
    for index, line in enumerate(hole_lines):
        line.label = name + "_" + str(index)
    return hole_lines
def dist_to_boxes(points, boxes):
    """
    Calculates combined distance for each point to all boxes
    :param points: (N, 3)
    :param boxes: (N, 7) [x, y, z, h, w, l, ry]
    :return: distances_array: (M) torch.Tensor of [(N), (N), ...] distances
    """
    distances = torch.Tensor([])
    pts = torch.from_numpy(points)
    box_corners = kitti_utils.boxes3d_to_corners3d(boxes)
    for box in box_corners:
        # Axis-aligned bounding extremes of the 8 corners, and their midpoint.
        low = np.array([min(box[:, 0]), min(box[:, 1]), min(box[:, 2])])
        high = np.array([max(box[:, 0]), max(box[:, 1]), max(box[:, 2])])
        centroid = (low + high) / 2
        box_dists = dist_to_box_centroid(
            pts, torch.from_numpy(centroid)).reshape(1, len(points))
        distances = torch.cat((distances.float(), box_dists.float()), 0)
    return distances
def print_listdir(x):
    """Log a marker, print the task id with the cwd listing, and return both."""
    log = logging.getLogger('SIP.workflow.function')
    log.info('HERE A')
    entries = os.listdir('.')
    print('Task id = {} {}'.format(x, entries))
    return x, entries
def blackwhite2D(data, xsize=None, ysize=None, show=1):
    """Display list or array data as a black/white (grayscale) image.

    Args:
        data: 2-D list or array of pixel values; lists are converted to
            an array first.
        xsize, ysize: popup window size in pixels; both default to 300
            when ``show`` is truthy.
        show: when truthy, display the image in a resized popup window.

    Returns:
        The constructed PIL ``Image`` (mode 'L').
    """
    # isinstance replaces the non-idiomatic type(data) == type([]) check.
    if isinstance(data, list):
        data = array(data)
    w, h = data.shape[1], data.shape[0]
    # preprocess() is assumed to map values into the 0-255 grayscale
    # range expected by putpixel -- TODO confirm against its definition.
    d = preprocess(data)
    im = Image.new('L', (w, h))
    for j in range(h):
        for i in range(w):
            im.putpixel((i, j), d[j][i])
    if show:
        # `is None` replaces the buggy-prone `== None` comparisons.
        if xsize is None:
            xsize = 300
        if ysize is None:
            ysize = 300
        resizeImage(im, xsize, ysize)
    return im
def contains_digit(s):
    """Return True if *s* contains at least one decimal digit character."""
    return any(ch.isdigit() for ch in s)
def to_signed(dtype):
    """
    Return dtype that can hold data of passed dtype but is signed.
    Raise ValueError if no such dtype exists.

    Parameters
    ----------
    dtype : `numpy.dtype`
        dtype whose values the new dtype needs to be able to represent.

    Returns
    -------
    `numpy.dtype`
    """
    # Signed (or non-integer) dtypes already represent their own values.
    if dtype.kind != "u":
        return np.dtype(dtype)
    if dtype.itemsize == 8:
        raise ValueError("Cannot losslessly convert uint64 to int.")
    # Double the width so the unsigned range fits in the signed type.
    bits = min(dtype.itemsize * 16, 64)
    return np.dtype("int{:d}".format(bits))
def prepare_model_runs(args):
    """Generate model runs for every variant in a scenario's chosen range.

    Reads the scenario from the store, validates the optional --start/--end
    bounds from the CLI, and delegates run creation to the store.
    """
    store = _get_store(args)
    nb_variants = len(store.read_scenario_variants(args.scenario_name))
    # Default range covers every variant.
    var_start, var_end = 0, nb_variants
    # CLI bounds are compared against None because 0 is a legal value.
    if args.start is not None:
        var_start = args.start
        if var_start < 0:
            raise ValueError('Lower bound of variant range must be >=0')
        if var_start > nb_variants:
            raise ValueError("Lower bound of variant range greater"
                             " than number of variants")
    if args.end is not None:
        var_end = args.end
        if var_end < 0:
            raise ValueError('Upper bound of variant range must be >=0')
        if var_end > nb_variants - 1:
            raise ValueError("Upper bound of variant range cannot be greater"
                             " than {:d}".format(nb_variants - 1))
    if var_end < var_start:
        raise ValueError("Upper bound of variant range must be >= lower"
                         " bound of variant range")
    store.prepare_model_runs(args.model_run_name, args.scenario_name,
                             var_start, var_end)
def listen_for_wakeword():
    """Continuously monitor the audio stream for the wakeword.

    Delegates to ``core.listen_for_wakeword``; higher priority than listen().

    Returns:
        (bool): True if the wakeword was detected, False otherwise.
    """
    return core.listen_for_wakeword()
def test_server():
    """
    Exercise the full Server lifecycle: idle -> serving -> popped.
    """
    srv: Server[int] = Server()
    # A fresh server is idle and empty.
    assert str(srv) == "(Server: busy=False)"
    assert srv.ready
    assert not srv.busy
    assert srv.size == 0
    # Serving a packet flips it to busy.
    srv.serve(10)
    assert not srv.ready
    assert srv.busy
    assert srv.size == 1
    assert str(srv) == "(Server: busy=True, packet=10)"
    # A busy server refuses a second packet.
    with pytest.raises(RuntimeError):
        srv.serve(20)
    # Popping returns the packet and restores the idle state.
    assert srv.pop() == 10
    assert str(srv) == "(Server: busy=False)"
    assert srv.ready
    assert not srv.busy
    assert srv.size == 0
def import_string(import_name):
    """Returns a callable for a given setuptools style import string

    :param import_name: A console_scripts style import string, either the
        setuptools form ``"pkg.module:attr"`` or a plain dotted path
        ``"pkg.module.attr"``
    :returns: the resolved module object or module attribute
    :raises ImportError: if the string resolves to neither a module nor an
        attribute of a module
    """
    # Normalise the setuptools "module:attr" separator to a dotted path so
    # both spellings are handled by the same lookup sequence below.
    import_name = str(import_name).replace(":", ".")
    try:
        import_module(import_name)
    except ImportError:
        if "." not in import_name:
            # this is a case like "import name", where continuing to the
            # next style of import would not improve the situation, so
            # we raise here.
            raise
    else:
        # The whole string named an importable module: return it directly.
        return sys.modules[import_name]
    # this is a case where the previous attempt may have failed due to
    # not being importable. ("not a package", etc)
    module_name, obj_name = import_name.rsplit(".", 1)
    try:
        module = __import__(module_name, None, None, [obj_name])
    except ImportError:
        # Recurse to support importing modules not yet set up by the parent module
        # (or package for that matter)
        module = import_string(module_name)
    try:
        return getattr(module, obj_name)
    except AttributeError as e:
        # The module imported but lacks the attribute: report as ImportError.
        raise ImportError(e)
def kurtosis(x,y):
    """
    Calculate kurtosis of the probability
    distribution of the forecast error if
    an observation and forecast vector are given.
    Both vectors must have same length, so pairs of
    elements with same index are compared.

    Description:
    Kurtosis is a measure of the magnitude of the peak of the
    distribution, or, conversely, how fat-tailed the distribution is,
    and is the fourth standardized moment.
    The difference between the kurtosis of a sample distribution
    and that of the normal distribution is known as the excess
    kurtosis. In the subsequent analysis, the term kurtosis will be
    treated synonymously with excess kurtosis. A distribution
    with a positive kurtosis value is known as leptokurtic, which
    indicates a peaked distribution; whereas a negative kurtosis
    indicates a flat data distribution, known as platykurtic. The
    pronounced peaks of the leptokurtic distribution represent a
    large number of very small forecast errors.

    :param x: vector of observations
    :param y: vector of forecasts
    :returns: Kurtosis (excess kurtosis, as computed by
        ``scipy.stats.kurtosis``) of the error distribution ``x - y``
    """
    # Imported locally; note the import deliberately shadows this
    # function's own name inside the body.
    from scipy.stats import kurtosis
    return kurtosis(x-y)
def preprocess_text(sentence):
    """Handle some weird edge cases in parsing, like 'i' needing to be
    capitalized to be correctly identified as a pronoun."""
    # Map of exact word spellings that must be capitalized.
    fixes = {'i': 'I', "i'm": "I'm"}
    return ' '.join(fixes.get(word, word) for word in sentence.split(' '))
def _GetModuleFromPathViaPkgutil(module_path, name_to_give):
    """Loads module by using pkgutil.get_importer mechanism.

    Args:
      module_path: filesystem path of the module to load.
      name_to_give: name under which the loaded module should be registered.

    Returns:
      The module object produced by _LoadModule.

    Raises:
      ImportError: if no importer handles the directory or the importer
        cannot find the module.
    """
    importer = pkgutil.get_importer(os.path.dirname(module_path))
    if importer:
        if hasattr(importer, '_par'):
            # par zipimporters must have full path from the zip root.
            # pylint:disable=protected-access
            # NOTE(review): relies on the importer's private _par._zip_filename
            # attribute -- confirm against the par importer implementation.
            module_name = '.'.join(
                module_path[len(importer._par._zip_filename) + 1:].split(os.sep))
        else:
            module_name = os.path.basename(module_path)
        if importer.find_module(module_name):
            return _LoadModule(importer, module_path, module_name, name_to_give)
    raise ImportError('{0} not found'.format(module_path))
def test_both_inputs():
    """Invoking the CLI with both input styles at once must exit with code 1."""
    outcome = runner.invoke(main, BAD_BOTH_PARAMS_COMMAND)
    assert outcome.exit_code == 1
def weighted_l2_loss(gt_value, pred_value, weights):
    """Computes an elementwise weighted squared error.

    All three arguments may be tensors or plain floats; floats are treated
    as shape-[1] broadcastable values for logging purposes.
    """
    diff = pred_value - gt_value
    squared_diff = diff * diff
    # Floats have no get_shape(); report them as a broadcastable [1] shape.
    gt_shape = [1] if isinstance(gt_value, float) else gt_value.get_shape().as_list()
    weight_shape = [1] if isinstance(weights, float) else weights.get_shape().as_list()
    tf.logging.info('gt vs pred vs weights shape: %s vs %s vs %s', str(gt_shape),
                    str(pred_value.get_shape().as_list()), str(weight_shape))
    # TODO(kgenova) Consider using tf.losses.mean_squared_error. But need to
    # be careful about reduction method. Theirs is probably better since the
    # magnitude of the loss isn't affected by the weights. But it would need
    # hparam tuning, so it's left out in the first pass.
    log.info('Weights: {}'.format(weights))
    return weights * squared_diff
def csr_scale_rows(*args):
    """Scale the rows of a CSR matrix in place.

    SWIG-generated overloaded dispatcher: the C extension selects the
    concrete routine from the runtime argument types -- the index dtype
    (npy_int32 or npy_int64 for Ap/Aj) and the value dtype of Ax (every
    supported bool / signed / unsigned integer, float, and complex
    variant).

    Every overload shares the signature::

        csr_scale_rows(n_row, n_col, Ap, Aj, Ax, Xx)

    where n_row/n_col are the matrix dimensions, Ap/Aj are the CSR index
    arrays (const), Ax holds the nonzero values (mutated in place), and
    Xx holds the per-row scale factors (const).
    """
    return _csr.csr_scale_rows(*args)
def Uninstall(vm):
    """Uninstalls the pip package on the VM.

    Delegates to pip.Uninstall, forcing the python3 pip binary ('pip3').

    Args:
      vm: the virtual machine object passed through to pip.Uninstall.
    """
    pip.Uninstall(vm, pip_cmd='pip3')
def view_scene(args: argparse.Namespace) -> None:
    """Read GTSFM output from .txt files and render the scene to the GUI.

    We also zero-center the point cloud, and transform camera poses to a new
    world frame, where the point cloud is zero-centered.

    Args:
        args: rendering options. Must provide ``output_dir`` (directory
            containing COLMAP-style points3D.txt / images.txt / cameras.txt),
            ``max_range`` (distance cutoff applied after centering), and
            ``rendering_library``.

    Raises:
        RuntimeError: if ``args.rendering_library`` is not a supported backend.
    """
    points_fpath = f"{args.output_dir}/points3D.txt"
    images_fpath = f"{args.output_dir}/images.txt"
    cameras_fpath = f"{args.output_dir}/cameras.txt"
    wTi_list, img_fnames = io_utils.read_images_txt(images_fpath)
    calibrations = io_utils.read_cameras_txt(cameras_fpath)
    # A single calibration entry is treated as a shared camera model:
    # replicate it so there is one calibration per image.
    if len(calibrations) == 1:
        calibrations = calibrations * len(img_fnames)
    point_cloud, rgb = io_utils.read_points_txt(points_fpath)
    mean_pt = compute_point_cloud_center_robust(point_cloud)
    # Zero-center the point cloud (about estimated center)
    zcwTw = Pose3(Rot3(np.eye(3)), -mean_pt)
    # expression below is equivalent to applying zcwTw.transformFrom() to each world point
    point_cloud -= mean_pt
    # Drop points beyond max_range of the new origin; keep colors in sync.
    is_nearby = np.linalg.norm(point_cloud, axis=1) < args.max_range
    point_cloud = point_cloud[is_nearby]
    rgb = rgb[is_nearby]
    # Move the camera poses into the same zero-centered world frame.
    for i in range(len(wTi_list)):
        wTi_list[i] = zcwTw.compose(wTi_list[i])
    if args.rendering_library == "open3d":
        draw_scene_open3d(point_cloud, rgb, wTi_list, calibrations, args)
    # elif args.rendering_library == "mayavi":
    #     draw_scene_mayavi(point_cloud, rgb, wTi_list, calibrations, args)
    else:
        raise RuntimeError("Unsupported rendering library")
def get_homography_calibration_files(fullpath=True):
    """Return the homography calibration yaml files in the homographies
    directory of the mct configuration.

    Args:
        fullpath: when True (default) each entry is joined with the
            homographies directory; otherwise bare file names are returned.

    Returns:
        list of str: calibration file names/paths, excluding the
        calibrator's own parameters file.
    """
    file_list = os.listdir(homographies_dir)
    # The calibrator parameter file lives in the same directory but is not
    # a calibration result, so drop it from the listing.
    # (basename replaces the old `dummy, params_file = os.path.split(...)`.)
    params_file = os.path.basename(homography_calibrator_params_file)
    file_list.remove(params_file)
    if fullpath:
        file_list = [os.path.join(homographies_dir, f) for f in file_list]
    return file_list
def complexity_hjorth(signal):
    """**Hjorth's Complexity and Parameters**

    Compute Hjorth's (1970) time-domain parameters of a time series:

    * **Activity**: the variance of the signal (mean power for a
      zero-mean signal).
    * **Mobility**: sqrt of the ratio between the variance of the first
      derivative and the variance of the signal -- a proxy for mean
      frequency.
    * **Complexity**: ratio of the mobility of the first derivative to
      the mobility of the signal; converges to 1 for a pure sine wave.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.

    Returns
    -------
    hjorth : float
        Hjorth's Complexity.
    info : dict
        Dictionary with the additional Hjorth parameters ``"Mobility"``
        and ``"Activity"``.

    References
    ----------
    * Hjorth, B (1970) EEG Analysis Based on Time Domain Properties.
      Electroencephalography and Clinical Neurophysiology, 29, 306-310.
      http://dx.doi.org/10.1016/0013-4694(70)90143-4
    """
    # Only 1-D input is supported.
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    # First and second discrete derivatives of the signal.
    first_deriv = np.diff(signal)
    second_deriv = np.diff(first_deriv)
    # Variances of the signal and of its derivatives.
    activity = np.var(signal)
    var_d1 = np.var(first_deriv)
    var_d2 = np.var(second_deriv)
    # Mobility and complexity per Hjorth's definitions.
    mobility = np.sqrt(var_d1 / activity)
    complexity = np.sqrt(var_d2 / var_d1) / mobility
    return complexity, {"Mobility": mobility, "Activity": activity}
def load_gecko():
    """Load the GeCKOv2 A375 screen restricted to non-essential guides.

    Reads the non-essential guide list (Excel) and the full A375 table
    (tab-separated text) and returns the A375 rows whose sgRNA sequence
    appears among the non-essential guides.
    Target variable is column "A375 Percent rank".
    """
    data_nonessential = pandas.read_excel(settings.pj(settings.offtarget_data_dir, 'GeCKOv2_Non_essentials_Achilles_A375_complete.xls'))  # (4697, 31)
    # BUG FIX: sep="\t" belongs to read_csv, not to the path-join helper pj().
    data_all_A375 = pandas.read_csv(settings.pj(settings.offtarget_data_dir, 'GeckoAvanaSameUnits/GeCKOv2_DMSO_lentiGuide_A375.txt'), sep="\t")  # (121964, 25)
    guides = data_nonessential['sgRNA Sequence'].values
    # BUG FIX: the boolean mask is built from data_all_A375, so it must index
    # data_all_A375; the original indexed data_nonessential with a mask of
    # mismatched length (121964 vs 4697 rows per the shape comments).
    data = data_all_A375[data_all_A375["sgRNA Sequence"].isin(guides)]
    return data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.