content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def carla_cityscapes_image_to_ndarray(image: carla.Image) -> np.ndarray:  # pylint: disable=no-member
    """Converts a `CARLA` semantic segmentation image to a `NumPy` array.

    Args:
      image: The `CARLA` semantic segmented image.

    Returns:
      A `NumPy` array representation of the image (float32, RGB, in [0, 1]).
    """
    # Apply the CityScapes color palette in-place before reading the raw bytes.
    image.convert(carla.ColorConverter.CityScapesPalette)  # pylint: disable=no-member
    pixels = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    pixels = pixels.astype(np.float32) / 255
    # Raw data is BGRA: reshape, drop the alpha channel, then reverse the
    # channel axis to get RGB ordering.
    pixels = np.reshape(pixels, (image.height, image.width, 4))
    return pixels[:, :, :3][:, :, ::-1]
async def restore(ctx, server: str, backup_name: str):
    """Restores a server to a named file system backup.

    Shuts down the server before executing and restarts after the backup
    finishes.

    :param ctx: Command invocation context.
    :param server: Name of the server to restore.
    :param backup_name: Name of the file system backup to restore from.
    """
    # TODO: not yet implemented.
    pass
def test_fetch_burst_uuid():
    """test fetch_burst_uuid

    Verifies that fetching by burst id returns the selected subset by
    default and the full burst with all=True.
    """
    test_dict = UUID_DICT["burst"]
    uuid = test_dict["uuid"]
    # NOTE: removed unused local `filename = test_dict["filename"]`.
    lib = PhotoLibrary()
    photo = lib.fetch_uuid(uuid)
    # Default call: only the burst photos marked as selected.
    bursts_selected = lib.fetch_burst_uuid(photo.burstid)
    assert len(bursts_selected) == test_dict["burst_selected"]
    assert isinstance(bursts_selected[0], PhotoAsset)
    # all=True: every photo in the burst.
    bursts_all = lib.fetch_burst_uuid(photo.burstid, all=True)
    assert len(bursts_all) == test_dict["burst_all"]
    assert isinstance(bursts_all[0], PhotoAsset)
def main(time_series: str, config: dict = None, train: bool = True, test: bool = True):
    """Train and/or test LSTM models for each training file of a time series.

    Parameters
    ----------
    time_series : str
        Name of the time series to process (key into config["TS"]).
    config : dict, optional
        Pipeline configuration (paths and per-series settings), by default
        None (treated as an empty dict).
    train : bool, optional
        Run the training step for each file, by default True.
    test : bool, optional
        Run the testing step for each file, by default True.
    """
    # BUG FIX: the default used to be a mutable `{}`, which is shared
    # between calls; use None as the sentinel instead.
    if config is None:
        config = {}
    # Get train files
    train_files = list_files(time_series, config, pattern="*_tr.csv")
    test_files = list_files(time_series, config, pattern="*_ts.csv")
    if len(train_files) == 0:
        print("Error: no files found!")
        sys.exit()
    # Train Gluon models
    target = config["TS"][time_series]["target"]
    for n, file in enumerate(tqdm(train_files)):
        params = {
            'time_series': time_series,
            'target': target,
            'model': "LSTM",
            'iter': n + 1
        }
        FDIR = path.join(config["DATA_PATH"], config["PRED_PATH"], time_series, params["model"])
        FPATH = path.join(FDIR, f"pred_{str(n+1)}.csv")
        # Skip iterations whose predictions were already produced.
        if path.exists(FPATH):
            continue
        run_name = f"{time_series}_{target}_LSTM_{n+1}"
        X, y = load_data(file, target)
        if train:
            train_iteration(X, y, config, run_name, params)
        if test:
            X_ts, y_ts = load_data(test_files[n], target)
            test_iteration(X_ts, y_ts, config, run_name, params)
def main(argv):
    """
    Build the docs and serve them with an HTTP server.

    Parses command-line options, performs an initial Sphinx build, then
    watches the source tree and rebuilds on change while serving the
    built HTML until interrupted.

    :param argv: Command-line arguments (excluding the program name).
    """
    parser = argparse.ArgumentParser(description='Build and serve HTML Sphinx docs')
    parser.add_argument(
        '--port',
        help='Serve on this port, default 8000',
        type=int,
        default=8000)
    parser.add_argument(
        '--source',
        help='Directory of source Sphinx (reStructuredText) docs',
        type=os.path.realpath,
        default='docs/source')
    parser.add_argument(
        '--destination',
        help='Where to build the HTML output',
        type=os.path.realpath,
        default='docs/build/html')
    parser.add_argument(
        '--doctrees',
        help='Where the doctrees are built',
        type=os.path.realpath,
        default='docs/build/doctrees')
    options = parser.parse_args(argv)
    bound_build_docs = partial(build_docs, options.source, options.destination, options.doctrees)
    # Do the initial build
    bound_build_docs()
    # Watch the source directory for changes, build docs again if detected
    observer = Observer()
    observer.schedule(
        BuildDocsHandler(bound_build_docs),
        path=options.source, recursive=True)
    observer.start()
    # Set the root for the request handler, overriding Python stdlib current
    # working directory.
    DocsHTTPRequestHandler._root = options.destination
    server = SocketServer.TCPServer(
        ('', options.port),
        DocsHTTPRequestHandler)
    try:
        logger.info('Serving on localhost:{}'.format(options.port))
        server.serve_forever()
    except KeyboardInterrupt:
        sys.stdout.write('\n')
        logger.info('(stopping server)')
        observer.stop()
    finally:
        observer.join()
        # BUG FIX: was `logging.info(...)`, bypassing the module-level
        # `logger` used everywhere else in this function.
        logger.info('Server stopped, exiting')
        sys.exit(0)
def since(version):
    """A decorator that annotates a function to append the version
    of skutil the function was added. This decorator is an adaptation of PySpark's.

    Parameters
    ----------
    version : str, float or int
        The version the specified method was added to skutil.

    Examples
    --------
    >>> @since('0.1.5')
    ... def some_fun():
    ...     '''Some docstring'''
    ...     return None
    ...
    >>> some_fun.__doc__  # doctest: +SKIP
    'Some docstring\\n\\n.. versionadded:: 0.1.5'

    .. versionadded:: 0.1.5
    """
    indent_p = re.compile(r'\n( +)')

    def deco(f):
        # BUG FIX: tolerate functions without a docstring (f.__doc__ is
        # None), which previously raised TypeError inside re.findall.
        doc = f.__doc__ or ''
        # Match the docstring's minimum indentation so the appended
        # directive lines up with the existing text.
        indents = indent_p.findall(doc)
        indent = ' ' * (min(len(m) for m in indents) if indents else 0)
        f.__doc__ = doc.rstrip() + "\n\n%s.. versionadded:: %s" % (indent, version)
        return f
    return deco
def get_dMdU():
    """Compute dMdU, the total derivative of the higher-order stress M
    with respect to the degree-of-freedom vector U.

    Chains the partials dMdC, dMdPsi and dMdGamma with dCdU, dPsidU and
    dGammadU via the chain rule, then emits implementation matrices for
    every symbol involved.

    Returns the symbolic 4-index array dMdU with shape (3, 3, 3, 8*12).
    """
    # Symbolic partial derivatives of M w.r.t. each deformation measure.
    dMdC = form_nd_array("dMdC", [3,3,3,3,3])
    dMdPsi = form_nd_array("dMdPsi", [3,3,3,3,3])
    dMdGamma = form_nd_array("dMdGamma",[3,3,3,3,3,3])
    # Derivatives of the deformation measures w.r.t. the DOF vector.
    # NOTE(review): dPsidU comes from form_symb_dPhidU (Phi vs Psi naming) —
    # presumably intentional; confirm against the helper definitions.
    dCdU = form_symb_dCdU()
    dPsidU = form_symb_dPhidU()
    dGammadU = form_symb_dGammadU()
    dMdU = form_nd_array("dMdU",[3,3,3,8*12])
    # Chain rule:
    #   dM/dU = dM/dC : dC/dU + dM/dPsi : dPsi/dU + dM/dGamma : dGamma/dU
    for I in range(3):
        for J in range(3):
            for K in range(3):
                for L in range(8*12):
                    dMdU[I,J,K,L] = 0
                    for O in range(3):
                        for P in range(3):
                            dMdU[I,J,K,L] += dMdC[I,J,K,O,P]*dCdU[O,P,L] + dMdPsi[I,J,K,O,P]*dPsidU[O,P,L]
                            for Q in range(3):
                                dMdU[I,J,K,L] += dMdGamma[I,J,K,O,P,Q]*dGammadU[O,P,Q,L]
    # Emit code/matrix forms for each intermediate (truncated to the first
    # 12 DOF columns for the tangent implementations).
    tmp = [get_matrix_form_TOT(dCdU)[:,:12], get_matrix_form_TOT(dPsidU)[:,:12],\
           get_matrix_form_FOT(dGammadU)[:,:12], get_matrix_form_VOT(dMdC),\
           get_matrix_form_VOT(dMdPsi), get_matrix_form_VIOT(dMdGamma)]
    symb = ["dCdU","dPsidU","dGammadU","dMdC","dMdPsi","dMdGamma"]
    [implementation_extract_matrix(t,s,"I","I") for t,s in zip(tmp,symb)]
    implementation_print_matrix(get_matrix_form_FOT(dMdU)[:,:12],"dMdU","I","I")
    return dMdU
async def test_setup_duplicate_entries(opp: OpenPeerPower, remote: Mock, caplog):
    """Test duplicate setup of platform.

    Setting up the same config twice must not create a second entity.
    """
    await async_setup_component(opp, SAMSUNGTV_DOMAIN, MOCK_CONFIG)
    await opp.async_block_till_done()
    assert opp.states.get(ENTITY_ID)
    assert len(opp.states.async_all()) == 1
    # Second setup with identical config should be a no-op.
    await async_setup_component(opp, SAMSUNGTV_DOMAIN, MOCK_CONFIG)
    assert len(opp.states.async_all()) == 1
def get_group_value_ctx_nb(sc_oc):
    """Get group value from context.

    Accepts `vectorbt.portfolio.enums.SegmentContext` and
    `vectorbt.portfolio.enums.OrderContext`.

    Best called once from `segment_prep_func_nb`.
    To set the valuation price, change `last_val_price` of the context in-place.

    !!! note
        Cash sharing must be enabled."""
    # Group value is only well-defined when the group shares one cash balance.
    if not sc_oc.cash_sharing:
        raise ValueError("Cash sharing must be enabled")
    group_cash = sc_oc.last_cash[sc_oc.group]
    return get_group_value_nb(
        sc_oc.from_col,
        sc_oc.to_col,
        group_cash,
        sc_oc.last_shares,
        sc_oc.last_val_price
    )
def motion_blur_generate_kernel(radius, angle, sigma):
    """Generate a square motion-blur kernel.

    Args:
        radius (int): Kernel radius; the kernel is (2*radius+1) x (2*radius+1).
        angle (float): Radians clockwise from the (x=1, y=0) vector. This
            is how ImageMagick's -motion-blur filter accepts angles, as far
            as I can tell.
        sigma (float): Std-dev of the Gaussian weighting along the blur line.

    Returns:
        tuple: (kernel, cy, cx) where kernel is a torch tensor summing to 1
        and (cy, cx) is the pixel where the blur line starts.

    >>> mb_1_0_inf_expected = torch.ones(3) / 3
    >>> mb_1_0_inf = motion_blur_generate_kernel(1, 0, np.inf)[0]
    >>> assert torch.all(torch.isclose(mb_1_0_inf[0], mb_1_0_inf_expected))
    """
    # Make angles be counterclockwise from (x=1, y=0) vector to maintain sanity.
    angle = 2 * np.pi - angle
    # Normalize all angles into [0, 2*pi].
    if angle < 0:
        # BUG FIX: was `ceil(angle / (2*pi))`, which is <= 0 for negative
        # angles and therefore never brought them into range.
        angle += math.ceil(-angle / (2 * np.pi)) * 2 * np.pi
    if angle > 2 * np.pi:
        angle = angle % (2 * np.pi)
    size = 2 * radius + 1
    kernel = torch.zeros((size, size))
    # Gaussian centered at 0th element.
    # NOTE: scipy.signal.gaussian was removed in SciPy 1.13; the windows
    # namespace is the supported location.
    kernel_1d = scipy.signal.windows.gaussian(size * 2 - 1, sigma)[size - 1:]
    direction_up = 0 <= angle <= np.pi
    direction_right = (angle < np.pi / 2) or (angle > 3 / 2 * np.pi)
    cy = size - 1 if direction_up else 0
    cx = 0 if direction_right else size - 1
    # dy is relative to matrix coordinates, so, e.g., angle of np.pi/4 should
    # be a line going up => dy should be negative.
    dx, dy = np.cos(angle).item(), -np.sin(angle).item()
    for i in range(size):
        # *o*ffset_*x*, *o*ffset_*y*
        ox, oy = dx * i, dy * i
        # BUG FIX: clamp to the last valid index (size - 1); the previous
        # clamp to `size` could index one past the kernel bounds.
        x = min(cx + round(ox), size - 1)
        y = min(cy + round(oy), size - 1)
        assert x >= 0, f'x={x} should be >= 0!'
        assert y >= 0, f'y={y} should be >= 0!'
        kernel[y, x] = kernel_1d[i]
    kernel /= kernel.sum()
    return kernel, cy, cx
def substitute_crypto_to_req(req):
    """Replace crypto requirements if customized."""
    crypto_backend = get_crypto_req()
    if crypto_backend is None:
        return req

    def keep(entry):
        # Drop any requirement pinning one of the known crypto libraries.
        crypto_libs = (PYCRYPTO_DIST, "cryptography")
        return not any(entry.lower().startswith(lib) for lib in crypto_libs)

    return [entry for entry in req if keep(entry)] + [crypto_backend]
def remove_att(doc_id, doc_rev, att_id, **kwargs):
    """Delete an attachment.

    http://docs.couchdb.org/en/stable/api/document/attachments.html#delete--db-docid-attname

    :param str doc_id: The attachment document.
    :param str doc_rev: The document revision.
    :param str att_id: The attachment to remove.
    :param kwargs: (optional) Arguments that :meth:`requests.Session.request` takes.
    :rtype: (str, str, dict)
    """
    # Make sure there is a params dict we can write the revision into.
    if not isinstance(kwargs.get("params"), dict):
        kwargs["params"] = {}
    kwargs["params"]["rev"] = doc_rev
    path = urljoin(utils.encode_document_id(doc_id), utils.encode_attachment_id(att_id))
    return "DELETE", path, kwargs
def read_file(file_path):
    """Read a file's contents, dispatching on the path's URL scheme.

    Paths with an ``s3`` scheme are delegated to ``read_s3_file``; anything
    else is treated as a local file path.

    :param file_path: Local path or S3 URL to read.
    :return: The file contents.
    """
    s3_schema = 's3'
    scheme = urllib.parse.urlparse(file_path).scheme
    if scheme == s3_schema:
        return read_s3_file(file_path)
    # The original condition `not scheme or scheme != s3` reduces to
    # `scheme != s3`. Use a context manager so the handle is closed even
    # if read() raises (the original leaked the handle on error).
    with open(file_path) as file_stream:
        return file_stream.read()
def build_command(codemodders_list: List) -> BaseCodemodCommand:
    """Build a custom command with the list of visitors."""

    class CustomCommand(BaseCodemodCommand):
        # Run exactly the provided codemodders, in order.
        transformers = codemodders_list

    return CustomCommand(CodemodContext())
def default_gen_mat(dt: float, size: int) -> np.ndarray:
    """Default process matrix generator.

    Builds the Taylor-expansion transition matrix: identity with
    ``dt**i / i!`` on the i-th superdiagonal.

    Parameters
    ----------
    dt : float
        Dimension variable difference.
    size : int
        Size of the process matrix, equals to number of rows and columns.

    Returns
    -------
    np.ndarray
        Process matrix.
    """
    # BUG FIX: `np.math` was removed in NumPy 1.25+; use the stdlib
    # factorial instead.
    from math import factorial

    mat = np.identity(size)
    for i in range(1, size):
        np.fill_diagonal(mat[:, i:], dt**i / factorial(i))
    return mat
def handle_list(bot, update, conn):
    """Lists all contracts that are active."""
    chat_id = update.message.chat_id
    for contract in db.get_open_contracts_for_user(conn, chat_id):
        message = (
            f'Contract for url {contract.query} \n'
            f'To stop watching it, send "/stop {contract.contract_id}"'
        )
        bot.send_message(contract.chat_id, message)
def bubble_sort_optimized(array):
    """Optimized bubble sort (in-place).

    Stops early as soon as a full pass performs no swaps, meaning the
    remainder of the list is already sorted.

    :param array: Input list to sort in place.
    """
    for unsorted_end in range(len(array) - 1, 0, -1):
        swapped = False
        for idx in range(unsorted_end):
            if array[idx] > array[idx + 1]:
                array[idx], array[idx + 1] = array[idx + 1], array[idx]
                swapped = True
        # No swaps in this pass => the array is sorted; stop early.
        if not swapped:
            break
def SplitMultiDelims(line, delimiters):
    """
    Split a string into tokens using one or more (multi-character) delimiters.
    (Bug: The current version of this function does not preserve white space,
    but this should not matter.)
    """
    pending = ''
    for word in line.strip().split():
        if word in delimiters:
            # Flush whatever has accumulated, then the delimiter itself.
            yield pending
            yield word
            pending = ''
        else:
            pending = pending + ' ' + word if pending else word
    if pending:
        yield pending
async def test_consume_multiple_queues(event_loop, caplog):
    """Consume multiple queues and check delivery.

    A single consumer drains two queues; one message is sent to each and
    both must be counted, with no errors logged.
    """
    class CounterConsumerMultiple(CounterConsumerMixin, BaseConsumer):
        """Counter consumer (multiple queue interface).

        Count messages from all consumed queues in `self.counter`.
        """
        pass
    backend = QueueBackend(loop=event_loop)
    await backend.connect()
    queue_1 = await backend.get_queue('test_mult_1')
    queue_2 = await backend.get_queue('test_mult_2')
    consumer = CounterConsumerMultiple(loop=event_loop)
    await consumer.start()
    # One consumer attached to both queues.
    consumer.consume(queue_1)
    consumer.consume(queue_2)
    await send_test_message(backend.connection, "test_mult_1")
    await send_test_message(backend.connection, "test_mult_2")
    # Block until both messages have been delivered to the consumer.
    await wait_messages(consumer, 2)
    await consumer.stop()
    assert consumer.counter == 2
    assert log_count(caplog, level="ERROR") == 0
    backend.close()
def row_accessor(row: tuple):
    """
    Utility generator yielding tuple of form (str, str); e.g
    ('A10', 'Project/Programme Name').
    :param row: iterable of cells with `column`, `row` and `value` attributes
    :return: generator of (coordinate, value) pairs
    """
    for cell in row:
        coordinate = "{}{}".format(cell.column, cell.row)
        yield (coordinate, cell.value)
def _PrepareListOfSources(spec, generator_flags, gyp_file):
    """Prepare list of sources and excluded sources.

    Besides the sources specified directly in the spec, adds the gyp file so
    that a change to it will cause a re-compile. Also adds appropriate sources
    for actions and copies. Assumes later stage will un-exclude files which
    have custom build steps attached.

    Arguments:
        spec: The target dictionary containing the properties of the target.
        generator_flags: Dictionary of generator options; only the
            'standalone' flag is consulted here.
        gyp_file: The name of the gyp file.
    Returns:
        A pair of (list of sources, list of excluded sources).
        The sources will be relative to the gyp file.
    """
    sources = OrderedSet()
    _AddNormalizedSources(sources, spec.get('sources', []))
    excluded_sources = OrderedSet()
    # Add in the gyp file.
    if not generator_flags.get('standalone'):
        sources.add(gyp_file)
    # Add in 'action' inputs and outputs.
    for a in spec.get('actions', []):
        inputs = a['inputs']
        inputs = [_NormalizedSource(i) for i in inputs]
        # Add all inputs to sources and excluded sources.
        inputs = OrderedSet(inputs)
        sources.update(inputs)
        # External builders handle action inputs themselves, so they are
        # only excluded for the native msvs build.
        if not spec.get('msvs_external_builder'):
            excluded_sources.update(inputs)
        if int(a.get('process_outputs_as_sources', False)):
            _AddNormalizedSources(sources, a.get('outputs', []))
    # Add in 'copies' inputs and outputs.
    for cpy in spec.get('copies', []):
        _AddNormalizedSources(sources, cpy.get('files', []))
    return (sources, excluded_sources)
def striptag(tag):
    """
    Get the short representation of a fully qualified tag

    :param str tag: a (fully qualified or not) XML tag
    """
    # A fully qualified tag looks like '{namespace}local'; keep only the
    # local part. Anything else is returned unchanged.
    if not tag.startswith('{'):
        return tag
    return tag.rsplit('}')[1]
def dump_key(key):
    """Convert key into printable form using openssl utility.

    Used to compare keys which can be stored in different
    format by different OpenSSL versions
    """
    proc = Popen(["openssl", "pkey", "-text", "-noout"], stdin=PIPE, stdout=PIPE)
    stdout, _ = proc.communicate(key)
    return stdout
def download_complete(root_data_path, domain_name, start_date, end_date):
    """
    Check that all files have been downloaded and that they contain the data in
    the expected date range
    """
    missing = _find_missing_files(
        root_data_path=root_data_path,
        domain_name=domain_name,
        start_date=start_date,
        end_date=end_date,
    )
    # Download is complete exactly when nothing is missing.
    return len(missing) == 0
def convertsrc(direc=os.path.curdir, ext=None, orig=1):
    """Replace Numeric/arrayobject.h with numpy/oldnumeric.h in all files in the
    directory with extension give by list ext (if ext is None, then all files are
    replaced).

    :param direc: directory whose files are rewritten (default: current dir)
    :param ext: list of file extensions to process; None means every file
    :param orig: if truthy, keep the original file as "<name>.orig";
        otherwise delete it before writing the converted file
    """
    if ext is None:
        files = glob.glob(os.path.join(direc,'*'))
    else:
        files = []
        for aext in ext:
            files.extend(glob.glob(os.path.join(direc,"*.%s" % aext)))
    for afile in files:
        fid = open(afile)
        fstr = fid.read()
        fid.close()
        # Rewrite the header include; n is the number of substitutions made.
        fstr, n = header_re.subn(r'numpy/oldnumeric.h',fstr)
        if n > 0:
            if orig:
                # Preserve a backup copy before writing the new contents.
                base, ext = os.path.splitext(afile)
                os.rename(afile, base+".orig")
            else:
                os.remove(afile)
            makenewfile(afile, fstr)
def get_test_string(actual: Any, rtol: float, atol: float) -> str:
    """
    Args:
        actual: The actual value that was produced, and that should be the desired value.
        rtol: The relative tolerance of the comparisons in the assertion.
        atol: The absolute tolerance of the comparisons in the assertion.

    Returns:
        A string of Python code that produces the desired value.
    """
    # Tolerances are unused for plain values; the string form of `actual`
    # is sufficient here.
    rendered = str(actual)
    return rendered
def is_type_resolved(_type):
    """Helper function that checks if type is already resolved."""
    # Either a known basic type, or an explicit TypeDef instance.
    resolved_basic = _type in BASIC_TYPES
    return resolved_basic or isinstance(_type, TypeDef)
def _add_ingredients(ingredients, column1, column2):
    """Add ingredients from two columns.

    Appends parsed ingredients to `ingredients` and clears both column
    lists in place. Lines whose first non-whitespace character is '-' are
    treated as continuations of the previous line.
    """
    # Process down then across. It's possible that the file was exported
    # to read across then down, but we're ignoring that possibility.
    # This could be a flag we could pass in.
    column1.extend(column2)
    # A line where the first non-whitespace character is a '-' is a
    # convention that indicates that this line should be appended to the
    # previous line. As a result, we never append the ingredient right away.
    # Instead wait until we find a line that does not begin with '-' and
    # then add the ingredient.
    previous = ''
    for ingredient in column1:
        if ingredient.lstrip().startswith('-'):
            previous = previous.rstrip() + ' ' + ingredient.lstrip()[1:].strip()
        else:
            if previous.lstrip():
                ingredients.append(_get_ingredient(previous))
            previous = ingredient
    # Flush the final pending ingredient, if any.
    if previous.lstrip():
        ingredients.append(_get_ingredient(previous))
    column1[:] = []  # clear
    column2[:] = []
def menu_alta(nombre, apellido, datos):
    """Ask whether the given name should be added to the address book and,
    if the user confirms, prompt for the contact's details and append them.

    :param nombre: first name that was not found in the address book
    :param apellido: last name that was not found
    :param datos: list of [nombre, apellido, telefono, cumple] entries,
        mutated in place
    """
    print ("No se encuentra %s %s en la agenda." %(nombre, apellido))
    confirmacion = input("¿Desea ingresarlo? (s/n) ")
    # Anything other than "s" (yes) aborts without adding an entry.
    if confirmacion.lower() != "s":
        return
    telefono = input("Telefono: ")
    cumple = input("Cumpleaños: ")
    datos.append([nombre,apellido,telefono,cumple])
def test_rom_is_bytes(mario):
    """Test the emulator can run with in-memory bytes."""
    rom = mario.read_bytes()
    cartridge = Cartridge(rom)
    # The ROM header title should appear in the cartridge repr.
    assert "SUPER MARIOLAND" in repr(cartridge)
def test_get_or_create_bootcamp_application(mocker):
    """
    get_or_create_bootcamp_application should fetch an existing bootcamp
    application, or create one with the application state set properly
    """
    patched_derive_state = mocker.patch(
        "applications.api.derive_application_state",
        return_value=AppStates.COMPLETE.value,
    )
    users = UserFactory.create_batch(2)
    bootcamp_runs = BootcampRunFactory.create_batch(2)
    bootcamp_app, created = get_or_create_bootcamp_application(
        bootcamp_run_id=bootcamp_runs[0].id, user=users[0]
    )
    # The state must be derived exactly once for a newly created application.
    patched_derive_state.assert_called_once_with(bootcamp_app)
    assert bootcamp_app.bootcamp_run == bootcamp_runs[0]
    assert bootcamp_app.user == users[0]
    assert bootcamp_app.state == patched_derive_state.return_value
    assert created is True
    # The function should just return the existing application if one exists already
    existing_app = BootcampApplicationFactory.create(
        user=users[1], bootcamp_run=bootcamp_runs[1]
    )
    bootcamp_app, created = get_or_create_bootcamp_application(
        bootcamp_run_id=bootcamp_runs[1].id, user=users[1]
    )
    assert bootcamp_app == existing_app
    assert created is False
def main(audio_dir, results_dir):
    """
    Estimates beat positions for all files in the Harmonix Set, using the estimators published in the paper.

    Args:
        audio_dir: str - The complete path to the directory containing mp3 files for all tracks in the Harmonix Set.
        results_dir: str - The complete path to the directory to save the estimated beat positions to.
    """
    #
    # Get the filenames from the dataset, these should correspond to the filenames of the audio files.
    #
    dataset = HarmonixDataset()
    filenames_and_beats = dataset.beat_time_lists
    # Map dataset annotation filenames onto the expected mp3 paths.
    filenames = [os.path.join(audio_dir, os.path.splitext(os.path.basename(fname))[0] + '.mp3') for fname in filenames_and_beats.keys()]
    #
    # Compile arguments and run estimators
    #
    # NOTE [matt.c.mccallum 10.13.19]: Librosa is done on a single thread due to a locking issue
    #      when using the `librosa.load` function and the multiprocessing module.
    #      This causes the `librosa.load` function to hang indefinitely.
    #      This may be due to the specific decoder that Librosa uses and so this
    #      may be platform dependent as is the decoder selection.
    args = [(fname,) for fname in filenames]
    # (estimator inputs, estimator function, output directory, worker count)
    estimator_args = [
        (args, madmom_1, os.path.join(results_dir, 'Krebs'), 12),
        (args, madmom_2, os.path.join(results_dir, 'Korzeniowski'), 12),
        (args, madmom_3, os.path.join(results_dir, 'Bock_1'), 12),
        (args, madmom_4, os.path.join(results_dir, 'Bock_2'), 12),
        (args, ellis, os.path.join(results_dir, 'Ellis'), 1)
    ]
    for args in estimator_args:
        process_estimator(*args)
def test_for_dataset_1(rate_sim_3):
    """Test _for_dataset.

    Use timestep=False.
    """
    x = rate_sim_3._for_dataset(timestep=False)
    # Expected stimulus set for the fixture's trials — values mirror the
    # rate_sim_3 fixture definition (TODO confirm if the fixture changes).
    desired_stimulus_set = np.array(
        [
            [3, 1],
            [3, 1],
            [9, 12],
            [0, 0],
            [3, 4],
            [3, 4],
            [3, 4],
            [3, 4]
        ], dtype=np.int32
    )
    tf.debugging.assert_equal(desired_stimulus_set, x['stimulus_set'])
def get_extension(media):
    """Gets the corresponding extension for any Telegram media.

    Returns '.jpg' for photos, the guessed extension for documents with a
    mime type, and '' when no extension can be determined.
    """
    # Photos are always compressed as .jpg by Telegram
    try:
        get_input_photo(media)
        return '.jpg'
    except TypeError:
        # These cases are not handled by input photo because it can't
        if isinstance(media, (types.UserProfilePhoto, types.ChatPhoto)):
            return '.jpg'
    # Documents will come with a mime type
    if isinstance(media, types.MessageMediaDocument):
        media = media.document
    if isinstance(media, (
            types.Document, types.WebDocument, types.WebDocumentNoProxy)):
        if media.mime_type == 'application/octet-stream':
            # Octet stream are just bytes, which have no default extension
            return ''
        else:
            return guess_extension(media.mime_type) or ''
    return ''
def VLBAInitContFQParms(uv, parms, err,
                        logfile=None, check=False, debug=False):
    """
    Initialize VLBA continuum pipeline frequency dependent parameters

    Values set if None on input

    * uv      = Project raw UV data object
    * parms   = Project parameters
    * err     = Obit error/message stack
    * logfile = logfile for messages
    * check   = Only check script, don't execute tasks
    * debug   = show input

    Returns python dict with parameters (parms, updated in place)
    """
    ################################################################
    # get frequency from uv data
    freq = uv.Desc.Dict["crval"][uv.Desc.Dict["jlocf"]]
    # Per-band defaults, keyed by the upper frequency bound (Hz, exclusive);
    # None means "everything above the previous bound".  Value tuples are:
    #   manPCsolInt = Manual phase cal solution interval (min)
    #   manPCSmoo   = Manual phase cal smoothing time (hr)
    #   delaySmoo   = Delay smoothing time (hr)
    #   bpsolint1   = BPass phase correction solution in min
    #   FOV         = Field of view radius in deg.
    #   solPInt     = phase self cal solution interval (min)
    #   solAInt     = amp+phase self cal solution interval (min)
    #   findSolInt  = Solution interval (min) for Calib
    #   findTimeInt = Maximum timerange, large=>scan
    #   CalAvgTime  = Time for averaging calibrated uv data (min)
    band_defaults = [
        # Below L band
        (1.0e9,  (0.25, 10.0, 0.5, 10.0/60.0, 0.2/3600,  0.10, 3.0, 0.1,  10.0, 10.0/60.0)),
        # L band
        (2.0e9,  (0.5,  10.0, 0.5, 15.0/60.0, 0.2/3600,  0.25, 3.0, 0.25, 10.0, 10.0/60.0)),
        # S band
        (3.0e9,  (0.5,  1.0,  0.5, 10.0/60.0, 0.1/3600,  0.25, 3.0, 0.5,  10.0, 10.0/60.0)),
        # C band
        (8.0e9,  (0.5,  10.0, 0.5, 10.0/60.0, 0.1/3600,  0.25, 3.0, 0.5,  10.0, 10.0/60.0)),
        # X band
        (10.0e9, (0.5,  10.0, 0.5, 10.0/60.0, 0.05/3600, 0.25, 3.0, 0.5,  10.0, 10.0/60.0)),
        # Ku band
        (18.0e9, (0.5,  10.0, 0.5, 10.0/60.0, 0.02/3600, 0.25, 3.0, 0.5,  10.0, 10.0/60.0)),
        # K band
        (26.0e9, (0.2,  10.0, 0.5, 10.0/60.0, 0.02/3600, 0.25, 3.0, 0.3,  10.0, 5.0/60.0)),
        # Ka band
        (38.0e9, (0.2,  10.0, 0.5, 10.0/60.0, 0.02/3600, 0.25, 3.0, 0.2,  10.0, 5.0/60.0)),
        # Q band
        (50.0e9, (0.1,  10.0, 0.5, 5.0/60.0,  0.02/3600, 0.10, 3.0, 0.1,  10.0, 5.0/60.0)),
        # Above Q band
        (None,   (0.10, 10.0, 0.5, 5.0/60.0,  0.01/3600, 0.10, 3.0, 0.1,  10.0, 4.0/60.0)),
    ]
    names = ("manPCsolInt", "manPCSmoo", "delaySmoo", "bpsolint1", "FOV",
             "solPInt", "solAInt", "findSolInt", "findTimeInt", "CalAvgTime")
    # Pick the first band whose upper bound exceeds the observing frequency
    # and fill in only the parameters that are still None.
    for bound, values in band_defaults:
        if bound is None or freq < bound:
            for name, value in zip(names, values):
                if parms[name] is None:
                    parms[name] = value
            break
    # BUG FIX: the docstring promised a returned dict, but the original
    # returned None; returning parms is backward compatible.
    return parms
    # end VLBAInitContFqParms
def process_input(df, col_group, col_t, col_death_rate, return_df=True):
    """
    Trim, filter and add extra information to the data frame.

    Args:
        df (pd.DataFrame): Provided data frame.
        col_group (str): Column name of group definition.
        col_t (str): Column name of the independent variable.
        col_death_rate (str): Name for column that contains the death rate.
        return_df (bool, optional):
            If True return the combined data frame, otherwise return the
            splitted dictionary.

    Returns:
        pd.DataFrame: processed data frame with columns
        ['location', 'days', 'ascdr', 'ln ascdr', 'asddr', 'ln asddr'].
    """
    assert col_group in df
    assert col_t in df
    assert col_death_rate in df
    # trim down the data frame to the three columns of interest and rename
    # them ('ascdr' presumably = cumulative death rate, 'asddr' = its daily
    # increment -- confirm the abbreviations with the module docs)
    df = df[[col_group, col_t, col_death_rate]].reset_index(drop=True)
    df.sort_values([col_group, col_t], inplace=True)
    df.columns = ['location', 'days', 'ascdr']
    # check and filter and add more information, one location at a time
    data = split_by_group(df, col_group='location')
    for location, df_location in data.items():
        # each location must have at most one row per day
        assert df_location.shape[0] == df_location['days'].unique().size
        df_location = filter_death_rate(df_location,
                                        col_t='days',
                                        col_death_rate='ascdr')
        df_location['ln ascdr'] = np.log(df_location['ascdr'])
        # daily increment: first difference of the cumulative rate, with the
        # first day's increment taken against an implicit 0.0
        df_location['asddr'] = df_location['ascdr'].values - \
            np.insert(df_location['ascdr'].values[:-1], 0, 0.0)
        # NOTE(review): np.log yields -inf/nan for non-positive increments;
        # presumably filter_death_rate guarantees positivity -- confirm
        df_location['ln asddr'] = np.log(df_location['asddr'])
        data.update({
            location: df_location.copy()
        })
    if return_df:
        return pd.concat(list(data.values()))
    else:
        return data
def cross_entropy_loss():
    """Create a binary cross-entropy loss object that expects raw logits."""
    loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    return loss_fn
def main():
    """Entry point."""
    greet('Content extraction tool')
    filesystem = Filesystem()
    source = ask('Path to source theme')
    target = ask('Path to target theme')
    filesystem.ensure_folder_exists(source)
    filesystem.ensure_folder_exists(target)
    # Keep asking until the operator confirms the key/value pairs.
    confirmed = False
    while not confirmed:
        keys = ask_list('Which keys you want to check')
        values = ask_list('Corresponding values')
        pairs = list(zip(keys, values))
        print('Resulting pairs:', pairs)
        conf = ask('Type 1 if this is correct')
        confirmed = conf == '1'
    print('Finding uuids')
    chosen_uuids = []
    meta_path = filesystem.join(source, 'metainfo')
    # Scan every metainfo record and keep the uuids whose fields match
    # all of the confirmed key/value pairs.
    for filename in perc(filesystem.list_files(meta_path)):
        full_path = filesystem.join(meta_path, filename)
        with open(full_path, mode='r', encoding='utf-8') as file:
            record = json.loads(file.read())
        if all(record.get(key) == value for key, value in pairs):
            chosen_uuids.append(record['uuid'])
    print(f'Found {len(chosen_uuids)} matching records')
    for uuid in perc(chosen_uuids):
        relocate_by_uuid(uuid, source, target, filesystem)
def f(x):
    """
    Surrogate function over the error metric to be optimized
    """
    cutoff, delta, n_sparse, nlmax = (float(x[:, i]) for i in range(4))
    evaluation = run_quip(cutoff=cutoff, delta=delta, n_sparse=n_sparse, nlmax=nlmax)
    print("\nParam: {}, {}, {}, {} | MAE : {}, R2: {}".format(cutoff, delta, n_sparse, nlmax, evaluation[0], evaluation[1]))
    # The optimizer minimizes the first metric (MAE).
    return evaluation[0]
def equal(*vals):
    """Return True when every argument compares equal to the first one.

    Zero or one argument is trivially considered equal.
    """
    if len(vals) < 2:
        return True
    first = vals[0]
    return not any(first != other for other in vals[1:])
def test_all_settings():
    """ Test parse_request for '_all _settings GET'
    returns correctly parsed request.
    """
    args = {
        'method': 'GET',
        'uri': '/_all/_settings',
    }
    expected = {
        'indices': ['_all'],
        'cluster': False,
        'call': '_settings',
        'scripted': False,
    }
    tornado_http_request = HTTPRequest(**args)
    assert parse_request(tornado_http_request) == expected
def chrome_options() -> Options:
    """Pass standard Chrome options to a test."""
    options = Options()
    executable_path = os.getenv("EXECUTABLE_PATH")
    assert (
        executable_path is not None
    ), "EXECUTABLE_PATH environment variable must be set"
    logger.info(f"EXECUTABLE_PATH is {executable_path}")
    options.binary_location = executable_path
    # Flags are applied in the same order they were originally listed.
    chrome_flags = (
        "whitelisted-ips=''",
        "disable-xss-auditor",
        "disable-web-security",
        "allow-running-insecure-content",
        "no-sandbox",
        "disable-setuid-sandbox",
        "disable-popup-blocking",
        "allow-elevated-browser",
        "verbose",
    )
    for flag in chrome_flags:
        options.add_argument(flag)
    return options
def main(custom_args: list = None):
    """
    The main entry point of the `dref` command line tool.

    Accepts two positional arguments:
        input_uri - the input document to be resolved.
        output_file - the output file for resolved data.

    If the `output_file` extension is `yaml` or `yml`, the output
    format will be YAML, otherwise JSON.

    The program may be used from inside other python programs by calling
    this function, and passing the arguments as the `custom_args` function
    argument as a `list`. By default, the `sys.argv` is used.
    """
    args = parse_args(sys.argv[1:] if custom_args is None else custom_args)
    log_level = VERBOSITY.get(args.verbosity, logging.INFO)
    # Route WARNING/INFO/DEBUG to stdout and ERROR to stderr; colorize only
    # when the corresponding stream is a TTY.
    out_handler = logging.StreamHandler(sys.stdout)
    out_handler.setFormatter(DrefLogFormatter(use_color=sys.stdout.isatty()))
    out_handler.addFilter(DrefLogFilter(logging.WARN,
                                        logging.INFO,
                                        logging.DEBUG))
    err_handler = logging.StreamHandler(sys.stderr)
    err_handler.setFormatter(DrefLogFormatter(use_color=sys.stderr.isatty()))
    err_handler.addFilter(DrefLogFilter(logging.ERROR))
    log = logging.getLogger('dollar-ref')
    log.addHandler(out_handler)
    log.addHandler(err_handler)
    log.setLevel(log_level)
    try:
        data = read_file(args.input_uri)
        cwd = os.path.dirname(args.input_uri)
    except FileNotFoundError:
        log.error(f"Input file '{args.input_uri}' was not found.")
        sys.exit(1)
    try:
        # NOTE(review): passing `args.internal` as `external_only` reads
        # inverted -- confirm the CLI flag semantics before changing.
        resolved = resolve(data, cwd=cwd, external_only=args.internal)
        with open(args.output_file, 'w') as out:
            # Output format is chosen by the target file extension.
            if args.output_file.endswith(('yml', 'yaml')):
                raw_out = yaml.dump(resolved,
                                    explicit_start=True,
                                    default_flow_style=False)
            else:
                raw_out = json.dumps(resolved)
            out.write(raw_out)
    except FileNotFoundError:
        # Raised here when the output path cannot be created/written.
        log.error(f"Could not write to output file '{args.output_file}'.")
        sys.exit(1)
    except ResolutionError as exc:
        log.error(str(exc))
        sys.exit(1)
    log.info(f"Successfully resolved '{args.input_uri}' "
             f"into '{args.output_file}'.")
def exp(x: pd.Series) -> pd.Series:
    """
    Exponential of series

    :param x: timeseries
    :return: exponential of each element

    **Usage**

    For each element in the series, :math:`X_t`, raise :math:`e` (Euler's number) to the power of :math:`X_t`.
    Euler's number is the base of the natural logarithm, :math:`ln`.

    :math:`R_t = e^{X_t}`

    **Examples**

    Raise :math:`e` to the power :math:`1`. Returns Euler's number, approximately 2.71828

    >>> exp(1)

    **See also**

    :func:`log`
    """
    # Delegates to numpy; scalars also work (as in the doctest above), even
    # though the annotation advertises pd.Series.
    return np.exp(x)
def level_location(level, cache_dir):
    """
    Return the path where all tiles for `level` will be stored.

    >>> level_location(2, '/tmp/cache')
    '/tmp/cache/02'
    """
    # String levels are used verbatim; numeric levels are zero-padded.
    if isinstance(level, string_type):
        part = level
    else:
        part = "%02d" % level
    return os.path.join(cache_dir, part)
def pil_paste_image(im, mask, start_point=(0, 0)):
    """
    Paste `mask` onto `im` at `start_point`, using the mask itself as the
    transparency mask (PIL's 3-argument `paste` semantics).

    :param im: base image as a numpy array
    :param mask: overlay image as a numpy array; it is also passed as the
        paste mask, so it is expected to carry transparency (e.g. RGBA or
        mode "L"/"1") -- TODO confirm with callers
    :param start_point: (x, y) upper-left corner where the overlay is pasted
    :return: the composited image as a numpy array
    """
    out = Image.fromarray(im)
    mask = Image.fromarray(mask)
    out.paste(mask, start_point, mask)
    return np.asarray(out)
def service_northwind_v2(schema_northwind_v2):
    """https://services.odata.org/V2/Northwind/Northwind.svc/

    Builds a pyodata v2 Service from the pre-parsed Northwind schema.
    The host is deliberately non-resolvable so tests cannot accidentally
    reach the real service; `requests` here is presumably patched/mocked
    by the test suite -- confirm in conftest.
    """
    return pyodata.v2.service.Service('http://not.resolvable.services.odata.org/V2/Northwind/Northwind.svc',
                                      schema_northwind_v2, requests)
def train_and_eval(
    params: base_configs.ExperimentConfig,
    strategy_override: tf.distribute.Strategy) -> Mapping[str, Any]:
  """Runs the train and eval path using compile/fit.

  Builds a distribution strategy and datasets, constructs the model
  (optionally wrapped for channel pruning), compiles it, then either fits
  or evaluates depending on `params.mode` / `flags.FLAGS.mode`.
  Returns the stats dict produced by `common.build_stats`.
  """
  logging.info('Running train and eval.')

  # Note: for TPUs, strategy and scope should be created before the dataset
  strategy = strategy_override or distribution_utils.get_distribution_strategy(
      distribution_strategy=params.runtime.distribution_strategy,
      all_reduce_alg=params.runtime.all_reduce_alg,
      num_gpus=params.runtime.num_gpus,
      tpu_address=params.runtime.tpu)
  strategy_scope = distribution_utils.get_strategy_scope(strategy)
  logging.info('Detected %d devices.',
               strategy.num_replicas_in_sync if strategy else 1)

  # One-hot labels are implied by a positive label-smoothing setting.
  label_smoothing = params.model.loss.label_smoothing
  one_hot = label_smoothing and label_smoothing > 0

  builders = _get_dataset_builders(params, strategy, one_hot)
  datasets = [builder.build() if builder else None for builder in builders]

  # Unpack datasets and builders based on train/val/test splits
  train_builder, validation_builder = builders  # pylint: disable=unbalanced-tuple-unpacking
  train_dataset, validation_dataset = datasets

  train_epochs = params.train.epochs
  train_steps = params.train.steps or train_builder.num_steps
  validation_steps = params.evaluation.steps or validation_builder.num_steps

  initialize(params, train_builder)
  logging.info('Global batch size: %d', train_builder.global_batch_size)

  with strategy_scope:
    model_params = params.model.model_params.as_dict()
    model = get_models()[params.model.name](**model_params)

    # Optionally restore pre-trained weights (a directory resolves to its
    # latest checkpoint).
    if params.model.model_weights_path:
      if os.path.isdir(params.model.model_weights_path):
        checkpoint = tf.train.latest_checkpoint(params.model.model_weights_path)
      else:
        checkpoint = params.model.model_weights_path
      logging.info('Load weights from %s', checkpoint)
      model.load_weights(checkpoint)

    # Wrap the model for channel pruning, either from a generated
    # single-layer sensitivity config or from a pruning config file.
    if flags.FLAGS.mode == 'sensitivity_analysis' or flags.FLAGS.pruning_config_file:
      if flags.FLAGS.mode == 'sensitivity_analysis':
        # The two pruning sources are mutually exclusive.
        if flags.FLAGS.pruning_config_file:
          raise ValueError
        # Pick the N-th prunable layer (one that has a kernel or a
        # depthwise kernel).
        layer = [
            layer for layer in model.layers
            if hasattr(layer, 'kernel') or hasattr(layer, 'depthwise_kernel')
        ][flags.FLAGS.sensitivity_layer_count]
        layer_name = layer.name
        weight_name = 'kernel' if hasattr(layer, 'kernel') else 'depthwise_kernel'
        pruning_params = cprune_from_config.generate_sensitivity_config(
            model_name=model.name,
            layer_name=layer_name,
            weight_name=weight_name,
            granularity=flags.FLAGS.sensitivity_granularity,
            gamma=flags.FLAGS.sensitivity_gamma,
            respect_submatrix=flags.FLAGS.sensitivity_respect_submatrix,
            two_over_four_chin=flags.FLAGS.sensitivity_two_over_four_chin)
      else:
        pruning_params = get_pruning()[params.model.name]
        params_dict.override_params_dict(
            pruning_params, flags.FLAGS.pruning_config_file, is_strict=False)
      logging.info('Specified pruning params: %s', pp.pformat(pruning_params.as_dict()))
      _pruning_params = cprune_from_config.predict_sparsity(model, pruning_params)
      logging.info('Understood pruning params: %s', pp.pformat(_pruning_params))
      model = cprune_from_config.cprune_from_config(model, pruning_params)
    else:
      # No pruning: clone to get a fresh graph with identical weights.
      weights_list = model.get_weights()
      model = tf.keras.models.clone_model(model)
      model.set_weights(weights_list)

    models = [model]
    if flags.FLAGS.mode == 'prune_physically':
      # Also keep a physically shrunk copy for before/after comparison.
      smaller_model = cprune_from_config.prune_physically(model)
      models.append(smaller_model)

    # Compile every model variant with its own optimizer/metrics.
    for _model in models:
      learning_rate = optimizer_factory.build_learning_rate(
          params=params.model.learning_rate,
          batch_size=train_builder.global_batch_size,
          train_steps=train_steps)
      optimizer = optimizer_factory.build_optimizer(
          optimizer_name=params.model.optimizer.name,
          base_learning_rate=learning_rate,
          params=params.model.optimizer.as_dict())

      metrics_map = _get_metrics(one_hot)
      metrics = [metrics_map[metric] for metric in params.train.metrics]

      if one_hot:
        loss_obj = tf.keras.losses.CategoricalCrossentropy(
            label_smoothing=params.model.loss.label_smoothing)
      else:
        loss_obj = tf.keras.losses.SparseCategoricalCrossentropy()
      _model.compile(optimizer=optimizer,
                     loss=loss_obj,
                     metrics=metrics)

    initial_epoch = 0
    if params.train.resume_checkpoint:
      initial_epoch = resume_from_checkpoint(model=model,
                                             model_dir=params.model_dir,
                                             train_steps=train_steps)

  callbacks = None
  if params.mode == 'train_and_eval':
    serialize_config(params=params, model_dir=params.model_dir)
    # TODO(dankondratyuk): callbacks significantly slow down training
    model_pruning_config = None
    if flags.FLAGS.pruning_config_file:
      model_pruning_config = cprune_from_config._expand_model_pruning_config(
          model, pruning_params
      )
    callbacks = custom_callbacks.get_callbacks(
        model_checkpoint=params.train.callbacks.enable_checkpoint_and_export,
        include_tensorboard=params.train.callbacks.enable_tensorboard,
        time_history=params.train.callbacks.enable_time_history,
        track_lr=params.train.tensorboard.track_lr,
        model_pruning_config=model_pruning_config,
        write_model_weights=params.train.tensorboard.write_model_weights,
        batch_size=train_builder.global_batch_size,
        log_steps=params.train.time_history.log_steps,
        model_dir=params.model_dir)
    if flags.FLAGS.pruning_config_file:
      callbacks += [
          cpruning_callbacks.UpdateCPruningStep(),
          # cpruning_callbacks.CPruningSummaries(log_dir=params.model_dir),
      ]

  if params.evaluation.skip_eval:
    validation_kwargs = {}
  else:
    validation_kwargs = {
        'validation_data': validation_dataset,
        'validation_steps': validation_steps,
        'validation_freq': params.evaluation.epochs_between_evals,
    }

  history = None
  if params.mode == 'train_and_eval':
    history = model.fit(
        train_dataset,
        epochs=train_epochs,
        steps_per_epoch=train_steps,
        initial_epoch=initial_epoch,
        callbacks=callbacks,
        verbose=flags.FLAGS.verbose,
        **validation_kwargs)
  elif params.mode == 'eval':
    # Apply the pruning masks so evaluation sees the pruned weights.
    cprune.apply_cpruning_masks(model)
    if flags.FLAGS.pruning_config_file:
      _pruning_params = cprune_from_config.predict_sparsity(model, pruning_params)
      logging.info('Pruning result: %s', pp.pformat(_pruning_params))

  validation_output = None
  if params.evaluation.eval_data == 'train':
    eval_dataset = train_dataset
    eval_steps = train_steps
  elif params.evaluation.eval_data == 'validation':
    eval_dataset = validation_dataset
    eval_steps = validation_steps

  if params.mode == 'sensitivity_analysis':
    # Sweep 16 sparsity levels on the selected layer/weight and log each
    # level's metrics (plus the realized sparsity) to tensorboard.
    file_writer = tf.summary.create_file_writer(flags.FLAGS.model_dir + '/metrics')
    file_writer.set_as_default()
    cprune_registry.ConstraintRegistry.add_weight_constraint_pair(
        'depthwise_kernel', 'depthwise_constraint')
    for sparsity_x_16 in range(16):
      cprune.apply_cpruning_masks(model, step=sparsity_x_16)
      _validation_output = model.evaluate(
          eval_dataset, steps=eval_steps, verbose=2, return_dict=True)
      _validation_output = [_validation_output['loss'],
                            _validation_output['accuracy'],
                            _validation_output['top_5_accuracy']]
      _stats = common.build_stats(history, _validation_output, callbacks)
      prefix = 'pruning_sensitivity/' + layer_name + '/' + weight_name + '/'
      for key, value in _stats.items():
        tf.summary.scalar(prefix + key, data=value, step=sparsity_x_16)
      _pruning_params = cprune_from_config.predict_sparsity(model, pruning_params)
      sparsity = _pruning_params['pruning'][0]['pruning'][0]['current_sparsity']
      tf.summary.scalar(prefix + 'sparsity', data=sparsity, step=sparsity_x_16)
  elif flags.FLAGS.mode == 'prune_physically':
    logging.info('Number of filters before and after physical pruning:')
    for layer, new_layer in zip(model.layers, smaller_model.layers):
      if type(layer) is tf.keras.layers.Conv2D:
        logging.info(' {}, {}, {}'.format(layer.name, layer.filters, new_layer.filters))
      if type(layer) is tf.keras.layers.Dense:
        logging.info(' {}, {}, {}'.format(layer.name, layer.units, new_layer.units))
    # Evaluate and export both the original and the shrunk model.
    for i, _model in enumerate(models):
      situation = 'before' if i == 0 else 'after'
      logging.info('Model summary {} physical pruning:'.format(situation))
      _model.summary(print_fn=logging.info)
      _validation_output = _model.evaluate(
          eval_dataset, steps=eval_steps, verbose=2, return_dict=True)
      _validation_output = [_validation_output['loss'],
                            _validation_output['accuracy'],
                            _validation_output['top_5_accuracy']]
      _stats = common.build_stats(history, _validation_output, callbacks)
      logging.info('Evaluation {} physical pruning: {}'.format(situation, _stats))
      postfix = '' if i == 0 else '_small'
      export_path = os.path.join(flags.FLAGS.model_dir, 'saved_model' + postfix)
      _model.save(export_path, include_optimizer=False)
  elif not params.evaluation.skip_eval or params.mode == 'eval':
    logging.info('Evaluate %s data', params.evaluation.eval_data)
    validation_output = model.evaluate(
        eval_dataset, steps=eval_steps, verbose=2, return_dict=True)

  if validation_output:
    validation_output = [validation_output['loss'],
                         validation_output['accuracy'],
                         validation_output['top_5_accuracy']]

  # TODO(dankondratyuk): eval and save final test accuracy
  stats = common.build_stats(history, validation_output, callbacks)
  return stats
def make_params(params, extra_params):
    """
    Creates URL query params by combining arbitrary params
    with params designated by keyword arguments and escapes
    them to be compatible with HTTP request URI.

    Raises an exception if there is a conflict between the
    two ways to specify a query param.
    """
    params = params or {}
    wire_params = {}
    # Keyword-designated params first; None values are dropped.
    for key, value in (extra_params or {}).items():
        if value is not None:
            wire_params[key] = quote(escape(value), b",*[]:/-")
    if set(wire_params) & set(params):
        raise ValueError("Conflict between keyword argument and 'params'")
    # Then the arbitrary params, again skipping None values.
    for key, value in params.items():
        if value is not None:
            wire_params[key] = quote(escape(value), b",*[]:/-")
    return wire_params
def raw_tag(name, value):
    """Create a DMAP tag with raw data.

    The tag is the UTF-8 encoded name, followed by a 4-byte big-endian
    length, followed by the raw payload.
    """
    header = name.encode('utf-8')
    length = len(value).to_bytes(4, byteorder='big')
    return header + length + value
def process_preprocess_layers(params: dict):
    """Perform preprocess layers"""
    # Only the "formats" layer is handled at the moment.
    if "formats" in params["preprocess_layers"]:
        add_formats_layer(params)
def get_command(tool_xml):
    """Return the single <command> element of the tool XML, or None.

    None is returned both when no <command> element exists and when more
    than one is present.
    """
    matches = tool_xml.getroot().findall("command")
    if len(matches) != 1:
        return None
    return matches[0]
def OPTIMIZE(writer, segments):
    """This policy merges all existing segments.
    """
    from whoosh.reading import SegmentReader

    # Feed every existing segment back into the writer, leaving no
    # segments behind.
    for segment in segments:
        seg_reader = SegmentReader(writer.storage, writer.schema, segment)
        writer.add_reader(seg_reader)
        seg_reader.close()
    return []
def build_features_revenue_model_q2(
    df_listings: pd.DataFrame, df_daily_revenue: pd.DataFrame
):
    """Builds the features to be used on the revenue modelling for
    answer question 2.

    Parameters
    ----------
    df_listings : pd.DataFrame
        Pandas dataframe with information about listings.
    df_daily_revenue : pd.DataFrame
        Pandas dataframe with information about daily revenue.

    Returns
    -------
    tuple
        (X, y): feature matrix (date-derived features, as float) and the
        daily company-revenue target series.
    """
    # Attach each listing's commission rate to its revenue rows.
    # ("Código"/"Comissão" are presumably Portuguese for listing code /
    # commission -- confirm with the data dictionary.)
    data = pd.merge(
        df_daily_revenue,
        df_listings[["Código", "Comissão"]],
        left_on="listing",
        right_on="Código",
        how="left",
    )
    # Company revenue = commission share of each day's revenue.
    data["company_revenue"] = data["Comissão"] * data["revenue"]
    # Aggregate to one row per date.
    data_revenue = (
        data.groupby("date")
        .agg(company_revenue=("company_revenue", "sum"))
        .reset_index()
    )
    data_revenue = build_date_features(data_revenue, "date")
    # Keep only dates with an observed target value.
    data = data_revenue.loc[data_revenue["company_revenue"].notna()]
    X = data.drop(columns="company_revenue").astype(float)
    y = data["company_revenue"]
    return X, y
def test_get_entry_empty():
    """
    Read row 5 (an empty row) of the `test_input.xlsx` workbook and check
    that `get_entry` produces no entry for it.
    """
    row = pytest.ws['5']
    entry = standardcitations.get_entry(row, True)
    assert not entry
def load_data():
    """Return the MNIST data as a tuple containing the training data,
    the validation data, and the test data.

    The ``training_data`` is returned as a tuple with two entries.
    The first entry contains the actual training images.  This is a
    numpy ndarray with 50,000 entries.  Each entry is, in turn, a
    numpy ndarray with 784 values, representing the 28 * 28 = 784
    pixels in a single MNIST image.

    The second entry in the ``training_data`` tuple is a numpy ndarray
    containing 50,000 entries.  Those entries are just the digit
    values (0...9) for the corresponding images contained in the first
    entry of the tuple.

    The ``validation_data`` and ``test_data`` are similar, except
    each contains only 10,000 images.

    This is a nice data format, but for use in neural networks it's
    helpful to modify the format of the ``training_data`` a little.
    That's done in the wrapper function ``load_data_wrapper()``, see
    below.
    """
    # NOTE(review): `cPickle` is the Python 2 module name, but the
    # `encoding` kwarg is Python 3 pickle API -- presumably this module
    # aliases it (e.g. `import _pickle as cPickle`); confirm the imports.
    f = gzip.open('../data/mnist.pkl.gz', 'rb')
    # latin1 is required to unpickle numpy arrays saved under Python 2.
    training_data, validation_data, test_data = cPickle.load(f, encoding='latin1')
    f.close()
    return (training_data, validation_data, test_data)
def tensorboard_image(
    writer: SummaryWriter,
    image: torch.Tensor,
    target: torch.Tensor,
    output: torch.Tensor,
    global_step: int,
) -> None:
    """Write input/prediction/target image grids to tensorboard.

    Useful for image based training.

    Args:
        writer (SummaryWriter): tensorboard writer object
        image (torch.tensor): image tensor
        target (torch.tensor): target image tensor
        output (torch.tensor): prediction image tensor
        global_step (int): global step number
    """
    # Same panels, in the same order, as logged individually before:
    # input image, model output, then target.
    panels = (
        ("Input image", image),
        ("Prediction", output),
        ("Target", target),
    )
    for caption, tensor in panels:
        grid = make_grid(tensor[:2].clone().cpu().data, 3, normalize=True)
        writer.add_image(caption, grid, global_step)
def find_by_id(cls, groupkey, objectid, raises=False):
    """A helper function to look up an object by id.

    Tries the keyed cache first; on a cache miss falls back to the ORM
    and stores the result under the key reported by the cache exception.

    Returns the object, or None when it does not exist and `raises` is
    False; raises `cls.DoesNotExist` when `raises` is True.
    """
    ob = None
    try:
        ob = keyedcache.cache_get(groupkey, objectid)
    except keyedcache.NotCachedError as e:
        try:
            ob = cls.objects.get(pk=objectid)
            # e.key carries the cache key computed for (groupkey, objectid).
            keyedcache.cache_set(e.key, value=ob)
        except cls.DoesNotExist:
            log.debug("No such %s: %s", groupkey, objectid)
            if raises:
                raise cls.DoesNotExist
    return ob
def write_results(conn, seed_tab_rows, sig_param_dicts,\
bkg_nllh, sig_nllhs, TSs):
"""
Writes a result row to the result table and updates the seed table as done
Parameters:
conn: sqlite connection object
seed_tab_rows: list of named tuples, a row of the seed table as a named tuple
sig_param_dicts: list of dictionaries, a dict of the best fit signal paramters
bkg_nllh: float, the background NLLH
sig_nllhs: list of floats, the signal NLLH
TSs: list of floats, the test statistic value
"""
nrows = len(TSs)
data = []
for i in xrange(nrows):
imx = sig_param_dicts[i]['Signal_imx']
imy = sig_param_dicts[i]['Signal_imy']
A = sig_param_dicts[i]['Signal_A']
ind = sig_param_dicts[i]['Signal_gamma']
bkg_pnames = sorted([k for k in sig_param_dicts[i].keys() if 'bkg_rate' in k])
bkg_rates = "'"
for bkg_pname in bkg_pnames:
bkg_rates += "%.3f, " %(sig_param_dicts[i][bkg_pname])
bkg_rates = bkg_rates[:-2] + "'" #+ ")"
data.append((seed_tab_rows[i].blipID, seed_tab_rows[i].timeID,\
seed_tab_rows[i].time,\
seed_tab_rows[i].duration, TSs[i], bkg_nllh, sig_nllhs[i],\
imx, imy, A, ind, bkg_rates))
# logging.debug((seed_tab_row.blipID, seed_tab_row.timeID, seed_tab_row.time,\
# seed_tab_row.duration, TS, bkg_nllh, sig_nllh,\
# imx, imy, A, ind, bkg_rates))
sql = '''INSERT INTO Results
(blipID, timeID, time, duration, TS,
bkg_nllh, sig_nllh, imx, imy, A, ind, bkg_rates) VALUES
(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);
'''
logging.debug("sql snippet: ")
logging.debug(sql)
data_seeds = []
for row in seed_tab_rows:
data_seeds.append((row.blipID, row.timeID))
sql2 = "update Seeds set done=1 where blipID=? AND timeID=?"
with conn:
conn.executemany(sql, data)
conn.executemany(sql2, data_seeds)
# conn.commit()
# conn.execute(sql)
# conn.commit() | 30,258 |
def get_elements(xmldoc, tag_name, attribute):
    """Return repr() of the given attribute for every matching element."""
    return [repr(node.getAttribute(attribute))
            for node in xmldoc.getElementsByTagName(tag_name)]
def test_card_value_equality():
    """
    Cards with the same value compare equal, cards with different values
    do not, and a card's value compares equal to its plain number.
    """
    first_five = Card("5", "diamonds", value=5)
    second_five = Card("5", "diamonds", value=5)
    six_card = Card("6", "diamonds", value=6)

    assert first_five == second_five
    assert second_five == 5
    assert first_five != six_card
    assert six_card != 5
def test_algorithm(circuit, iterations=(1000000)):
    """
    Tests a circuit by submitting it to both aer_simulator and PyLinalg.

    The qiskit circuit is converted to QLM; the PyLinalg result with
    nbshots=0 serves as the exact reference distribution, and the Aer
    backend runs `iterations` shots. Returns the computed distance
    between the two result distributions.
    """
    linalg = PyLinalg()
    # Convert the qiskit circuit to QLM, keeping measures separate.
    qlm_circ, _ = qiskit_to_qlm(circuit, sep_measures=True)
    # nbshots=0 -> exact simulation used as the reference.
    test_job = qlm_circ.to_job(nbshots=0, aggregate_data=False)
    expected = linalg.submit(test_job)
    qiskit_qpu = BackendToQPU(Aer.get_backend('aer_simulator'))
    # Re-use the same job with a finite shot count on the Aer side.
    test_job.nbshots = iterations
    result = qiskit_qpu.submit(test_job)
    dist_calc = compare_results(expected, result, aggregate=False)
    distance = analyze_distance(dist_calc)
    print("Distance is {}".format(distance))
    return distance
def return_list_of_sn_host():
    """ Return potential SN host names

    This includes:
    - List of object names in SIMBAD that would correspond to extra-galactic object
    - Unknown objects
    - objects with failed crossmatch

    In practice, this exclude galactic objects from SIMBAD.
    """
    simbad_galaxy_types = [
        "galaxy",
        "Galaxy",
        "EmG",
        "Seyfert",
        "Seyfert_1",
        "Seyfert_2",
        "BlueCompG",
        "StarburstG",
        "LSB_G",
        "HII_G",
        "High_z_G",
        "GinPair",
        "GinGroup",
        "BClG",
        "GinCl",
        "PartofG",
    ]
    # Unknown / failed crossmatches first, then the SIMBAD galaxy classes.
    return ["Unknown", "Candidate_SN*", "SN", "Transient", "Fail"] + simbad_galaxy_types
def folder2catalog(path, granule_trunk='', granule_extension='*', add_sf=False, client=None):
    """ Reads a folder of granules into a STAREDataFrame catalog

    :param path: Path of the folder containing granules
    :type path: str
    :param granule_trunk: Granule identifier (e.g. MOD09)
    :type granule_trunk: str
    :param granule_extension: Extension of the granule (e.g. hdf, nc, HDF5)
    :type granule_extension: str
    :param add_sf: toggle creating simple feature representation of the iFOVs
    :type add_sf: bool
    :param client: parallel-ingestion client; the parallel path is not
        implemented yet, so only `client is None` does any work
    :type client:
    :return: catalog (or None when the folder holds no granules)
    :rtype: starepandas.STAREDataFrame
    """
    term = '{path}/{granule_trunk}*.{ext}'.format(path=path, granule_trunk=granule_trunk, ext=granule_extension)
    s3 = None
    # Local paths are globbed directly; s3:// paths go through the S3 helper.
    if path[0:5] != 's3://':
        granule_paths = glob.glob(term)
    else:
        granule_paths, s3 = starepandas.io.s3.s3_glob(path, '.*\.{ext}$'.format(ext=granule_extension))
    if not granule_paths:
        print('no granules in folder')
        return None
    # Exclude companion/sidecar files from the catalog.
    # NOTE(review): `[^_stare]` is a character class (any single char not in
    # {_, s, t, a, r, e}), not the literal suffix "_stare"; it also rejects
    # filenames merely ending in one of those letters -- confirm intent.
    pattern = '.*[^_stare]\.(nc|hdf|HDF5)'
    granule_paths = list(filter(re.compile(pattern).match, granule_paths))
    df = starepandas.STAREDataFrame()
    if client is None:
        # Serial ingestion: one catalog row per granule.
        for granule_path in granule_paths:
            if s3 is not None:
                granule_url = 's3://{bucket_name}/{granule}'.format(bucket_name=s3[0]['bucket_name'],
                                                                    granule=granule_path)
            else:
                granule_url = granule_path
            row = make_row(granule_url, add_sf)
            df = df.append(row, ignore_index=True)
    else:
        # Parallel ingestion is not implemented yet.
        pass
        # client=Client()
        # client.close()
    df.set_sids('stare_cover', inplace=True)
    if add_sf:
        df.set_geometry('geom', inplace=True)
    return df
def test_ingest():
    """
    Test ingesting data with a given schema
    """
    # Two int64 columns ("pa" is the pyarrow module).
    schema = pa.schema([
        pa.field("foo", pa.int64()),
        pa.field("bar", pa.int64())
    ])
    # Row-oriented input should be converted to a columnar table.
    data = [{"foo": 1, "bar": 2}, {"foo": 10, "bar": 20}]
    converted_data = client.ingest_data(data, schema)
    assert converted_data.to_pydict() == {'foo': [1, 10], 'bar': [2, 20]}
def _check_satellite_low(xbee, is_on_hold):
    """
    Check if satellites are low and set the is_on_hold flag.

    Args:
        xbee(xbee.Zigbee): the XBee communication interface.
        is_on_hold(bool): a flag telling if the thread is already on hold.

    Returns:
        bool: True if low sats, False if cleared.
    """
    import time  # local import; the original called a misspelled `tiime.sleep`

    if not (shared.status['thread_flag'] & shared.NSATS_TOO_LOW):
        return False
    # Only announce the hold once, when it first begins.
    if not is_on_hold:
        _log_and_broadcast(xbee, "IFO,%s low sats hold." % shared.AGENT_ID)
    # fix: was `tiime.sleep(0.5)`, a NameError at runtime
    time.sleep(0.5)
    return True
def indexof(path):
    """Returns list of filenames parsed off "Index of" page"""
    page = requests.get(path)
    # Keep only entries whose link text matches the href, i.e. real files.
    return [name for name, href in file_index_re.findall(page.text) if name == href]
def get_oversight(xml: str) -> Optional[OversightInfo]:
    """ Get oversight

    Extracts the `oversight_info` section of a parsed clinical-trial record
    into an `OversightInfo`, defaulting every missing field to ''.
    Returns None when the record has no (truthy) `oversight_info` entry.

    NOTE(review): `xml` is accessed via `.get()`, so it is a mapping
    (parsed document), not a `str` -- the annotation looks wrong; confirm
    with callers before changing it.
    """
    if val := xml.get('oversight_info'):
        return OversightInfo(
            has_dmc=val.get('has_dmc', ''),
            is_fda_regulated_drug=val.get('is_fda_regulated_drug', ''),
            is_fda_regulated_device=val.get('is_fda_regulated_device', ''),
            is_unapproved_device=val.get('is_unapproved_device', ''),
            is_ppsd=val.get('is_ppsd', ''),
            is_us_export=val.get('is_us_export', ''))
def get_user_id(gi, email):
    """
    Get the user ID corresponding to a username email

    Arguments:
      gi (bioblend.galaxy.GalaxyInstance): Galaxy instance
      email : email address for the user (matched with fnmatch, so
          shell-style wildcards are accepted)

    Returns:
      String: user ID, or None if no match.
    """
    # Return on first match; removed the unused `user_id` local from the
    # original implementation.
    try:
        for u in get_users(gi):
            if fnmatch.fnmatch(u.email, email):
                return u.id
    except ConnectionError as ex:
        # Best-effort: log the failure and fall through to returning None.
        logger.warning("Failed to get user list: %s (%s)" % (ex.body,
                                                             ex.status_code))
    return None
def _patch_intercept_mark():
    """Need to make cursor wider when in first column. Otherwise
    the border of the cell box makes it hard to notice the cursor.

    NB! Need to be careful with setting text["insertwidth"]!
    My first straightforward solution caused unexplainable
    infinite loop of insertions and deletions in the text
    (insert a line and a word, select that word and then do Ctrl-Z).

    Looks like this solution is safe, but I don't dare to include
    it in the main code.

    UPDATE: not safe. Select and delete a block of lines. Write a new
    line and do Ctrl-Z"""
    # Keep a reference to the original so the patched version can delegate.
    original_intercept_mark = CodeViewText.intercept_mark

    def _patched_intercept_mark(self, *args):
        # Widen the insertion cursor when the insert mark lands at column 0
        # (Tk index like "12.0"); otherwise restore the normal width.
        if args[:2] == ("set", "insert") and args[2].endswith(".0"):
            self.set_insertwidth(3)
        else:
            self.set_insertwidth(2)
        original_intercept_mark(self, *args)

    # Monkey-patch the class so all instances pick up the new behavior.
    CodeViewText.intercept_mark = _patched_intercept_mark
def test_circuitpython_libraries(monkeypatch):
    """Test main function of 'circuitpython_libraries.py', without writing an output file."""
    # Replace the GitHub repo listing with a canned local list so the test
    # does not hit the network.
    monkeypatch.setattr(common_funcs, "list_repos", mock_list_repos)
    circuitpython_libraries.main(validator="all")
def get_tests_directory() -> str:
    """
    Returns the path of the top level directory for tests.

    Returns: The absolute path of the directory containing this module,
    which is useful for constructing paths to the test files.
    """
    return os.path.dirname(os.path.abspath(__file__))
def test_set_car_online():
    """Test set/get car_online."""
    _controller = Controller(None)
    _controller.set_id_vin(CAR_ID, VIN)
    # Before any status is recorded, all lookups return empty dicts.
    assert _controller.get_car_online() == {}
    assert _controller.get_car_online(vin=VIN) == {}
    assert _controller.is_car_online(vin=VIN) == {}
    _controller.set_car_online(vin=VIN)
    assert _controller.is_car_online(vin=VIN) is True
    # Marking the car online also records a wake-up timestamp.
    last_wake_up = _controller.get_last_wake_up_time(vin=VIN)
    assert int(last_wake_up) >= int(NOW)
    # Going offline must not disturb the stored wake-up time.
    _controller.set_car_online(vin=VIN, online_status=False)
    assert _controller.is_car_online(vin=VIN) is False
    assert _controller.get_last_wake_up_time(vin=VIN) == last_wake_up
def ldns_pkt_size(*args):
    """Thin wrapper delegating to the SWIG-generated `_ldns.ldns_pkt_size`.

    NOTE(review): the original docstring said "LDNS buffer", which does not
    match the function name; presumably this reports a packet's size --
    confirm against the ldns C library documentation.
    """
    return _ldns.ldns_pkt_size(*args)
def draw_intro_screen():
    """
    Start menu.

    Runs its own event/draw loop at 60 FPS. Relies on module-level globals
    (`xDisplay`, `yDisplay`, `gameDisplay`, `background`, `colors`,
    `clock`) and helpers (`quit_program`, `display_message`,
    `draw_chicken`, `draw_button`). Presumably the button actions
    ("play"/"quit") leave this loop -- confirm in `draw_button`.
    """
    # Animating chicken sprite.
    chick_x = xDisplay * 0.1875
    chick_y = yDisplay * 0.55
    chick_dir = True  # True = moving right
    while 1:
        for event in pygame.event.get():
            quit_program(event)
        # Controls chicken's movement: bounce between 18.75% and 65%
        # of the display width.
        if chick_x > xDisplay * 0.65:
            chick_dir = False
        elif chick_x < xDisplay * 0.1875:
            chick_dir = True
        if chick_dir:
            chick_x += 5
        else:
            chick_x -= 5
        # Updates display
        gameDisplay.blit(background, (0, 0))
        display_message(xDisplay / 2, yDisplay / 3, 75, "Chicken! Bwak!")
        draw_chicken(chick_x, chick_y)
        draw_button("I'm no chicken!", 0.15 * xDisplay, 0.75 * yDisplay, 175, 50,
                    colors["darkGreen"], colors["green"], "play")
        draw_button("Chicken out...", 0.65 * xDisplay, 0.75 * yDisplay, 175, 50,
                    colors["darkRed"], colors["red"], "quit")
        pygame.display.update()
        clock.tick(60)
def getUser(userID):
    """Return the patron whose primary key is `userID`.

    Args:
        userID -- The ID of a user stored in the Patrons table
    """
    return session.query(Patrons).filter_by(id=userID).one()
def guard_transform(transform):
    """Return an Affine transformation instance.

    Accepts either an `Affine` or a 6-element sequence; GDAL-ordered
    sequences are detected and converted (with a deprecation warning).

    Raises ValueError when either scale coefficient (a or e) is zero,
    i.e. the transform would collapse a pixel dimension.
    """
    if not isinstance(transform, Affine):
        if tastes_like_gdal(transform):
            warnings.warn(
                "GDAL-style transforms are deprecated and will not "
                "be supported in Rasterio 1.0.",
                FutureWarning,
                stacklevel=2)
            transform = Affine.from_gdal(*transform)
        else:
            transform = Affine(*transform)
    # a and e are the x/y pixel-size coefficients; zero is degenerate.
    a, e = transform.a, transform.e
    if a == 0.0 or e == 0.0:
        raise ValueError(
            "Transform has invalid coefficients a, e: (%f, %f)" % (
                transform.a, transform.e))
    return transform
def config_logger(filename="/logging.yaml", level=None):
    """
    Configure the main logger.

    Looks for a YAML logging-config file next to this script; if it
    exists, applies it via ``logging.config.dictConfig``, otherwise
    falls back to ``logging.basicConfig``.

    Parameters
    ----------
    filename : string, optional
        Path suffix appended to this script's directory to locate the
        logging configuration file. Defaults to "/logging.yaml".
    level : object
        Sets the logging level of the Python logger.
        Defaults to `INFO` if the configuration file is
        not found.

    Returns
    -------
    None
    """
    path_to_script = os.path.dirname(os.path.realpath(__file__))
    # Deliberate string concatenation (not os.path.join): the default
    # filename starts with "/", which join would treat as absolute.
    filename = path_to_script + filename
    if os.path.exists(filename):
        logging.info("Setting logger from logging.yaml")
        with open(filename, 'rt') as file_data:
            # safe_load: yaml.load without an explicit Loader is unsafe
            # (arbitrary object construction) and deprecated in
            # PyYAML >= 5.1.
            config = yaml.safe_load(file_data)
        logging.config.dictConfig(config)
    else:
        if level is None:
            level = logging.INFO
        logging.info("Setting default logger")
        logging.basicConfig(level=level)
def create_heatmap(piek_json,
                   antske_json,
                   output_path=None,
                   verbose=0):
    """Plot an annotator-agreement heatmap for two sets of Likert ratings.

    Both inputs map the same item keys to a Likert value (1-7).  Each
    heatmap cell (piek, antske) counts how many items received that pair
    of ratings.  The first row of the frame (index 0, never populated by
    1-7 ratings) is dropped before plotting, as in the original code.

    Parameters
    ----------
    piek_json, antske_json : dict
        Mappings item-key -> Likert value; must share identical key sets.
    output_path : str, optional
        If given, the rendered figure is saved to this path.
    verbose : int
        If >= 2, print each (piek, antske, count) triple.

    Returns
    -------
    (pandas.DataFrame, matplotlib.axes.Axes)
        The integer count matrix and the heatmap axes.
    """
    # initialize dataframe: one column per Likert value, rows 0-6 of None
    likert_values = [1, 2, 3, 4, 5, 6, 7]
    df = pandas.DataFrame()
    default_values = [None for _ in range(len(likert_values))]
    for likert_value in likert_values:
        df[likert_value] = default_values

    # bucket item keys by their (piek rating, antske rating) pair
    piek_antske_to_items = defaultdict(list)
    keys = list(piek_json.keys())
    assert piek_json.keys() == antske_json.keys()
    for key in keys:
        piek_value = piek_json[key]
        antske_value = antske_json[key]
        piek_antske_to_items[(piek_value, antske_value)].append(key)

    for (piek, antske), items in piek_antske_to_items.items():
        # DataFrame.set_value was removed in pandas 1.0; .at is the
        # supported scalar setter (and also enlarges for new row labels).
        df.at[piek, antske] = len(items)
        if verbose >= 2:
            print(piek, antske, len(items))

    # cells never set above are None/NaN -> replace with 0, then make
    # every count an integer (replaces the original cell-by-cell loop)
    df = df.fillna(0)
    df = df[df.columns].astype(int)
    df = df.drop(df.index[0])

    fig, ax = plt.subplots(figsize=(9, 6))
    plot = sns.heatmap(df, annot=True, fmt="d", linewidths=.5, ax=ax)
    ax.invert_yaxis()
    if output_path is not None:
        plot.figure.savefig(output_path)
    return df, ax
def get_classification_report(true_labels, pred_labels, labels=None, target_names=None, output_dict=False):
    """Wrap ``sklearn.metrics.classification_report`` with 4-digit output.

    Example::

        true_labels = [0, 1, 2, 3, 4, 1]  # Y
        pred_labels = [0, 1, 1, 2, 2, 1]  # X
        target_names = ["A", "B", "C", "D", "E"]
        out = get_classification_report(true_labels, pred_labels,
                                        target_names=target_names,
                                        output_dict=False)

    On macro vs. micro averaging: with balanced classes the two averages
    are close.  With imbalanced classes, favour the micro average when
    the large classes matter most (a low micro average points at the
    large classes) and the macro average when the small classes matter
    most (a low macro average points at the small classes).

    :param true_labels: ground-truth labels (sequence of ints).
    :param pred_labels: predicted labels (sequence of ints).
    :param labels: explicit label set; overridden with
        ``range(len(target_names))`` whenever ``target_names`` is given.
    :param target_names: display names, one per label.
    :param output_dict: if True, return a condensed dict holding only
        the macro average, accuracy and weighted average; otherwise
        return the full report string.
    :return: dict or str, depending on ``output_dict``.
    """
    y_true = np.array(true_labels, dtype=np.int32)
    y_pred = np.array(pred_labels, dtype=np.int32)
    if target_names:
        labels = list(range(len(target_names)))
    report = metrics.classification_report(y_true,
                                           y_pred,
                                           labels=labels,
                                           digits=4,
                                           target_names=target_names,
                                           output_dict=output_dict)
    if not output_dict:
        return report
    # Condense the per-class dict down to the three headline aggregates.
    return {"macro_avg": report["macro avg"],
            "accuracy": report["accuracy"],
            "weighted_avg": report["weighted avg"]}
def test_simple_flow_existing_tag_on_last_commit(
        existing_tag, next_tag, simple_repo, default_detectors):
    """Test a simple flow locally.

    Pre-tags the most recent commit with *existing_tag*; because the
    autotagger is constructed with ``skip_if_exists=True``, running it
    must NOT create *next_tag*.
    """
    repo = git.Repo(simple_repo, odbt=git.GitDB)
    # Tag the newest commit so the "tag already on HEAD" path is hit.
    repo.create_tag(
        existing_tag,
        ref=list(repo.iter_commits())[0])
    autotag = core.AutoTag(
        repo=simple_repo,
        branch='master',
        upstream_remotes=None,
        detectors=default_detectors,
        git_name=TEST_NAME,
        git_email=TEST_EMAIL,
        skip_if_exists=True)
    autotag.work()
    assert next_tag not in repo.tags
def generate_message(minutes_to_next=None):
    """
    Using the list of exercises, this function generates a new exercise
    message. Optionally it takes a minutes_to_next parameter which it
    uses to add an indication of when the next exercise will take place.

    Relies on module-level globals: ``exercises`` (mapping name ->
    {'range': (lo, hi), 'unit': str-or-falsy}) and ``message_timezone``.
    """
    # Randomly select an exercise and a number of repetitions.
    # list() is required: random.choice needs a sequence, and dict views
    # (Python 3) are not indexable.
    exercise, data = random.choice(list(exercises.items()))
    repetitions = random.randint(*data['range'])
    # Prepare the message string.  The original formatted an undefined
    # name `prefix` and left `unit_string` unused; `unit_string` is the
    # evident intent (e.g. "10 seconds PLANK RIGHT NOW!").
    unit_string = ' ' + data['unit'] if data['unit'] else ''
    text = '{}{} {} RIGHT NOW!'.format(repetitions, unit_string, exercise)
    # Add the next exercise indication
    if minutes_to_next is not None:
        current_time = datetime.datetime.now(timezone('UTC'))
        # Fixed: was `now` (undefined) instead of current_time.
        next_time = (current_time.astimezone(timezone(message_timezone)) +
                     datetime.timedelta(minutes=minutes_to_next))
        # Fixed: was `time.strftime(...)` instead of next_time.strftime.
        next_text = 'NEXT EXERCISE AT {}'.format(next_time.strftime('%H:%M'))
        text += '\n' + next_text
    return text
def yx():
    """Smoke-test the Yinxiang (Evernote China) service.

    Connects with the module-level ``dev_token``, prints the account's
    user record and returns the literal string "yx".

    :return: the string "yx"
    """
    client = EvernoteClient(token=dev_token, sandbox=False)
    client.service_host = 'app.yinxiang.com'
    userStore = client.get_user_store()
    user = userStore.getUser()
    # print() works on both Python 2 and 3; the original bare
    # `print user` statement is a syntax error on Python 3.
    print(user)
    return "yx"
def generate_frequency_result_for_time_precedence_query_workload(config_map, time_interval, spatial_interval):
    """Build per-region query-frequency results for a workload.

    Each value in *config_map* is a parameter list whose positions 4..9
    hold (lon_min, lon_max, lat_min, lat_max, time_min, time_max); the
    two time bounds are normalised to UTC before use.

    :param config_map: mapping region-key -> region parameter list.
    :param time_interval: temporal bucket size, forwarded per region.
    :param spatial_interval: spatial bucket size, forwarded per region.
    :return: dict mapping each region key to its frequency result.
    """
    frequency_result = {}
    for region_key, params in config_map.items():
        lon_min, lon_max, lat_min, lat_max = params[4:8]
        time_min = normalize_to_utc_date(params[8])
        time_max = normalize_to_utc_date(params[9])
        frequency_result[region_key] = generate_query_frequency_per_region(
            region_key, time_interval, spatial_interval,
            lon_min, lon_max, lat_min, lat_max, time_min, time_max)
    print("finish frequency result for query")
    return frequency_result
def delete(request):
    """Log the user out: clear all session data, then redirect to /login/."""
    request.session.flush()
    return redirect('/login/')
def acovf(x, unbiased=False, demean=True, fft=None, missing='none', nlag=None):
    """
    Autocovariance for 1D
    Parameters
    ----------
    x : array
        Time series data. Must be 1d.
    unbiased : bool
        If True, then denominators is n-k, otherwise n
    demean : bool
        If True, then subtract the mean x from each element of x
    fft : bool
        If True, use FFT convolution.  This method should be preferred
        for long time series.
    missing : str
        A string in ['none', 'raise', 'conservative', 'drop'] specifying how
        any NaNs are to be treated.
    nlag : {int, None}
        Limit the number of autocovariances returned.  Size of returned
        array is nlag + 1.  Setting nlag when fft is False uses a simple,
        direct estimator of the autocovariances that only computes the first
        nlag + 1 values. This can be much faster when the time series is long
        and only a small number of autocovariances are needed.
    Returns
    -------
    acovf : array
        autocovariance function
    References
    -----------
    .. [*] Parzen, E., 1963. On spectral analysis with missing observations
       and amplitude modulation. Sankhya: The Indian Journal of
       Statistics, Series A, pp.383-392.
    """
    if fft is None:
        # GH#4937: fft currently defaults to False but will flip; warn.
        import warnings
        warnings.warn('fft=True will become the default in a future version '
                      'of statsmodels/sm2.  To suppress this warning, '
                      'explicitly set fft=False.', FutureWarning)
        fft = False
    x = np.squeeze(np.asarray(x))
    if x.ndim > 1:
        raise ValueError("x must be 1d. Got %d dims." % x.ndim)
    missing = missing.lower()
    if missing not in ['none', 'raise', 'conservative', 'drop']:
        raise ValueError("`missing` option %s not understood"
                         % missing)  # pragma: no cover
    # Only inspect the data for NaNs when the caller asked for handling.
    if missing == 'none':
        deal_with_masked = False
    else:
        deal_with_masked = np.isnan(x).any()
    if deal_with_masked:
        if missing == 'raise':
            raise MissingDataError("NaNs were encountered in the data")
        notmask_bool = ~np.isnan(x)
        if missing == 'conservative':
            # Must copy for thread safety (GH#4937); NaNs are zeroed so
            # they contribute nothing to the dot products below.
            x = x.copy()
            x[~notmask_bool] = 0
        else:
            # 'drop'
            x = x[notmask_bool]  # copies non-missing
        notmask_int = notmask_bool.astype(int)
    if demean and deal_with_masked:
        # whether 'drop' or 'conservative': mean over observed values only
        xo = x - x.sum() / notmask_int.sum()
        if missing == 'conservative':
            xo[~notmask_bool] = 0
    elif demean:
        xo = x - x.mean()
    else:
        xo = x
    n = len(x)
    lag_len = nlag
    if nlag is None:
        lag_len = n - 1
    elif nlag > n - 1:
        raise ValueError('nlag must be smaller than nobs - 1')
    if not fft and nlag is not None:
        # GH#4937: direct estimator — computes only the first nlag + 1
        # lags instead of the full correlation.
        acov = np.empty(lag_len + 1)
        acov[0] = xo.dot(xo)
        for i in range(lag_len):
            acov[i + 1] = xo[i + 1:].dot(xo[:-(i + 1)])
        if not deal_with_masked or missing == 'drop':
            if unbiased:
                acov /= (n - np.arange(lag_len + 1))
            else:
                acov /= n
        else:
            if unbiased:
                # Per-lag count of overlapping non-missing pairs.
                divisor = np.empty(lag_len + 1, dtype=np.int64)
                divisor[0] = notmask_int.sum()
                for i in range(lag_len):
                    divisor[i + 1] = np.dot(notmask_int[i + 1:],
                                            notmask_int[:-(i + 1)])
                divisor[divisor == 0] = 1
                acov /= divisor
            else:
                # biased, missing data but not 'drop'
                acov /= notmask_int.sum()
        return acov
    # d holds the per-lag denominators for the full-length correlation.
    if unbiased and deal_with_masked and missing == 'conservative':
        d = np.correlate(notmask_int, notmask_int, 'full')
        d[d == 0] = 1
    elif unbiased:
        xi = np.arange(1, n + 1)
        d = np.hstack((xi, xi[:-1][::-1]))
    elif deal_with_masked:
        # biased and NaNs given and ('drop' or 'conservative')
        d = notmask_int.sum() * np.ones(2 * n - 1)
    else:
        # biased and no NaNs or missing=='none'
        d = n * np.ones(2 * n - 1)
    if fft:
        nobs = len(xo)
        # Pad to a composite length for an efficient FFT.
        n = _next_regular(2 * nobs + 1)
        Frf = np.fft.fft(xo, n=n)
        acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
        acov = acov.real
    else:
        acov = np.correlate(xo, xo, 'full')[n - 1:] / d[n - 1:]
    if nlag is not None:
        # GH#4937 Copy to allow gc of full array rather than view
        return acov[:lag_len + 1].copy()
    return acov
def coins_snapshot(broker='hb', code='btc', size='5'):
    """
    Fetch a real-time order-book snapshot (stub; not yet implemented).

    params:
        broker: exchange id, one of 'hb', 'ok', 'chbtc'
        code: coin symbol, one of 'btc', 'ltc', 'eth', 'etc', 'bcc'
        size: order-book depth, must be < 150
    return Panel: asks, bids
    """
    pass
def ParseTimeCommandResult(command_result):
    """Parse command result and get time elapsed.

    Extracts the "real XmY.Ys" line emitted by the shell `time` builtin
    and converts it to seconds.

    Args:
        command_result: The result after executing a remote time command.

    Returns:
        Time taken for the command, in seconds (float).
    """
    # "\." is now escaped: the original pattern's bare "." could match
    # any character between the seconds digits (e.g. "5x50").
    time_data = re.findall(r'real\s+(\d+)m(\d+\.\d+)', command_result)
    # Raises IndexError (as before) if no timing line is present.
    time_in_seconds = 60 * float(time_data[0][0]) + float(time_data[0][1])
    return time_in_seconds
def _GetTmpDir():
    """Determines the right temporary dir to use and creates it if necessary.

    Prefers the --emulator_tmp_dir flag value; when the flag is unset,
    falls back to a fresh ``tempfile.mkdtemp`` directory (which mkdtemp
    itself already creates, so the exists/makedirs guard below only
    matters for a flag-supplied path).

    Returns:
        path to temporary directory.
    """
    tmp_dir = FLAGS.emulator_tmp_dir or os.path.abspath(
        tempfile.mkdtemp('android-emulator-launch'))
    # NOTE(review): exists()+makedirs() is racy if several processes share
    # the flag path; os.makedirs(tmp_dir, exist_ok=True) would be atomic
    # (Python 3 only — confirm target interpreter before changing).
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)
    return tmp_dir
def _push_aol_to_dativetop_server(aol):
    """Make a PUT request to the DativeTop server in order to push ``aol``
    on to the server's AOL.

    Returns a 2-tuple ``(parsed_json, error_message)``: the parsed JSON
    body and ``None`` on success, or ``None`` and a message on failure.
    """
    try:
        response = requests.put(c.DATIVETOP_SERVER_URL, json=aol)
        response.raise_for_status()
        return response.json(), None
    except json.decoder.JSONDecodeError:
        # 2xx response whose body was not valid JSON.
        msg = ('Failed to parse JSON from DativeTop Server response to our PUT'
               ' request.')
        logger.exception(msg)
        return None, msg
    except requests.exceptions.RequestException:
        # Network failure or non-2xx status from raise_for_status().
        msg = 'Failed to push our AOL to the DativeTop Server.'
        logger.exception(msg)
        return None, msg
def dual_single(lag_mul: float, val: np.ndarray, count: np.ndarray) -> float:
    """Weighted average minus 1 for estimate of F_0.

    Computes phi_n(lambda_n) - 1.

    Args:
        lag_mul: The normalized Lagrangian multiplier. Must be
            strictly between 0 and 1.
        val: Likelihood values (excluding zero and infinity).
        count: Counts for likelihood values (including zero and
            infinity). Only the first and the last counts can be
            zero. Sum must be positive.

    Returns:
        Weighted average minus 1.
    """
    # Finite likelihood values sit at count[1:-1]; count[0] belongs to
    # the zero-likelihood bin (its term reduces to count[0] / lag_mul).
    finite_term = sum(count[1:-1] / (lag_mul + (1 - lag_mul) * val))
    zero_term = count[0] / lag_mul
    return (finite_term + zero_term) / sum(count) - 1
def _execute(
        repository_ctx,
        cmdline,
        error_msg = None,
        error_details = None,
        empty_stdout_fine = False,
        environment = {}):
    """Executes an arbitrary shell command.

    Args:
      repository_ctx: the repository_ctx object
      cmdline: list of strings, the command to execute
      error_msg: string, a summary of the error if the command fails
      error_details: string, details about the error or steps to fix it
      empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise
        it's an error
      environment: environment variables passed to repository_ctx.execute
    Return:
      the result of repository_ctx.execute(cmdline)
    """
    result = repository_ctx.execute(cmdline, environment = environment)
    # Any stderr output is treated as failure, and so is an empty stdout
    # unless the caller opted in via empty_stdout_fine.
    if result.stderr or not (empty_stdout_fine or result.stdout):
        _fail("\n".join([
            error_msg.strip() if error_msg else "Repository command failed",
            result.stderr.strip(),
            error_details if error_details else "",
        ]))
    return result
def resetDefaultConfig():  # --------------------------------------------
    """Resets settings/config/materials to defaults.

    Delegates to the module-level ``updateConfig`` with the pristine
    ``keywords_org`` / ``drawTypes_org`` tables.
    """
    # debug: print 'deb:lresetDefaultConfig keywords_org: \n', keywords_org
    updateConfig(keywords_org, drawTypes_org)
def parse_tagged_block(block):
    """
    Replaces "data" attribute of a block with parsed data structure
    if it is known how to parse it.

    Unknown block keys only trigger a warning; their payload is passed
    through unparsed.
    """
    key = block.key.decode('ascii')
    if not TaggedBlock.is_known(key):
        warnings.warn("Unknown tagged block (%s)" % block.key)
    try:
        decode = _tagged_block_decoders[key]
    except KeyError:
        # No registered decoder: keep the raw bytes untouched.
        decode = lambda data: data
    return Block(key, decode(block.data))
def floatX(arr):
    """Cast *arr* to a numpy array of Theano's configured float dtype.

    Parameters
    ----------
    arr : array_like
        The data to be converted.

    Returns
    -------
    numpy ndarray
        `arr` as an ndarray of dtype ``theano.config.floatX``; when
        `arr` already is an ndarray of that dtype, it is returned as-is
        (``asarray`` makes no copy in that case).
    """
    target_dtype = theano.config.floatX
    return numpy.asarray(arr, dtype=target_dtype)
def getNumberOfPublicIp():
    """Get the total number of public IP addresses.

    return: (long) Number of public IP
    """
    # Precomputed once from getValidPublicIpRange():
    #   sum(hi - lo + 1 for (lo, hi) in getValidPublicIpRange())
    # Kept as a constant so every call avoids recomputing it.
    total_public_ips = 3689020672
    return total_public_ips
def load_config(config_name):
    """Load the named Solr configuration and reload the connection.

    Looks up *config_name* in the module-level ``CONFIGS`` mapping,
    rebinds the globals ``SOLR_PORT`` / ``SOLR_ADDRESS`` / ``SOLR_CORE``,
    then asks the ``connection`` module to re-read them.
    """
    # Imported here (not at module top) — presumably to avoid a circular
    # import with the connection module; confirm before hoisting.
    import connection
    global SOLR_PORT, SOLR_ADDRESS, SOLR_CORE
    config = CONFIGS[config_name]
    SOLR_CORE = config['core']
    SOLR_PORT = config['port']
    SOLR_ADDRESS = config['host']
    connection.reload_config()
def framework_find(fn, executable_path=None, env=None):
    """
    Find a framework using dyld semantics in a very loose manner.

    Will take input such as:
        Python
        Python.framework
        Python.framework/Versions/Current
    """
    first_error = None
    try:
        return dyld_find(fn, executable_path=executable_path, env=env)
    except ValueError as exc:
        first_error = exc
    # Retry with an explicit framework-binary path appended, e.g.
    # "Python" -> "Python.framework/Python".
    suffix_pos = fn.rfind('.framework')
    if suffix_pos == -1:
        suffix_pos = len(fn)
        fn += '.framework'
    fn = os.path.join(fn, os.path.basename(fn[:suffix_pos]))
    try:
        return dyld_find(fn, executable_path=executable_path, env=env)
    except ValueError:
        # Report the failure for the caller's original spelling.
        raise first_error
    finally:
        # Drop the stored exception to break its traceback reference.
        first_error = None
def find_modules(path):
    """
    List every package and non-package module importable from *path*.

    Walks the packages reported by ``find_packages`` and adds the plain
    modules found inside each one.  Handles the differing
    ``iter_modules`` return shape before Python 3.6 (plain tuples vs.
    ``ModuleInfo`` named tuples).
    """
    modules = set()
    for pkg in find_packages(path):
        modules.add(pkg)
        pkgpath = path + "/" + pkg.replace(".", "/")
        legacy_iter = sys.version_info.major == 2 or (
            sys.version_info.major == 3 and sys.version_info.minor < 6
        )
        if legacy_iter:
            for _, name, ispkg in iter_modules([pkgpath]):
                if not ispkg:
                    modules.add(pkg + "." + name)
        else:
            modules.update(
                pkg + "." + info.name
                for info in iter_modules([pkgpath])
                if not info.ispkg
            )
    return modules
def cart2sph(x, y, z):
    """
    Convert Cartesian coordinates x, y, z
    to conventional spherical coordinates r, p, a

    :param x: Cartesian coordinate or vector x
    :type x: float or np.ndarray
    :param y: Cartesian coordinate or vector y
    :type y: float or np.ndarray
    :param z: Cartesian coordinates or vector z
    :type z: float or np.ndarray
    :return: Spherical coordinates: radius, polar angle, and azimuth angle
    :rtype: np.ndarray
    """
    radius = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    polar = np.arccos(z / radius)
    azimuth = np.arctan2(y, x)
    return np.array([radius, polar, azimuth])
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.