content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def hidden_state_embedding(hidden_states: torch.Tensor, layers: List[int],
                           use_cls: bool, reduce_mean: bool = True) -> torch.Tensor:
    """
    Extract embeddings from hidden attention state layers.

    Parameters
    ----------
    hidden_states
        Attention hidden states in the transformer model, indexable by layer.
    layers
        List of layers to use for the embedding.
    use_cls
        Whether to use only the first (CLS) token of each layer's output.
    reduce_mean
        Whether to average the concatenated states over the token dimension.

    Returns
    -------
    Tensor with embeddings.
    """
    selected = []
    for layer in layers:
        state = hidden_states[layer]
        if use_cls:
            # Keep only the CLS token, preserving the token dimension.
            state = state[:, 0:1, :]
        selected.append(state)
    stacked = torch.cat(selected, dim=1)  # type: ignore
    if reduce_mean:
        return stacked.mean(dim=1)
    return stacked  # type: ignore
def is_new_user(day: datetime.datetime, first_day: datetime.datetime):
    """
    Check if user has contributed results to this project before.

    Returns 1 when ``day`` equals the user's first contribution day
    (i.e. this is a new user), otherwise 0.
    """
    return 1 if day == first_day else 0
def to_ndarray(X):
    """
    Coerce ``X`` to a dense numpy ndarray if it is not one already.
    Currently only conversion from scipy sparse matrices is supported;
    anything else raises ``ValueError``.
    """
    if isinstance(X, np.ndarray):
        return X
    if sps.issparse(X):
        print('Converting from sparse type: {}'.format(type(X)))
        return X.toarray()
    raise ValueError('Unexpected data type: {}'.format(type(X)))
def _2d_gauss(x, y, sigma=2.5 / 60.0):
"""A Gaussian beam"""
return np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2)) | 5,330,403 |
def copy_render_file(src_path, dst_path):
    """Create copy file of an image.

    Prefers a hard link (cheap, no data duplication) and falls back to a
    real copy when hard links are unavailable or fail — e.g. a cross-device
    link, an already-existing target, or a filesystem without hardlink
    support. The original would raise in those cases instead of copying.
    """
    if hasattr(os, "link"):
        try:
            os.link(src_path, dst_path)
            return
        except OSError:
            # Hard link failed; fall through to a plain copy.
            pass
    shutil.copy(src_path, dst_path)
def per_device_work(session, check_mode, enable_pass, settings_header):
    """
    Per-device payload executed for every device this script connects to.

    Opens a Cisco session, pulls the list of commands for this device's OS
    from the script settings under ``settings_header``, applies them (or
    raises if none are defined for the OS), and closes the session.
    """
    session.start_cisco_session(enable_pass=enable_pass)
    commands_to_add = session.script.settings.getlist(settings_header, session.os)
    logger.debug("<ADD_GLOBAL_CONFIG> Commands to send:\n{}".format(str(commands_to_add)))
    if not commands_to_add:
        # No commands configured for this OS type: abort before touching config.
        raise scripts.ScriptError("There are no commands to apply for OS type: {}.".format(session.os))
    add_commands(session, check_mode, commands_to_add)
    session.end_cisco_session()
def attach_task_custom_attributes(queryset, as_field="task_custom_attributes_attr"):
    """Annotate each project in the queryset with a JSON representation of
    its task custom attributes.

    :param queryset: A Django projects queryset object.
    :param as_field: Name of the attribute to attach the JSON blob under.
    :return: Queryset object with the additional `as_field` field.
    """
    sql = """
    SELECT json_agg(
        row_to_json(custom_attributes_taskcustomattribute)
        ORDER BY custom_attributes_taskcustomattribute.order
    )
    FROM custom_attributes_taskcustomattribute
    WHERE custom_attributes_taskcustomattribute.project_id = {tbl}.id
    """
    table_name = queryset.model._meta.db_table
    return queryset.extra(select={as_field: sql.format(tbl=table_name)})
def render_to_svg(path: str, cell_grid: List[List[GridCell]], save_png: bool):
    """
    Render a cell grid to an SVG file (and optionally a PNG).

    :param path: path of the SVG file to write
    :param cell_grid: 2D grid of cells (GridCells or WalledCells)
    :param save_png: if true, also save a PNG version of the maze
    :return: None
    """
    n_rows = len(cell_grid)
    n_cols = len(cell_grid[0])
    drawing = SDraw.Drawing(n_cols * 10, n_rows * 10, origin=(0, 0), displayInline=False)
    # Outer border rectangle around the whole maze.
    border = SDraw.Rectangle(0, 0, n_cols * 10, n_rows * 10, stroke_width=1, fill='none', stroke='black')
    drawing.append(border)
    for row_idx, row in enumerate(cell_grid):
        for col_idx, cell in enumerate(row):
            # SVG y grows downward, so flip the row index.
            render_cell(drawing, cell, col_idx * 10, (n_rows - 1 - row_idx) * 10)
    drawing.setPixelScale(2)  # Set number of pixels per geometry unit
    drawing.saveSvg(path)
    if save_png:
        base, _ = os.path.splitext(path)
        drawing.savePng(base + '.png')
def softmax_like(env, *, trajectory_model, agent_model, log=False):
    """softmax_like

    Sample an action from a softmax policy over inferred Q-values.

    :param env: OpenAI Gym environment
    :param trajectory_model: trajectory probabilistic program
    :param agent_model: agent's probabilistic program
    :param log: boolean; if True, print log info
    """
    q_values = []
    for action in range(env.action_space.n):
        q_values.append(
            infer_Q(
                env,
                action,
                trajectory_model=trajectory_model,
                agent_model=agent_model,
                log=log,
            )
        )
    Qs = torch.as_tensor(q_values)
    # Temperature-scaled logits; ``args.alpha`` is module-level configuration.
    action_dist = Categorical(logits=args.alpha * Qs)
    if log:
        print('policy:')
        print(
            tabulate(
                [action_dist.probs.tolist()],
                headers=env.actions,
                tablefmt='fancy_grid',
            )
        )
    return action_dist.sample()
def bulk_add(packages, user):
    """
    Support bulk add by processing entries like:
    repo [org]

    Each line of ``packages`` is either "repo org" or just "repo"; one slot
    is reserved per input line and a human-readable multi-line summary of
    what was added is returned.

    :param packages: newline-separated package entries.
    :param user: key used to address this user's Redis slots.
    :return: summary string describing the slots/packages added.
    """
    added = 0
    i = 0  # current slot index (0-based; reported to the user as i + 1)
    packages = packages.split('\n')
    num = len(packages)
    org = None
    results = str()
    # Reserve one slot per input line up front.
    db.set(config.REDIS_KEY_USER_SLOTNUM_PACKAGE % user, num)
    results += "Added %s slots.\n" % num
    orgs_selected = db.hgetall(config.REDIS_KEY_USER_ORGS_SELECTED %
                               user).items()
    for package in packages:
        try:  # First, try: repo [org]
            # Raises ValueError unless the line has exactly two fields.
            package, org = package.split()
            for orgsel in orgs_selected:
                # orgsel is a (hash key, org name) pair from Redis.
                if org == orgsel[1]:
                    get_package_selected(user, package=package,
                                         orgset=orgsel[0], slotset=i)
                    results += ("Added %s to slot %s with organization %s.\n" %
                                (package, i + 1, org))
                    added += 1
                    i += 1
        # NOTE(review): bare except also hides failures raised by
        # get_package_selected, not just the split() ValueError — confirm
        # this is intentional best-effort behavior.
        except:  # Next, try: repo
            try:
                package = package.split()
                package = package[0]  # IndexError on blank lines -> skipped below
                get_package_selected(user, package=package, slotset=i)
                results += "Added %s to slot %s.\n" % (
                    package, i + 1)
                added += 1
                i += 1
            except:  # Give up
                pass
    results += "Added %s packages" % added
    if added == 0:
        results += ", check org slots for matching org?\n"
    else:
        results += ".\n"
    return results
def laser_heater_to_energy_spread(energy_uJ):
    """
    Return the induced rms energy spread in keV for a given laser-heater
    pulse energy in microjoules.

    Empirical fit to measurements in SLAC-PUB-14338.
    """
    return 7.15 * sqrt(energy_uJ)
def apparent_attenuation(og, fg):
    """Apparent attenuation (percent) from original and final gravity."""
    original = float(og)
    final = float(fg)
    return 100.0 * (original - final) / original
def masked_greater(x: numpy.ndarray, value: int):
    """
    usage.dask: 5
    usage.matplotlib: 1
    usage.scipy: 3
    """
    # Auto-generated API-usage stub: the docstring records observed call
    # counts per downstream library. Intentionally unimplemented.
    ...
def most_similar(W, vocab, id2word, word, n=15):
    """
    Find the `n` words most similar to the given `word`. The provided
    `W` must have unit vector rows, and must have merged main- and
    context-word vectors (i.e., `len(W) == len(word2id)`).
    Returns a list of word strings.
    """
    assert len(W) == len(vocab)
    target_id = vocab[word][0]
    # Rows of W are unit vectors, so the dot product is cosine similarity.
    similarities = np.dot(W, W[target_id])
    # Take n + 1 candidates because the query word itself ranks first
    # and is filtered out below.
    best = np.argsort(similarities)[::-1][:n + 1]
    return [id2word[word_id] for word_id in best if word_id != target_id][:n]
async def on_message(message):
    """Member recruitment command (``.rect@<number>``).

    Posts an "N more wanted" message, pins it, and tracks sign-ups via
    reactions until enough members have joined or the poll is cancelled.
    """
    if message.content.startswith(".rect"):
        # NOTE(review): ".rect" is 5 chars, so content[6:] assumes a single
        # separator character after the command (e.g. ".rect@10") — confirm.
        mcount = int(message.content[6:len(message.content)])
        text= "あと{}人 募集中\n"
        revmsg = text.format(mcount)
        # frelist: names of users who reacted to join
        frelist = []
        msg = await client.send_message(message.channel, revmsg)
        # Reaction "buttons" used for voting
        await client.add_reaction(msg, '\u21a9')
        await client.add_reaction(msg, '⏫')
        await client.pin_message(msg)
        # Poll reactions until enough members have joined
        while len(frelist) < int(message.content[6:len(message.content)]):
            target_reaction = await client.wait_for_reaction(message=msg)
            # True when the reacting user is not the bot's own message author
            if target_reaction.user != msg.author:
                #==============================================================
                # Pressed emoji is a known one >> back-arrow: remove a sign-up
                if target_reaction.reaction.emoji == '\u21a9':
                    #==========================================================
                    # React to the back-arrow; true if user.name is in frelist
                    if target_reaction.user.name in frelist:
                        frelist.remove(target_reaction.user.name)
                        mcount += 1
                        # Remove the name from the list
                        await client.edit_message(msg, text.format(mcount) +
                                                  '\n'.join(frelist))
                        # Rewrite the recruitment message
                    else:
                        pass
                #==============================================================
                # Pressed emoji is a known one >> up-arrow: add a sign-up
                elif target_reaction.reaction.emoji == '⏫':
                    if target_reaction.user.name in frelist:
                        pass
                    else:
                        frelist.append(target_reaction.user.name)
                        # Add the name to the list
                        mcount = mcount - 1
                        await client.edit_message(msg, text.format(mcount) +
                                                  '\n'.join(frelist))
                elif target_reaction.reaction.emoji == '✖':
                    await client.edit_message(msg, '募集終了\n'+ '\n'.join(frelist))
                    await client.unpin_message(msg)
                    break
                await client.remove_reaction(msg, target_reaction.reaction.emoji, target_reaction.user)
                # Remove the user's reaction (may error depending on permissions)
                #==============================================================
        else:
            await client.edit_message(msg, '募集終了\n'+ '\n'.join(frelist))
def getRecordsFromDb():
    """Return all records found in the database associated with :func:`dbFilePath()`.

    List of records are cached using an application configuration entry identified
    by ``_CACHED_RECORDS`` key. The cache is refreshed whenever the row count in
    the database differs from the number of cached records.

    See also :func:`openDb`.

    :raises IOError: if the database file does not exist.
    """
    try:
        records = flask.current_app.config["_CACHED_RECORDS"]
    except KeyError:
        records = None
    database_filepath = dbFilePath()
    app.logger.info("database_filepath: %s" % database_filepath)
    if not os.path.isfile(database_filepath):
        # errno 2 == ENOENT; the third argument populates ``e.filename``.
        # (The original left a stray, never-substituted '%s' in the message.)
        raise IOError(2, 'Database file does not exist', database_filepath)
    database_connection = openDb(database_filepath)
    try:
        cursor = database_connection.cursor()
        # get record count
        cursor.execute('select count(1) from _')
        count = int(cursor.fetchone()[0])
        # load db if needed or count has changed
        if records is None or count != len(records):
            cursor.execute('select record from _ order by revision desc,build_date desc')
            records = [json.loads(record[0]) for record in cursor.fetchall()]
            flask.current_app.config["_CACHED_RECORDS"] = records
    finally:
        # Always release the connection, even if a query raises
        # (the original leaked it on error).
        database_connection.close()
    return records
def build_cell(num_units,
               num_layers,
               cell_fn,
               initial_state=None,
               copy_state=True,
               batch_size=None,
               output_dropout_rate=0.,
               input_shape=None,
               attention_mechanism_fn=None,
               memory=None,
               memory_sequence_len=None,
               alignment_history=False,
               mode=tf.estimator.ModeKeys.TRAIN,
               name=None):
    """
    General function to create RNN cells for decoding.
    Handles multi-layer cases, LSTMs and attention wrappers.

    Args:
        num_units: Hidden size of each RNN cell.
        num_layers: Number of stacked layers.
        cell_fn: Callable building a single RNN cell.
        initial_state: Optional state to start decoding from.
        copy_state: If False, bridge ``initial_state`` onto a fresh zero state.
        batch_size: Batch size; inferred from ``initial_state`` when None.
        output_dropout_rate: Output dropout rate (applied in TRAIN mode, or
            unconditionally when given as a tensor).
        input_shape: If given, builds the cell variables eagerly.
        attention_mechanism_fn: Optional factory producing an attention
            mechanism from (num_units, memory, memory_sequence_len).
        memory: Attention memory (e.g. encoder outputs).
        memory_sequence_len: Lengths of the memory sequences.
        alignment_history: Whether the attention wrapper keeps alignments.
        mode: tf.estimator mode key.
        name: Optional cell name.

    Returns:
        ``(cell, initial_state)`` when an initial state is available,
        otherwise just ``cell``.
    """
    # Removed leftover debug code that printed and *blocked on stdin input()*
    # whenever alignment_history was requested.
    cells = []
    for _ in range(num_layers):
        cell = cell_fn(num_units, dtype=tf.float32, name=name)
        # build internal variables if input shape provided
        if input_shape is not None:
            cell.build(input_shape)
        # apply dropout if its a tensor or we are in training
        if ((isinstance(output_dropout_rate, tf.Tensor) or
             output_dropout_rate > 0 and mode == tf.estimator.ModeKeys.TRAIN)):
            cell = tf.contrib.rnn.DropoutWrapper(
                cell,
                output_keep_prob=1 - output_dropout_rate)
        cells.append(cell)
    if num_layers > 1:
        cell = tf.nn.rnn_cell.MultiRNNCell(cells)
    else:
        cell = cells[0]
    if initial_state is not None and not copy_state:
        if batch_size is None:
            batch_size = tf.shape(tf.contrib.framework.nest.flatten(initial_state)[0])[0]
        zero_state = cell.zero_state(batch_size, tf.float32)
        initial_state = bridge_state(initial_state, zero_state)
    if attention_mechanism_fn is not None:
        attention_mechanism = attention_mechanism_fn(
            num_units,
            memory,
            memory_sequence_len)
        cell_input_fn = None
        if isinstance(attention_mechanism, CoverageBahdanauAttention):
            # Coverage attention doubles the attention vector; feed only the
            # first half back concatenated with the inputs.
            cell_input_fn = (
                lambda inputs, attention: tf.concat([inputs, tf.split(attention, 2, axis=-1)[0]], -1))
        # NOTE(review): cell_input_fn is computed but never passed to
        # AttentionWrapper — likely intended as cell_input_fn=cell_input_fn;
        # confirm before changing, as it would alter trained-model behavior.
        cell = tf.contrib.seq2seq.AttentionWrapper(
            cell,
            attention_mechanism,
            output_attention=not isinstance(
                attention_mechanism, tf.contrib.seq2seq.BahdanauAttention),
            attention_layer_size=num_units,
            initial_cell_state=initial_state,
            alignment_history=alignment_history)
        if batch_size is None:
            # NOTE(review): fails if no initial_state was given here —
            # presumably callers always pass one with attention; confirm.
            batch_size = tf.shape(tf.contrib.framework.nest.flatten(initial_state)[0])[0]
        initial_state = cell.zero_state(batch_size, tf.float32)
    return (cell, initial_state) if initial_state is not None else cell
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Use config values to set up a function enabling status retrieval."""
    domain_config = config[DOMAIN]
    apcups_data = APCUPSdData(domain_config[CONF_HOST], domain_config[CONF_PORT])
    hass.data[DOMAIN] = apcups_data
    # Probe once up front. It doesn't really matter why we're not able to
    # get the status, just that we can't.
    try:
        apcups_data.update(no_throttle=True)
    except Exception:  # pylint: disable=broad-except
        _LOGGER.exception("Failure while testing APCUPSd status retrieval")
        return False
    return True
def _lagged_coherence_1freq(x, f, Fs, N_cycles=3, f_step=1):
    """Calculate lagged coherence of x at frequency f using the hanning-taper FFT method.

    Args:
        x: 1D signal array.
        f: Frequency of interest (Hz).
        Fs: Sampling rate (Hz).
        N_cycles: Number of cycles of ``f`` per analysis window.
        f_step: Unused here; presumably kept for API symmetry with callers
            — TODO confirm.

    Returns:
        Lagged coherence value (float in [0, 1]).
    """
    # Determine number of samples to be used in each window to compute lagged coherence
    Nsamp = int(np.ceil(N_cycles * Fs / f))
    # For each N-cycle chunk, calculate the fourier coefficient at the frequency of interest, f
    chunks = _nonoverlapping_chunks(x, Nsamp)
    C = len(chunks)
    # np.hanning is the symmetric Hann window, identical to the removed
    # scipy.signal.hanning (dropped in SciPy 1.13).
    hann_window = np.hanning(Nsamp)
    fourier_f = np.fft.fftfreq(Nsamp, 1 / float(Fs))
    fourier_f_idx = np.argmin(np.abs(fourier_f - f))
    fourier_coefsoi = np.zeros(C, dtype=complex)
    for i2, c in enumerate(chunks):
        fourier_coef = np.fft.fft(c * hann_window)
        fourier_coefsoi[i2] = fourier_coef[fourier_f_idx]
    # Compute the lagged coherence value: normalized lag-1 autocorrelation
    # of the Fourier coefficients across windows.
    lcs_num = 0
    for i2 in range(C - 1):
        lcs_num += fourier_coefsoi[i2] * np.conj(fourier_coefsoi[i2 + 1])
    lcs_denom = np.sqrt(np.sum(
        np.abs(fourier_coefsoi[:-1])**2) * np.sum(np.abs(fourier_coefsoi[1:])**2))
    return np.abs(lcs_num / lcs_denom)
def get_lengths(input_list):
    """Generator yielding ``(item, len(item))`` pairs for each string in
    ``input_list``.

    (The previous docstring claimed only lengths were yielded; callers
    actually receive 2-tuples, so the documentation now matches behavior.)

    :param input_list: iterable of strings (any sized objects work).
    :yields: tuples of (item, length of item).
    """
    for person in input_list:
        yield person, len(person)
def test_process_incoming(capsys):
    """ Test to determine that messages expected to be given to the blockchain
    are added to the receive_queue.

    Also exercises the network-level ('N_*') handlers: peer registration,
    ping/pong, peer-list requests, and invalid-message handling.
    """
    with capsys.disabled():
        msg = ('to-blockchain', 'msg-data')
        receive_queue = Queue()
        address = ('0.0.0.0', 1)
        # Non-network messages are forwarded to the blockchain queue,
        # tagged with the sender's address.
        process_incoming_msg(
            pack_msg(msg),
            address,
            receive_queue
        )
        assert receive_queue.get() == (msg[0], msg[1], address)
        n_address = ('123.123.123.123', 1)
        msg = ('N_new_peer', n_address)
        process_incoming_msg(
            pack_msg(msg),
            address,
            receive_queue
        )
        assert n_address in PEERS._peer_list
        msg = ('N_pong', '')
        process_incoming_msg(
            pack_msg(msg),
            n_address,
            receive_queue
        )
        # A pong marks the sender as an active peer.
        assert n_address in PEERS.get_active_peers()
        msg = ('N_get_peers', '')
        process_incoming_msg(
            pack_msg(msg),
            ('127.0.0.1', 6667),
            receive_queue
        )
        # The requester should receive one message per known peer.
        for p in PEERS.get_all_peers():
            received = RECEIVER.receive_msg()
            assert p == unpack_msg(received[0])[1]
        msg = ('N_ping', '')
        process_incoming_msg(
            pack_msg(msg),
            ('127.0.0.1', 6667),
            receive_queue
        )
        received = RECEIVER.receive_msg()
        assert 'N_pong' == unpack_msg(received[0])[0]
        utils.set_debug()
        # An invalid (empty) payload should only produce a debug log line.
        process_incoming_msg(pack_msg(''),
                             address,
                             receive_queue)
        captured = capsys.readouterr()
        assert captured.out.startswith('### DEBUG ### Received invalid message\n')
def test_create_downloadable_file_from_metadata(clean_db, monkeypatch):
    """Try to create a downloadable file from artifact_core metadata.

    Covers: unsupported-column filtering, normalization of empty/"null"
    additional metadata, and that the artifact-upload event is published
    only when ``alert_artifact_upload=True``.
    """
    # fake file metadata
    file_metadata = {
        "object_url": "10021/Patient 1/sample 1/aliquot 1/wes_forward.fastq",
        "file_size_bytes": 1,
        "md5_hash": "hash1234",
        "facet_group": "foobar",
        "uploaded_timestamp": datetime.now(),
        "foo": "bar",  # unsupported column - should be filtered
    }
    additional_metadata = {"more": "info"}
    # Mock artifact upload publishing
    publisher = MagicMock()
    monkeypatch.setattr("cidc_api.models.models.publish_artifact_upload", publisher)
    # Create the trial (to avoid violating foreign-key constraint)
    TrialMetadata.create(TRIAL_ID, METADATA)
    # Create files with empty or "null" additional metadata; all such
    # values should be normalized to an empty dict.
    for nullish_value in ["null", None, {}]:
        df = DownloadableFiles.create_from_metadata(
            TRIAL_ID, "wes_bam", file_metadata, additional_metadata=nullish_value
        )
        clean_db.refresh(df)
        assert df.additional_metadata == {}
    # Create the file
    DownloadableFiles.create_from_metadata(
        TRIAL_ID, "wes_bam", file_metadata, additional_metadata=additional_metadata
    )
    # Check that we created the file
    new_file = (
        clean_db.query(DownloadableFiles)
        .filter_by(object_url=file_metadata["object_url"])
        .first()
    )
    assert new_file
    # "foo" was filtered out, so compare only the supported columns.
    del file_metadata["foo"]
    for k in file_metadata.keys():
        assert getattr(new_file, k) == file_metadata[k]
    assert new_file.additional_metadata == additional_metadata
    # Check that no artifact upload event was published
    publisher.assert_not_called()
    # Check that artifact upload publishes
    DownloadableFiles.create_from_metadata(
        TRIAL_ID,
        "wes_bam",
        file_metadata,
        additional_metadata=additional_metadata,
        alert_artifact_upload=True,
    )
    publisher.assert_called_once_with(file_metadata["object_url"])
def mostrar_porcentaje_sexo():
    """
    Print, for every classroom in the module-level ``taller``, the
    percentage of male ('M') and female ('F') attendees.
    """
    for classroom in taller.aulas:
        print(f"Aula: {classroom.nombre}")
        print(f"Porcentaje de varones: {classroom.porcentaje('M'):.2%}")
        print(f"Porcentaje de mujeres: {classroom.porcentaje('F'):.2%}")
def program_hash(p:Program)->Hash:
    """Return the MD5 hex digest identifying a program.

    Operations whose name begins with an underscore are treated as
    internal and excluded from the digest.
    """
    visible_ops = [f'{nm}({str(args)})' for nm, args in p.ops if nm[0] != '_']
    return md5(";".join(visible_ops).encode('utf-8')).hexdigest()
def build_pages(config, site_navigation):
    """
    Builds all the pages and writes them into the build directory.

    'Home' and 'Catalogs' pages get dedicated builders; every other page
    falls back to the generic ``_build_page``.
    """
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(config['templates_dir']))
    site_navigation.get_page('Home').set_builder(build_index)
    site_navigation.get_page('Catalogs').set_builder(build_catalog)
    build_404(config, env, site_navigation)
    for page in site_navigation.walk_pages():
        try:
            log.debug("Building page %s", page.input_path)
            builder = page.get_builder() or _build_page
            builder(page, config, site_navigation, env)
        except:
            # Log which page failed, then let the error propagate.
            log.error("Error building page %s", page.input_path)
            raise
async def get_events(user_creds, client_creds, list_args, filter_func=None):
    """List events from all calendars according to the parameters given.

    The supplied credentials dict may be updated if tokens are refreshed.

    :param user_creds: User credentials from `obtain_user_permission`.
    :param client_creds: Client credentials from configuration.
    :param list_args: Arguments to pass to the calendar API's event list
        function.
    :param filter_func: Callable that can filter out individual events.
        The function should return True to include, False to exclude.
    :raise CredentialsError: if the credentials have not been set up,
        or if they have expired.
    """
    event_filter = filter_func or no_filter
    if "access_token" not in user_creds:
        raise CredentialsError("No access token in user credentials.")
    async with Aiogoogle(user_creds=user_creds, client_creds=client_creds) as aiogoogle:
        # Is there a way to cache service discovery?
        service = await aiogoogle.discover("calendar", "v3")
        try:
            calendar_list = await aiogoogle.as_user(
                service.calendarList.list(), timeout=30
            )
            # Persist any token refresh back into the caller's dict.
            _update_user_creds(user_creds, aiogoogle.user_creds)
            events = []
            for calendar_entry in calendar_list["items"]:
                events.extend(
                    await _get_calendar_events(
                        aiogoogle,
                        service,
                        list_args,
                        calendar_entry,
                        event_filter,
                    )
                )
            return dict(items=sorted(events, key=_event_sort_key_function))
        except HTTPError as ex:
            if "invalid_grant" in str(ex):
                raise CredentialsError("User credentials rejected.") from ex
            raise
def test_plugin_xmpp_general(tmpdir):
    """
    NotifyXMPP() General Checks

    Exercises URL parsing/obfuscation, TLS-availability fallbacks, notify
    success/failure paths (with slixmpp mocked), the enabled flag, and CA
    certificate discovery.
    """
    # Set success flag
    apprise.plugins.SliXmppAdapter.success = True
    # Enforce Adapter
    apprise.plugins.NotifyXMPP._adapter = apprise.plugins.SliXmppAdapter
    # Create a restore point
    ca_backup = apprise.plugins.SliXmppAdapter\
        .CA_CERTIFICATE_FILE_LOCATIONS
    # Clear CA Certificates
    apprise.plugins.SliXmppAdapter.CA_CERTIFICATE_FILE_LOCATIONS = []
    # Disable Throttling to speed testing
    apprise.plugins.NotifyBase.request_rate_per_sec = 0
    # Create our instance
    obj = apprise.Apprise.instantiate('xmpp://', suppress_exceptions=False)
    # Not possible because no password or host was specified
    assert obj is None
    with pytest.raises(TypeError):
        apprise.Apprise.instantiate(
            'xmpp://hostname', suppress_exceptions=False)
    # SSL Flags
    if hasattr(ssl, "PROTOCOL_TLS"):
        # Test cases where PROTOCOL_TLS simply isn't available
        ssl_temp_swap = ssl.PROTOCOL_TLS
        del ssl.PROTOCOL_TLS
        # Test our URL
        url = 'xmpps://user:pass@127.0.0.1'
        obj = apprise.Apprise.instantiate(url, suppress_exceptions=False)
        # Test we loaded
        assert isinstance(obj, apprise.plugins.NotifyXMPP) is True
        # Check that it found our mocked environments
        assert obj.enabled is True
        with mock.patch('slixmpp.ClientXMPP') as mock_stream:
            client_stream = mock.Mock()
            client_stream.connect.return_value = True
            mock_stream.return_value = client_stream
            # We fail because we could not verify the host
            assert obj.notify(
                title='title', body='body',
                notify_type=apprise.NotifyType.INFO) is False
        # Restore the variable for remaining tests
        setattr(ssl, 'PROTOCOL_TLS', ssl_temp_swap)
    else:
        # Handle case where it is not missing
        setattr(ssl, 'PROTOCOL_TLS', ssl.PROTOCOL_TLSv1)
        # Test our URL
        url = 'xmpps://user:pass@localhost'
        obj = apprise.Apprise.instantiate(url, suppress_exceptions=False)
        # Test we loaded
        assert isinstance(obj, apprise.plugins.NotifyXMPP) is True
        # Check that it found our mocked environments
        assert obj.enabled is True
        with mock.patch('slixmpp.ClientXMPP') as mock_stream:
            client_stream = mock.Mock()
            client_stream.connect.return_value = True
            mock_stream.return_value = client_stream
            assert obj.notify(
                title='title', body='body',
                notify_type=apprise.NotifyType.INFO) is True
        # Restore settings as they were
        del ssl.PROTOCOL_TLS
    # Pairs of input URL ('u') and expected privacy-masked URL prefix ('p').
    urls = (
        {
            'u': 'xmpp://user:pass@localhost',
            'p': 'xmpp://user:****@localhost',
        }, {
            'u': 'xmpp://user:pass@localhost?'
                 'xep=30,199,garbage,xep_99999999',
            'p': 'xmpp://user:****@localhost',
        }, {
            'u': 'xmpps://user:pass@localhost?xep=ignored&verify=no',
            'p': 'xmpps://user:****@localhost',
        }, {
            'u': 'xmpps://user:pass@localhost/?verify=false&to='
                 'user@test.com, user2@test.com/resource',
            'p': 'xmpps://user:****@localhost',
        }, {
            'u': 'xmpps://user:pass@localhost:5226?'
                 'jid=user@test.com&verify=no',
            'p': 'xmpps://user:****@localhost:5226',
        }, {
            'u': 'xmpps://user:pass@localhost?jid=user@test.com&verify=False',
            'p': 'xmpps://user:****@localhost',
        }, {
            'u': 'xmpps://user:pass@localhost?verify=False',
            'p': 'xmpps://user:****@localhost',
        }, {
            'u': 'xmpp://user:pass@localhost?to=user@test.com&verify=no',
            'p': 'xmpp://user:****@localhost',
        }
    )
    # Try Different Variations of our URL
    for entry in urls:
        url = entry['u']
        privacy_url = entry['p']
        obj = apprise.Apprise.instantiate(url, suppress_exceptions=False)
        # Test we loaded
        assert isinstance(obj, apprise.plugins.NotifyXMPP) is True
        # Check that it found our mocked environments
        assert obj.enabled is True
        # Test url() call
        assert isinstance(obj.url(), six.string_types) is True
        # Test url(privacy=True) call
        assert isinstance(obj.url(privacy=True), six.string_types) is True
        assert obj.url(privacy=True).startswith(privacy_url)
        with mock.patch('slixmpp.ClientXMPP') as mock_stream:
            client_stream = mock.Mock()
            client_stream.connect.return_value = True
            mock_stream.return_value = client_stream
            print(obj.url())
            # test notifications
            assert obj.notify(
                title='title', body='body',
                notify_type=apprise.NotifyType.INFO) is True
            # test notification without a title
            assert obj.notify(
                title='', body='body',
                notify_type=apprise.NotifyType.INFO) is True
    # Test Connection Failure
    with mock.patch('slixmpp.ClientXMPP') as mock_stream:
        client_stream = mock.Mock()
        client_stream.connect.return_value = False
        mock_stream.return_value = client_stream
        # test notifications
        assert obj.notify(
            title='title', body='body',
            notify_type=apprise.NotifyType.INFO) is False
    # Toggle our enabled flag
    obj.enabled = False
    with mock.patch('slixmpp.ClientXMPP') as mock_client:
        # Allow a connection to succeed
        mock_client.connect.return_value = True
        # Verify that we can't send content now
        assert obj.notify(
            title='', body='body',
            notify_type=apprise.NotifyType.INFO) is False
    # Toggle it back so it doesn't disrupt other testing
    obj.enabled = True
    # create an empty file for now
    ca_cert = tmpdir.mkdir("apprise_slixmpp_test").join('ca_cert')
    ca_cert.write('')
    # Update our path
    apprise.plugins.SliXmppAdapter.CA_CERTIFICATE_FILE_LOCATIONS = \
        [str(ca_cert), ]
    obj = apprise.Apprise.instantiate(
        'xmpps://user:pass@localhost/user@test.com?verify=yes',
        suppress_exceptions=False)
    assert isinstance(obj, apprise.plugins.NotifyXMPP) is True
    with mock.patch('slixmpp.ClientXMPP') as mock_client:
        # Allow a connection to succeed
        mock_client.connect.return_value = True
        # Our notification now should be able to get a ca_cert to reference
        assert obj.notify(
            title='', body='body', notify_type=apprise.NotifyType.INFO) is True
    # Restore our CA Certificates from backup
    apprise.plugins.SliXmppAdapter.CA_CERTIFICATE_FILE_LOCATIONS = \
        ca_backup
def process_file(input_file, input_type, index, is_parallel):
    """
    Process an individual SAM/BAM file.

    How we want to process the file depends on the input type and whether we
    are operating in parallel. If in parallel the index must be loaded for each
    input file. If the input is a BAM file it needs to be read using Pysam, if
    SAM it can be read directly as a text file.

    Args:
        input_file: Path to the input file.
        input_type: Whether the file is 'bam' or 'sam'.
        index: If operating in parallel a string to the index file, if not the
            loaded GTF index dictionary.
        is_parallel: Whether to operate in parallel.

    Returns:
        Dictionary containing alignment statistics for the input file.

    Raises:
        ValueError: If ``input_type`` is neither 'sam' nor 'bam' (previously
            this fell through to a NameError).
    """
    sample_name = input_file.split("/")[-1]
    logger = logging.getLogger("stats." + sample_name[0:10])
    logger.info("Processing " + sample_name + "...")
    if is_parallel:
        # Each parallel worker loads its own copy of the pickled index.
        logger.info("Loading index...")
        with open(index, "rb") as index_file:
            loaded_index = pickle.load(index_file)
        logger.info("Loaded.")
    else:
        loaded_index = index
    if input_type == "sam":
        logger.info("Parsing SAM file...")
        with open(input_file) as sam:
            output_table = gen_stats(sam, input_type, sample_name, loaded_index)
    elif input_type == "bam":
        logger.info("Parsing BAM file...")
        bam = pysam.AlignmentFile(input_file, "rb")
        try:
            output_table = gen_stats(bam, input_type, sample_name, loaded_index)
        finally:
            # Previously the BAM handle was never closed.
            bam.close()
    else:
        raise ValueError("Unknown input type: " + input_type)
    logger.info("Finished " + sample_name)
    return output_table
def add_dictionaries(coefficients, representatives, p):
    """ Computes a dictionary that is the linear combination of `coefficients`
    on `representatives`, with all arithmetic performed mod `p`.

    Parameters
    ----------
    coefficients : :obj:`Numpy Array`
        1D array with the same number of elements as `representatives`. Each
        entry is an integer mod p.
    representatives : :obj:`list(dict)`
        List where each entry is a dictionary. The keys on each dictionary are
        integers, and these might coincide with dictionaries on other entries.
    p : int(prime)

    Returns
    -------
    rep_sum : :obj:`dict`
        Result of adding the dictionaries on `representatives` with
        `coefficients`. Entries that reduce to all-zero arrays are removed.

    Example
    -------
    >>> import numpy as np
    >>> p=5
    >>> coefficients = np.array([1,2,3])
    >>> representatives = [
    ...     {0:np.array([1,3]), 3:np.array([0,0,1])},
    ...     {0:np.array([4,3]),2:np.array([4,5])},
    ...     {3:np.array([0,4,0])}]
    >>> add_dictionaries(coefficients, representatives, p)
    {0: array([4, 4]), 3: array([0, 2, 1]), 2: array([3, 0])}
    """
    rep_sum = {}
    for coeff, rep in zip(coefficients, representatives):
        for spx_idx, arr in rep.items():
            if spx_idx in rep_sum:
                rep_sum[spx_idx] = (rep_sum[spx_idx] + coeff * arr) % p
            else:
                rep_sum[spx_idx] = (coeff * arr) % p
    # Drop simplices whose combined expression reduced to zero mod p.
    zero_keys = [spx_idx for spx_idx, arr in rep_sum.items() if not np.any(arr)]
    for spx_idx in zero_keys:
        del rep_sum[spx_idx]
    return rep_sum
def import_documentation_parts(restApiId=None, mode=None, failOnWarnings=None, body=None):
    """
    See also: AWS API Documentation

    :example: response = client.import_documentation_parts(
        restApiId='string',
        mode='merge'|'overwrite',
        failOnWarnings=True|False,
        body=b'bytes'|file
    )

    :type restApiId: string
    :param restApiId: [REQUIRED]
            [Required] The identifier of an API of the to-be-imported documentation parts.
    :type mode: string
    :param mode: A query parameter to indicate whether to overwrite (OVERWRITE ) any existing DocumentationParts definition or to merge (MERGE ) the new definition into the existing one. The default value is MERGE .
    :type failOnWarnings: boolean
    :param failOnWarnings: A query parameter to specify whether to rollback the documentation importation (true ) or not (false ) when a warning is encountered. The default value is false .
    :type body: bytes or seekable file-like object
    :param body: [REQUIRED]
            [Required] Raw byte array representing the to-be-imported documentation parts. To import from a Swagger file, this is a JSON object.
    :rtype: dict
    :return: {
        'ids': [
            'string',
        ],
        'warnings': [
            'string',
        ]
    }

    :returns:
        (string) --
    """
    # Documentation-only stub generated from the AWS API model; the real
    # implementation is provided by the botocore client at runtime.
    pass
def dropsRowsWithMatchClassAndDeptRemainderIsZero(df, Col, RemainderInt, classToShrink):
    """
    Thin out the rows of one class: rows whose ``Col`` value equals
    ``classToShrink`` are kept only when their index is an exact multiple of
    ``RemainderInt``; all other rows are kept unconditionally.

    :param df: input DataFrame (assumes an integer index — TODO confirm).
    :param Col: name of the column holding the class labels.
    :param RemainderInt: keep every ``RemainderInt``-th matching row.
    :param classToShrink: the class value within ``Col`` to thin out.
    :return: new DataFrame with the matching class downsampled.
    """
    print("original lenght of dataframe = ", len(df))
    # Bug fix: the modulus was hard-coded to 10, silently ignoring the
    # RemainderInt parameter.
    df_new = df.drop(df[(df[Col] == classToShrink) & (df.index % RemainderInt != 0)].index)
    print("length of new dataframe after dropping rows = ", len(df_new))
    print("number of rows dropped = ", len(df) - len(df_new))
    print("length of 0 class is :", len(df_new[df_new[Col] == classToShrink]))
    return df_new
def import_plugins(
        plugins_to_import: Union[str, List[str], None] = None,
        warn: bool = True,
    ) -> Union[
        'ModuleType', Tuple['ModuleType', None]
    ]:
    """
    Import the Meerschaum plugins directory.

    :param plugins_to_import:
        If provided, only import the specified plugins.
        Otherwise import the entire plugins module. May be a string, list, or `None`.
        Defaults to `None`.
    :param warn:
        If `True`, emit a warning when the plugins module fails to import.
    """
    global __path__
    import sys
    from meerschaum.config._paths import (
        PLUGINS_RESOURCES_PATH, PLUGINS_ARCHIVES_RESOURCES_PATH, PLUGINS_INIT_PATH
    )
    PLUGINS_RESOURCES_PATH.mkdir(parents=True, exist_ok=True)
    PLUGINS_INIT_PATH.touch()
    if isinstance(plugins_to_import, str):
        plugins_to_import = [plugins_to_import]
    from meerschaum.utils.warnings import error, warn as _warn
    _locks['__path__'].acquire()
    _locks['sys.path'].acquire()
    try:
        if str(PLUGINS_RESOURCES_PATH.parent) not in sys.path:
            sys.path.insert(0, str(PLUGINS_RESOURCES_PATH.parent))
        if str(PLUGINS_RESOURCES_PATH.parent) not in __path__:
            __path__.append(str(PLUGINS_RESOURCES_PATH.parent))
        if not plugins_to_import:
            try:
                import plugins
            except ImportError as e:
                # Bug fix: this previously called the boolean ``warn``
                # parameter as a function; use the imported helper instead.
                _warn(str(e))
                plugins = None
        else:
            from meerschaum.utils.packages import attempt_import
            plugins = attempt_import(
                *[('plugins.' + p) for p in plugins_to_import],
                install=False, warn=True, lazy=False, venv=None,
            )
        if plugins is None and warn:
            _warn(f"Failed to import plugins.", stacklevel=3)
        if str(PLUGINS_RESOURCES_PATH.parent) in sys.path:
            sys.path.remove(str(PLUGINS_RESOURCES_PATH.parent))
    finally:
        # Bug fix: always release the locks — the original left them held
        # forever if any step above raised.
        _locks['__path__'].release()
        _locks['sys.path'].release()
    return plugins
def _opendata_to_section_meeting(data, term_year):
    """Converts OpenData class section info to a SectionMeeting instance.

    Args:
        data: An object from the `classes` field returned by OpenData.
        term_year: The year this term is in.

    Returns:
        A populated ``m.SectionMeeting``; also creates a ``m.Professor``
        record for the first listed instructor if one does not exist yet.
    """
    date = data['date']
    days = []
    if date['weekdays']:
        # Split e.g. 'MWF' / 'TTh' into individual day tokens; 'U' (Sunday)
        # is first rewritten to 'Su' so the regex can capture it.
        days = re.findall(r'[A-Z][a-z]?',
                          date['weekdays'].replace('U', 'Su'))
    # TODO(david): Actually use the term begin/end dates when we get nulls
    date_format = '%m/%d/%Y'
    start_date = None
    end_date = None
    if date['start_date']:
        # OpenData dates omit the year; append the term's year before parsing.
        start_date = date['start_date'] + '/' + str(term_year)
        start_date = datetime.strptime(start_date, date_format)
    if date['end_date']:
        end_date = date['end_date'] + '/' + str(term_year)
        end_date = datetime.strptime(end_date, date_format)
    time_format = '%H:%M'
    # TODO(david): DRY-up
    start_seconds = None
    if date['start_time']:
        # Convert wall-clock time to seconds since midnight.
        start_time = datetime.strptime(date['start_time'], time_format)
        start_seconds = (start_time -
                         start_time.replace(hour=0, minute=0, second=0)).seconds
    end_seconds = None
    if date['end_time']:
        end_time = datetime.strptime(date['end_time'], time_format)
        end_seconds = (end_time -
                       end_time.replace(hour=0, minute=0, second=0)).seconds
    meeting = m.SectionMeeting(
        start_seconds=start_seconds,
        end_seconds=end_seconds,
        days=days,
        start_date=start_date,
        end_date=end_date,
        building=data['location']['building'],
        room=data['location']['room'],
        is_tba=date['is_tba'],
        is_cancelled=date['is_cancelled'],
        is_closed=date['is_closed'],
    )
    if data['instructors']:
        # Instructor strings come as "Last,First"; only the first is used.
        last_name, first_name = data['instructors'][0].split(',')
        prof_id = m.Professor.get_id_from_name(first_name, last_name)
        if not m.Professor.objects.with_id(prof_id):
            m.Professor(id=prof_id, first_name=first_name,
                        last_name=last_name).save()
        meeting.prof_id = prof_id
    return meeting
def run_step(emr_engine, datastore, action, step_wrapper):
    """Submit an EMR step and block until it finishes, updating action progress.

    :type emr_engine: dart.engine.emr.emr.EmrEngine
    :type datastore: dart.model.datastore.Datastore
    :type action: dart.model.action.Action
    :type step_wrapper: dart.engine.emr.steps.StepWrapper
    :raises Exception: if the step ends in any state other than COMPLETED.
    """
    cluster_id = datastore.data.extra_data['cluster_id']
    emr_engine.conn.add_jobflow_steps(cluster_id, step_wrapper.step)
    # Poll the cluster every 30s until the step leaves RUNNING/PENDING.
    while True:
        time.sleep(30)
        # http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-steps.html
        step_state = _get_step_state(action, step_wrapper, emr_engine.conn.list_steps(cluster_id))
        if step_state == 'COMPLETED':
            # Report fractional progress (steps completed / total) back to dart.
            progress = "%.2f" % round(float(step_wrapper.step_num) / float(step_wrapper.steps_total), 2)
            updated_action = emr_engine.dart.patch_action(action, progress=progress)
            # do this so callers see an in-place update of the action
            action.data = updated_action.data
            return
        elif step_state not in ['RUNNING', 'PENDING']:
            values = (action.id, step_wrapper.step_num, step_state)
            raise Exception('action (id=%s) failed on step %s with state: %s' % values)
def get_token():
    """Return a session token from the internal API, or ``None`` on failure.

    Authenticates against the ``/sessions`` endpoint with the worker
    credentials taken from ``local_config``.

    Returns
    -------
    str or None
        The session token, or ``None`` if the request or response parsing
        failed.
    """
    auth_url = '%s/sessions' % local_config['INTERNAL_API_BASE_URL']
    auth_credentials = {'eppn': 'worker@pebbles',
                        'password': local_config['SECRET_KEY']}
    try:
        r = requests.post(auth_url, auth_credentials, verify=local_config['SSL_VERIFY'])
        return json.loads(r.text).get('token')
    except (requests.RequestException, ValueError, KeyError):
        # Narrowed from a bare `except:`: network failure, non-JSON body, or a
        # missing config key all mean "no token"; anything else should surface.
        return None
def setlist(L):
    """ list[alpha] -> set[alpha]

    Return a set containing every element of ``L``.
    """
    # Set comprehension replaces the explicit accumulate-and-add loop.
    return {element for element in L}
def ek_8_fix(alts: List[str]) -> List[str]:
    """
    Replace ek, 8 patterns in text.

    This is google ASR specific. Google gets confused between 1 and 8.
    Therefore if alternatives only contain 8 and 1, we change everything to 8
    pm.

    TODO: Another really structurally bad piece of logic.
    """
    pattern = r"\b(?P<num>\d+) p(.| )?m(.)?"
    ones = eights = others = 0
    for alt in alts:
        found = re.search(pattern, alt, flags=re.I | re.U)
        if not found:
            continue
        hour = found.group("num")
        if hour == "1":
            ones += 1
        elif hour == "8":
            eights += 1
        else:
            others += 1
    # Only rewrite when the alternatives mix exactly 1 and 8 (no other hours).
    if not (ones and eights and not others):
        return alts
    return [re.sub(pattern, "8 pm", alt, flags=re.I | re.U) for alt in alts]
def _pixel_at(x, y):
    """
    Returns (r, g, b) color code for a pixel with given coordinates (each value is in
    0..256 limits)
    """
    screen = QtGui.QGuiApplication.primaryScreen()
    # Grab a 1x1 region of the whole desktop (window id 0) at (x, y).
    color = screen.grabWindow(0, x, y, 1, 1).toImage().pixel(0, 0)
    # pixel() returns a packed 0xRRGGBB integer; unpack the three channels.
    return ((color >> 16) & 0xFF), ((color >> 8) & 0xFF), (color & 0xFF)
def reincarnatedCLI(nodeRegsForCLI, newLooper, tdir, cli):
    """
    Creating a new cli instance is equivalent to starting and stopping a cli

    Generator fixture: yields a freshly-built CLI (the passed-in `cli` is
    intentionally replaced) and closes it on teardown.
    """
    cli = newCLI(nodeRegsForCLI, newLooper, tdir, unique_name='reincarnate')
    yield cli
    cli.close()
def parse_tibia_time(tibia_time: str) -> datetime:
    """Gets a time object from a time string from tibia.com

    Converts the site's CET/CEST timestamp to local time; returns ``None``
    if the string cannot be parsed or carries an unknown timezone.
    """
    # Strip commas and normalize spacing before parsing.
    # NOTE(review): the second replace looks like it targets a non-breaking
    # space that was mangled during extraction — confirm the original literal.
    tibia_time = tibia_time.replace(",","").replace(" ", " ")
    # Getting local time and GMT
    t = time.localtime()
    u = time.gmtime(time.mktime(t))
    # UTC Offset
    local_utc_offset = ((timegm(t) - timegm(u)) / 60 / 60)
    # Extracting timezone
    tz = tibia_time[-4:].strip()
    try:
        # Convert time string to time object
        # Removing timezone cause CEST and CET are not supported
        t = datetime.strptime(tibia_time[:-4].strip(), "%b %d %Y %H:%M:%S")
    except ValueError:
        log.error("parse_tibia_time: couldn't parse '{0}'".format(tibia_time))
        return None
    # Getting the offset
    if tz == "CET":
        utc_offset = 1
    elif tz == "CEST":
        utc_offset = 2
    else:
        log.error("parse_tibia_time: unknown timezone for '{0}'".format(tibia_time))
        return None
    # Add/subtract hours to get the real time
    return t + timedelta(hours=(local_utc_offset - utc_offset))
def a3v(V: Vector3) -> np.ndarray:
    """Convert a Vector3 into a numpy array.

    Arguments:
        V {Vector3} -- Vector3 class containing x, y, and z.

    Returns:
        np.ndarray -- Numpy array with the same contents as the vector3.
    """
    components = [V.x, V.y, V.z]
    return np.array(components)
def _p_value_color_format(pval):
"""Auxiliary function to set p-value color -- green or red."""
color = "green" if pval < 0.05 else "red"
return "color: %s" % color | 5,330,441 |
def rms(da, dim=None, dask='parallelized', keep_attrs=True):
    """
    Reduces a dataarray by calculating the root mean square along the dimension
    dim.

    Parameters
    ----------
    da : xarray.DataArray
        Input data.
    dim : str
        Dimension along which to reduce; required (ValueError if omitted).
    dask : str, optional
        Passed through to ``xr.apply_ufunc`` (default 'parallelized').
    keep_attrs : bool, optional
        Whether to keep the attributes of ``da`` on the result.

    Returns
    -------
    xarray.DataArray
        RMS of ``da`` along ``dim``, named ``'<da.name>_rms'``.
    """
    # TODO If dim is None then take the root mean square along all dimensions?
    if dim is None:
        raise ValueError('Must supply a dimension along which to calculate rms')
    rms = xr.apply_ufunc(_rms_gufunc, da,
                         input_core_dims=[[dim]],
                         dask=dask, output_dtypes=[da.dtype],
                         keep_attrs=keep_attrs)
    # Return the name of the da as variable_rms
    rms.name = str(da.name) + '_rms'
    return rms
def check_blackouts():
    """Log a warning for each currently blacked-out server.

    Reads the children of the blacked-out-servers Zookeeper node; a missing
    node simply means no server is blacked out.
    """
    zkclient = context.GLOBAL.zk.conn
    try:
        blacked_out_nodes = zkclient.get_children(z.BLACKEDOUT_SERVERS)
        for server in blacked_out_nodes:
            # Logger.warn() is a deprecated alias; use warning().
            _LOGGER.warning('Server blackedout: %s', server)
    except kazoo.client.NoNodeError:
        # No blackout node at all: nothing to report.
        pass
def generator(seed):
    """
    build the generator network.

    DCGAN-style generator: a fully-connected projection of `seed` to a
    4x4x256 feature map, followed by four transpose-convolution layers that
    upscale to a single-channel image with tanh output.
    """
    weights_initializer = tf.truncated_normal_initializer(stddev=0.02)
    # fully connected layer to upscale the seed for the input of
    # convolutional net.
    target = tf.contrib.layers.fully_connected(
        inputs=seed,
        num_outputs=4 * 4 * 256,
        activation_fn=tf.nn.relu,
        normalizer_fn=None,
        weights_initializer=weights_initializer,
        scope='g_project')
    # reshape to images
    target = tf.reshape(target, [-1, 4, 4, 256])
    # transpose convolution to upscale
    for layer_idx in range(4):
        if layer_idx == 3:
            # Final layer: single-channel output, no upscaling stride.
            num_outputs = 1
            kernel_size = 32
            stride = 1
            # arXiv:1511.06434v2
            # use tanh in output layer
            activation_fn = tf.nn.tanh
            # arXiv:1511.06434v2
            # use batch norm except the output layer
            normalizer_fn = None
        else:
            # Halve the channel count each layer: 64, 32, 16.
            num_outputs = 2 ** (6 - layer_idx)
            kernel_size = 5
            stride = 2
            # arXiv:1511.06434v2
            # use ReLU
            activation_fn = tf.nn.relu
            # arXiv:1511.06434v2
            # use batch norm
            normalizer_fn = tf.contrib.layers.batch_norm
        target = tf.contrib.layers.convolution2d_transpose(
            inputs=target,
            num_outputs=num_outputs,
            kernel_size=kernel_size,
            stride=stride,
            padding='SAME',
            activation_fn=activation_fn,
            normalizer_fn=normalizer_fn,
            weights_initializer=weights_initializer,
            scope='g_conv_t_{}'.format(layer_idx))
    return target
def KFoldROC(k=5, batch_size=20, img_width=150, img_height=150, img_channels=150):
    """
    Calculates and plots ROC curves for each of the cross-validated models.

    @params:
        k            - Optional : number of splits (i.e. the 'K' in k-fold cross-validation)
        batch_size   - Optional : number of images to feed CPU/GPU per batch
        img_width    - Optional : image width
        img_height   - Optional : image height
        img_channel  - Optional : number of channels in each image (e.g. grayscale = 1, RGB = 3, RGB + Alpha = 4)

    Returns:
        A single image file with all k ROC curves plotted.
        A .csv file of all images, their true labels, and the predicted labels.
    """
    # Phase 1: run each saved checkpoint model against its validation split
    # and dump (filename, true label, prediction) to a per-split text file.
    for idx, model in enumerate(os.listdir('checkpoints')):
        model_path = 'checkpoints/' + str(model)
        split = idx + 1
        test_dir = 'data/k_folds/split_' + str(split) + '/val'
        test_datagen = ImageDataGenerator(rescale = 1. / 255)
        test_generator = test_datagen.flow_from_directory(
            test_dir,
            shuffle = False,  # keep order aligned with filenames/classes
            target_size = (img_width, img_height),
            batch_size = batch_size,
            class_mode = 'binary')
        print('Loading model: ' + str(model))
        model = load_model(model_path)
        print('\n\nModel Summary:')
        model.summary()
        print('Predicting...')
        results = model.predict_generator(test_generator)
        img_id = [img for img in test_generator.filenames]
        img_label = [label for label in test_generator.classes]
        print('Writing predictions to file...')
        with open('data/analysis/split_' + str(split) + '.txt', 'w') as fh:
            for idx, result in enumerate(results):
                fh.write(img_id[idx] + '\t' + str(img_label[idx]) + '\t' + str(result[0]) + '\n')
    # Phase 2: read the per-split prediction files back and plot one ROC
    # curve per model on a shared figure.
    plt.figure(num = 0, dpi = 300, frameon = True).clf()
    for split in range(1, k + 1):
        label = []
        pred = []
        with open('data/analysis/split_' + str(split) + '.txt') as fh:
            data = csv.reader(fh, delimiter = '\t')
            for line in data:
                label.append(int(line[1]))
                pred.append(float(line[2]))
        fpr, tpr, thresh = metrics.roc_curve(label, pred)
        auc = metrics.roc_auc_score(label, pred)
        plt.plot(fpr, tpr, label='Model ' + str(split) + ', AUC=' + "%.3f" % round(auc, 3))
    # Diagonal reference line = random classifier.
    plt.plot([0, 1], [0, 1], 'r--')
    plt.xlabel('False-Positive Rate')
    plt.ylabel('True-Positive Rate')
    plt.legend(loc=(0.5,0.1))
    plt.grid(b=True, which='major', color='gray', linestyle='-')
    plt.savefig('kfold_AUC.png')
def solve_fxdocc_root(iws, e_onsite, concentration, hilbert_trafo: Callable[[complex], complex],
                      beta: float, occ: float = None, self_cpa_iw0=None, mu0: float = 0,
                      weights=1, n_fit=0, restricted=True, **root_kwds) -> RootFxdocc:
    """Determine the CPA self-energy by solving the root problem for fixed `occ`.

    Parameters
    ----------
    iws : (N_iw) complex array_like
        Positive fermionic Matsubara frequencies.
    e_onsite : (N_cmpt) float or (..., N_iw, N_cmpt) complex np.ndarray
        On-site energy of the components. This can also include a local
        frequency dependent self-energy of the component sites.
        If multiple non-frequency dependent on-site energies should be
        considered simultaneously, pass an on-site energy with `N_z=1`:
        `e_onsite[..., np.newaxis, :]`.
    concentration : (..., N_cmpt) float array_like
        Concentration of the different components used for the average.
    hilbert_trafo : Callable[[complex], complex]
        Hilbert transformation of the lattice to calculate the coherent Green's
        function.
    beta : float
        Inverse temperature.
    occ : float
        Total occupation.
    self_cpa_iw0, mu0 : (..., N_iw) complex np.ndarray and float, optional
        Starting guess for CPA self-energy and chemical potential.
        `self_cpa_iw0` implicitly contains the chemical potential `mu0`,
        thus they should match.

    Returns
    -------
    root.self_cpa : (..., N_iw) complex np.ndarray
        The CPA self-energy as the root of `self_root_eq`.
    root.mu : float
        Chemical potential for the given occupation `occ`.

    Other Parameters
    ----------------
    weights : (N_iw) float np.ndarray, optional
        Passed to `gftool.density_iw`.
        Residues of the frequencies with respect to the residues of the
        Matsubara frequencies `1/beta`. (default: 1.)
        For Padé frequencies this needs to be provided.
    n_fit : int, optional
        Passed to `gftool.density_iw`.
        Number of additionally fitted moments. If Padé frequencies
        are used, this is typically not necessary. (default: 0)
    restricted : bool, optional
        Whether `self_cpa_z` is restricted to `self_cpa_z.imag <= 0`. (default: True)
        Note, that even if `restricted=True`, the imaginary part can get negative
        within tolerance. This should be removed by hand if necessary.
    root_kwds
        Additional arguments passed to `scipy.optimize.root`.
        `method` can be used to choose a solver.
        `options=dict(fatol=tol)` can be specified to set the desired tolerance
        `tol`.

    Raises
    ------
    RuntimeError
        If unable to find a solution.

    See Also
    --------
    solve_root

    Examples
    --------
    >>> from functools import partial
    >>> beta = 30
    >>> e_onsite = [-0.3, 0.3]
    >>> conc = [0.3, 0.7]
    >>> hilbert = partial(gt.bethe_gf_z, half_bandwidth=1)
    >>> occ = 0.5,
    >>> iws = gt.matsubara_frequencies(range(1024), beta=30)

    >>> self_cpa_iw, mu = gt.cpa.solve_fxdocc_root(iws, e_onsite, conc,
    ...                                            hilbert, occ=occ, beta=beta)

    >>> import matplotlib.pyplot as plt
    >>> __ = plt.plot(iws.imag, self_cpa_iw.imag, '+--')
    >>> __ = plt.axhline(np.average(e_onsite, weights=conc) - mu)
    >>> __ = plt.plot(iws.imag, self_cpa_iw.real, 'x--')
    >>> plt.show()

    check occupation

    >>> gf_coher_iw = hilbert(iws - self_cpa_iw)
    >>> gt.density_iw(iws, gf_coher_iw, beta=beta, moments=[1, self_cpa_iw[-1].real])
    0.499999...

    check CPA

    >>> self_compare = gt.cpa.solve_root(iws, np.array(e_onsite)-mu, conc,
    ...                                  hilbert_trafo=hilbert)
    >>> np.allclose(self_cpa_iw, self_compare, atol=1e-5)
    True
    """
    # Broadcast the concentrations against the frequency axis.
    concentration = np.asarray(concentration)[..., np.newaxis, :]
    e_onsite = np.asarray(e_onsite)
    if self_cpa_iw0 is None:  # static average + 0j to make it complex array
        self_cpa_iw0 = np.sum(e_onsite * concentration, axis=-1) - mu0 + 0j
    self_cpa_iw0, __ = np.broadcast_arrays(self_cpa_iw0, iws)
    self_cpa_nomu = self_cpa_iw0 + mu0  # strip contribution of mu
    # TODO: use on-site energy to estimate m2+mu, which only has to be adjusted by mu
    m1 = np.ones_like(self_cpa_iw0[..., -1].real)

    def _occ_diff(x):
        # Deviation of the coherent occupation from the target `occ`.
        gf_coher_iw = hilbert_trafo(iws - x)
        m2 = x[..., -1].real  # for large iws, real part should static part
        occ_root = density_iw(iws, gf_iw=gf_coher_iw, beta=beta, weights=weights,
                              moments=np.stack([m1, m2], axis=-1), n_fit=n_fit).sum()
        return occ_root - occ

    mu = chemical_potential(lambda mu: _occ_diff(self_cpa_nomu - mu), mu0=mu0)
    LOGGER.debug("VCA chemical potential: %s", mu)
    # one iteration gives the ATA: average t-matrix approximation
    self_cpa_nomu = self_fxdpnt_eq(self_cpa_nomu - mu, iws, e_onsite - mu,
                                   concentration, hilbert_trafo) + mu
    mu = chemical_potential(lambda mu: _occ_diff(self_cpa_nomu - mu), mu0=mu)
    LOGGER.debug("ATA chemical potential: %s", mu)
    # Pack [mu, Re(self), Im(self)] into one flat real vector for the solver.
    x0, shapes = _join([mu], self_cpa_nomu.real, self_cpa_nomu.imag)
    self_root_eq_ = partial(restrict_self_root_eq if restricted else self_root_eq,
                            z=iws, concentration=concentration, hilbert_trafo=hilbert_trafo)

    def root_eq(mu_selfcpa):
        # Combined residual: CPA self-consistency plus the occupation constraint.
        mu, self_cpa_re, self_cpa_im = _split(mu_selfcpa, shapes)
        self_cpa = self_cpa_re + 1j*self_cpa_im - mu  # add contribution of mu
        self_root = self_root_eq_(self_cpa, e_onsite=e_onsite - mu)
        occ_root = _occ_diff(self_cpa)
        return _join([self_root.size*occ_root],
                     self_root.real, self_root.imag)[0]

    root_kwds.setdefault("method", "krylov")
    LOGGER.debug('Search CPA self-energy root')
    if 'callback' not in root_kwds and LOGGER.isEnabledFor(logging.DEBUG):
        # setup LOGGER if no 'callback' is provided
        root_kwds['callback'] = lambda x, f: LOGGER.debug(
            'Residue: mu=%+6g cpa=%6g', f[0], np.linalg.norm(f[1:])
        )
    sol = optimize.root(root_eq, x0=x0, **root_kwds)
    LOGGER.debug("CPA self-energy root found after %s iterations.", sol.nit)
    if not sol.success:
        raise RuntimeError(sol.message)
    mu, self_cpa_re, self_cpa_im = _split(sol.x, shapes)
    self_cpa = self_cpa_re - mu + 1j*self_cpa_im  # add contribution of mu
    LOGGER.debug("CPA chemical potential: %s", mu.item())
    return RootFxdocc(self_cpa, mu=mu.item())
def getn_hidden_area(*args):
    """getn_hidden_area(int n) -> hidden_area_t

    SWIG-generated wrapper: forwards to the `_idaapi` C extension.
    """
    return _idaapi.getn_hidden_area(*args)
def cdist(X: DNDarray, Y: DNDarray = None, quadratic_expansion: bool = False) -> DNDarray:
    """
    Calculate Euclidian distance between two DNDarrays:

    .. math:: d(x,y) = \\sqrt{(|x-y|^2)}

    Returns 2D DNDarray of size :math: `m \\times n`

    Parameters
    ----------
    X : DNDarray
        2D array of size :math: `m \\times f`
    Y : DNDarray
        2D array of size :math: `n \\times f`
    quadratic_expansion : bool
        Whether to use quadratic expansion for :math:`\\sqrt{(|x-y|^2)}` (Might yield speed-up)
    """
    # Dispatch to the generic pairwise-distance driver with the chosen metric.
    if quadratic_expansion:
        return _dist(X, Y, _euclidian_fast)
    else:
        return _dist(X, Y, _euclidian)
def popcount_u8(x: np.ndarray):
    """Return the total bit count of a uint8 array.

    Parameters
    ----------
    x : np.ndarray
        Array of dtype ``uint8`` (any shape).

    Returns
    -------
    int
        Total number of set bits over all elements.

    Raises
    ------
    ValueError
        If ``x`` is not of dtype ``uint8``.
    """
    if x.dtype != np.uint8:
        raise ValueError("input dtype must be uint8")
    # np.unpackbits expands each byte into its 8 bits; summing counts all set
    # bits in one vectorized pass (replaces the per-element Python LUT loop).
    return int(np.unpackbits(x.ravel()).sum())
def split_errorRC(tr, t1, t2, q, Emat, maxdt, ddt, dphi):
    """
    Calculates error bars based on a F-test and
    a given confidence interval q.

    Note
    ----
    This version uses a Fisher transformation for
    correlation-type misfit.

    Parameters
    ----------
    tr : :class:`~obspy.core.Trace`
        Seismogram
    t1 : :class:`~obspy.core.utcdatetime.UTCDateTime`
        Start time of picking window
    t2 : :class:`~obspy.core.utcdatetime.UTCDateTime`
        End time of picking window
    q : float
        Confidence level
    Emat : :class:`~numpy.ndarray`
        Energy minimization matrix

    Returns
    -------
    err_dtt : float
        Error in dt estimate (sec)
    err_phi : float
        Error in phi estimate (degrees)
    err_contour : :class:`~numpy.ndarray`
        Error contour for plotting
    """
    from scipy import stats
    # Search grids in fast-axis angle (radians) and delay time (sec).
    phi = np.arange(-90.0, 90.0, dphi)*np.pi/180.
    dtt = np.arange(0., maxdt, ddt)
    # Copy trace to avoid overriding
    tr_tmp = tr.copy()
    tr_tmp.trim(t1, t2)
    # Get degrees of freedom
    dof = split_dof(tr_tmp)
    if dof <= 3:
        dof = 3.01
        print(
            "Degrees of freedom < 3. Fixing to DOF = 3, which may " +
            "result in inaccurate errors")
    n_par = 2
    # Fisher transformation
    vmin = np.arctanh(Emat.min())
    # Error contour
    zrr_contour = vmin + (vmin*np.sign(vmin)*n_par/(dof - n_par) *
                          stats.f.ppf(1. - q, n_par, dof - n_par)) *\
        np.sqrt(1./(dof-3))
    # Back transformation
    err_contour = np.tanh(zrr_contour)
    # Estimate uncertainty (q confidence interval)
    err = np.where(Emat < err_contour)
    # Quarter of the confidence region's extent, floored at a quarter grid step.
    err_phi = max(
        0.25*(phi[max(err[0])] - phi[min(err[0])])*180./np.pi, 0.25*dphi)
    err_dtt = max(0.25*(dtt[max(err[1])] - dtt[min(err[1])]), 0.25*ddt)
    return err_dtt, err_phi, err_contour
def filter_parts(settings):
    """
    Remove grouped components and glyphs that have been deleted or split.

    Prunes both 'glyphs' and 'training_glyphs' in the settings dict in place
    and returns the glyphs removed from 'glyphs'.
    """
    removed_prefixes = ("_split", "_group", "_delete")

    def _is_removed(glyph):
        # A glyph is pruned when its class name carries one of the markers.
        return glyph['class_name'].startswith(removed_prefixes)

    parts = [glyph for glyph in settings['glyphs'] if _is_removed(glyph)]
    settings['glyphs'] = [glyph for glyph in settings['glyphs']
                          if not _is_removed(glyph)]
    # Remove from the training glyphs as well
    settings['training_glyphs'] = [glyph for glyph in settings['training_glyphs']
                                   if not _is_removed(glyph)]
    return parts
def do_path():
    """Send the HTTP request (GET) and process to get the path to the user space on the cache.

    Prints the cache path on success; on 404 reports that the user is not
    initialized; otherwise delegates to the generic error reporter.
    """
    url = settings.XFC_API_URL + "user?name=" + settings.USER
    response = requests.get(url, verify=settings.VERIFY)
    if response.status_code == 200:
        data = response.json()
        sys.stdout.write(data["cache_path"]+"\n")
    elif response.status_code == 404:
        user_not_initialized_message()
    else:
        error_from_response(response)
def kernel_zz(Y, X, Z):
    """
    Kernel zz for second derivative of the potential generated by a sphere.

    Evaluates (3*Z**2 - r**2) / r**5 with r = sqrt(X**2 + Y**2 + Z**2).
    """
    r = np.sqrt(Y ** 2 + X ** 2 + Z ** 2)
    # Keep the same multiplication order as r**5 built from r**2 * r**2 * r
    # to preserve floating-point results exactly.
    r_sq = r * r
    r_fifth = r_sq * r_sq * r
    return (3 * Z ** 2 - r_sq) / r_fifth
def getAccentedVocal(vocal, acc_type="g"):
    """
    It returns given vocal with grave ('g') or acute ('a') accent.
    """
    # Build the vowel -> {accent-type -> accented char} table from parallel
    # strings instead of a hand-written nested literal.
    plain = 'aeiou'
    grave = u'\xe0\xe8\xec\xf2\xf9'
    acute = u'\xe1\xe9\xed\xf3\xfa'
    table = {v: {'g': g, 'a': a} for v, g, a in zip(plain, grave, acute)}
    # Index vowel first, then accent type (same KeyError order as before).
    return table[vocal][acc_type]
def env_initialize(env, train_mode=True, brain_idx=0, idx=0, verbose=False):
    """ Setup environment and return info

    Resets a Unity ML-Agents style environment and returns
    (brain, brain_name, initial state of agent `idx`, action size, state size).
    """
    # get the default brain
    brain_name = env.brain_names[brain_idx]
    brain = env.brains[brain_name]
    # reset the environment
    env_info = env.reset(train_mode=train_mode)[brain_name]
    # examine the state space and action space
    state = env_info.vector_observations[idx]
    state_size = len(state)
    action_size = brain.vector_action_space_size
    if verbose:
        # number of agents in the environment
        print(f'Number of agents: {len(env_info.agents)}')
        print(f'Number of actions: {action_size}')
        print(f'States have length: {state_size}')
        print(f'States look like: {state}')
    return (brain, brain_name, state, action_size, state_size)
def lorentzianfit(x, y, parent=None, name=None):
    """Compute Lorentzian fit

    Returns (yfit, params), where yfit is the fitted curve and params are
    the fitting parameters. Returns None implicitly if the interactive fit
    dialog is cancelled (``guifit`` returns a falsy value).
    """
    dx = np.max(x) - np.min(x)
    dy = np.max(y) - np.min(y)
    # Initial width guess: 10% of the x-range.
    sigma = dx * 0.1
    amp = fit.LorentzianModel.get_amp_from_amplitude(dy, sigma)
    a = FitParam(_("Amplitude"), amp, 0.0, amp * 1.2)
    b = FitParam(_("Base line"), np.min(y), np.min(y) - 0.1 * dy, np.max(y))
    sigma = FitParam(_("Std-dev") + " (σ)", sigma, sigma * 0.2, sigma * 10)
    mu = FitParam(_("Mean") + " (μ)", xpeak(x, y), np.min(x), np.max(x))
    params = [a, sigma, mu, b]

    def fitfunc(x, params):
        return fit.LorentzianModel.func(x, *params)

    values = guifit(
        x, y, fitfunc, params, parent=parent, wintitle=_("Lorentzian fit"), name=name
    )
    if values:
        return fitfunc(x, values), params
def tf_quat(T):
    """ Return quaternion from 4x4 homogeneous transform """
    # NOTE(review): assert is stripped under `python -O`; shape violations
    # would then surface later inside tf_rot/rot2quat.
    assert T.shape == (4, 4)
    return rot2quat(tf_rot(T))
def download_folder_to_path(
    bucket_name: str,
    folder: str,
    path: str,
    suffix: str = None,
    storage_client=None,
    flatten=False,
    asynchronous=False,
):
    """
    Downloads a folder hosted in a bucket to the chosen path.

    If flatten is set to True, then the hierarchy structure of the cloud folder
    is ignored and all files are downloaded to a single directory.
    """
    storage_client = storage_client or get_storage_client()
    bucket = storage_client.get_bucket(bucket_name)
    blobs = list(bucket.list_blobs(prefix=folder))
    if suffix:
        blobs = [b for b in blobs if b.name.endswith(suffix)]
    #if not os.path.isdir(path):
    #    raise ValueError("You must first create a folder at {0} before running
    #    this command.".format(path))
    # Number of leading path components to strip from each blob name.
    if folder.startswith("/"):
        sublength = len(folder.split("/")) - 1
    else:
        sublength = len(folder.split("/"))
    if asynchronous:
        # NOTE(review): asyncio.get_event_loop() is deprecated outside a
        # running loop on modern Python; asyncio.run() would be the
        # replacement — confirm minimum supported version first.
        loop = asyncio.get_event_loop()
        loop.run_until_complete(_download_blobs_async(blobs, flatten, sublength, path))
    else:
        _download_blobs(blobs, flatten, sublength, path, storage_client)
def isString(string):
    """Validate that the input argument is a string.

    Parameters
    ----------
    string : object
        Value to check.

    Raises
    ------
    TypeError
        If ``string`` is not a ``str`` (or subclass).

    Returns
    -------
    None
    """
    # isinstance is the idiomatic check and also accepts str subclasses,
    # unlike the previous strict `type(...) == str` comparison.
    if not isinstance(string, str):
        raise TypeError("The input argument has to be a string")
def solve2(lines):
    """Solve the problem.

    Sums the sizes of the groups produced by ``parse_answers2``.
    """
    return sum(len(group) for group in parse_answers2(lines))
def copy_file(source, target, compress=None):
    """
    Copies a file from source to target, optionally compressing it before writing
    it out.

    Parameters
    ----------
    source : str
    target : str
        Destination file path; if it is an existing directory, the source
        basename is appended.
    compress : str, optional
        The compression algorithm to use. Currently only ".gz" is supported. If
        set, target becomes target + compress.
    """
    assert op.isfile(source)
    if (op.isdir(target)):
        target = op.join(target, op.basename(source))
    # Read via a context manager so the source handle is always closed
    # (the original left it open until garbage collection).
    with open(source, "rb") as src:
        data = src.read()
    if (compress is None):
        with open(target, "wb") as t:
            t.write(data)
    else:
        assert compress == ".gz"
        t = gzip_open(file_name=target+compress, mode="wb")
        t.write(data)
        # Dropping the last reference closes the gzip wrapper, matching the
        # original behavior (gzip_open is a project wrapper without `with`).
        del t
def execute_on_hosts(hosts, commands):
    """Execute Shell command on hosts over SSH.

    :param hosts: A list of host names.
    :param commands: A list of Shell commands.
    """
    # Each command is echoed before execution so the output is self-describing.
    commands_merged = ''
    for command in commands:
        commands_merged += 'echo $ ' + command + ';'
        commands_merged += command + ';'
    for host in hosts:
        # NOTE(review): Python 2 print statements; the command string is
        # interpolated into a shell line unquoted — trusted input only.
        print 'Host: ' + host
        print subprocess.Popen(
            'ssh ' + host + ' "' + commands_merged + '"',
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=True).communicate()[0]
def copy_local_textfile_tree_to_container(local_source_dir, container_host_connection, container_config, container_target_dir):
    """
    Copy the contents of a local directory to a container directory.

    Only regular files are copied; each is transferred individually via
    copy_textfile_to_container, preserving its relative name.
    """
    dir_content = fileutil.get_dir_content(local_source_dir)
    for item in dir_content:
        source_path = local_source_dir.joinpath(item)
        # Skip subdirectories and other non-file entries.
        if os.path.isfile(str(source_path)):
            target_path = container_target_dir.joinpath(item)
            copy_textfile_to_container(container_host_connection, container_config, source_path, target_path)
def get_contract_type(timestamp: int, due_timestamp: int) -> str:
    """Get the contract_type.

    Input the timestamp and due_timestamp. Return which contract_type is.

    Args:
        timestamp: The target timestamp, you want to know.
        due_timestamp: The due timestamp of the contract.

    Returns:
        The contract_type name.

    Raises:
        RuntimeError: An error occurred timestamp gt due_timestamp.
    """
    remaining_ms = due_timestamp - timestamp
    if remaining_ms < 0:
        raise RuntimeError("the timestamp more than due_timestamp")
    week_ms = 7 * 24 * 60 * 60 * 1000
    # Classify by how many whole weeks remain until the due date.
    if remaining_ms < week_ms:
        return CONTRACT_TYPE_THIS_WEEK
    if remaining_ms < 2 * week_ms:
        return CONTRACT_TYPE_NEXT_WEEK
    return CONTRACT_TYPE_QUARTER
def is_sequence_of(obj: Any,
                   types: Optional[Union[Type[object],
                                         Tuple[Type[object], ...]]] = None,
                   depth: Optional[int] = None,
                   shape: Optional[Sequence[int]] = None
                   ) -> bool:
    """
    Test if object is a sequence of entirely certain class(es).

    Args:
        obj: The object to test.
        types: Allowed type(s). If omitted, we just test the depth/shape.
        depth: Level of nesting, ie if ``depth=2`` we expect a sequence of
            sequences. Default 1 unless ``shape`` is supplied.
        shape: The shape of the sequence, ie its length in each dimension.
            If ``depth`` is omitted, but ``shape`` included, we set
            ``depth = len(shape)``.

    Returns:
        bool: ``True`` if every item in ``obj`` matches ``types``.
    """
    if not is_sequence(obj):
        return False
    if shape is None or shape == ():
        # No shape constraint: only depth (defaulting to 1) is checked.
        next_shape: Optional[Tuple[int]] = None
        if depth is None:
            depth = 1
    else:
        if depth is None:
            depth = len(shape)
        elif depth != len(shape):
            raise ValueError('inconsistent depth and shape')
        if len(obj) != shape[0]:
            return False
        # The inner levels are checked against the remaining dimensions.
        next_shape = cast(Tuple[int], shape[1:])
    for item in obj:
        if depth > 1:
            # Recurse one level down; types are only enforced at the leaves.
            if not is_sequence_of(item, types, depth=depth - 1,
                                  shape=next_shape):
                return False
        elif types is not None and not isinstance(item, types):
            return False
    return True
def cnf_create(cnf_fs, cids):
    """ create a new cnf filesys using list of cids
    """
    # NOTE(review): each cid is created individually on cnf_fs[-1] —
    # presumably the most recently added filesystem layer; confirm.
    for cid in cids:
        cnf_fs[-1].create([cid])
def anonymize_dicom(dicom_file, patient_name='anonymous',
                    fields_to_anonymize=ANONYMIZATION_FIELDS,
                    fields_to_return=None, path_to_save='.',
                    new_dicom_name='anonymous.dcm'):
    """ Given a dicom file, alter the given fields, anonymizing the
    patient name seperatley. Save a new dicom in the given directory with
    the given name.

    Returns a dict of the requested `fields_to_return` values (captured
    before anonymization) if any were requested; otherwise returns None.
    """
    #having lots of issues with the character encoding
    # changed to python 3, now having more fun
    try:
        #im = dicom.read_file(unicode(dicom_file,'utf-8'))
        im = dicom.read_file(dicom_file)
    except UnicodeDecodeError:
        # NOTE(review): `im` stays unbound on this path, so the code below
        # raises NameError/UnboundLocalError for undecodable files.
        print("utf-8 codec can't decode byte...filename {}".format(dicom_file))
    except dicom.errors.InvalidDicomError:
        #im = dicom.read_file(unicode(dicom_file,'utf-8'),force=True)
        im = dicom.read_file(dicom_file, force=True)
    if fields_to_return:
        # create dictionary to hold returned fields
        returned_fields = {}.fromkeys(fields_to_return)
        # collect fields to retrieve
        for attr in returned_fields:
            try:
                # expect the field not to exist
                returned_fields[attr] = getattr(im, attr)
            except AttributeError:
                continue
    # now replace fields to anonymize with ''
    for attr in fields_to_anonymize:
        if attr == 'PatientsName':
            # Patient name gets the supplied pseudonym instead of blanking.
            set_attr = patient_name
        else:
            set_attr = ''
        try:
            setattr(im, attr, set_attr)
            #print "{} has been set to {}".format(attr, set_attr)
        except AttributeError:
            print("The following attribute not found: {}".format(set_attr))
        except UnboundLocalError:
            print("Can't set attribute: utf-8 codec can't decode byte...filename {}".format(dicom_file))
    # now save the new dicom
    new_name = os.path.join(path_to_save, new_dicom_name)
    im.save_as(new_name)
    if fields_to_return:
        return returned_fields
def _parse_tree_height(sent):
"""
Gets the height of the parse tree for a sentence.
"""
children = list(sent._.children)
if not children:
return 0
else:
return max(_parse_tree_height(child) for child in children) + 1 | 5,330,468 |
def inject_js(in_file, out_file, dataset):
    """parse the html and inject code

    Line-oriented state machine over the report HTML:
    - injects the rating SCRIPT_TEMPLATE before </head>;
    - after each reportlet filename div, injects a per-reportlet RATE widget;
    - after the errors div closes, appends the overall rating section.
    """
    with open(in_file) as rfh:
        content = rfh.read()
    # Subject label is the input filename without extension.
    sub = os.path.splitext(
        os.path.basename(in_file))[0]
    out_lines = []
    infname = False        # currently inside an elem-filename div
    lastfield = None       # reportlet name captured from the filename line
    nextinject = None      # widget lines to append after the current line
    finished = None        # None -> False inside errors div -> True after it
    for line in content.splitlines():
        if line.strip() == "</head>":
            out_lines += SCRIPT_TEMPLATE(
                dataset=dataset,
                subject=sub).splitlines()
        if infname and lastfield is None:
            # Strip the filename prefix/suffix to get the reportlet id.
            # NOTE(review): [10:-4] assumes a fixed wrapper around the name.
            lastfield = os.path.basename(line.strip()[10:-4])
        if infname and lastfield is not None:
            nextinject = RATE_TEMPLATE(
                reportlet=lastfield).splitlines()
            lastfield = None
            infname = None
        if line.strip() == '<div class="elem-filename">':
            infname = True
        if infname and line.strip() == '</div>':
            infname = False
        if line.strip() == '<div id="errors">':
            finished = False
        if finished is False and line.strip() == '</div>':
            finished = True
        out_lines.append(line)
        if nextinject is not None:
            out_lines += nextinject
            nextinject = None
        if finished:
            out_lines += ['<h1>Overall rating and comments</h1>']
            out_lines += RATE_TEMPLATE(
                reportlet="overall").splitlines()
            out_lines += BUTTON_TEMPLATE.splitlines()
            finished = None
    with open(out_file, 'w') as rfh:
        rfh.write('\n'.join(out_lines))
def assert_array_almost_equal(x: bool, y: bool):
    """
    usage.scipy: 4
    """
    # Auto-generated API-usage stub: the body is intentionally empty.
    ...
def pandas_loss_p_g_i_t(c_m, lgd, ead, new):
    """ Distribution of losses at time t.
    long format (N_MC, G, K, T).

    Flattens the 4D loss array from loss_g_i_t into a long-format DataFrame
    indexed by (paths, group_ID, credit_rating_rank, time_steps), with a
    running row id and a constant portfolio_ID column.
    """
    mat_4D = loss_g_i_t(c_m, lgd, ead, new)
    names = ['paths', 'group_ID', 'credit_rating_rank', 'time_steps']
    # One index level per array axis, enumerating 0..dim-1.
    index = pds.MultiIndex.from_product([range(s) for s in mat_4D.shape], names=names)
    df = pds.DataFrame({'loss_p_g_i_t': mat_4D.flatten()}, index=index)['loss_p_g_i_t']
    df = pds.Series.to_frame(df)
    df['loss_p_g_i_t_ID'] = np.arange(len(df))
    df.insert(0, 'portfolio_ID', 'pilot 1 Bank A', allow_duplicates=False)
    return df
def get_file_if_unique(location, ext):
    """Find file if unique for the provided extension.

    Returns the single match of ``ext`` under ``location``; otherwise prints
    a message and exits the process.
    """
    matches = glob(os.path.join(location, ext))
    # Guard clause: anything other than exactly one match is fatal.
    if len(matches) != 1:
        print("Multiple/No " + ext[1:] +
              " files found in the working directory."
              "Specify one please.")
        sys.exit()
    return matches[0]
def _task_cleanup(result, task_id, task_status, obj, **kwargs):
    """
    Cleanup after task is revoked.

    Dispatches on the API view name recorded in the task result and
    invokes the matching *_cb_failed / *_cb_done handler so the affected
    model objects (snapshots, backups, VMs, images, node storage) are
    returned to a consistent state. Views with no explicit branch fall
    through to task_cleanup_signal so other apps can hook in.
    """
    apiview = result['meta']['apiview']
    view = apiview['view']
    if view == 'vm_snapshot':
        from vms.models import Vm, Snapshot
        from api.vm.snapshot.tasks import _vm_snapshot_cb_failed
        # A PUT carrying source_hostname restores a snapshot taken on a
        # different (source) VM; look the snapshot up on that VM.
        if apiview['method'] == 'PUT' and 'source_hostname' in apiview:
            vm = Vm.objects.get(hostname=apiview['source_hostname'])
        else:
            vm = obj
        snap = Snapshot.objects.get(vm=vm, disk_id=Snapshot.get_disk_id(vm, apiview['disk_id']),
                                    name=apiview['snapname'])
        _vm_snapshot_cb_failed(result, task_id, snap, apiview['method'], vm=obj)
    elif view == 'vm_snapshot_list':
        from vms.models import Snapshot
        from api.vm.snapshot.tasks import _vm_snapshot_list_cb_failed
        snaps = Snapshot.objects.filter(vm=obj, disk_id=Snapshot.get_disk_id(obj, apiview['disk_id']),
                                        name__in=apiview['snapnames'])
        _vm_snapshot_list_cb_failed(result, task_id, snaps, apiview['method'])
    elif view == 'vm_backup':
        from vms.models import Backup
        from api.vm.backup.tasks import _vm_backup_cb_failed
        # vm_disk_id is apiview['disk_id'] - 1 — apparently backups store
        # disk ids with a different base than the API; confirm.
        bkp = Backup.objects.get(vm_hostname=apiview['hostname'], vm_disk_id=apiview['disk_id'] - 1,
                                 name=apiview['bkpname'])
        _vm_backup_cb_failed(result, task_id, bkp, apiview['method'], vm=obj)
    elif view == 'vm_backup_list':
        from vms.models import Backup
        from api.vm.backup.tasks import _vm_backup_list_cb_failed
        bkps = Backup.objects.filter(vm_hostname=apiview['hostname'], vm_disk_id=apiview['disk_id'] - 1,
                                     name__in=apiview['bkpnames'])
        _vm_backup_list_cb_failed(result, task_id, bkps, apiview['method'])
    elif view == 'vm_manage':
        # NOTE: POST/DELETE use the *_failed callbacks, but PUT uses
        # _vm_update_cb_done.
        if apiview['method'] == 'POST':
            from api.vm.base.tasks import _vm_create_cb_failed
            result['message'] = ''
            _vm_create_cb_failed(result, task_id, obj)
        elif apiview['method'] == 'DELETE':
            from api.vm.base.tasks import _vm_delete_cb_failed
            _vm_delete_cb_failed(result, task_id, obj)
        elif apiview['method'] == 'PUT':
            from api.vm.base.tasks import _vm_update_cb_done
            _vm_update_cb_done(result, task_id, obj)
    elif view == 'vm_status':
        from api.vm.status.tasks import _vm_status_cb_failed
        if apiview['method'] == 'PUT':
            _vm_status_cb_failed(result, task_id, obj)
    elif view == 'vm_migrate':
        from vms.models import SlaveVm
        from api.vm.migrate.tasks import _vm_migrate_cb_failed
        # The first slave VM is expected to be the migration ghost.
        ghost_vm = SlaveVm.get_by_uuid(obj.slave_vms[0])
        assert ghost_vm.is_used_for_migration()
        _vm_migrate_cb_failed(result, task_id, obj, ghost_vm)
    elif view == 'image_manage' or view == 'image_snapshot':
        # obj = Image
        from vms.models import Snapshot
        from api.image.base.tasks import _image_manage_cb_failed
        method = apiview['method']
        snap_id = obj.src_snap_id
        # Only image creation (POST) from a snapshot has a source
        # snapshot to pass along to the failure callback.
        if method == 'POST' and snap_id:
            snap = Snapshot.objects.get(id=snap_id)
        else:
            snap = None
        _image_manage_cb_failed(result, task_id, obj, method, snap=snap)
    elif view == 'node_image':
        # obj = NodeStorage
        from vms.models import Image
        from api.node.image.tasks import _node_image_cb_failed
        img = Image.objects.get(name=apiview['name'])
        _node_image_cb_failed(result, task_id, obj, img)
    else:
        # Unknown view: let listeners perform their own cleanup.
        task_cleanup_signal.send(sender=view, apiview=apiview, result=result, task_id=task_id, status=task_status,
                                 obj=obj)
async def test_invalid_characters(hass, aioclient_mock):
    """Test that we replace bad characters with placeholders."""
    # Serve a UPnP device description whose serialNumber contains raw
    # 0xFF bytes; the SSDP scanner must decode them leniently instead of
    # failing on the invalid UTF-8.
    aioclient_mock.get(
        "http://1.1.1.1",
        text="""
<root>
  <device>
    <deviceType>ABC</deviceType>
    <serialNumber>\xff\xff\xff\xff</serialNumber>
  </device>
</root>
    """,
    )
    mock_ssdp_response = {
        "st": "mock-st",
        "location": "http://1.1.1.1",
    }
    # Register one integration that matches on the advertised deviceType.
    mock_get_ssdp = {
        "mock-domain": [
            {
                ssdp.ATTR_UPNP_DEVICE_TYPE: "ABC",
            }
        ]
    }
    mock_init = await _async_run_mocked_scan(hass, mock_ssdp_response, mock_get_ssdp)
    # Exactly one discovery flow should be started for the matching domain.
    assert len(mock_init.mock_calls) == 1
    assert mock_init.mock_calls[0][1][0] == "mock-domain"
    assert mock_init.mock_calls[0][2]["context"] == {
        "source": config_entries.SOURCE_SSDP
    }
    # The \xff bytes end up decoded as "ÿ" characters in the flow data.
    assert mock_init.mock_calls[0][2]["data"] == {
        "ssdp_location": "http://1.1.1.1",
        "ssdp_st": "mock-st",
        "deviceType": "ABC",
        "serialNumber": "ÿÿÿÿ",
    }
def pdtb2_make_splits_xval(path, write_path):
    """Make 12 cross-validation splits for PDTB 2.0.

    Sections 00-24 are rotated in steps of two: each fold uses two
    sections for dev, two for test and the remaining 21 for training.
    Only Implicit relations whose level-2 sense is in
    SELECTED_SENSES_PDTB2 are kept. Train rows get one output line per
    usable sense; dev/test rows carry up to two senses side by side.
    Each fold is written to <write_path>/fold_<n>.
    """
    sections = ['00', '01', '02', '03', '04', '05', '06', '07', '08',
                '09', '10', '11', '12', '13', '14', '15', '16', '17',
                '18', '19', '20', '21', '22', '23', '24']
    dev_sections = []
    test_sections = []
    train_sections = []
    # range(0, 25, 2) produces 13 rotated windows; the last one is
    # dropped below via [:-1], yielding the 12 folds.
    for i in range(0, 25, 2):
        dev_sections.append([sections[i], sections[(i+1)%25]])
        test_sections.append([sections[(i+23)%25], sections[(i+24)%25]])
        train_sections.append([sections[(i+j)%25] for j in range(2, 23)])
    means_d = {'train':0, 'dev':0, 'test': 0}
    pdtb_data = list(CorpusReader(path).iter_data())
    for fold_no, (dev, test, train) in enumerate(zip(dev_sections[:-1],
                                                     test_sections[:-1],
                                                     train_sections[:-1])):
        all_splits = dev + test + train
        # Sanity check: the three splits must partition all 25 sections.
        assert len(set(all_splits)) == 25
        split_d = {'train': train, 'dev': dev, 'test': test}
        lines_d = {'train': [], 'dev': [], 'test': []}
        label_d = {}
        for corpus in pdtb_data:
            # NOTE: this inner loop rebinds `sections`, shadowing the
            # outer list of all section names (harmless here, since the
            # outer value is no longer needed).
            for split, sections in split_d.items():
                if corpus.Relation == 'Implicit' and corpus.Section in sections:
                    # Up to four (sense, connective) candidates: two
                    # sense classes for each of the two connectives.
                    sense1 = (corpus.ConnHeadSemClass1, corpus.Conn1)
                    sense2 = (corpus.ConnHeadSemClass2, corpus.Conn1)
                    sense3 = (corpus.Conn2SemClass1, corpus.Conn2)
                    sense4 = (corpus.Conn2SemClass2, corpus.Conn2)
                    # Use list instead of set to preserve order
                    sense_list = [sense1, sense2, sense3, sense4]
                    formatted_sense_list = []
                    # Truncate each sense to its level-2 prefix and keep
                    # it only if selected and not already collected.
                    for sense_full, conn in sense_list:
                        if sense_full is not None:
                            sense = '.'.join(sense_full.split('.')[0:2])
                            if (sense not in [s for s, c, sf in formatted_sense_list] and
                                    sense in SELECTED_SENSES_PDTB2):
                                formatted_sense_list.append((sense, conn, sense_full))
                    # No useable senses
                    if not formatted_sense_list:
                        continue
                    arg1 = corpus.Arg1_RawText
                    arg2 = corpus.Arg2_RawText
                    if split == 'train':
                        # Training: emit one line per usable sense.
                        for sense, conn, sense_full in formatted_sense_list:
                            lines_d[split].append(tab_delimited([split, corpus.Section,
                                                                 corpus.FileNumber,
                                                                 sense, corpus.Relation,
                                                                 arg1, arg2,
                                                                 conn, sense_full]))
                            label_d[sense] = label_d.get(sense, 0) + 1
                    else:
                        # Dev/test: pad to exactly two sense triples so
                        # every line has the same number of columns.
                        if len(formatted_sense_list) == 1:
                            formatted_sense_list.append((None, None, None))
                        # zip pairs up the two (sense, conn, sense_full)
                        # triples column-wise.
                        sense_paired = zip(formatted_sense_list[0], formatted_sense_list[1])
                        senses, conns, senses_full = sense_paired
                        lines_d[split].append(tab_delimited([split, corpus.Section,
                                                             corpus.FileNumber,
                                                             senses[0], senses[1],
                                                             corpus.Relation, arg1, arg2,
                                                             conns[0], senses_full[0],
                                                             conns[1], senses_full[1]]))
                        label_d[senses[0]] = label_d.get(senses[0], 0) + 1
                        if senses[1] is not None:
                            label_d[senses[1]] = label_d.get(senses[1], 0) + 1
                        assert len(formatted_sense_list) <= 2
                        if len(formatted_sense_list) == 2:
                            if formatted_sense_list[0][0] == formatted_sense_list[1][0]:
                                print('redundant!')
        for split, lines in lines_d.items():
            # NOTE(review): the -1 presumably discounts a header line
            # added when writing — confirm against write_to_file.
            means_d[split] += len(lines)-1
        # Write to file
        write_path_fold = os.path.join(write_path,
                                       'fold_{}'.format(fold_no+1))
        write_to_file(lines_d, write_path_fold)
        print('Cross-validation fold {}'.format(fold_no+1))
        print('Label counts: ', label_d)
        total = 0
        for _, count in label_d.items():
            total += count
        print('Total: ', total)
    # Report the mean number of lines per split across the 12 folds.
    for split, total in means_d.items():
        print('Mean {}: {}'.format(split, total/len(dev_sections[:-1])))
def test_dice_type():
    """
    Test that DiceType.to_list() returns a list with one entry per
    DiceType member.
    """
    dice_type = DiceType.to_list()
    # isinstance is the idiomatic type check; `type(x) == list` rejects
    # subclasses and is flagged by linters.
    assert isinstance(dice_type, list)
    assert len(dice_type) == len(DiceType)
def find_available_pacs(pacs, pac_to_unstuck=None, pac_to_super=None, pac_to_normal=None):
    """
    Finds the available pacs that are not assigned.

    Each optional argument is a dict keyed by pac id; any pac from
    ``pacs['mine']`` whose id appears in one of them is filtered out.
    Returns the remaining pac dicts, order preserved.
    """
    available_pacs = pacs['mine']
    # Apply each assignment mapping in turn. `id not in mapping` is
    # equivalent to (and faster than) `id not in mapping.keys()`.
    for assigned in (pac_to_unstuck, pac_to_super, pac_to_normal):
        if assigned is not None:
            available_pacs = [pac for pac in available_pacs if pac['id'] not in assigned]
    return available_pacs
async def login_for_access_token(
    form_data: OAuth2PasswordRequestForm = Depends(),
):
    """
    Log in with OAuth2 username/password credentials.

    Returns a JWT bearer token (with the authority's scope embedded in
    its claims) used for granting access to protected data. Raises
    HTTP 401 when the credentials do not authenticate.
    """
    # authenticate_authority returns both the auth result and the scope
    # to embed in the token.
    is_auth, scope = await authenticate_authority(
        form_data.username, form_data.password
    )
    if not is_auth:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
        )
    access_token_expires = timedelta(
        minutes=security_config.ACCESS_TOKEN_EXPIRE_MINUTES
    )
    # "sub" (subject) carries the username; scopes is a single-element
    # list with the authority's scope.
    access_token = create_access_token(
        data={"sub": form_data.username, "scopes": [scope]},
        expires_time=access_token_expires,
    )
    return {"access_token": access_token, "token_type": "bearer"}
def retag(tag, msg):
    """
    Retag a tag with a new message.

    Force-moves *tag* (re-creating it with message *msg*) and
    force-pushes tags to the remote.
    """
    import shlex
    # shlex.quote stops a msg containing quotes or shell metacharacters
    # from breaking (or injecting into) the shell command line; the
    # original interpolated msg inside double quotes unescaped.
    local('git tag %s %s -f -m %s' % (tag, tag, shlex.quote(msg)))
    local('git push --tags')
def get_plain_expressions(s):
    """Return a list of plain, non-nested shell expressions found in the shell
    string s. These are shell expressions that do not further contain a nested
    expression and can therefore be resolved indenpendently.
    For example::
        >>> get_plain_expressions("${_pyname%${_pyname#?}}")
        ['${_pyname#?}']
    """
    # Thin public wrapper; the actual scanning lives in the private
    # module-level helper.
    return _get_non_nested_expressions(s)
def register_modelzoo(backend):
    """Import and register modelzoo automatically.

    Only the "pytorch" backend is supported; any other backend is a
    no-op. Imports stay function-local so torchvision is only touched
    when actually needed.
    """
    if backend != "pytorch":
        return
    from .torch_vision_model import import_all_torchvision_models
    import logging
    try:
        import_all_torchvision_models()
    except Exception as e:
        # logging.warn is a deprecated alias for logging.warning; use
        # lazy %-style args instead of eager .format().
        logging.warning("Failed to import torchvision models, msg=%s", str(e))
def _do_dspam(ids, result):
    """
    Queue mail files for dspam learning via per-server redis lists.

    :param ids: list of mail IDs to learn from
    :param result: learning result/label; used in the redis list name
        ("dspam_<result>")
    :return:
    """
    # NOTE: Python 2 code — dict.iteritems() and map() used for side
    # effects; porting to Python 3 would require explicit loops.
    keys = {}
    # redis = get_redis_connection()
    # deal_with_ids groups the ids; each group maps to a mail model.
    for k, v in deal_with_ids(ids).iteritems():
        mail_model = get_mail_model(k)
        servers = mail_model.objects.filter(id__in=v).values_list('server_id', flat=True).distinct()
        # Collect the on-disk mail filenames per server id.
        for s in servers:
            mails = mail_model.objects.filter(id__in=v, server_id=s)
            keys.setdefault(s, []).extend(map(lambda mail: mail.get_mail_filename(), mails))
            # keys.extend(map(lambda mail: mail.get_mail_filename(), mails))
    for k, v in keys.iteritems():
        # Fall back to the default redis connection if the per-server
        # connection is unavailable.
        try:
            redis = get_redis_connection2(k)
        except:
            redis = get_redis_connection2('default')
        # Push each unique filename onto the dspam_<result> list.
        map(lambda key: redis.lpush('dspam_{}'.format(result), key), set(v))
    def test_runner(self, namespace, record_size, array_size, thread_per_size=4):
        """Perform simultaneous writes of varying record size to a container.

        For every record size, spawns thread_per_size writer/reader pairs
        for both single-value and array-value types; all writers run to
        completion before any reader starts.

        Args:
            self (Test): avocado test object
            namespace (str): location from which to read the pool parameters
            record_size (list): list of different record sizes to be written
            array_size (int): size of array value to be written
            thread_per_size (int, optional): threads per rec size. Defaults to 4.
        """
        # create a new pool
        self.pool.append(self.get_pool(namespace=namespace, connect=False))
        # display available space before write
        self.pool[-1].display_pool_daos_space("before writes")
        self.pool[-1].connect()
        # create a new container
        self.container.append(self.get_container(self.pool[-1]))
        self.container[-1].open()
        # initialize dicts to hold threads
        jobs = {"write": [], "read": []}
        # create read/write threads.
        for rec in record_size:
            for _ in range(thread_per_size):
                # create threads using single value type
                jobs["write"].append(
                    threading.Thread(
                        target=container_write, args=(self.container[-1], rec)))
                jobs["read"].append(
                    threading.Thread(
                        target=container_read, args=(self.container[-1], None)))
                # create threads using array value type
                jobs["write"].append(
                    threading.Thread(
                        target=container_write,
                        args=(self.container[-1], rec, array_size)))
                jobs["read"].append(
                    threading.Thread(
                        target=container_read,
                        args=(self.container[-1], array_size)))
        # start all the write threads
        for job in jobs["write"]:
            job.start()
        # wait for all write threads to finish
        for job in jobs["write"]:
            job.join()
        # start read threads
        for job in jobs["read"]:
            job.start()
        # wait for all read threads to complete
        for job in jobs["read"]:
            job.join()
        # display free space after reads and writes
        self.pool[-1].display_pool_daos_space("after writes and reads")
        # container and pool destroy handled by cleanup
def create_indices(dims):
    """Create lists of indices"""
    # One 1-based index range per dimension size.
    index_ranges = []
    for dim in dims:
        index_ranges.append(range(1, dim + 1))
    return index_ranges
def observed_property(property_name, default, cast=None):
    """Build an observed property backed by a hidden attribute.

    Default must be immutable. The value is stored in "_<property_name>"
    only when it differs from *default*; reading falls back to *default*
    when the hidden attribute is absent. Setter/deleter are wrapped with
    the module's `observed` decorator.

    :param cast: callable applied to assigned values; None means
        "cast to type(default)", False means "no cast at all".
    """
    hidden_property_name = "_" + property_name
    # Bug fix: the `cast is False` check used to be nested inside the
    # `cast is None` branch and was therefore unreachable — passing
    # cast=False fell through to type(default) instead of the identity.
    if cast is None:
        cast = type(default)
    elif cast is False:
        cast = lambda x: x
    def getter(self):
        # Fall back to the immutable default when never set.
        try:
            return getattr(self, hidden_property_name)
        except AttributeError:
            return default
    def deleter(self):
        try:
            delattr(self, hidden_property_name)
        except AttributeError:
            pass
    def setter(self, value):
        value = cast(value)
        # Setting the default clears the hidden attribute so getter
        # falls back; any other value is stored.
        if value == default:
            try:
                delattr(self, hidden_property_name)
            except AttributeError:
                pass
        else:
            setattr(self, hidden_property_name, value)
    return property(getter, observed(setter), observed(deleter))
def query_helper(
    source: S3Ref, query: str, dest: S3Ref = None, transform: Callable = None
) -> StringIO:
    """
    query_helper runs the given s3_select query on the given object.
    - The results are saved in a in memory file (StringIO) and returned.
    - If dest is specified, the file is copied to the provided S3Ref
    - If transform callable is specified, tranform is called first with the
      temp file before uploading to the destination s3.

    NOTE(review): relies on a module-level boto3 `s3` client — confirm it
    is configured before calling. Input is read as JSON lines and output
    is serialized as JSON.
    """
    event_stream = s3.select_object_content(
        Bucket=source.bucket,
        Key=source.key,
        ExpressionType="SQL",
        Expression=query,
        InputSerialization={"JSON": {"Type": "LINES"}},
        OutputSerialization={"JSON": {}},
    )
    # Iterate over events in the event stream as they come
    output = StringIO()
    for s3_select_event in event_stream["Payload"]:
        if "Records" in s3_select_event:
            data = s3_select_event["Records"]["Payload"]
            output.write(data.decode("utf-8"))
    if transform:
        # transform may replace the buffer entirely; rewind first so it
        # can read from the start.
        output.seek(0)
        output = transform(output)
    if dest is not None:
        upload(output, dest)
    # Rewind so callers can read the result from the beginning.
    output.seek(0)
    return output
def indicator_selector(row, indicator, begin, end):
    """Return Tons of biomass loss.

    ``row`` is a list of dicts with ``indicator_id``, ``year`` and
    ``value`` keys. Indicator 4 is a scalar taken from index 2; every
    other indicator yields a {str(year): value} dict restricted to
    the inclusive [begin, end] year range.
    """
    if indicator == 4:
        return row[2]['value']
    # Hoist the string-to-int conversions out of the loop and iterate
    # the entries directly instead of indexing by position.
    begin_year, end_year = int(begin), int(end)
    dasy = {}
    for entry in row:
        if entry['indicator_id'] == indicator and begin_year <= entry['year'] <= end_year:
            dasy[str(entry['year'])] = entry['value']
    return dasy
def xy_from_range_bearing(range: float, bearing: float) -> map_funcs.Point:
    """Given a range in metres and a bearing from the camera this returns the x, y position in metres relative to the runway
    start."""
    # Convert the bearing into an angle relative to the runway heading,
    # then project the polar coordinates onto the runway frame.
    angle_rad = math.radians(bearing - google_earth.RUNWAY_HEADING_DEG)
    return map_funcs.Point(
        CAMERA_POSITION_XY.x + range * math.cos(angle_rad),
        CAMERA_POSITION_XY.y + range * math.sin(angle_rad),
    )
def flask_get_modules():
    """Return the list of all modules
    ---
    tags:
      - Modules
    responses:
      200:
        description: A list of modules
    """
    # Fetch every Module row and serialize the list as JSON.
    modules = db.session.query(Module).all()
    return jsonify(modules)
def p_rbrace(p):
    """rbrace : RIGHT_CURLY_BRACKET"""
    # PLY grammar action: a closing "}" ends the current lexical scope,
    # so pop its symbol table. (The docstring above IS the grammar rule
    # and must not be edited.)
    global LAST_POPPED_TABLE
    # p[0] = ("rbrace",) + tuple(p[-len(p) + 1 :])
    s = pop_scope()
    p[0] = {"popped_table": s}
    # Remember the most recently popped table so later productions can
    # attach it to their AST nodes.
    LAST_POPPED_TABLE = s
def create_user(username, password):
    """Register a new user unless the username is already taken.

    Raises RuntimeError when the username already exists; otherwise
    stores the user with a hashed password and returns it.
    """
    existing = User.query.filter_by(username=username).first()
    if existing:
        raise RuntimeError(f'{username} ja esta cadastrado')
    new_user = User(username=username, password=generate_password_hash(password))
    db.session.add(new_user)
    db.session.commit()
    return new_user
def extract_pubmed_data(log: WarningLog, pubmed_data_node: etree.Element, article: Article):
    """ Extracts information from a <PubmedData> node to add into the given article. """
    pmids: list[int] = []
    ref_list = extract_single_node_by_tag(pubmed_data_node, "ReferenceList")
    if ref_list is not None:
        # Collect the PMID of every reference that carries an id list.
        for reference in ref_list:
            id_list = extract_single_node_by_tag(reference, "ArticleIdList")
            if id_list is None:
                continue
            pmid = extract_article_pmid_from_list(log, id_list)
            if pmid is not None:
                pmids.append(pmid)
    # Always assign, even when no ReferenceList exists (empty list).
    article.reference_pmids = pmids
def _get_mesh_colour_scheme():
    """Returns colour scheme for MESH (maximum estimated size of hail).
    :return: colour_map_object: Instance of `matplotlib.colors.ListedColormap`.
    :return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`.
    """
    rgb_255 = [
        [152, 152, 152], [152, 203, 254], [0, 152, 254], [0, 45, 254],
        [0, 101, 0], [0, 152, 0], [0, 203, 0], [254, 254, 50],
        [254, 203, 0], [254, 152, 0], [254, 0, 0], [254, 0, 152],
        [152, 50, 203]
    ]
    # Normalise the 0-255 RGB triples to the 0-1 range matplotlib expects.
    colour_list = [numpy.array(rgb, dtype=float) / 255 for rgb in rgb_255]
    colour_map_object = matplotlib.colors.ListedColormap(colour_list)
    # Values below the first boundary render as white.
    colour_map_object.set_under(numpy.full(3, 1))
    colour_bounds_mm = numpy.array([
        0.1, 15.9, 22.2, 28.6, 34.9, 41.3, 47.6, 54, 60.3, 65, 70, 75, 80, 85
    ])
    colour_norm_object = matplotlib.colors.BoundaryNorm(
        colour_bounds_mm, colour_map_object.N)
    return colour_map_object, colour_norm_object
def index():
    """
    Serve index page.

    Fetches the latest COVID-19 stats and renders them; on API failure
    the error is logged and the user is redirected to the error page
    with a flash message.
    """
    try:
        data = get_latest_covid_stats()
    except FailedRequestError as err:
        # Log error response to logger
        logger.debug(
            f"Request to Public Health England COVID-19 API failed: {err}.")
        flash("An error occurred obtaining latest COVID-19 stats from the Public Health England API.")
        return redirect(url_for("error"))
    return render_template("index.html", data=data)
def parse_setting_file(filename):
    """parse the setting file. Refer to the leading comment for more details

    Generator: yields one parsed settings dict per valid line of the
    file. Lines missing a required tag (ATOMS, MAX_INDEX) or failing
    DOS-filename generation are reported and skipped.

    NOTE(review): the file handle is never closed explicitly — relies on
    interpreter cleanup; consider a with-statement.
    """
    setting = [ ]
    f = open(filename)
    for ind, line in enumerate(f):
        # Report 1-based line numbers in messages.
        ind += 1
        # The spec dict is rebuilt for every line because "value"
        # entries are inserted into it while parsing below.
        # NOTE(review): regexes are non-raw strings ("\s", "\d"); they
        # work but emit invalid-escape warnings on newer Pythons.
        params = {
            "ispin": {
                "reg_exp":re.compile("ispin\s*=\s*([1-2])"),
                "necessity": False,
                "default_value": "1",
                "valid_values": ["1", "2"]
            },
            "atoms": {
                "reg_exp": re.compile(r"atoms\s*=\s*([0-9\,]+)"),
                "necessity": True,
                "valid_values": [ ]
            },
            "max_index": {
                "reg_exp": re.compile("max_index\s*=\s*(\d+)"),
                "necessity": True,
                "valid_values": [ ]
            },
            "file_name": {
                "reg_exp": re.compile("filename\s*=\s*([a-zA-Z0-9\_]+)"),
                "necessity": False,
                "default_value": "output_dos",
                "valid_values": [ ]
            },
            "operation": {
                "reg_exp": re.compile("operation\s*=\s*([a-z]+)"),
                "necessity": False,
                "default_value": "COM",
                "valid_values": ["c", "s", "com", "sub"]
            },
            "sum_dos": {
                "reg_exp": re.compile("sum_dos\s*=\s*([a-z]+)"),
                "necessity": False,
                "default_value": "false",
                "valid_values": ["t", "f", "true", "false"]
            },
            "fermi_energy": {
                "reg_exp": re.compile("fermi_energy\s*=\s*([0-9e\.\+-]+)"),
                "necessity": False,
                "default_value": "0",
                "valid_values": [ ]
            }
        }
        # Skip blank lines.
        if not line.strip():
            continue
        # Tags are matched case-insensitively by lowercasing the line
        # (note: this also lowercases the tag values).
        line = line.lower()
        correct_set = True
        for key, value in params.items():
            m = value["reg_exp"].search(line)
            if m:
                m = m.group(1)
                # Validate the captured value against the allowed set;
                # fall back to the default on rejection.
                m = check_param_validation(m, params[key]["valid_values"])
                if m:
                    params[key]["value"] = m
                else:
                    params[key]["value"] = value["default_value"]
            elif value["necessity"] == False:
                params[key]["value"] = value["default_value"]
            else:
                # A required tag is missing: abandon this line.
                correct_set = False
                print("\n[Error]: <line %d> tag %s must be set! Skip the job\n\tline %d: %r" % (ind, key.upper(), ind, line))
                break
        if not correct_set:
            continue
        # Derive the per-atom DOS filenames; skip the line on failure.
        m = generate_dos_filename([ind, line], params["atoms"]["value"], params["max_index"]["value"])
        if not m:
            continue
        else:
            params["dos_filenames"] = {"value": m}
        # Flatten the spec dict to a simple {tag: value} mapping.
        setting = dict([(key, params[key]["value"]) for key in params])
        print("\n[INFO]:<line %d> has been parsed successfully\nline %d: %r" % (ind, ind, line))
        print("Parsed tags:")
        pprint.pprint(setting)
        yield setting
        #return setting
def get_files(directory, include_hidden, include_empty):
    """Returns all FILES in the directory which apply to the filter rules.

    Walks *directory* recursively and yields file paths. Symlinks are
    always skipped. When *include_hidden* is false, a file is skipped if
    any component of its absolute path starts with ".". When
    *include_empty* is false, zero-byte files are skipped.
    """
    def _is_hidden(path):
        # Any dot-prefixed component of the absolute path hides the file
        # (replaces the original opaque functools.reduce one-liner).
        return any(part.startswith(".")
                   for part in os.path.abspath(path).split(os.sep))

    for dir_path, _, file_names in os.walk(directory):
        for filename in file_names:
            # Join once per file instead of once per filter clause.
            path = os.path.join(dir_path, filename)
            if os.path.islink(path):
                continue
            if not include_hidden and _is_hidden(path):
                continue
            if not include_empty and os.path.getsize(path) == 0:
                continue
            yield path
def cp_chmod(src, dst, mode):
    """Copy *src* to *dst*, then set *dst*'s permission bits to *mode*."""
    # copyfile copies contents only; apply the requested mode explicitly.
    destination = dst
    shutil.copyfile(src, destination)
    os.chmod(destination, mode)
def write_webhook(unique_id, webhook):
    """
    writes webhook string (url) to corresponding file.
    :param unique_id: unique_id
    :param webhook: webhook string
    """
    if webhook and isinstance(webhook, str):
        # Use a context manager so the file handle is closed promptly
        # (the original leaked the handle returned by open()).
        with open(os.path.join(RAM_DIR, unique_id + ".webhook"), "w") as fh:
            fh.write(webhook)
    elif webhook is not None:
        # Non-empty/non-str (or empty-str) truthy-invalid values are
        # logged; None is silently ignored. logger.warn is a deprecated
        # alias for logger.warning.
        logger.warning(f"id: {unique_id}, webhook: {webhook} is not valid.")
async def test_update_system_data_v2(
        event_loop, v2_server, v2_settings_json, v2_subscriptions_json):
    """Test getting updated data for a v2 system."""
    async with v2_server:
        # Mock the subscriptions and settings endpoints that update()
        # re-fetches.
        v2_server.add(
            'api.simplisafe.com',
            '/v1/users/{0}/subscriptions'.format(TEST_USER_ID), 'get',
            aresponses.Response(
                text=json.dumps(v2_subscriptions_json), status=200))
        v2_server.add(
            'api.simplisafe.com',
            '/v1/subscriptions/{0}/settings'.format(TEST_SUBSCRIPTION_ID),
            'get',
            aresponses.Response(text=json.dumps(v2_settings_json), status=200))
        async with aiohttp.ClientSession(loop=event_loop) as websession:
            # Exactly one system is expected for the test account.
            [system] = await get_systems(TEST_EMAIL, TEST_PASSWORD, websession)
            await system.update()
            # Verify identity, credentials and sensor inventory survived
            # the refresh.
            assert system.serial == TEST_SYSTEM_SERIAL_NO
            assert system.system_id == TEST_SYSTEM_ID
            assert system.account.access_token == TEST_ACCESS_TOKEN
            assert len(system.sensors) == 34
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.