| content (string, lengths 22 – 815k) | id (int64, 0 – 4.91M) |
|---|---|
def event_message(iden, event):
"""Return an event message."""
return {"id": iden, "type": "event", "event": event}
| 14,300
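A minimal usage sketch (the id and event payload below are hypothetical):

msg = event_message(5, {"event_type": "state_changed"})
# msg == {"id": 5, "type": "event", "event": {"event_type": "state_changed"}}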
|
def match(command):
"""Match function copied from cd_mkdir.py"""
return (
command.script.startswith('cd ') and any((
'no such file or directory' in command.output.lower(),
'cd: can\'t cd to' in command.output.lower(),
'does not exist' in command.output.lower()
)))
| 14,301
|
def db_remove_game(game: str, channel: str) -> bool:
"""Removes a game from the database, for a specific channel
"""
if db_check_game_exists(game, channel):
cursor.execute(
"DELETE FROM deathcount "
"WHERE channel=(?) AND game=(?)",
(channel.lower(), game.lower())
)
connection.commit()
return True
else:
return False
| 14,302
|
def get_unique_id():
"""Return an ID that will be unique over the current segmentation
:return: unique_id
:rtype: int
"""
global UNIQUE_ID
UNIQUE_ID = UNIQUE_ID + 1
return UNIQUE_ID
| 14,303
|
def human_study_csv_entity(datadir, src, tgt, nsample):
"""process model output for the entity control experiment, and
generate csv files for human study of entity control.
This function will yield a `human_study_entity.csv` file in the
`datadir` directory
Args:
datadir: the dataset directory
src: the input src file
tgt: the model generation file
nsample: only use first `nsample` examples
"""
data = []
data_tmp = {}
with open(src) as fsrc, \
open(tgt) as ftgt:
for i, (lsrc, ltgt) in enumerate(zip(fsrc, ftgt)):
if i % 2 == 0:
cur = 'important'
data_tmp = {}
else:
cur = 'unimportant'
lsrc = lsrc.strip()
ltgt = ltgt.strip()
entity = lsrc.split(' => ')[0]
source = ' => '.join(lsrc.split(' => ')[1:])
if entity in ltgt and entity.strip() != ltgt.strip():
data_tmp[cur] = {'id': i, 'entity': entity,
'article': source, 'summary': ltgt}
if i % 2 != 0 and len(data_tmp) == 2:
assert data_tmp['important']['article'] == data_tmp['unimportant']['article']
data.append({'id': data_tmp['important']['id'],
'article': data_tmp['important']['article'],
'important_keywords': data_tmp['important']['entity'],
'important_summary': data_tmp['important']['summary'],
'unimportant_keywords': data_tmp['unimportant']['entity'],
'unimportant_summary': data_tmp['unimportant']['summary']})
if len(data) < nsample:
raise ValueError('valid examples are not enough')
data = data[:nsample]
data_frame = {key: [] for key in data[0].keys()}
for d in data:
for k, v in d.items():
data_frame[k].append(v)
order = ['id', 'article', 'important_keywords', 'important_summary',
'unimportant_keywords', 'unimportant_summary']
data_frame = OrderedDict(sorted(data_frame.items(), key=lambda t: order.index(t[0])))
df = pd.DataFrame(data=data_frame)
df.to_csv(os.path.join(datadir, 'human_study_entity.csv'))  # write the csv into datadir, as documented
| 14,304
|
def logout_route():
"""logout route"""
logout_user()
return redirect(url_for('app.index_route'))
| 14,305
|
def setup_command_line_parser():
"""
Sets up command line argument parser. Additional arguments could be added
easily. For example if the version needed to be passed in with -v you
could add it as a positional argument like so:
parser.add_argument("-v", "--version", help="Current version of application"
"""
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", action='store_true', help="Run script in debug mode")
return parser
| 14,306
|
def test_fenced_code_blocks_extra_03x():
"""
Test case extra 03: variation of 1 where list already opened but no new list item
NOTE: Small change to output to remove newline at pre/code at end.
"""
# Arrange
source_markdown = """- ```
some text
some other text
```
"""
expected_tokens = [
"[ulist(1,1):-::2:: ]",
"[fcode-block(1,3):`:3::::::]",
"[text(2,3):some text:]",
"[end-fcode-block:::True]",
"[end-ulist:::True]",
"[para(3,1):]",
"[text(3,1):some other text:]",
"[end-para:::False]",
"[fcode-block(4,1):`:3::::::]",
"[text(5,1)::]",
"[end-fcode-block:::True]",
]
expected_gfm = """<ul>
<li>
<pre><code>some text
</code></pre>
</li>
</ul>
<p>some other text</p>
<pre><code></code></pre>"""
# Act & Assert
act_and_assert(source_markdown, expected_gfm, expected_tokens, show_debug=False)
| 14,307
|
def get_repo_name(
name: str, in_mode: str, include_host_name: bool = False
) -> str:
"""
Return the full/short name of a Git repo based on the other name.
:param in_mode: the values `full_name` or `short_name` determine how to interpret
`name`
"""
repo_map = get_complete_repo_map(in_mode, include_host_name)
dbg.dassert_in(
name, repo_map, "Invalid name='%s' for in_mode='%s'", name, in_mode
)
ret = repo_map[name]
return ret
| 14,308
|
def uvSnapshot(aa=1,b="int",euv=1,ff="string",g="int",n="string",o=1,r="int",umx="float",umn="float",uvs="string",vmx="float",vmn="float",xr="int",yr="int"):
"""
http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/uvSnapshot.html
-----------------------------------------
uvSnapshot is NOT undoable, NOT queryable, and NOT editable.
Builds an image containing the UVs of the selected objects.
-----------------------------------------
Return Value:
None
-----------------------------------------
Flags:
-----------------------------------------
aa : antiAliased [boolean] []
When this flag is set, lines are antialiased.
-----------------------------------------
b : blueColor [int] []
Blue component of line drawing. Default is 255.
-----------------------------------------
euv : entireUVRange [boolean] []
When this flag is set, the generated image will contain the entire uv range. Default is UV in 0-1 range.
-----------------------------------------
ff : fileFormat [string] []
Output file format. Any of those keyword: "iff", "sgi", "pic", "tif", "als", "gif", "rla", "jpg" Default is iff.
-----------------------------------------
g : greenColor [int] []
Green component of line drawing. Default is 255.
-----------------------------------------
n : name [string] []
Name of the file to be created.
-----------------------------------------
o : overwrite [boolean] []
When this flag is set, existing file can be overwritten.
-----------------------------------------
r : redColor [int] []
Red component of line drawing. Default is 255.
-----------------------------------------
umx : uMax [float] []
Optional User Specified Max value for U. Default value is 1. This will take precedence over the "entire range" -euv flag.
-----------------------------------------
umn : uMin [float] []
Optional User Specified Min value for U. Default value is 0. This will take precedence over the "entire range" -euv flag.
-----------------------------------------
uvs : uvSetName [string] []
Name of the uv set to use. Default is the current one.
-----------------------------------------
vmx : vMax [float] []
Optional User Specified Max value for V. Default value is 1. This will take precedence over the "entire range" -euv flag.
-----------------------------------------
vmn : vMin [float] []
Optional User Specified Min value for V. Default value is 0. This will take precedence over the "entire range" -euv flag.
-----------------------------------------
xr : xResolution [int] []
Horizontal size of the image. Default is 512.
-----------------------------------------
yr : yResolution [int] []
Vertical size of the image. Default is 512.
"""
| 14,309
|
def binary_search_hi(a,d,lo,hi):
"""
Created for leetcode prob 34
"""
if d!=a[lo]:
raise Exception("d should be a[lo]")
while hi>lo:
mid=(lo+hi)//2+1
if a[mid]==d:
lo=mid
else:
hi=mid-1
if a[hi]==d:
return hi
else:
return lo
| 14,310
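A small usage sketch: with a sorted list and `lo` pointing at the first occurrence of `d`, the function returns the index of the last occurrence.

a = [5, 7, 7, 8, 8, 10]
# lo=3 is the first index holding 8; the rightmost 8 sits at index 4.
assert binary_search_hi(a, 8, 3, len(a) - 1) == 4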
|
def log_to_file(csifile='csifile.dat'):
"""Implement log_to_file in Python"""
f = open(csifile, 'wb')
# show frequency
count = 0
SLOW_MSG_CNT = 1
# /usr/include/linux/connector.h
# #define CN_NETLINK_USERS 10 /* Highest index + 1 */
CN_NETLINK_USERS = 10
CN_IDX_IWLAGN = CN_NETLINK_USERS + 0xf
# CN_VAL_IWLAGN = 0x1 # useless
# /usr/include/linux/netlink.h
# #define NETLINK_CONNECTOR 11
# #define NETLINK_ADD_MEMBERSHIP 1
socket.NETLINK_CONNECTOR = 11
NETLINK_ADD_MEMBERSHIP = 1
with socket.socket(socket.AF_NETLINK, socket.SOCK_DGRAM, socket.NETLINK_CONNECTOR) as s:
# proc_addr
s.bind((os.getpid(), CN_IDX_IWLAGN))
# kern_addr, useless, pass
# And subscribe to netlink group
s.setsockopt(270, NETLINK_ADD_MEMBERSHIP, CN_IDX_IWLAGN)
while True:
ret = s.recv(4096)
# /usr/include/linux/netlink.h
# struct nlmsghdr {
# __u32 nlmsg_len; /* Length of message including header */
# __u16 nlmsg_type; /* Message content */
# __u16 nlmsg_flags; /* Additional flags */
# __u32 nlmsg_seq; /* Sequence number */
# __u32 nlmsg_pid; /* Sending process port ID */
# };
nlmsg_len, nlmsg_type, nlmsg_flags, nlmsg_seq, nlmsg_pid = struct.unpack("=LHHLL", ret[:16])
# /usr/include/linux/connector.h
# struct cb_id {
# __u32 idx;
# __u32 val;
# };
# struct cn_msg {
# struct cb_id id;
# __u32 seq;
# __u32 ack;
# __u16 len; /* Length of the following data */
# __u16 flags;
# __u8 data[0];
# };
cnmsg_idx, cnmsg_val, cnmsg_seq, cnmsg_ack, cnmsg_len, cnmsg_flag = struct.unpack("=LLLLHH", ret[16:36])
# linux-80211n-csitool: /drivers/net/wireless/iwlwifi/iwl-connector.c
# /**
# * Sends the message over the kernel connector to a userspace program.
# */
# void connector_send_msg(const u8 *data, const u32 size, const u8 code)
# {
# struct cn_msg *m;
# u8 *buf;
# u32 payload_size;
# /* Payload + 1-byte "code" */
# payload_size = size + 1 + sizeof(struct cn_msg);
# m = kmalloc(payload_size, GFP_ATOMIC);
# if (m == NULL) {
# printk(KERN_ERR "%s: malloc failed\n", __func__);
# return;
# }
# buf = ((u8 *) m) + sizeof(struct cn_msg);
# /* Set up message */
# memcpy(&m->id, &connector_id, sizeof(struct cb_id));
# m->seq = 0;
# m->len = size + 1;
# buf[0] = code;
# memcpy(&buf[1], data, size);
# /* Enqueue message -- may free on failure */
# connector_enqueue_msg(m);
# return;
# }
cnmsg_data = ret[36:]
if count % SLOW_MSG_CNT == 0:
print("received %d bytes: id: %d val: %d seq: %d clen: %d" % (cnmsg_len, cnmsg_idx, cnmsg_val, cnmsg_seq, cnmsg_len))
# print("data: %s", bytes.decode(cnmsg_data))
l2 = struct.pack('!H', cnmsg_len)
f.write(l2)
ret = f.write(cnmsg_data)
if count % 100 == 0:
print('wrote %d bytes [msgcnt=%u]' % (ret, count))
count += 1
f.close()
| 14,311
|
def delete(task_id):
"""Remove task in db with given id."""
with _tasks_db():
tasks.delete(task_id)
| 14,312
|
def check_subscription(func):
"""Checks if the user signed up for a paid subscription """
@wraps(func)
def wrapper(*args, **kwargs):
if current_user.is_authenticated():
subscription = current_user.subscription
if not subscription.active and subscription.plan.name != 'Free':
return redirect(url_for('account.subscribe', plan_id=subscription.plan_id))
return func(*args, **kwargs)
return wrapper
| 14,313
|
def spikesbetter(P):
"""
same as the custom cython function _dice6, a python implementation for easy use on other computers
does spin selection procedure based on given array of probabilities
--------------------------------------------------------------------
Inputs:
P: probability of silence array. shape (loop, xmax, N)
-------------------------------------------------------------------
Output:
array of spin values in {0,1} with shape (loop, xmax, N)
"""
spikes=np.zeros(P.shape)
for i in range(P.shape[0]):
for j in range(P.shape[1]):
for k in range(P.shape[2]):
if np.random.rand() > P[i,j,k]:
spikes[i,j,k] += 1
return spikes
| 14,314
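For reference, a vectorized sketch of the same selection (assuming numpy is available as `np`, as the function above already requires); it is statistically equivalent, not bit-for-bit identical, since the random draws differ.

def spikesbetter_vectorized(P):
    # One uniform draw per entry; the spin is 1 wherever the draw exceeds the silence probability.
    return (np.random.rand(*P.shape) > P).astype(float)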
|
def generate_edits(diff):
"""Generate edit commands from an RCS diff block.
DIFF is a string holding an entire RCS file delta. Generate a tuple
(COMMAND, INPUT_POS, ARG) for each block implied by DIFF. Tuples
describe the ed commands:
('a', INPUT_POS, LINES) : add LINES at INPUT_POS. LINES is a
list of strings.
('d', INPUT_POS, COUNT) : delete COUNT input lines starting at
line INPUT_POS.
In all cases, INPUT_POS is expressed as a zero-offset line number
within the input revision."""
diff = msplit(diff)
i = 0
while i < len(diff):
m = ed_command_re.match(diff[i])
if not m:
raise MalformedDeltaException('Bad ed command')
i += 1
command = m.group(1)
start = int(m.group(2))
count = int(m.group(3))
if command == 'd':
# "d" - Delete command
yield ('d', start - 1, count)
else:
# "a" - Add command
if i + count > len(diff):
raise MalformedDeltaException('Add block truncated')
yield ('a', start, diff[i:i + count])
i += count
| 14,315
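A hedged usage sketch, assuming the module-level `msplit` splits the delta into lines and `ed_command_re` recognizes commands of the form `d<start> <count>` / `a<start> <count>`:

delta = "d1 1\na2 2\nfirst added line\nsecond added line\n"
edits = list(generate_edits(delta))
# Under those assumptions this yields something like:
#   ('d', 0, 1)  -- delete 1 line starting at input line 0
#   ('a', 2, ['first added line', 'second added line'])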
|
def questionnaire():
"""
Questions to ask if no arguments given.
"""
mcc = questionnaire_3digit("MCC")
mnc = questionnaire_3digit("MNC")
device = scriptutils.questionnaire_device()
bundles = utilities.i2b("CHECK BUNDLES?: ")
if bundles:
download = False
upgrade = False
export = False
blitz = False
else:
export = utilities.i2b("EXPORT TO FILE?: ")
download = utilities.i2b("DOWNLOAD?: ")
upgrade = False if not download else utilities.i2b("Y=UPGRADE BARS, N=DEBRICK BARS?: ")
blitz = False if not download else (utilities.i2b("CREATE BLITZ?: ") if upgrade else False)
directory = os.getcwd()
print(" ")
carrierchecker_main(mcc, mnc, device, download, upgrade, directory, export, blitz, bundles, None, False)
| 14,316
|
def main():
"""Entry point for gameoflife."""
# Command line argument parser
parser = ArgumentParser(prog='gameoflife',
description="Conway's Game of Life",
epilog='Suggestions and bug reports are greatly '
'appreciated: '
'https://github.com/wlof/gameoflife/issues',
add_help=False)
parser.add_argument('--impl', '-i', type=str, default='normal',
choices=['normal', 'light',
'numpy', 'numpy-light'],
help='game implementation')
parser.add_argument('--width', '-w', type=int, default=100,
help='grid width')
parser.add_argument('--height', '-h', type=int, default=100,
help='grid height')
parser.add_argument('--prob', '-p', type=float, default=0.5,
help='initial population probability')
parser.add_argument('--color', '-c', type=str, default='auto',
choices=['auto', 'yes', 'no'],
help='use colors')
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('--help', action='help',
help='show this help message and exit')
# Parse args
args = parser.parse_args()
# Parse numpy flag
if args.impl in ('numpy', 'numpy-light'):
try:
imp.find_module('numpy')
except ImportError:
parser.error("can't find numpy module. "
"Check if NumPy is installed correctly.")
try:
imp.find_module('scipy')
except ImportError:
parser.error("can't find scipy module. "
"Check if SciPy is installed correctly.")
# Parse dimensions
if args.width <= 0:
parser.error('width needs to be a positive integer')
if args.height <= 0:
parser.error('height needs to be a positive integer')
# Parse probability
if not 0.0 <= args.prob <= 1.0:
parser.error('probability needs to be between 0.0 and 1.0')
curses.wrapper(curses_wrapped_main, args)
| 14,317
|
def onUrlFound(__url):
"""
Called by onPacketFound, if the packet contains a url.
"""
resolved_url = resolveUrl(__url)
logger.info(resolved_url)
| 14,318
|
def t3x1_y(y):
"""
Translation in y.
"""
return t3x1(0.0, y, 0.0)
| 14,319
|
def write_primer_info(primers, prefix):
"""
Write tsv of primer metadata
"""
if primers is None:
primers = {}
outfile = prefix + '.tsv'
header = ['primer_set', 'amplicon_size_consensus',
'amplicon_size_avg', 'amplicon_size_sd',
'primer_id', 'primer_type', 'sequence',
'length', 'degeneracy', 'degeneracy_3prime',
'position_start', 'position_end',
'Tm_avg', 'Tm_sd', 'GC_avg', 'GC_sd',
'hairpin_avg', 'hairpin_sd',
'homodimer_avg', 'homodimer_sd']
with open(outfile, 'w') as outF:
outF.write('\t'.join(header) + '\n')
for num in sorted(primers.keys(), key=lambda x: int(x)):
for cat in primers[num].keys():
fwi = CAT_IDX[cat]
for degen_seq in primers[num][cat].keys():
x = [
str(num),
str(primers[num][cat][degen_seq]['amplicon_size_consensus']),
str(primers[num][cat][degen_seq]['amplicon_size_avg']),
str(primers[num][cat][degen_seq]['amplicon_size_sd']),
'{}{}'.format(num, fwi),
cat,
degen_seq,
str(primers[num][cat][degen_seq]['length']),
str(primers[num][cat][degen_seq]['degeneracy']),
str(primers[num][cat][degen_seq]['degeneracy_3p']),
str(primers[num][cat][degen_seq]['start']),
str(primers[num][cat][degen_seq]['end']),
str(primers[num][cat][degen_seq]['Tm'][0]),
str(primers[num][cat][degen_seq]['Tm'][1]),
str(primers[num][cat][degen_seq]['GC'][0]),
str(primers[num][cat][degen_seq]['GC'][1]),
str(primers[num][cat][degen_seq]['hairpin'][0]),
str(primers[num][cat][degen_seq]['hairpin'][1]),
str(primers[num][cat][degen_seq]['homodimer'][0]),
str(primers[num][cat][degen_seq]['homodimer'][1]),
]
outF.write('\t'.join(x) + '\n')
logging.info('File written: {}'.format(outfile))
| 14,320
|
def perm_data_time(x, indices):
"""
Permute data matrix, i.e. exchange node ids,
so that binary unions form the clustering tree.
"""
if indices is None:
return x
N, M, Q = x.shape
Mnew = len(indices)
assert Mnew >= M
xnew = np.empty((N, Mnew, Q))
for i,j in enumerate(indices):
# Existing vertex, i.e. real data.
if j < M:
xnew[:, i, :] = x[:, j, :]
# Fake vertex because of singletons.
# They will stay 0 so that max pooling chooses the singleton.
# Or -infty ?
else:
xnew[:, i, :] = np.zeros((N, Q))
return xnew
| 14,321
|
def make_regress_files(regress_files, out_dir=None, regress_dir=None, clean=None):
"""
Copy ``regress_files`` from ``out_dir`` to ``regress_dir``, maintaining the
relative directory structure.
The ``clean`` parameter specifies a dict of rules for "cleaning" files so that
uninteresting diffs are eliminated. Each dict key is the path name (corresponding
to ``regress_files``) and the value is a 2-tuple of (match_regex, substitution_string).
:param regress_files: list of relative path names
:param out_dir: top-level directory for source of files
:param regress_dir: top-level directory where files are copied
:param clean: dict of regex substitution rules
:returns: None
"""
if clean is None:
clean = {}
# Fall back on environment variables that are defined during package testing.
if out_dir is None:
out_dir = os.environ.get('TESTR_OUT_DIR')
if regress_dir is None:
regress_dir = os.environ.get('TESTR_REGRESS_DIR')
# make sure these are paths
regress_dir = Path(regress_dir)
out_dir = Path(out_dir)
# Make the top-level directory where files go
if not regress_dir.exists():
os.makedirs(regress_dir)
for regress_file in regress_files:
with open(out_dir / regress_file, 'r') as fh:
lines = fh.readlines()
if regress_file in clean:
for sub_in, sub_out in clean[regress_file]:
lines = [re.sub(sub_in, sub_out, x) for x in lines]
# Might need to make output directory since regress_file can
# contain directory prefix.
regress_path = regress_dir / regress_file
regress_path_dir = regress_path.parent
if not regress_path_dir.exists():
os.makedirs(regress_path_dir)
with open(regress_path, 'w') as fh:
fh.writelines(lines)
| 14,322
|
def test_auth_proj_no_token():
"""Token required by endpoint decorator"""
response = requests.get(dds_cli.DDSEndpoint.AUTH_PROJ)
assert response.status_code == 400
response_json = response.json()
assert response_json.get("message")
assert "JWT Token not found in HTTP header" in response_json.get("message")
| 14,323
|
def check_chromium() -> bool:
"""Check if chromium is placed at correct path."""
return chromium_executable().exists()
| 14,324
|
def draw_lane(image, extracted_lane: Optional[Dict] = None, output_path: Optional[str] = None):
"""render extracted lane"""
# TODO: refactor separate concern moving out the saving to a file
if extracted_lane is None:
extracted_lane = {}
lane_annotation_image = image.copy()
if "right" in extracted_lane:
lane_annotation_image = draw_lines(
lane_annotation_image, [extracted_lane["right"]], color=(0, 255, 0),
thickness=10) # right side in green
if "left" in extracted_lane:
lane_annotation_image = draw_lines(
lane_annotation_image, [extracted_lane["left"]], color=(255, 0, 0),
thickness=10) # left in red
output_image = weighted_img(lane_annotation_image, image, .5, .5)
save_status = None
if output_path:
save_status = plt.imsave(
output_path, output_image
) # TODO: use cv2.imwrite instead
return output_image
| 14,325
|
def plot_edges(lattice : Lattice,
labels : np.ndarray = 0,
color_scheme : np.ndarray = ['k','r','b'],
subset : np.ndarray = slice(None, None, None),
directions : np.ndarray = None,
ax = None,
arrow_head_length = None,
**kwargs):
"""
Plot the edges of a lattice with optional arrows.
This uses matplotlib.collections.LineCollection under the hood and you may
pass in any keyword to be passed along to it.
Note that arrays for alpha or linestyle don't currently work since they would have to be tiled correctly, which is not currently done.
If directions is not None, arrows are plotted from the first vertex to the second unless direction[i] == -1
:param lattice: The lattice to use.
:type lattice: Lattice
:param labels: int or array of ints specifying the colors, defaults to 0. May be the same size as the vertices or of the subset.
:type labels: np.ndarray, optional
:param color_scheme: List or array of colors, defaults to ['k', 'r', 'b']
:type color_scheme: np.ndarray, optional
:param subset: An array of indices, boolean array or slice that selects which elements to plot, defaults to plotting all.
:type subset: np.ndarray, optional
:param directions: An array of arrow directions +/-1, defaults to None.
:type directions: np.ndarray, optional
:param ax: The axis to plot on, defaults to plt.gca()
:type ax: axis, optional
"""
labels, colors, color_scheme, subset, ax, transform = _process_plot_args(lattice, ax, labels, color_scheme, subset, lattice.n_edges)
edge_colors = np.tile(colors, 9)
edge_vertices = lattice.vertices.positions[lattice.edges.indices[subset]]
edge_vertices[:, 0, :] -= lattice.edges.crossing[subset]
unit_cell_vectors = generate_point_array(np.array([0,0]), padding = 1)[:, None, None, :] #shape (9, 2) -> (9, 1, 1, 2)
replicated_edges = edge_vertices[None,...] + unit_cell_vectors #shape (n_edges, 2, 2) -> (9, n_edges, 2, 2)
replicated_edges = replicated_edges.reshape((-1,2,2)) #shape (9, n_edges, 2, 2) -> (9*n_edges, 2, 2)
vis = _lines_cross_unit_cell(replicated_edges) | _line_fully_in_unit_cell(replicated_edges)
# print(edge_colors.shape, replicated_edges.shape, vis.shape)
lc = LineCollection(replicated_edges[vis, ...], colors = edge_colors[vis], transform = transform, path_effects=[path_effects.Stroke(capstyle="round")], **kwargs)
ax.add_collection(lc)
if directions is not None:
directions = _broadcast_args(directions, subset, lattice.n_edges, dtype = int)
directions = np.tile(directions, 9)
_plot_edge_arrows(ax, edge_colors[vis],replicated_edges[vis, ...],directions[vis], lc, lattice.unit_cell, arrow_head_length = arrow_head_length)
return ax
| 14,326
|
def menu_items_api(restaurant_id):
"""Route handler for api endpoint retreiving menu items for a restaurant.
Args:
restaurant_id: An int representing the id of the restaurant whose menu
items are to be retrieved
Returns:
response: A json object containing all menu items for a given
restaurant
"""
menu_items = (
session.query(MenuItem).filter_by(restaurant_id=restaurant_id).all()
)
response = jsonify(
menu_items=[menu_item.serialize for menu_item in menu_items]
)
return response
| 14,327
|
def run(indata):
"""indata: event detection DataArray or DataSet"""
if isinstance(indata, xr.DataArray):
events = indata
else:
events = indata["Event_ID"]
logging.info("events array defined.")
# turn events into time x space by stacking lat & lon:
events_stacked = events.stack(z=("lat", "lon"))
logging.info("Stacked events.")
# events_stacked is [time, z]
# make sure to only have integers for the event IDs:
zint = events_stacked.values.astype(int)
logging.info(f"Convert events to integers. Result is shape {zint.shape}.") # should still be [time, z]
mx = np.max(zint)
logging.info(f"Max number of events is {mx}; output dimesion size (add one for zeros).")
ids, ndx, dur = theloop(zint)
logging.info("Loop done.")
logging.info(f"kind of ids: {type(ids)}\n ndx: {type(ndx)}, shape: {ndx.shape}\n dur: {type(dur)}")
# use ndx to go back to 'time' and construct array of datetimes
dates = np.full(ndx.shape, np.datetime64('NaT'), dtype='datetime64[D]') # fill value should be numpy's "not a time" value. (what if time is in cftime, though?); dtype needs to be set with correct unit (D = days)
for loc in tqdm(np.arange(ndx.shape[0]), desc="Dates Loop"):
last_event = ids[loc, :].max()
dates[loc, 0:last_event] = indata.time[ndx[loc, 0:last_event]] # loc: int; dates: datetime; ndx: int
logging.info("Finished the initial dates reconstruction.")
# dates[:, 1:] = np.ma.masked_where(ndx[:, 1:] == 0, dates[:, 1:], copy=False) # mask where eventID == 0
# Convert resulting numpy arrays to Xarray DataArrays
ids_da = xr.DataArray(ids, coords={"z":events_stacked['z'], 'events':np.arange(1,mx+2)},
dims=("z", "events"))
ndx_da = xr.DataArray(ndx, coords={"z":events_stacked['z'], 'events':np.arange(1,mx+2)},
dims=("z", "events"))
cnt_da = xr.DataArray(dur, coords={"z":events_stacked['z'], 'events':np.arange(1,mx+2)},
dims=("z", "events"))
dates_da = xr.DataArray(dates, coords={"z":events_stacked['z'], 'events':np.arange(1,mx+2)},
dims=("z", "events"))
ids_da.name = "Event_ID"
ndx_da.name = "initial_index"
cnt_da.name = "duration"
dates_da.name = 'initial_date'
logging.info("DataArray are made")
ids_da = ids_da.unstack()
ndx_da = ndx_da.unstack()
cnt_da = cnt_da.unstack()
dates_da = dates_da.unstack()
logging.info("Unstacked.")
return xr.merge([ids_da, ndx_da, cnt_da, dates_da])
| 14,328
|
def goto_location(session: Session, args: Any) -> None:
"""https://scalameta.org/metals/docs/integrations/new-editor/#goto-location"""
if isinstance(args, list) and args:
open_location(session.window, args[0])
| 14,329
|
def format_trace_id(trace_id: int) -> str:
"""Format the trace id according to b3 specification."""
return format(trace_id, "032x")
| 14,330
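A quick check of the format: the result is a 32-character, zero-padded lowercase hex string.

assert format_trace_id(255) == "ff".rjust(32, "0")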
|
def get_count_name(df):
"""Count the number of words in a person's 'Name'
Parameters
----------
df : pandas dataframe
Returns
-------
The dataframe with an added integer 'Words_Count' column
"""
# Feature that counts how many words a passenger's name contains
df['Words_Count'] = df['Name'].apply(lambda x: len(x.split())).astype(int)
return df
| 14,331
|
def register_action(request):
"""
Import the User model from django.contrib.auth.models (User is the ORM interface to the user table).
We call User.objects.create_user with the username and password to create a user, then save it; that completes registration.
If the username already exists in the user table, create_user raises an error, so we wrap it in try/except:
on failure we report that the username seems to exist already, otherwise we report success.
:param request:
:return:
"""
u_name = request.GET['username']
p_word = request.GET['password']
# Connect to the Django user table and create the account
from django.contrib.auth.models import User
try:
user = User.objects.create_user(username=u_name, password=p_word)
user.save()
return HttpResponse('Registration successful')
except Exception:
return HttpResponse('Registration failed: the username seems to exist already')
| 14,332
|
async def run_blocking_io(func: Callable, *args, **kwargs) -> Any:
"""|coro|
Run some blocking function in an event loop.
If there is a running loop, ``func`` is executed in it.
Otherwise, a new loop is created and closed at the end of the execution.
Example:
.. code-block:: python3
def make_image():
... # long code of creating an image
# somewhere in an async function:
await run_blocking_io(make_image)
"""
loop = acquire_loop(running=True)
asyncio.set_event_loop(loop)
return await loop.run_in_executor(None, functools.partial(func, *args, **kwargs))
| 14,333
|
def parse_config(cfg, section):
""" parse config data structure, return data of required section """
def is_valid_section(s):
valid_sections = ["info", "project", "variables", "refdata"]
return s in valid_sections
cfg_data = None
if is_valid_section(section):
try:
cfg_data = cfg[section]
except KeyError:
log.critical(cfg.keys())
log.critical("Section <%s> not found in config" % section)
exit(1)
else:
log.critical("Section <%s> not a valid name" % section)
exit(1)
return cfg_data
| 14,334
|
def _predict_states(freqs):
"""Use frequencies to predict states across a chromosome.
Normalize so heterozygote blocks are assigned state 0 and homozygous
are assigned state 1.
"""
from hmmlearn import hmm
freqs = np.column_stack([np.array(freqs)])
model = hmm.GaussianHMM(2, covariance_type="full")
model.fit(freqs)
states = model.predict(freqs)
freqs_by_state = collections.defaultdict(list)
for i, state in enumerate(states):
freqs_by_state[state].append(freqs[i])
if np.median(freqs_by_state[0]) > np.median(freqs_by_state[1]):
states = [0 if s == 1 else 1 for s in states]
return states
| 14,335
|
def save_npz_dict(save_list=None, name='model.npz'):
"""Input parameters and the file name, save parameters as a dictionary into .npz file.
Use ``tl.files.load_and_assign_npz_dict()`` to restore.
Parameters
----------
save_list : list of parameters
A list of parameters (tensor) to be saved.
name : str
The name of the `.npz` file.
"""
if save_list is None:
save_list = []
save_list_names = [tensor.name for tensor in save_list]
save_list_var = tf_variables_to_numpy(save_list)
save_var_dict = {save_list_names[idx]: val for idx, val in enumerate(save_list_var)}
np.savez(name, **save_var_dict)
save_list_var = None
save_var_dict = None
del save_list_var
del save_var_dict
logging.info("[*] Model saved in npz_dict %s" % name)
| 14,336
|
def test_add_constant():
"""Test the add_constant function."""
a = rs.randn(10, 5)
wanted = np.column_stack((a, np.ones(10)))
got = stat.add_constant(a)
assert_array_equal(wanted, got)
| 14,337
|
def tp(*args) -> np.ndarray:
"""Tensor product.
Recursively calls `np.tensordot(a, b, 0)` for argument list
`args = [a0, a1, a2, ...]`, yielding, e.g.,
tp(a0, a1, a2) = tp(tp(a0, a1), a2)
Parameters
----------
args : sequence
Sequence of tensors
Returns
-------
np.ndarray
Tensor product
Examples
--------
>>> a = np.random.rand(2, 3, 4)
>>> b = np.random.rand(7, 8, 9)
>>> c = tp(a, b)  # c_ijkmno = a_ijk b_mno
>>> c.shape == (2, 3, 4, 7, 8, 9)
True
"""
temp = args[0]
for i in range(1, len(args)):
temp = np.tensordot(temp, args[i], 0)
return temp
| 14,338
|
def main():
"""Run a simple test
"""
version = get_version()
if version:
print('Found tesseract OCR version %s' % version)
print('Available languages:', get_list_of_langs())
else:
print('Tesseract is not available')
| 14,339
|
def delete():
"""Remove the current user's avatar image."""
user = _get_current_user_or_404()
try:
avatar_service.remove_avatar_image(user.id)
except ValueError:
# No avatar selected.
# But that's ok, deletions should be idempotent.
flash_notice(gettext('No avatar image is set that could be removed.'))
else:
flash_success(gettext('Avatar image has been removed.'))
| 14,340
|
def aws_aws_page():
"""main endpoint"""
form = GenericFormTemplate()
return render_template(
'aws_page.html',
form=form,
text=util.get_text(module_path(), config.language),
options=g.user.get_options(),
)
| 14,341
|
def popen_program(cmd, minimized=False, pipe=False, shell=False, **kwargs):
"""Run program and return a subprocess.Popen object."""
LOG.debug(
'cmd: %s, minimized: %s, pipe: %s, shell: %s',
cmd, minimized, pipe, shell,
)
LOG.debug('kwargs: %s', kwargs)
cmd_kwargs = build_cmd_kwargs(
cmd,
minimized=minimized,
pipe=pipe,
shell=shell,
**kwargs)
try:
# pylint: disable=consider-using-with
proc = subprocess.Popen(**cmd_kwargs)
except FileNotFoundError:
LOG.error('Command not found: %s', cmd)
raise
LOG.debug('proc: %s', proc)
# Done
return proc
| 14,342
|
def precompute(instr):
"""
Args:
instr:
Returns:
"""
qecc = instr.qecc
if qecc.name == '4.4.4.4 Surface Code' and qecc.circuit_compiler.name == 'Check2Circuits':
precomputed_data = code_surface4444(instr)
elif qecc.name == 'Medial 4.4.4.4 Surface Code' and qecc.circuit_compiler.name == 'Check2Circuits':
precomputed_data = code_surface4444medial(instr)
else:
raise Exception('Can only handle the 4.4.4.4 surface code (medial or non-medial) with Check2Circuits!')
return precomputed_data
| 14,343
|
def remove_url(txt):
"""Replace URLs found in a text string with nothing
(i.e. it will remove the URL from the string).
Parameters
----------
txt : string
A text string that you want to parse and remove urls.
Returns
-------
The same txt string with url's removed.
"""
return " ".join(re.sub("([^0-9A-Za-z \t])|(\w+:\/\/\S+)", "", txt).split())
| 14,344
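A quick sketch of the behavior: URLs and stray punctuation are dropped and whitespace is collapsed.

assert remove_url("Check https://example.com now!") == "Check now"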
|
def use_database(fn):
"""
Ensure that the correct database context is used for the wrapped function.
"""
@wraps(fn)
def inner(self, *args, **kwargs):
with self.database.bind_ctx(self.models):
return fn(self, *args, **kwargs)
return inner
| 14,345
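A hedged usage sketch with peewee-style objects (the `TaskStore` class and its attributes are hypothetical); the decorator only needs `self.database` to provide `bind_ctx` and `self.models` to be the model list:

class TaskStore:
    def __init__(self, database, models):
        self.database = database  # e.g. a peewee SqliteDatabase
        self.models = models      # list of peewee Model subclasses

    @use_database
    def count_tasks(self):
        # Runs with self.models temporarily bound to self.database.
        return self.models[0].select().count()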
|
def write_csv(obj, filename):
"""
Convert dictionary items to a CSV file the dictionary format:
::
{'result category 1':
[
# 1st line of results
{'header 1' : value_xxx,
'header 2' : value_yyy},
# 2nd line of results: same headers, different results
{'header 1' : value_www,
'header 2' : value_zzz}
],
'result_category 2':
[
{},{}
]
}
The generated csv file will be:
::
result_category 1
header 1 header 2
value_xxx value_yyy
value_www value_zzz
result_category 2
...
"""
with open(filename, 'w', encoding='utf-8', newline='') as f:
w = writer(f)
for data_key, data_list in obj.items():
# main header
w.writerow([data_key])
# sub headers:
headers = [_ for _ in data_list[0].keys()]
w.writerow(headers)
for data_dict in data_list:
w.writerow([_ for _ in data_dict.values()])
| 14,346
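A small usage sketch matching the dictionary layout described in the docstring (the file name is hypothetical):

results = {
    'timings': [
        {'step': 'load', 'seconds': 1.2},
        {'step': 'train', 'seconds': 42.0},
    ],
}
write_csv(results, 'results.csv')
# results.csv then contains:
# timings
# step,seconds
# load,1.2
# train,42.0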
|
def format_help(title, lines, os_file):
"""Nicely print section of lines.
:param title: help title, if exist
:param lines: strings to format
:param os_file: open filehandle for output of RST file
"""
close_entry = False
if title:
os_file.write("**" + title + ":**" + "\n\n")
continued_line = ''
for line in lines:
if not line or line[0] != ' ':
break
# We have to handle these cases:
# 1. command Explanation
# 2. command
# Explanation on next line
# 3. command Explanation continued
# on next line
# If there are more than 8 spaces, let's treat it as
# explanation.
if line.startswith(' '):
# Explanation
xline = continued_line + quote_rst(line.lstrip(' '))
continued_line = ''
# Concatenate the command options with "-"
# For example:
# see 'glance image-
# show'
if xline.endswith('-'):
continued_line = xline
continue
# check niceness
if len(xline) > (MAXLINELENGTH - 2):
xline = xline.replace(' ', '\n ')
os_file.write(" " + xline + "\n")
continue
# Now we have a command or parameter to handle
split_line = extract_options(line)
if not close_entry:
close_entry = True
else:
os_file.write("\n")
xline = split_line[0]
# check niceness work around for long option name, glance
xline = xline.replace('[<RESOURCE_TYPE_ASSOCIATIONS> ...]',
'[...]')
os_file.write("``" + xline + "``\n")
if len(split_line) > 1:
# Explanation
xline = continued_line + quote_rst(split_line[1])
continued_line = ''
# Concatenate the command options with "-"
# For example:
# see 'glance image-
# show'
if xline.endswith('-'):
continued_line = xline
continue
# check niceness
if len(xline) > (MAXLINELENGTH - 2):
# check niceness
xline = xline.replace(' ', '\n ')
os_file.write(" " + xline + "\n")
os_file.write("\n")
return
| 14,347
|
def getImapMailboxEmail(server, user, password, index, path="INBOX", searchSpec=None):
"""
imap_headers(server, user, password, index, path="INBOX", searchSpec=None)
Load specified email header from an imap server. index starts from 0.
Example
WITH RECURSIVE
cnt(x) AS (VALUES(1) UNION ALL SELECT x+1 FROM cnt WHERE x<imap_count("127.0.0.1","jj","pass","test"))
select x-1 as num, imap_email("127.0.0.1","jj","pass",x-1,"test") as message FROM cnt;
See also
https://gist.github.com/robulouski/7441883
https://oracle-base.com/articles/10g/utl_mail-send-email-from-the-oracle-database
https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.create_aggregate
"""
try:
import imaplib, email, email.header
with imaplib.IMAP4_SSL(server) as M:
M.login(user,password)
typ, data=M.select(path)
if(data[0]==b'0'):
print ("*SELECT*FAILED",path,typ,data)
return "ERR NO MAILBOX:"+path
if searchSpec is None:
typ, data = M.search(None, 'ALL')
else:
typ, data = M.search(None, searchSpec)
if len(data[0].split()) >0:
id2fetch= (data[0].split())[index]
typ, data = M.fetch(id2fetch, '(RFC822)')
msg_return=data[0][1]
else:
msg_return=None
M.logout()
return msg_return
except Exception as e:
raise SqliteFunctionException( e )
| 14,348
|
def trace(msg, html=False, attachment=None, launch_log=False):
"""Write the message to the log file using the ``TRACE`` level."""
write(msg, "TRACE", html, attachment, launch_log)
| 14,349
|
def test_lj2atomen():
"""Test om de potentiaal tussen 2 atomen te berekenen"""
afstand = 1.5
pot = 4*((1/afstand)**12 - (1/afstand)**6)
potf = f90.ljpot2atomen(afstand)
assert round(pot, 7) == round(potf, 7)
| 14,350
|
def create_storage(uri=None):
"""factory to create storage based on `uri`, the ANYVAR_STORAGE_URI
environment value, or in-memory storage.
The URI format is one of the following:
* in-memory dictionary:
`memory:`
Remaining URI elements ignored, if provided
* Python shelf (dbm) persistence
`file:///full/path/to/filename.db`
`path/to/filename`
The `file` scheme permits only full paths. When scheme is not
provided, the path may be absolute or relative.
* Redis URI
`redis://[[username]:[password]]@localhost:6379/0`
`rediss://[[username]:[password]]@localhost:6379/0`
`unix://[[username]:[password]]@/path/to/socket.sock?db=0`
The URIs are passed as-is to `redis.Redis.from_url()`
"""
uri = uri or os.environ.get("ANYVAR_STORAGE_URI", default_storage_uri)
parsed_uri = urlparse(uri)
if parsed_uri.scheme == "memory":
_logger.warning("Using memory storage; stored data will be discarded when process exits")
storage = dict()
elif parsed_uri.scheme in ("", "file"):
from .shelf import ShelfStorage
storage = ShelfStorage(parsed_uri.path)
elif parsed_uri.scheme == "redis":
import redis
from .redisobjectstore import RedisObjectStore
storage = RedisObjectStore(redis.Redis.from_url(uri))
else:
raise ValueError(f"URI scheme {parsed_uri.scheme} is not implemented")
_logger.debug(f"create_storage: {uri} → {storage}")
return storage
| 14,351
|
def test_python_module_ctia_positive_malware(
module_headers, module_tool_client):
"""Perform testing for malware entity of custom threat intelligence python
module
ID: CCTRI-164-056ef37c-171d-4b1d-ae3d-4601aaa465bb
Steps:
1. Send POST request to create new malware entity using custom python
module
2. Send GET request using custom python module to read just created
entity back.
3. Send same GET request, but using direct access to the server
4. Compare results
5. Update malware entity using custom python module
6. Repeat GET request using python module and validate that entity was
updated
7. Delete entity from the system
Expectedresults: Malware entity can be created, fetched, updated and
deleted using custom python module. Data stored in the entity is
the same no matter you access it directly or using our tool
Importance: Critical
"""
malware = module_tool_client.private_intel.malware
payload = {
'type': 'malware',
'schema_version': SERVER_VERSION,
'name': 'TinyZBot',
'labels': ['malware']
}
# Create new entity using provided payload
post_tool_response = malware.post(payload=payload,
params={'wait_for': 'true'})
values = {
key: post_tool_response[key] for key in [
'name',
'labels',
'type',
'schema_version'
]
}
assert values == payload
entity_id = post_tool_response['id'].rpartition('/')[-1]
# Validate that GET request return same data for direct access and access
# through custom python module
get_tool_response = malware.get(entity_id)
get_direct_response = ctia_get_data(
target_url=MALWARE,
entity_id=entity_id,
**{'headers': module_headers}
).json()
assert get_tool_response == get_direct_response
# Update entity values
put_tool_response = delayed_return(
malware.put(
id_=entity_id,
payload={'name': 'XBot', 'labels': ['malware']}
)
)
assert put_tool_response['name'] == 'XBot'
get_tool_response = malware.get(entity_id)
assert get_tool_response['name'] == 'XBot'
# Delete the entity and make attempt to get it back to validate it is
# not there anymore
delayed_return(malware.delete(entity_id))
with pytest.raises(HTTPError):
malware.get(entity_id)
| 14,352
|
def nmatches_mem(txt, pat, t, p, mem):
"""Find number of matches with recursion + memoization using a dictionary
(this solution will also crash when recursion limit is reached)
nmatches_mem(text, pattern, len(text), len(pattern), {})
"""
if (t,p) in mem:
return mem[t, p]
if p==0:
return 1
if t==0:
return 0
matches = 0
for i in range(t, 0, -1):
if txt[t-i] == pat[p-1]:
matches += nmatches_mem(txt, pat, t-i, p-1, mem)
mem[t, p] = matches
return matches
| 14,353
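A usage sketch: the routine counts the distinct subsequences of `txt` equal to `pat`; the classic 'rabbbit'/'rabbit' case has three.

assert nmatches_mem("rabbbit", "rabbit", 7, 6, {}) == 3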
|
def get_size_stats(args):
"""
Calculate the size of each matching file.
It recursively iterates through a directory to find files with a specific extension and reports their sizes in the preferred format.
"""
lang_size_dict = {}
for (dirpath, dirnames, filenames) in os.walk(args.data_folder_path):
for filename in filenames:
if not (filename.startswith(args.name_prefix) and filename.endswith(args.extension_name)):
continue
full_file_path = os.path.join(dirpath, filename)
lang_size = subprocess.check_output("du -s {}".format(full_file_path), shell=True)
lang_size = int(lang_size.decode("utf-8").split("\t")[0])
if args.size_format == 'KB':
_conv = 1
elif args.size_format == 'MB':
_conv = 1024
elif args.size_format == 'GB':
_conv = 1024 * 1024
elif args.size_format == 'TB':
_conv = 1024 * 1024 * 1024
lang_size_ = round(lang_size / float(_conv), 2)
lang_size_dict[full_file_path] = lang_size_
return lang_size_dict
| 14,354
|
def main():
""" main with an infinite loop"""
username, password, address = get_conf()
checker = Checker(username, password, address)
while True:
checker.store_next_events()
checker.print_next_events()
checker.notify_if_in_less(hours=20)
time.sleep(60 * DEFAULT_CHECK_PERIOD_IN_MINUTES)
| 14,355
|
def data_get():
"""
Get shared data from this server's local store.
"""
consistency = request.json["consistency"]
name = request.json["name"]
field = request.json["field"]
value = ""
error = "ok"
if consistency == "strict":
store = globalvars.get_data_store(globalvars.STRICT_CENTRALIZED)
with store_lock:
try:
value = store.get(name, field)
except AttributeError as ex:
error = str(ex)
res = {
"value" : value,
"error" : error,
}
return jsonify(res)
| 14,356
|
def pad_sequences(sequences, maxlen=None, value=0):
"""
pad sequences (num_samples, num_timesteps) to same length
"""
if maxlen is None:
maxlen = max(len(x) for x in sequences)
outputs = []
for x in sequences:
x = x[:maxlen]
pad_range = (0, maxlen - len(x))
x = np.pad(array=x, pad_width=pad_range, mode='constant', constant_values=value)
outputs.append(x)
return np.array(outputs)
| 14,357
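A small usage sketch (assuming numpy is imported as `np`, as the function requires):

padded = pad_sequences([[1, 2], [3, 4, 5]])
# padded is a (2, 3) array:
# [[1 2 0]
#  [3 4 5]]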
|
def test_ct():
"""Test if Ct calculation work"""
sample_time = np.loadtxt('ref.txt', usecols=[0])
reference_ct = np.loadtxt('ref.txt', usecols=[1])
instance = trial.xixihaha.Kubo(delta=1 , tau=1)
calculate_ct = instance.calculate_Ct(time=sample_time)[:,1]
assert np.allclose(calculate_ct , reference_ct)
| 14,358
|
def test_filter_by_patron(app, patron_pid, qs, should_raise):
"""Test the function filter_by_patron."""
search = RecordsSearch()
if should_raise:
with pytest.raises(UnauthorizedSearchError):
_filter_by_patron(patron_pid, search, qs)
else:
_search, _qs = _filter_by_patron(patron_pid, search, qs)
term = _search.to_dict()["query"]["bool"]["filter"][0]["term"]
assert term == {"patron_pid": patron_pid}
| 14,359
|
def reset_password():
"""
Three main states of this controller
1. by default just show the email field
2. in a second step, also show the field for the code and new password
3. in a third step, if code is correct, redirect to login
:return: template to be rendered
"""
form = flask.request.form
email = form.get("email", "")
code = form.get("code", "")
password = form.get("password", "")
if email and not code:
#generate_code_and_send_email(email)
account_management.request_code(email)
flash("Now check your inbox for a one-time code")
return flask.render_template("account/reset_pass.html", code_active=True, email=email)
if email and code and password:
try:
account_management.reset_password(code, email, password)
flash("Password was reset successfully!")
return flask.redirect('login')
except APIException as e:
flash(e.message)
traceback.print_exc(file=sys.stdout)
return flask.render_template("account/reset_pass.html", message=True)
flash("This will be fast. We promise.")
return flask.render_template("account/reset_pass.html")
| 14,360
|
def download(request, path):
"""
Downloads a file.
This is inspired by django.views.static.serve.
?disposition={attachment, inline}
"""
decoded_path = urllib.unquote(path)
if path != decoded_path:
path = decoded_path
if not SHOW_DOWNLOAD_BUTTON.get():
return serve_403_error(request)
if not request.fs.exists(path):
raise Http404(_("File not found: %(path)s.") % {'path': escape(path)})
if not request.fs.isfile(path):
raise PopupException(_("'%(path)s' is not a file.") % {'path': path})
content_type = mimetypes.guess_type(path)[0] or 'application/octet-stream'
stats = request.fs.stats(path)
mtime = stats['mtime']
size = stats['size']
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'), mtime, size):
return HttpResponseNotModified()
# TODO(philip): Ideally a with statement would protect from leaks, but tricky to do here.
fh = request.fs.open(path)
# Verify read permissions on file first
try:
request.fs.read(path, offset=0, length=1)
except WebHdfsException as e:
if e.code == 403:
raise PopupException(_('User %s is not authorized to download file at path "%s"') %
(request.user.username, path))
else:
raise PopupException(_('Failed to download file at path "%s": %s') % (path, e))
if REDIRECT_DOWNLOAD.get() and hasattr(fh, 'read_url'):
response = HttpResponseRedirect(fh.read_url())
setattr(response, 'redirect_override', True)
else:
response = StreamingHttpResponse(file_reader(fh), content_type=content_type)
response["Last-Modified"] = http_date(stats['mtime'])
response["Content-Length"] = stats['size']
response['Content-Disposition'] = request.GET.get('disposition', 'attachment; filename="' + stats['name'] + '"') if _can_inline_display(path) else 'attachment'
request.audit = {
'operation': 'DOWNLOAD',
'operationText': 'User %s downloaded file %s with size: %d bytes' % (request.user.username, path, stats['size']),
'allowed': True
}
return response
| 14,361
|
def spacegroup_a_to_spacegroup_b(atoms, spgroup_a, spgroup_b, target_b_contribution, create_replicas_by,
min_nb_atoms=None, target_nb_atoms=None, max_diff_nb_atoms=None, radius=None,
target_replicas=None, max_rel_error=0.01, **kwargs):
"""Remove central atoms for bcc to sc"""
# get number of replicas
replicas = nb_of_replicas(atoms, create_replicas_by=create_replicas_by, min_nb_atoms=min_nb_atoms,
target_nb_atoms=target_nb_atoms, max_diff_nb_atoms=max_diff_nb_atoms, radius=radius,
target_replicas=target_replicas)
atoms = standardize_cell(atoms, **kwargs)
# make a spgroup_a-type supercell before removing atoms
atoms_a = atoms.copy()
atoms_a = atoms_a * replicas
# check initial spacegroup
mg_structure = AseAtomsAdaptor.get_structure(atoms)
finder = SpacegroupAnalyzer(mg_structure)
init_spgroup = finder.get_space_group_symbol()
if init_spgroup == spgroup_a:
logger.debug('Initial spacegroup is {0} as expected'.format(init_spgroup))
else:
raise Exception("Initial spacegroup is {0} "
"while the expected spacegroup is {1}".format(init_spgroup, spgroup_a))
# initially the mix structure has all the spgroup_a atoms
atoms_mix = atoms_a.copy()
idx_remove_list = []
TOL = 1e-03
if spgroup_a == 'Im-3m' and spgroup_b == 'Pm-3m':
# from bcc to simple cubic
for idx in range(atoms.get_number_of_atoms()):
# deleting all atoms from spgroup_a to go in spgroup_b
# removing the atoms that are in position (0.0, 0.0, 0.0)
if (abs(atoms.positions[idx][0]) <= TOL and abs(atoms.positions[idx][1]) <= TOL and abs(
atoms.positions[idx][2]) <= TOL):
pass
else:
idx_remove_list.append(idx)
elif spgroup_a == 'Fd-3m' and spgroup_b == 'Fm-3m':
# from diamond to fcc
for idx in range(atoms.get_number_of_atoms()):
# deleting all atoms from spgroup_a to go in spgroup_b
# removing the atoms that are "inside" the cube
# keep only the atoms that have one coordinate which is
# 1/2 of the cell length or position (0.0, 0.0, 0.0)
cell_length = atoms.get_cell_lengths_and_angles()[0]
if abs(atoms.positions[idx][0] - cell_length / 2.0) <= TOL or abs(
atoms.positions[idx][1] - cell_length / 2.0) <= TOL or abs(
atoms.positions[idx][2] - cell_length / 2.0) <= TOL:
pass
elif (abs(atoms.positions[idx][0]) <= TOL and abs(atoms.positions[idx][1]) <= TOL and abs(
atoms.positions[idx][2]) <= TOL):
pass
else:
idx_remove_list.append(idx)
else:
raise NotImplementedError("Transformation from spacegroup {0} to spacegroup {1}"
"is not implemented".format(spgroup_a, spgroup_b))
# delete all the indices added to the list
del atoms[[atom.index for atom in atoms if atom.index in idx_remove_list]]
atoms_b = atoms * replicas
# check final spacegroup
mg_structure = AseAtomsAdaptor.get_structure(atoms_b)
finder = SpacegroupAnalyzer(mg_structure)
final_spgroup = finder.get_space_group_symbol()
if final_spgroup == spgroup_b:
logger.debug('Final spacegroup is {0} as expected'.format(final_spgroup))
else:
logger.debug("Final spacegroup is {0}".format(final_spgroup))
logger.debug("Expected final spacegroup is {0}".format(spgroup_b))
raise Exception("The transformation provided does not give the expected final "
" spacegroup. Expected: {0}; obtained: {1}".format(spgroup_b, final_spgroup))
# find the rows that are in bcc-type supercell and not in sc
atoms_a_rows = atoms_a.positions.view([('', atoms_a.positions.dtype)] * atoms_a.positions.shape[1])
atoms_b_rows = atoms_b.positions.view([('', atoms_b.positions.dtype)] * atoms_b.positions.shape[1])
a_b_diff_pos = np.setdiff1d(atoms_a_rows, atoms_b_rows).view(atoms_a.positions.dtype).reshape(-1,
atoms_a.positions.shape[
1])
atoms_a_only_ids = []
for idx in range(atoms_a.get_number_of_atoms()):
for row_idx in range(a_b_diff_pos.shape[0]):
if np.allclose(atoms_a.positions[idx], a_b_diff_pos[row_idx, :], rtol=1e-03):
atoms_a_only_ids.append(idx)
break
else:
pass
# take a random subset of atoms to remove
nb_atoms_to_rm = int(len(atoms_a_only_ids) * target_b_contribution)
actual_b_contribution = nb_atoms_to_rm / len(atoms_a_only_ids)
if target_b_contribution != 0.0:
rel_error = abs(target_b_contribution - actual_b_contribution) / target_b_contribution
if rel_error > max_rel_error:
logger.warning("Difference between target and actual vacancy ratio "
"bigger than the threshold ({0}%).\n"
"Target/actual vacancy ratio: {1}%/{2}%.".format(max_rel_error * 100.0,
target_b_contribution * 100.0,
actual_b_contribution * 100.0))
# random sampling of the list without replacement
atoms_a_only_ids_subset = random.sample(atoms_a_only_ids, nb_atoms_to_rm)
# remove atoms from the bcc_atoms_only_ids
del atoms_mix[[atom.index for atom in atoms_mix if atom.index in atoms_a_only_ids_subset]]
return atoms_mix
| 14,362
|
def empty_surface(fill_color, size=None, flags=0):
"""Returns an empty surface filled with fill_color.
:param fill_color: color to fill the surface with
:type fill_color: pygame.Color
:param size: the size of the new surface, if None its created
to be the same size as the screen
:type size: int-2-tuple
"""
if size is None:
sr = pygame.display.get_surface().get_rect()
surf = pygame.Surface((sr.w, sr.h), flags=flags)
else:
surf = pygame.Surface(size, flags=flags)
surf.fill(fill_color)
return surf
| 14,363
|
def getNetAddress(ip, netmask):
"""
Get the netaddress from an host ip and the netmask.
:param ip: Hosts IP address
:type ip: str
:param netmask: Netmask of the network
:type netmask: str
:returns: Address of the network calculated using hostIP and netmask
:rtype: str
"""
return str(IPNetwork(ip + "/" + str(getPrefix(netmask))).network)
| 14,364
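A hedged usage sketch, assuming `IPNetwork` comes from the netaddr package and the module-level `getPrefix` converts a dotted netmask to its prefix length (24 for 255.255.255.0):

assert getNetAddress("192.168.1.42", "255.255.255.0") == "192.168.1.0"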
|
def wait_for_mysqld(config, mysqld):
"""Wait for new mysql instance to come online"""
client = connect(config, PassiveMySQLClient)
LOG.debug("connect via client %r", config['socket'])
while mysqld.process.poll() is None:
try:
client.connect()
client.ping()
LOG.debug("Ping succeeded")
except MySQLError:
time.sleep(0.75)
continue
else:
break
client.disconnect()
| 14,365
|
def begin_operation(name: str) -> dict:
"""
Gets the stats for the current operation.
Parameters
----------
name: str
name of the operation
Returns
-------
dict
dictionary with the operation stats
Examples
--------
>>> from pymove.utils.mem import begin_operation
>>> operation = begin_operation('operation')
>>> operation
{
'process': psutil.Process(
pid=103401, name='python', status='running', started='21:48:11'
),
'init': 293732352, 'start': 1622082973.8825781, 'name': 'operation'
}
"""
process = psutil.Process(os.getpid())
init = process.memory_info()[0]
start = time.time()
return {'process': process, 'init': init, 'start': start, 'name': name}
| 14,366
|
def create_or_load_vocabulary(data_path,training_data_path,vocab_size,test_mode=False,tokenize_style='word',fine_tuning_stage=False,model_name=None):
"""
create or load vocabulary and label using training data.
process as: load from cache if exist; load data, count and get vocabularies and labels, save to file.
:param data_path: folder of data
:param training_data_path: path of training data
:param vocab_size: size of word vocabulary
:param test_mode: if True only select few to test functional, else use all data
:param tokenize_style: tokenize input as word(default) or character.
:return: vocab_word2index, label2index
"""
print("create_or_load_vocabulary.data_path:",data_path,";training_data_path:",training_data_path,";vocab_size:",vocab_size,";test_mode:",test_mode,";tokenize_style:",tokenize_style)
t1 = time.perf_counter()
if not os.path.isdir(data_path): # create folder if not exists.
os.makedirs(data_path)
# 1.if cache exists,load it; otherwise create it.
if model_name is not None:
cache_path =data_path+model_name+'vocab_label.pik'
else:
cache_path =data_path+'vocab_label.pik'
print("cache_path:",cache_path,"file_exists:",os.path.exists(cache_path))
if os.path.exists(cache_path):
with open(cache_path, 'rb') as data_f:
print("going to load cache file.vocab of words and labels")
return pickle.load(data_f)
# 2.load and shuffle raw data
file_object = codecs.open(training_data_path, mode='r', encoding='utf-8')
lines=file_object.readlines()
file_object.close()
random.shuffle(lines)
if test_mode:
lines=lines[0:20000]
else:
lines = lines  # [0:200*1000] # to make vocabulary creation quicker, one could randomly select only 200k lines.
print("==total data==", len(lines))
# 3.loop each line,put to counter
c_inputs=Counter()
c_labels=Counter()
for i,line in enumerate(lines):
input_list,input_label=get_input_strings_and_labels(line, tokenize_style=tokenize_style)
c_inputs.update(input_list)
c_labels.update(input_label)
if i % 1000 == 0: # print some information for debug purpose
print(i,"create_or_load_vocabulary.line:",line)
print(i,"create_or_load_vocabulary.input_label:",input_label,";input_list:",input_list)
print()
# 4.get most frequency words and all labels
if tokenize_style=='char':vocab_size=6000 # if we are using character instead of word, then use small vocabulary size.
vocab_list=c_inputs.most_common(vocab_size)
vocab_word2index={}
vocab_word2index[_PAD]=PAD_ID
vocab_word2index[_UNK]=UNK_ID
vocab_word2index[_CLS]=CLS_ID
vocab_word2index[_MASK]=MASK_ID
for i,tuplee in enumerate(vocab_list):
word,freq=tuplee
# if word in vocab_word2index:
# continue
vocab_word2index[word]=i+4
label2index={}
label_list=c_labels.most_common()
for i,tuplee in enumerate(label_list):
label_name, freq = tuplee
label_name=label_name.strip()
label2index[label_name]=i
# 5.save to file system if vocabulary of words not exists.
if not os.path.exists(cache_path):
with open(cache_path, 'ab') as data_f:
print("going to save cache file of vocab of words and labels")
pickle.dump((vocab_word2index, label2index), data_f)
t2 = time.perf_counter()
print('create_vocabulary.ended.time spent for generate training data:', (t2 - t1))
print(vocab_word2index[_CLS], _CLS, CLS_ID, "===============================")
return vocab_word2index, label2index
| 14,367
|
def diff(
macros: List[str],
citations: List[str],
styles: Optional[str],
) -> Iterable[AtomicIterable]:
"""Display changes in local template files."""
raise NotImplementedError
| 14,368
|
def balance_intent_data(df):
"""Balance the data for intent detection task
Args:
df (pandas.DataFrame): data to be balance, should contain "Core Relations" column
Returns:
pandas.DataFrame: balanced data
"""
relation_counter = build_counter(df, "Core Relations")
# augment each low resource label to average count
avg_count = int(
sum(relation_counter.values()) / len(relation_counter.values())
)
sample_df = df.sample(0)
for k, v in relation_counter.items():
# only augment the low resource label
if v >= avg_count:
continue
# to be sample amount
sample_count = avg_count - v
idx_of_label_k = df["Core Relations"].apply(lambda label: k in label)
# if the sample amount is larger than the label count, repeatedly sample all rows with that label
while sample_count > relation_counter[k]:
temp_df = df[idx_of_label_k].sample(relation_counter[k])
sample_df = pd.concat([sample_df, temp_df])
sample_count -= relation_counter[k]
sample_df = pd.concat(
[sample_df, df[idx_of_label_k].sample(sample_count)]
)
balance_df = pd.concat([df.copy(), sample_df])
return balance_df
| 14,369
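A hedged usage sketch for balance_intent_data (not from the original source): it assumes build_counter is available in the same module and returns a collections.Counter of label occurrences for the "Core Relations" column.

import pandas as pd

# Hypothetical toy data; labels live in a "Core Relations" column.
toy_df = pd.DataFrame({
    "utterance": ["who directed it", "show the cast", "rate this movie",
                  "who starred", "who starred in it", "top billed actor"],
    "Core Relations": ["movie.directed_by", "movie.starring.actor", "movie.rating",
                       "movie.starring.actor", "movie.starring.actor", "movie.starring.actor"],
})
balanced_df = balance_intent_data(toy_df)
print(len(toy_df), "->", len(balanced_df))  # low-resource labels are upsampled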
|
def ddm_iadd(a, b):
"""a += b"""
for ai, bi in zip(a, b):
for j, bij in enumerate(bi):
ai[j] += bij
| 14,370
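A small usage example: ddm_iadd mutates the first list-of-lists in place.

a = [[1, 2], [3, 4]]
b = [[10, 20], [30, 40]]
ddm_iadd(a, b)
print(a)  # [[11, 22], [33, 44]]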
|
def plot_with_bbox(image, fv, size_correction = None):
"""
Graph the bounding boxes on an image
Take the feature vectors, corresponding to the
element class and parameters of the bounding box
and plot it
Parameters
----------
image : np.array
3 - Channel image numpy array.
fv : np,array
Feature vector containing n - bounding boxes
with 5 elements (1 class + 4 parameters)
"""
print(fv)
fig, ax = plt.subplots()
for feature_vector in fv:
# print(feature_vector[1])
[x, y, w, h] = feature_vector[-4:]
plt.imshow(image * 255)
(IH, IW) = image.shape[0:2]
w = IW * w
h = IH * h
x = (IW * x) - w/2
y = (IH * y) - h/2
# Prepare the rect
rect = patches.Rectangle((x, y), w, h, linewidth=1, edgecolor='r', facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
plt.show()
| 14,371
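A minimal usage sketch (illustrative only): each feature-vector row is assumed to be [class, x_center, y_center, width, height] with box coordinates normalised to the image size.

import numpy as np

img = np.random.rand(120, 160, 3)                 # 3-channel image
fv = np.array([[1, 0.5, 0.5, 0.25, 0.40],         # box centred in the image
               [2, 0.2, 0.3, 0.10, 0.20]])
plot_with_bbox(img, fv)                           # opens a matplotlib window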
|
def npareamajority(values, areaclass):
"""
numpy area majority procedure
:param values:
:param areaclass:
:return:
"""
uni,ind = np.unique(areaclass,return_inverse=True)
return np.array([np.argmax(np.bincount(values[areaclass == group])) for group in uni])[ind]
| 14,372
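A quick illustration: every element is replaced by the majority value found within its area class.

import numpy as np

values = np.array([1, 1, 2, 2, 2, 3])
areaclass = np.array([0, 0, 0, 1, 1, 1])
print(npareamajority(values, areaclass))  # [1 1 1 2 2 2]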
|
def calcReward(eventPos, carPos, closeReward, cancelPenalty, openedPenalty):
"""
this function calculates the reward that will be achieved assuming event is picked up
:param eventPos: position of events
:param carPos: position of cars
:param closeReward: reward if event is closed
:param cancelPenalty: penalty if event is canceled (for now assuming events are not canceled)
    :param openedPenalty: penalty for the time events spend waiting (for now assuming events don't wait since they are picked up at a specific time)
:return: rewardCarsToEvents - R_{cars,events},
rewardEventsToEvents - R_{events,events}
"""
nCars = carPos.shape[0]
nEvents = eventPos.shape[0]
distEventsToEvents = cdist(eventPos, eventPos, metric='cityblock')
distCarsToEvents = cdist(carPos, eventPos, metric='cityblock')
rewardCarsToEvents = -distCarsToEvents + np.ones(shape=(nCars, nEvents))*closeReward
rewardEventsToEvents = -distEventsToEvents + np.ones(shape=(nEvents, nEvents))*closeReward
timeEventsToEvents = distEventsToEvents
timeCarsToEvents = distCarsToEvents
return rewardCarsToEvents, rewardEventsToEvents, timeCarsToEvents, timeEventsToEvents
| 14,373
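A small usage sketch with toy grid coordinates (cityblock distances), showing the reward and travel-time matrices returned:

import numpy as np

carPos = np.array([[0, 0], [2, 2]])
eventPos = np.array([[1, 1], [3, 0]])
r_ce, r_ee, t_ce, t_ee = calcReward(eventPos, carPos,
                                    closeReward=10, cancelPenalty=0, openedPenalty=0)
print(r_ce)  # closeReward minus the Manhattan distance from each car to each event
print(t_ce)  # the corresponding travel times (Manhattan distances)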
|
def b64encode(s: Any, altchars: Any = None) -> bytes:
"""Encode bytes using the standard Base64 alphabet.
Argument ``s`` is a :term:`bytes-like object` to encode.
Optional ``altchars`` must be a byte string of length 2 which specifies
an alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
The result is returned as a :class:`bytes` object.
"""
if altchars is not None:
altchars = _get_bytes(altchars)
assert len(altchars) == 2, repr(altchars)
return builtin_encode(s, altchars)
| 14,374
|
def select_programs(args, filter_paused=True, force=False):
"""
Return a list of selected programs from command line arguments
"""
if not (args.all ^ bool(args.names)):
if args.all:
log.error("You may not specify a program name when you use the -a/--all option (See -h/--help for more details)")
else:
log.error("You must select at least one program from the command line (See -h/--help for more details)")
raise SystemExit(1)
if args.all:
programs = list(Program.find_for_user(force=force))
if filter_paused:
programs = [prog for prog in programs if not prog.is_paused]
else:
programs = [Program(name, force=force) for name in args.names]
    errors = 0
    for program in programs:
        if not program.exists():
            errors += 1
            log.error("Program '%s' does not exist" % program.name)
    if errors:
raise SystemExit(1)
return list(programs)
| 14,375
|
def test_mat_lu_det():
"""Test that the mat_lu() function returns correctly a matrix determinant"""
my_mats = [
[[2, 4, 6, 8], [1, 9, 2, 0], [7, 1, 2, 3], [8, 11, 3, 2]],
[[3, 6, 3], [8, 2, 1], [5, 3, 2]],
[[4, 1], [3, 2]],
[[0, 3, 2, 1], [0, 0, 3, 6], [0, 0, 0, 18], [0, 0, 0, 12]],
[[9, 2, 1, 6, 2], [6, 3, 9, 0, 1], [3, 1, 9, 2, 2], [8, 2, 3, 1, 0], [9, 2, 2, 2, 0]],
[[0, 0], [0, 0]]
]
expected_dets = [round(np.linalg.det(np.array(my_mat)), 9) for my_mat in my_mats]
calculated_dets = [round(mat_lu(my_mat)[2], 9) for my_mat in my_mats]
assert calculated_dets == expected_dets
| 14,376
|
def three_comp_two_objective_functions(obj_vars, hz: int,
ttes: SimpleTTEMeasures,
recovery_measures: SimpleRecMeasures):
"""
Two objective functions for recovery and expenditure error
that get all required params as arguments
:param obj_vars: values that define the three comp agent [anf, ans, m_ae, m_anf, m_ans, theta, gamma, phi]
:param hz: estimations per second for agent
:param ttes: time to exhaustion tests to use
:param recovery_measures: recovery trials to compare to
:return: tte_nrmse and rec_nrmse values to minimise (the smaller the better)
"""
# differences in exhaustion times determine fitness
tte_se = [] # TTE standard errors
ttes_exp = [] # TTEs that are expected (original)
rec_se = [] # Recovery standard errors
recs_exp = [] # Recovery ratios expected (original)
three_comp_agent = ThreeCompHydAgent(hz=hz,
a_anf=obj_vars[0],
a_ans=obj_vars[1],
m_ae=obj_vars[2],
m_ans=obj_vars[3],
m_anf=obj_vars[4],
the=obj_vars[5],
gam=obj_vars[6],
phi=obj_vars[7])
# compare tte times
for tte_t, tte_p in ttes.iterate_pairs():
# use the simulator
try:
tte = ThreeCompHydSimulator.do_a_tte(agent=three_comp_agent,
p_exp=tte_p)
except UserWarning:
tte = 5000
# square time difference
tte_se.append(pow(tte - tte_t, 2))
ttes_exp.append(tte_t)
# get NRMSE (Normalised Root Mean Squared Error)
tte_nrmse = math.sqrt(sum(tte_se) / len(tte_se)) / np.mean(ttes_exp)
# compare all available recovery ratio measures
for p_exp, p_rec, t_rec, expected in recovery_measures.iterate_measures():
# use the simulator
try:
achieved = ThreeCompHydSimulator.get_recovery_ratio_wb1_wb2(three_comp_agent,
p_exp=p_exp,
p_rec=p_rec,
t_rec=t_rec)
except UserWarning:
achieved = 200
# add the squared difference
rec_se.append(pow(expected - achieved, 2))
recs_exp.append(expected)
# get NRMSE
rec_nrmse = math.sqrt(sum(rec_se) / len(rec_se)) / np.mean(recs_exp)
# determine return value
return tte_nrmse, rec_nrmse
| 14,377
|
def scale(data, new_min, new_max):
"""Scales a normalised data series
    :param data: The normalised data series to be scaled
:type data: List of numeric values
:param new_min: The minimum value of the scaled data series
:type new_min: numeric
:param new_max: The new maximum of the scaled data series
:type new_max: numeric
:return: A scaled data series
:rtype: list
"""
return [(x*(new_max-new_min))+new_min for x in data]
| 14,378
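For example, a series normalised to [0, 1] can be rescaled to any target range:

normalised = [0.0, 0.25, 0.5, 1.0]
print(scale(normalised, new_min=10, new_max=20))  # [10.0, 12.5, 15.0, 20.0]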
|
def default_config() -> ClientConfig:
"""
:return: Default configuration for the experiment
"""
simulation_config = SimulationConfig(render=False, sleep=0.8, video=True, log_frequency=1,
video_fps=5, video_dir=default_output_dir() + "/videos", num_episodes=1000,
gifs=True, gif_dir=default_output_dir() + "/gifs", video_frequency = 1)
env_name = "idsgame-v3"
client_config = ClientConfig(env_name=env_name, attacker_type=AgentType.RANDOM.value,
defender_type=AgentType.DEFEND_MINIMAL_VALUE.value, mode=RunnerMode.SIMULATE.value,
simulation_config=simulation_config, output_dir=default_output_dir(),
title="RandomAttacker vs DefendMinimalDefender")
return client_config
| 14,379
|
def get_same_padding(kernel_size: int, stride: int, dilation: int) -> int:
"""Calculates the padding size to obtain same padding.
Same padding means that the output will have the
shape input_shape / stride. That means, for
stride = 1 the output shape is the same as the input,
and stride = 2 gives an output that is half of the
input shape.
Args:
kernel_size : convolution kernel size. Only tested to be correct with odd values.
stride : convolution stride
dilation : convolution dilation
Raises:
ValueError: Only stride or dilation may be greater than 1
Returns:
padding value to obtain same padding.
"""
if stride > 1 and dilation > 1:
raise ValueError("Only stride OR dilation may be greater than 1")
if dilation > 1:
return (dilation * (kernel_size - 1) + 1) // 2
return kernel_size // 2
| 14,380
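For instance, a 3x3 kernel with stride 1 needs padding 1, while the same kernel dilated by 2 needs padding 2:

print(get_same_padding(kernel_size=3, stride=1, dilation=1))  # 1
print(get_same_padding(kernel_size=3, stride=1, dilation=2))  # 2
print(get_same_padding(kernel_size=5, stride=2, dilation=1))  # 2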
|
def move_child_position(context, request):
""" Move the child from one position to another.
:param context: "Container" node in which the child changes its position.
:type context: :class:kotti.resources.Node or descendant
:param request: Current request (of method POST). Must contain either
"from" and "to" params or a json_body that contain(s) the
0-based old (i.e. the current index of the child to be
moved) and new position (its new index) values.
:type request:
:result: JSON serializable object with a single attribute ("result") that is
either "success" or "error".
:rtype: dict
"""
data = request.POST or request.json_body
if ("from" in data) and ("to" in data):
max_pos = len(context.children) - 1
try:
old_position = int(data["from"])
new_position = int(data["to"])
if not ((0 <= old_position <= max_pos) and (0 <= new_position <= max_pos)):
raise ValueError
except ValueError:
return {"result": "error"}
# sqlalchemy.ext.orderinglist takes care of the "right" sequence
# numbers (immediately consecutive, starting with 0) for us.
context.children.insert(new_position, context.children.pop(old_position))
result = "success"
else:
result = "error"
return {"result": result}
| 14,381
|
def test_hawk_tail():
"""Test module hawk_tail.py by downloading
hawk_tail.csv and testing shape of
extracted data has 838 rows and 2 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = hawk_tail(test_path)
    try:
        assert x_train.shape == (838, 2)
    except Exception:
        shutil.rmtree(test_path)
        raise
| 14,382
|
def get_current_version(package: str) -> str:
"""
Query PyPi index to find latest version of package
    :param package: str - name of package
:return: str - version if available
"""
url = f'{PYPI_BASE_URL}/pypi/{package}/json'
headers = {
'Content-Type': 'application/json'
}
response = requests.get(url=url, headers=headers)
if response.status_code == 200:
data = response.json()
if 'info' in data and 'version' in data['info']:
# only return version if everything went OK, otherwise, too bad!
return data['info']['version']
return None
| 14,383
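A usage sketch, assuming the module-level PYPI_BASE_URL points at https://pypi.org and requests is imported:

version = get_current_version("requests")
if version is not None:
    print(f"Latest release on PyPI: {version}")
else:
    print("Could not determine the latest version")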
|
def get_model_path(model_name):
"""
Returns path to the bird species classification model of the given name.
Parameters
----------
model_name : str
Name of classifier model. Should be in format
``<model id>_<taxonomy version>-<taxonomy md5sum>``.
*v0.3.1 UPDATE: model names with taxonomy md5 checksum
2e7e1bbd434a35b3961e315cfe3832fc or
beb9234f0e13a34c7ac41db72e85addd are not available in this version
but are restored in v0.3.1 for backwards compatibility. They will no
longer be supported starting with v0.4. Please use model names with
taxonomy md5 checksums 3c6d869456b2705ea5805b6b7d08f870 and
2f6efd9017669ef5198e48d8ec7dce4c (respectively) instead.*
Returns
-------
model_path : str
Path to classifier model weights. Should be in format
``<BirdVoxClassify dir>/resources/models/<model id>_<taxonomy version>-<taxonomy md5sum>.h5``
"""
# Python 3.8 requires a different model for compatibility
if sys.version_info.major == 3 and sys.version_info.minor == 8:
model_name = model_name.replace(MODEL_PREFIX, MODEL_PREFIX + '-py3pt8')
if model_name.endswith("2e7e1bbd434a35b3961e315cfe3832fc"):
warnings.warn(f"The version of taxonomy with md5 "
f"checksum 2e7e1bbd434a35b3961e315cfe3832fc has been "
f"deprecated and will be removed in v0.4. Please use "
f"model names with "
f"3c6d869456b2705ea5805b6b7d08f870 instead.",
DeprecationWarning, stacklevel=2)
elif model_name.endswith("beb9234f0e13a34c7ac41db72e85addd"):
warnings.warn(f"The version of taxonomy with md5 "
f"checksum beb9234f0e13a34c7ac41db72e85addd has been "
f"deprecated and will be removed in v0.4. Please use "
f"model names with "
f"2f6efd9017669ef5198e48d8ec7dce4c instead.",
DeprecationWarning, stacklevel=2)
path = os.path.join(os.path.dirname(__file__),
"resources",
"models",
model_name + '.h5')
# Use abspath to get rid of the relative path
return os.path.abspath(path)
| 14,384
|
def mconcat(xs : [a]) -> a:
"""
mconcat :: (Monoid m) => [m] -> m
Fold a list using the monoid.
"""
return Monoid[xs[0]].mconcat(xs)
| 14,385
|
def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask):
"""If necessary, zero out inputs to a conv for padding positions.
Args:
inputs: a Tensor with shape [batch, length, ...]
kernel_size: an integer or pair of integers
nonpadding_mask: a Tensor with shape [batch, length]
Returns:
Tensor of the same shape as inputs.
"""
if (kernel_size != 1 and kernel_size != (1, 1) and
nonpadding_mask is not None):
while nonpadding_mask.get_shape().ndims < inputs.get_shape().ndims:
nonpadding_mask = tf.expand_dims(nonpadding_mask, -1)
return inputs * nonpadding_mask
return inputs
| 14,386
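A small illustration written for eager TensorFlow (the original comes from a TF1-style codebase, so treat this as a sketch): padded positions are zeroed only when the kernel is larger than 1.

import tensorflow as tf

inputs = tf.ones([2, 4, 3])                   # [batch, length, channels]
nonpadding = tf.constant([[1., 1., 0., 0.],
                          [1., 1., 1., 0.]])  # [batch, length]
out = maybe_zero_out_padding(inputs, kernel_size=3, nonpadding_mask=nonpadding)
print(out.shape)  # (2, 4, 3); positions where the mask is 0 become all-zero
# With kernel_size=1 (or (1, 1)) the inputs are returned unchanged.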
|
def display_json(value):
"""
Display input JSON as a code
"""
if value is None:
return display_for_value(value)
if isinstance(value, str):
value = json.loads(value)
return display_code(json.dumps(value, indent=2, ensure_ascii=False, cls=DjangoJSONEncoder))
| 14,387
|
def get_rgba_from_color(rgba):
"""Return typle of R, G, B, A components from given color.
Arguments:
rgba - color
"""
r = (rgba & 0xFF000000) >> 24
g = (rgba & 0x00FF0000) >> 16
b = (rgba & 0x0000FF00) >> 8
a = (rgba & 0x000000FF)
return r, g, b, a
| 14,388
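For example, a packed 0xRRGGBBAA value unpacks as:

print(get_rgba_from_color(0xFF00FF80))  # (255, 0, 255, 128)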
|
def beauty_factor(G):
"""Return the "beauty factor" of an arbitrary graph, the minimum distance
between a vertex and a non-incident edge."""
V, E = G[0], G[1]
dists = []
for (i, u) in enumerate(V):
for (j, k) in E:
if i == j or i == k:
continue
v, w = V[j], V[k]
a, b = u-v, w-v
proj = (a.real*b.real+a.imag*b.imag) / abs(b) # scalar projection
if 0 <= proj <= abs(b):
dists.append(abs(a - b * proj / abs(b)))
else:
dists.extend((abs(a), abs(u-w)))
return min(dists)
| 14,389
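A usage sketch: the graph is passed as (V, E) with vertices given as complex numbers and edges as index pairs. For a unit right triangle the minimum vertex-to-nonincident-edge distance is the height from the right-angle corner onto the hypotenuse.

V = [0 + 0j, 1 + 0j, 0 + 1j]
E = [(0, 1), (1, 2), (2, 0)]
print(beauty_factor((V, E)))  # ~0.7071, i.e. 1/sqrt(2)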
|
def simulationTwoDrugsVirusPopulations():
"""
Run simulations and plot graphs examining the relationship between
administration of multiple drugs and patient outcome.
Plots of total and drug-resistant viruses vs. time are made for a
simulation with a 300 time step delay between administering the 2 drugs and
a simulations for which drugs are administered simultaneously.
"""
runSim(100, 150, 0, 150)
| 14,390
|
def line_plane_cost(line, plane):
"""
A cost function for a line and a plane
"""
P = normalised((line|plane)*I5)
L = normalised(meet(P, plane))
return line_cost_function(L, line)
| 14,391
|
def run_covid_update(update_name: str) -> None:
"""Enacts a scheduled update and either repeats or deletes it.
Updates COVID data for the area specified in config.json, and
schedules a new update if the update repeats, otherwise the
update is removed.
Args:
update_name: The identifier of the update.
"""
nation, ltla, interval = read_config(
"covid_nation", "covid_local", "covid_update_interval_seconds")
covid_API_request(location=ltla, location_type="ltla")
covid_API_request(location=nation, location_type="nation")
logging.info("COVID update '%s' completed.", update_name)
if covid_updates[update_name]["repeats"]:
schedule_covid_updates(interval, update_name)
repeat_log = "COVID update '%s' scheduled to repeat."
logging.info(repeat_log, update_name)
else:
covid_updates.pop(update_name)
logging.info("COVID update '%s' removed.", update_name)
| 14,392
|
def register_document_to_documentsbundle(bundle_id, payload):
"""
Relaciona documento com seu fascículo(DocumentsBundle).
Utiliza a endpoint do Kernel /bundles/{{ DUNDLE_ID }}
"""
try:
response = hooks.kernel_connect(
"/bundles/%s/documents" % bundle_id, "PUT", payload)
return response
except requests.exceptions.HTTPError as exc:
raise LinkDocumentToDocumentsBundleException(str(exc)) from None
| 14,393
|
def test_alt():
"""Setup input for running Alternate Live Load.
Inputs the Alternate loading with the following inputs:
span_lengths = [5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,16.0,
18.0,20.0,24.0,28.0,32.0,36.0,40.0,45.0,50.0,55.0,
60.0,70.0,80.0,90.0,100.0,120.0,140.0,160.0,180.0,
200.0,250.0,300.0,350.0,400.0]
axle_spacing_alt = [5.00, 6.00, 5.00]
axle_wt_alt = [100.00, 100.00, 100.00, 100.00]
space_to_trailing_load_alt = 0.0
distributed_load_alt = 0.0
num_user_nodes = 21
Args:
None
Returns:
None
Notes:
Outputs results
"""
span_lengths = [5.0,6.0,7.0,8.0,9.0,10.0,11.0,12.0,13.0,14.0,16.0,
18.0,20.0,24.0,28.0,32.0,36.0,40.0,45.0,50.0,55.0,
60.0,70.0,80.0,90.0,100.0,120.0,140.0,160.0,180.0,
200.0,250.0,300.0,350.0,400.0]
axle_spacing_alt = [5.00, 6.00, 5.00]
axle_wt_alt = [100.00, 100.00, 100.00, 100.00]
space_to_trailing_load_alt = 0.0
distributed_load_alt = 0.0
num_user_nodes = 21
vehicle_type = "Alternate"
max_moment_alt = [62.50,75.00,87.50,100.00,117.36,140.63,
164.20,188.02,212.83,250.30,325.27,400.24,
475.00,668.75,866.07,1064.06,1262.50,1461.25,
1710.00,1959.00]
max_moment_qtr_pt_alt = [46.88,56.25,68.75,87.50,106.25,125.00,
143.75,162.50,181.25,200.00,250.00,
318.79,362.50,500.00,650.00,800.00,
950.00,1100.00,1287.48,1481.05]
max_shear_end_alt = [50.00,58.33,64.29,68.75,72.22,75.00,77.27,
83.33,88.46,92.86,100.00,111.11,120.00,
133.33,142.86,150.00,155.56,160.00,164.44]
max_shear_qtr_pt_alt = [None,37.50,39.29,43.75,47.23,50.00,
52.28,54.17,55.76,57.14,62.50,68.05,
72.50,83.33,92.86,100.00,105.56,
110.00,114.45,118.42,120.91,123.33]
max_shear_ctr_alt = [25.00,25.00,25.00,25.00,25.00,25.00,
27.28,29.17,30.76,32.14,34.38,36.11,
37.50,41.67,46.43,50.00,55.56,60.00,
64.45,68.00,70.91,73.33,77.14,80.00,
82.22,84.00]
max_pier_reac_alt = [50.00,58.33,71.43,81.25,88.89,95.00,
100.00,108.33,115.39,121.43,131.25,
138.89,145.00,154.17]
run_vehicle(vehicle_type,
span_lengths,
axle_spacing_alt,
axle_wt_alt,
num_user_nodes,
space_to_trailing_load_alt,
distributed_load_alt,
max_moment_alt,
max_moment_qtr_pt_alt,
max_shear_end_alt,
max_shear_qtr_pt_alt,
max_shear_ctr_alt,
max_pier_reac_alt)
| 14,394
|
def let_it_roll(path):
"""
takes a list of image files and returns a video
"""
pass
| 14,395
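The body above is only a stub. As an illustrative sketch (not the author's implementation), one way to fill it in with OpenCV, assuming the argument is a list of image file paths and an mp4 output is acceptable:

import cv2

def let_it_roll_sketch(image_paths, out_file="out.mp4", fps=24):
    """Hypothetical implementation: stitch image files into a video."""
    first = cv2.imread(image_paths[0])
    height, width = first.shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    writer = cv2.VideoWriter(out_file, fourcc, fps, (width, height))
    for p in image_paths:
        frame = cv2.imread(p)
        writer.write(cv2.resize(frame, (width, height)))
    writer.release()
    return out_file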
|
def test_save_unregistered_email(mock_email_address_qs):
"""
If the provided email address doesn't exist in the system, saving
should do nothing.
"""
address = "test@example.com"
mock_email_address_qs.get.side_effect = models.EmailAddress.DoesNotExist
data = {"email": address}
serializer = serializers.PasswordResetRequestSerializer(data=data)
assert serializer.is_valid()
result = serializer.save()
assert result is None
assert serializer.data == data
assert mock_email_address_qs.get.call_args[1] == {
"address__iexact": address,
"is_verified": True,
}
| 14,396
|
def force_full_index(dataframe: pd.DataFrame, resampling_step: int = None,
resampling_unit: str = "min", timestamp_start: int = None,
timestamp_end: int = None) -> pd.DataFrame:
""" forces a full index. Missing index will be replaced by Nan.
Note: resampling should be done before to benefit from sampling strategies.
Args:
dataframe(dataframe): data frame containing NaN values
resampling_step (int, 8): This is the desired time step of final dataframe.
resampling_unit (str, 't'): unit of desired time step
timestamp_start (string, none): index at which the dataframe starts
timestamp_end (string, none): index at which the dataframe ends
Returns
dataframe(pandas.Dataframe): dataframe with full index
"""
if timestamp_start is None:
print("start index was not provided")
timestamp_start = dataframe.first_valid_index()
if timestamp_end is None:
print("end index is not provided")
timestamp_end = dataframe.last_valid_index()
freq = str(resampling_step) + resampling_unit
new_index = pd.date_range(start=timestamp_start, end=timestamp_end, freq=freq)
new_index = new_index.astype(numpy.int64) // 10 ** 9
delta_time_tmp = dataframe.reindex(index=new_index, fill_value=numpy.nan)
return delta_time_tmp
| 14,397
|
def legislature_to_number(leg):
"""
Takes a full session and splits it down to the values for
FormatDocument.asp.
session = '49th-1st-regular'
legislature_to_number(session) --> '49Leg/1s'
"""
l = leg.lower().split('-')
return '%sLeg/%s%s' % (l[0][0:2], l[1][0], l[2][0])
| 14,398
|
def pin_tags(pb, config_file):
"""
Tag every item in pinboard with the name of it's
originating website
"""
all_bookmarks = pb.posts.all()
    # Determine whether to do additional filtering:
    # only applied if a filter config file exists
    domain_map = None
    if config_file and os.path.isfile(config_file):
        with open(config_file, "r") as f:
            domain_map = yaml.safe_load(f)['domains']
for bookmark in all_bookmarks:
url = bookmark.url
tags = bookmark.tags
parsed = urlparse(url).netloc
parsed = parsed[:parsed.rfind('.')]
parsed = replace_stuff(parsed)
parsed = process_domain_assoc(parsed, domain_map)
if parsed not in tags:
print("=============================================")
print("Bookmark: {}".format(bookmark.description))
print("Tags to add: {}".format(parsed))
print("Current tags: {}".format(tags))
tags = [parsed] + tags
bookmark.tags = tags
print("New tags: {}".format(bookmark.tags))
bookmark.save()
print("=============================================")
| 14,399
|