content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def get_new_listing(old_listing):
    """Fetch the product listing and return only items not already seen.

    Falls back to returning *old_listing* unchanged when the HTTP request
    fails.  Any newly seen items are persisted via ``save_listing``.

    :param old_listing: iterable of dicts, each with a 'productId' key
    :return: list of new items, or *old_listing* on request failure
    """
    try:
        # cfg['api_url'] is module-level configuration; the response JSON
        # is expected to hold the listing under 'product' -- TODO confirm.
        fetched_listing = requests.get(cfg['api_url']).json()['product']
    except requests.exceptions.RequestException:
        return old_listing
    else:
        # IDs we already know about, for O(1) membership tests below.
        old_item_ids = {old_item['productId'] for old_item in old_listing}
        new_listing = [fetched_item for fetched_item in fetched_listing if
                       fetched_item['productId'] not in old_item_ids]
        if new_listing:
            save_listing(new_listing)
        return new_listing
def tracks(lonp, latp, fname, grid, fig=None, ax=None, Title=None):
    """
    Plot tracks as lines with starting points in green and ending points in
    red.
    Args:
        lonp,latp: Drifter track positions [time x ndrifters]
        fname: Plot name to save
        grid: unused inside this function -- kept for call compatibility
            (TODO confirm callers rely on it)
        fig: existing matplotlib Figure (a new one is made if None)
        ax: existing Axes (current axes are used if None)
        Title: optional axes title
    """
    if fig is None:
        fig = plt.figure(figsize=(12, 10))
    if ax is None:
        ax = plt.gca()
    # Starting marker
    # ``pc`` is a module-level map projection/transform -- presumably
    # cartopy PlateCarree; confirm.
    ax.plot(lonp[:, 0], latp[:, 0], 'o', color='g', markersize=3,
            label='_nolegend_', alpha=0.4, transform=pc)
    # Plot tracks
    ax.plot(lonp.T, latp.T, '-', color='grey', linewidth=.2, transform=pc)
    # Find final positions of drifters
    lonpc, latpc = tools.find_final(lonp, latp)
    ax.plot(lonpc, latpc, 'o', color='r', label='_nolegend_', transform=pc)
    if Title is not None:
        ax.set_title(Title)
    # Legend, of sorts: colored text labels placed in axes coordinates.
    # ax = gca()
    xtext = 0.45
    ytext = 0.18
    ax.text(xtext, ytext, 'starting location', fontsize=16, color='green',
            alpha=.8, transform=ax.transAxes)
    ax.text(xtext, ytext-.03, 'track', fontsize=16, color='grey',
            transform=ax.transAxes)
    ax.text(xtext, ytext-.03*2, 'ending location', fontsize=16, color='red',
            transform=ax.transAxes)
    # Save figure into a local directory called figures. Make directory if it
    # doesn't exist.
    if not os.path.exists('figures'):
        os.makedirs('figures')
    fig.savefig('figures/' + fname + 'tracks.png', bbox_inches='tight')
def parse_numpy():
    """Yield a function to parse Numpy docstrings.
    Yields:
        A parser function.
    """
    # Delegates to the module-level ``parser`` generator configured with
    # the ``numpy`` docstring style object.
    yield from parser(numpy)
def setup_logging(root_folder, log_level=None, env_key="LOG_CFG"):
    """Set up logging configuration from a YAML file.

    Uses ``<root_folder>/logging.yaml`` unless the environment variable
    *env_key* points at an alternative config file.  Falls back to
    ``logging.basicConfig`` when no config file exists.

    :param root_folder: directory containing the default ``logging.yaml``
    :param log_level: optional level forced onto every configured logger
        (and used for the basicConfig fallback)
    :param env_key: environment variable that may hold a config path
    """
    path = os.path.join(root_folder, 'logging.yaml')
    value = os.getenv(env_key, None)
    if value:
        path = value
    if os.path.exists(path):
        with open(path, 'rt') as f:
            config = yaml.safe_load(f.read())
        if log_level:
            # Override the level of every logger declared in the config.
            for logger in config['loggers']:
                config['loggers'][logger]['level'] = log_level
        logging.config.dictConfig(config)
    else:
        # BUG FIX: the original referenced an undefined name
        # ``default_level`` here, raising NameError whenever the config
        # file was missing.  Use the requested level, defaulting to INFO.
        logging.basicConfig(level=log_level or logging.INFO)
def create_user(engine: create_engine, data: dict) -> Union[User, None]:
    """
    Function for creating row in database
    :param engine: sqlmodel's engine
    :param data: dictionary with data that represents user
    :return: Created user instance or nothing
    """
    # NOTE(review): the ``engine: create_engine`` annotation names a
    # factory function as a type; the value is actually an Engine.
    logging.info('Creating an user')
    user = User(**data)
    with Session(engine) as session:
        try:
            session.add(user)
            session.commit()
            # Refresh so DB-generated fields (e.g. the primary key) are
            # populated on the returned instance.
            session.refresh(user)
            logging.info('User was created')
        except exc.CompileError:
            # NOTE(review): only CompileError is caught; integrity errors
            # (duplicate keys etc.) would propagate -- confirm intended.
            logging.warning('User was not created')
            return None
    return user
def _densify_2D(a, fact=2):
"""Densify a 2D array using np.interp.
:fact - the factor to density the line segments by
:Notes
:-----
:original construction of c rather than the zero's approach
: c0 = c0.reshape(n, -1)
: c1 = c1.reshape(n, -1)
: c = np.concatenate((c0, c1), 1)
"""
# Y = a changed all the y's to a
a = np.squeeze(a)
n_fact = len(a) * fact
b = np.arange(0, n_fact, fact)
b_new = np.arange(n_fact - 1) # Where you want to interpolate
c0 = np.interp(b_new, b, a[:, 0])
c1 = np.interp(b_new, b, a[:, 1])
n = c0.shape[0]
c = np.zeros((n, 2))
c[:, 0] = c0
c[:, 1] = c1
return c | 35,005 |
def conf_paths(filename) -> list:
    """Get config paths

    Expands each entry of the module-level ``PATH_TEMPLATES`` with the
    user's home directory and the requested file name.

    :param filename: configuration file name to substitute into templates
    :return: list of candidate configuration file paths
    """
    home = os.path.expanduser('~')
    paths = [path.format(home=home, filename=filename) for path in
             PATH_TEMPLATES]
    return paths
def get_neighbor_edge(
        graph: srf.Alignment,
        edge: tuple[int, int],
        column: str = 'z',
        direction: str = 'up',
        window: Union[None, int] = None,
        statistic: str = 'min'
        ) -> Union[None, tuple[int, int]]:
    """Return the neighboring edge having the lowest minimum value
    Parameters:
        graph: directed network graph
        edge: edge for which to determine a neighbor
    Other Parameters:
        column: column to test in vertices
        direction: 'up' tests predecessor edges; 'down' tests successors
        window: number of neighbor vertices to test
        statistic: test statistic (only 'min' is implemented)
    Returns:
        edge meeting the criteria, or None if there are no neighbors
    """
    vertices = graph.vertices
    result = None
    val = None
    if direction == 'up':
        neighbors = [(i, edge[0]) for i in graph.predecessors(edge[0])]
    else:
        neighbors = [(edge[1], i) for i in graph.successors(edge[1])]
    for neighbor in neighbors:
        # Restrict to the last ``window`` vertices of the neighbor edge
        # when requested.
        if window:
            test_verts = vertices[vertices['edge'] == neighbor].tail(window)
        else:
            test_verts = vertices[vertices['edge'] == neighbor]
        if statistic == 'min':
            test_val = test_verts[column].min()
            # BUG FIX: the original guarded with ``if val:``, which treats
            # a previous best value of 0 (or 0.0) as "no value yet" and
            # lets a larger neighbor overwrite the true minimum.  Compare
            # against None explicitly instead.
            if val is None or test_val < val:
                result = neighbor
                val = test_val
    return result
def show_comparison(x_coordinates: np.ndarray, analytic_expression: callable, numeric_solution: [dict, np.ndarray],
                    numeric_label: str = "Numeric Solution", analytic_label: str = "Analytic Solution",
                    title: str = None, x_label: str = None, y_label: str = None, save_file_as: str = None):
    """
    Method that shows the comparison between the analytic and numeric solutions.
    :param x_coordinates: Array of input values for function.
    :param numeric_solution: Array of values for the numeric solution, or a
        dict mapping keys (e.g. time values) to such arrays.
    :param analytic_expression: Function that describes the analytic solution.
    :param numeric_label: Label for numeric solution on graph.
    :param analytic_label: Label for analytic solution on graph.
    :param title: Title of plot figure.
    :param x_label: Label for the x axis.
    :param y_label: Label for the y axis.
    :param save_file_as: Filename used to save generated figure. If not defined figure is not saved.
    :return: Displays the graphical comparison.
    """
    # ``check_method_call`` validates that required arguments were given.
    check_method_call(x_coordinates)
    check_method_call(analytic_expression)
    check_method_call(numeric_solution)
    analytic_solution = analytic_expression(x_coordinates)
    # Cycle colors/linestyles so several numeric series stay distinguishable.
    default_cycler = cycler('color', ['b', 'g', 'k']) * cycler('linestyle', ['--', '-.', ':'])
    plt.rc('axes', prop_cycle=default_cycler)
    plt.plot(x_coordinates, analytic_solution, "r-", label=analytic_label)
    if isinstance(numeric_solution, dict):
        # One curve per key; numeric keys are rendered as "<value>s".
        [plt.plot(x_coordinates, numeric_solution[key], label=("{:.4f}s".format(key) if isinstance(key, (float, int))
                                                              else key)) for key in sorted(numeric_solution)]
    else:
        plt.plot(x_coordinates, numeric_solution, "b--", label=numeric_label)
    axes = plt.gca()
    if x_label:
        axes.set_xlabel(x_label)
    if y_label:
        axes.set_ylabel(y_label)
    if title:
        axes.set_title(title)
    plt.grid()
    plt.legend()
    # Calculate errors
    # For dict input, errors are computed against the entry with the
    # largest key -- presumably the latest time step; confirm.
    numeric_solution = np.array(numeric_solution if not isinstance(numeric_solution, dict) else
                                numeric_solution[max(numeric_solution.keys())])
    # nan_to_num guards the relative error where analytic_solution == 0.
    error_array = np.nan_to_num(abs(numeric_solution - analytic_solution)/analytic_solution)
    print("Mean Error: {0}\nStandard Error: {1}".format(np.mean(error_array), np.std(error_array)))
    if save_file_as is not None and isinstance(save_file_as, str):
        plt.savefig("{0}".format(save_file_as))
    return plt.show()
async def fixt_db(fixt_redis_client):
    """Actual fixture for requests working with db.

    Starts the Gino engine before the test and stops it afterwards; the
    redis-client fixture argument exists for fixture ordering/dependency.
    """
    await start_gino()
    yield
    await stop_gino()
def has_master(mc: MasterCoordinator) -> bool:
    """ True if `mc` has a master. """
    # A master is considered known when the coordinator's state exists,
    # this node is not itself the master, and a master URL is set.
    sc = mc.sc
    return bool(sc and not sc.master and sc.master_url)
def sz_margin_details(date='', retry_count=3, pause=0.001):
    """
    Fetch the Shenzhen market margin-trading (financing / securities
    lending) detail list.
    Parameters
    --------
    date : string
        date of the detail data, format YYYY-MM-DD; defaults to ''
    retry_count : int, default 3
        number of retries on network problems
    pause : float, default 0.001
        seconds to sleep before each attempt, to avoid throttling issues
        caused by too-frequent requests
    Return
    ------
    DataFrame
        opDate: credit trading date
        stockCode: underlying security code
        securityAbbr: underlying security short name
        rzmre: financing buy amount (CNY)
        rzye: financing balance (CNY)
        rqmcl: securities-lending sell volume
        rqyl: securities-lending balance (shares)
        rqye: securities-lending balance (CNY)
        rzrqye: total margin balance (CNY)
    """
    for _ in range(retry_count):
        time.sleep(pause)
        try:
            request = Request(rv.MAR_SZ_MX_URL%(ct.P_TYPE['http'], ct.DOMAINS['szse'],
                              ct.PAGES['szsefc'], date))
            lines = urlopen(request, timeout = 10).read()
            # Responses shorter than ~200 bytes carry no table data.
            if len(lines) <= 200:
                return pd.DataFrame()
            df = pd.read_html(lines, skiprows=[0])[0]
            df.columns = rv.MAR_SZ_MX_COLS
            # Zero-pad stock codes to the canonical 6 digits.
            df['stockCode'] = df['stockCode'].map(lambda x:str(x).zfill(6))
            df['opDate'] = date
        except Exception as e:
            # NOTE(review): broad except -- any failure just prints and
            # retries until retry_count is exhausted.
            print(e)
        else:
            return df
    raise IOError(ct.NETWORK_URL_ERROR_MSG)
def delay_slot_insn(*args):
    """
    delay_slot_insn(ea, bexec, fexec) -> bool
    Helper function to get the delay slot instruction.
    @param ea (C++: ea_t *)
    @param bexec (C++: bool *)
    @param fexec (C++: bool *)
    """
    # Thin SWIG wrapper: argument handling is done by the native module.
    return _ida_idp.delay_slot_insn(*args)
def decode_argument(params, word_embeddings, argument_extractors):
    """Decode event arguments over the test documents.

    :type params: dict
    :type word_embeddings: nlplingo.embeddings.WordEmbedding
    :type argument_extractors: list[nlplingo.nn.extractor.Extractor] # argument extractors
    """
    if len(argument_extractors) == 0:
        raise RuntimeError('At least one argument extractor must be specified to decode over arguments.')
    test_docs = prepare_docs(params['data']['test']['filelist'], word_embeddings, params)
    # TODO CYS: this is current a hack. This needs to be better factorized and integrated more generically with the existing decode code
    if argument_extractors[0].engine == 'transformers':
        return decode_event_transformer_using_gold_trigger(params, argument_extractors[0], test_docs)
    # NOTE(review): for any other engine this falls through and returns
    # None -- confirm that is intended rather than an error.
def pairwise_radial_basis(K: numpy.ndarray, B: numpy.ndarray) -> numpy.ndarray:
    """Compute the TPS radial basis phi(r) for every row pair of K and B.

    r is the Euclidean distance between rows, and

        phi(r) = r^2 * log(r)   if r >= 1
                 r * log(r^r)   if r < 1

    The second form equals the first algebraically but stays finite (0)
    as r -> 0.

    Arguments
    ---------
    K : numpy.array
        n by d vector containing n d-dimensional points.
    B : numpy.array
        m by d vector containing m d-dimensional points.
    Return
    ------
    P : numpy.array
        n by m matrix with P(i, j) = phi( norm( K(i,:) - B(j,:) ) ).
    """
    dist = cdist(K, B)
    large = dist >= 1
    small = ~large
    phi = numpy.empty(dist.shape)
    r_large = dist[large]
    phi[large] = (r_large ** 2) * numpy.log(r_large)
    r_small = dist[small]
    phi[small] = r_small * numpy.log(numpy.power(r_small, r_small))
    return phi
def low_index_subgroups(G, N, Y=[]):
    """
    Implements the Low Index Subgroups algorithm, i.e find all subgroups of
    ``G`` upto a given index ``N``. This implements the method described in
    [Sim94]. This procedure involves a backtrack search over incomplete Coset
    Tables, rather than over forced coincidences.
    Parameters
    ==========
    G: An FpGroup < X|R >
    N: positive integer, representing the maximum index value for subgroups
    Y: (an optional argument) specifying a list of subgroup generators, such
    that each of the resulting subgroup contains the subgroup generated by Y.
    Examples
    ========
    >>> from sympy.combinatorics.free_groups import free_group
    >>> from sympy.combinatorics.fp_groups import FpGroup, low_index_subgroups
    >>> F, x, y = free_group("x, y")
    >>> f = FpGroup(F, [x**2, y**3, (x*y)**4])
    >>> L = low_index_subgroups(f, 4)
    >>> for coset_table in L:
    ...     print(coset_table.table)
    [[0, 0, 0, 0]]
    [[0, 0, 1, 2], [1, 1, 2, 0], [3, 3, 0, 1], [2, 2, 3, 3]]
    [[0, 0, 1, 2], [2, 2, 2, 0], [1, 1, 0, 1]]
    [[1, 1, 0, 0], [0, 0, 1, 1]]
    References
    ==========
    .. [1] Holt, D., Eick, B., O'Brien, E.
    "Handbook of Computational Group Theory"
    Section 5.4
    .. [2] Marston Conder and Peter Dobcsanyi
    "Applications and Adaptions of the Low Index Subgroups Procedure"
    """
    # Start from an empty coset table for G.
    C = CosetTable(G, [])
    R = G.relators
    # length chosen for the length of the short relators
    len_short_rel = 5
    # elements of R2 only checked at the last step for complete
    # coset tables
    R2 = {rel for rel in R if len(rel) > len_short_rel}
    # elements of R1 are used in inner parts of the process to prune
    # branches of the search tree,
    R1 = {rel.identity_cyclic_reduction() for rel in set(R) - R2}
    R1_c_list = C.conjugates(R1)
    # S accumulates the coset tables of all subgroups found.
    S = []
    descendant_subgroups(S, C, R1_c_list, C.A[0], R2, N, Y)
    return S
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the sensor platform."""
    entry_data = hass.data[DOMAIN][config_entry.entry_id]
    client = entry_data[DATA_CLIENT]
    coordinator = entry_data[DATA_COORDINATOR]
    # One account-level sensor per account description ...
    entities: list[SensorEntity] = [
        ChargePointSensorEntity(client, coordinator, description)
        for description in ACCOUNT_SENSORS
    ]
    # ... plus one charger sensor per (home charger, description) pair.
    entities.extend(
        ChargePointChargerSensorEntity(client, coordinator, description, charger_id)
        for charger_id in coordinator.data[ACCT_HOME_CRGS]
        for description in CHARGER_SENSORS
    )
    async_add_entities(entities)
def test_cli_direct_task_ansible_21_min(capsys):
    """
    Test cli direct task

    Runs the CLI entry point directly and checks that it prints usage
    text with no output on stderr.
    """
    ansible_galaxy_cli.main('aci-ansible-galaxy direct')
    out, err = capsys.readouterr()
    assert err == '' # nosec
    assert 'Usage: ansible-galaxy' in out
def ifft_complex(fft_sig_complex) -> np.ndarray:
    """
    Compute the one-dimensional inverse discrete Fourier Transform,
    scaled by the number of FFT points (i.e. without the 1/N
    normalization that np.fft.ifft applies).

    :param fft_sig_complex: input array, can be complex.
    :return: the inverse transform multiplied by its length
    """
    inverse = np.fft.ifft(fft_sig_complex)
    # Undo numpy's 1/N normalization.
    return inverse * len(inverse)
def merge_eopatches(*eopatches, features=..., time_dependent_op=None, timeless_op=None):
    """ Merge features of given EOPatches into a new EOPatch
    :param eopatches: Any number of EOPatches to be merged together
    :type eopatches: EOPatch
    :param features: A collection of features to be merged together. By default all features will be merged.
    :type features: object
    :param time_dependent_op: An operation to be used to join data for any time-dependent raster feature. Before
        joining time slices of all arrays will be sorted. Supported options are:
        - None (default): If time slices with matching timestamps have the same values, take one. Raise an error
          otherwise.
        - 'concatenate': Keep all time slices, even the ones with matching timestamps
        - 'min': Join time slices with matching timestamps by taking minimum values. Ignore NaN values.
        - 'max': Join time slices with matching timestamps by taking maximum values. Ignore NaN values.
        - 'mean': Join time slices with matching timestamps by taking mean values. Ignore NaN values.
        - 'median': Join time slices with matching timestamps by taking median values. Ignore NaN values.
    :type time_dependent_op: str or Callable or None
    :param timeless_op: An operation to be used to join data for any timeless raster feature. Supported options
        are:
        - None (default): If arrays are the same, take one. Raise an error otherwise.
        - 'concatenate': Join arrays over the last (i.e. bands) dimension
        - 'min': Join arrays by taking minimum values. Ignore NaN values.
        - 'max': Join arrays by taking maximum values. Ignore NaN values.
        - 'mean': Join arrays by taking mean values. Ignore NaN values.
        - 'median': Join arrays by taking median values. Ignore NaN values.
    :type timeless_op: str or Callable or None
    :return: A dictionary with EOPatch features and values
    :rtype: Dict[(FeatureType, str), object]
    """
    # Timestamps are deduplicated unless time slices are concatenated.
    reduce_timestamps = time_dependent_op != 'concatenate'
    time_dependent_op = _parse_operation(time_dependent_op, is_timeless=False)
    timeless_op = _parse_operation(timeless_op, is_timeless=True)
    # Union of the requested features across all input patches.
    all_features = {feature for eopatch in eopatches for feature in FeatureParser(features)(eopatch)}
    eopatch_content = {}
    timestamps, sort_mask, split_mask = _merge_timestamps(eopatches, reduce_timestamps)
    eopatch_content[FeatureType.TIMESTAMP] = timestamps
    # Dispatch each feature to the merge strategy for its feature type.
    for feature in all_features:
        feature_type, feature_name = feature
        if feature_type.is_raster():
            if feature_type.is_time_dependent():
                eopatch_content[feature] = _merge_time_dependent_raster_feature(
                    eopatches, feature, time_dependent_op, sort_mask, split_mask
                )
            else:
                eopatch_content[feature] = _merge_timeless_raster_feature(eopatches, feature,
                                                                          timeless_op)
        if feature_type.is_vector():
            eopatch_content[feature] = _merge_vector_feature(eopatches, feature)
        if feature_type is FeatureType.META_INFO:
            eopatch_content[feature] = _select_meta_info_feature(eopatches, feature_name)
        if feature_type is FeatureType.BBOX:
            eopatch_content[feature] = _get_common_bbox(eopatches)
    return eopatch_content
def plot_each_ring_mean_intensityc( times, mean_int_sets, xlabel= 'Frame',save=False, *argv,**kwargs):
    """
    Plot time dependent mean intensity of each ring

    Parameters:
        times: x-axis values (e.g. frame numbers), one per measurement
        mean_int_sets: 2-D array [time x num_rings] of mean intensities
        xlabel: label for the x axis
        save: if True, save the figure and the plotted data to kwargs['path']
    Keyword args:
        uid: identifier used in the title and output file names
        path: output directory (required when save=True)
    """
    num_rings = mean_int_sets.shape[1]
    fig, ax = plt.subplots(figsize=(8, 8))
    uid = 'uid'
    if 'uid' in kwargs.keys():
        uid = kwargs['uid']
    ax.set_title("%s--Mean intensity of each ROI"%uid)
    # ``markers`` and ``colors`` are module-level style sequences -- one
    # entry per ROI (assumes num_rings does not exceed their length).
    for i in range(num_rings):
        #print( markers[i], colors[i] )
        ax.plot( times, mean_int_sets[:,i], label="ROI "+str(i+1),marker = markers[i], color=colors[i], ls='-')
    ax.set_xlabel(xlabel)
    ax.set_ylabel("Mean Intensity")
    ax.legend(loc = 'best',fontsize='x-small', fancybox=True, framealpha=0.5)
    if save:
        path = kwargs['path']
        fp = path + "%s_t_ROIs"%uid + '.png'
        fig.savefig( fp, dpi=fig.dpi)
        # Also dump the plotted data next to the figure.
        save_arrays( np.hstack( [times.reshape(len(times),1), mean_int_sets]),
                    label= ['frame']+ ['ROI_%d'%i for i in range( num_rings ) ],
                    filename='%s_t_ROIs'%uid, path= path )
    #plt.show()
def parse_args():
    """Parse command-line arguments for the package-name changer.

    :return: argparse.Namespace with directory, current, target,
        protected_dirs and protected_files attributes
    """
    arg_parser = argparse.ArgumentParser(description="Java project package name changer.")
    arg_parser.add_argument("--directory", default=".", type=str, help="Working directory.")
    arg_parser.add_argument("--current", required=True, type=str,
                            help='Current package name. For example: "com.example".')
    arg_parser.add_argument("--target", required=True, type=str,
                            help='Target package name. For example: "org.another".')
    arg_parser.add_argument("--protected_dirs", default=[], type=str, nargs="+",
                            help="List of protected from any changes directories")
    arg_parser.add_argument("--protected_files", default=[], type=str, nargs="+",
                            help="List of protected from any changes files")
    return arg_parser.parse_args()
def dispos(dra0, decd0, dra, decd):
    """
    Source/credit: Skycat
    Solve the spherical triangle exactly (no approximations) for the
    separation and position angle between a center and a point.
    INPUT  : coords in decimal degrees
    OUTPUT : (phi, dist) -- position angle phi in degrees East of North,
             separation dist in arcmin
    AUTHOR : a.p.martinez
    Parameters:
        dra0: center RA  decd0: center DEC  dra: point RA  decd: point DEC
    """
    deg_per_rad = 180.0 / math.pi
    # Convert all coordinates to radians.
    ra0 = dra0 / deg_per_rad
    ra = dra / deg_per_rad
    dec0 = decd0 / deg_per_rad
    dec = decd / deg_per_rad
    sin_dec0, cos_dec0 = math.sin(dec0), math.cos(dec0)
    sin_dec, cos_dec = math.sin(dec), math.cos(dec)
    cos_dra = math.cos(ra - ra0)
    # Angular separation from the spherical law of cosines.
    sep = math.acos(sin_dec0 * sin_dec + cos_dec0 * cos_dec * cos_dra)
    phi = 0.0
    if sep > 0.0000004:
        sin_sep = math.sin(sep)
        cos_pa = (sin_dec * cos_dec0 - cos_dec * sin_dec0 * cos_dra) / sin_sep
        if math.fabs(cos_pa) > 1.0:
            # 2005-06-02: fix from awicenec@eso.org -- clamp rounding error.
            cos_pa = cos_pa / math.fabs(cos_pa)
        sin_pa = cos_dec * math.sin(ra - ra0) / sin_sep
        phi = math.acos(cos_pa) * deg_per_rad
        if sin_pa < 0.0:
            phi = 360.0 - phi
    # Radians -> degrees -> arcmin.
    dist = sep * deg_per_rad * 60.0
    # At the poles the position angle is fixed by convention.
    if decd0 == 90.0:
        phi = 180.0
    if decd0 == -90.0:
        phi = 0.0
    return (phi, dist)
def bind_port(sock, host=HOST):
    """Bind the socket to a free port and return the port number.  Relies on
    ephemeral ports in order to ensure we are using an unbound port.  This is
    important as many tests may be running simultaneously, especially in a
    buildbot environment.  This method raises an exception if the sock.family
    is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
    or SO_REUSEPORT set on it. Tests should *never* set these socket options
    for TCP/IP sockets. The only case for setting these options is testing
    multicasting via multiple UDP sockets.
    Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
    on Windows), it will be set on the socket. This will prevent anyone else
    from bind()'ing to our host/port for the duration of the test.
    """
    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
        if hasattr(socket, 'SO_REUSEADDR'):
            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
                raise TestFailed("tests should never set the SO_REUSEADDR " \
                                 "socket option on TCP/IP sockets!")
        if hasattr(socket, 'SO_REUSEPORT'):
            try:
                if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
                    raise TestFailed("tests should never set the SO_REUSEPORT " \
                                     "socket option on TCP/IP sockets!")
            except OSError:
                # Python's socket module was compiled using modern headers
                # thus defining SO_REUSEPORT but this process is running
                # under an older kernel that does not support SO_REUSEPORT.
                pass
        if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
    # Binding to port 0 asks the OS to assign an ephemeral free port.
    sock.bind((host, 0))
    port = sock.getsockname()[1]
    return port
def node_to_html(node: Union[str, NodeElement, list]) -> str:
    """
    Render a node tree (text, element, or list of nodes) as HTML.
    :param node: text node, NodeElement, or list of child nodes
    :return: HTML string with text content escaped
    """
    if isinstance(node, str):  # Text node
        return escape(node)
    if isinstance(node, list):  # Sequence of sibling nodes
        return ''.join(node_to_html(child) for child in node)
    if not isinstance(node, NodeElement):
        raise TypeError(f"Node must be instance of str or NodeElement, not {type(node)}")
    # NodeElement: build the opening tag with its attributes.
    attrs = ''
    if node.attrs:
        attrs = ' ' + ' '.join(f"{k}=\"{v}\"" for k, v in node.attrs.items())
    opening = "<" + node.tag + attrs
    if node.tag in VOID_ELEMENTS:
        # Void elements are self-closing and have no body.
        return opening + '/>'
    body = ''.join(node_to_html(child) for child in node.children)
    return opening + '>' + body + '</' + node.tag + '>'
def cli(
    topology: PathLike,
    trajectory: List[str],
    reference: PathLike,
    outfile: PathLike,
    logfile: PathLike,
    step: int,
    mask: str,
    tol: float,
    verbose: bool,
) -> None:
    """Align a trajectory to average structure using Kabsch fitting.

    Loads the selected positions, computes the average structure, aligns
    every frame to it, and writes both the reference structure and the
    aligned trajectory.

    :param topology: topology file for the system
    :param trajectory: list of trajectory file names
    :param reference: output path for the average (reference) structure
    :param outfile: output path for the aligned trajectory
    :param logfile: log file path
    :param step: stride between frames (values < 1 are treated as 1)
    :param mask: key into _MASK selecting the atoms to fit
    :param tol: convergence tolerance for the iterative alignment
    :param verbose: emit progress and timing information
    """
    start_time: float = time.perf_counter()
    # Setup logging
    logging.config.dictConfig(create_logging_dict(logfile))
    logger: logging.Logger = logging.getLogger(__name__)
    step = step if step > 0 else 1
    # Gromacs-family formats (.gro/.xtc/.trj/.tng) store nanometers; for
    # everything else we convert to angstroms around the alignment step.
    # Hoisted here because the original repeated this 4-way check twice.
    joined_names = "".join(trajectory)
    is_nanometer_format = any(
        ext in joined_names for ext in (".gro", ".xtc", ".trj", ".tng")
    )
    logger.info("Loading %s and %s", topology, trajectory)
    positions: NDArray[(Any, ...), Float] = get_positions(
        topology, trajectory, mask=_MASK[mask], stride=step
    )
    # Calculate average structure
    ref_traj: md.Trajectory = get_average_structure(
        topology, trajectory, mask=_MASK[mask], stride=step
    )
    logger.info("Saving average structure to %s", reference)
    ref_traj.save(reference)
    # Remember the unit cell so it can be replicated onto every aligned frame.
    unitcell_angles: NDArray[(Any, ...), Float] = ref_traj.unitcell_angles.copy()
    unitcell_lengths: NDArray[(Any, ...), Float] = ref_traj.unitcell_lengths.copy()
    unitcell_vectors: NDArray[(Any, ...), Float] = ref_traj.unitcell_vectors.copy()
    if not is_nanometer_format:
        in_units_of(ref_traj.xyz, "nanometer", "angstroms", inplace=True)
    logger.info("Aligning trajectory to average structures")
    ref_traj.xyz = align_trajectory(
        positions, ref_traj.xyz[0], tol=tol, verbose=verbose
    )
    n_frames = ref_traj.n_frames
    ref_traj.time = np.arange(n_frames)
    ref_traj.unitcell_angles = np.repeat(unitcell_angles, n_frames, axis=0)
    ref_traj.unitcell_lengths = np.repeat(unitcell_lengths, n_frames, axis=0)
    ref_traj.unitcell_vectors = np.repeat(unitcell_vectors, n_frames, axis=0)
    if not is_nanometer_format:
        in_units_of(ref_traj.xyz, "angstroms", "nanometer", inplace=True)
    # BUG FIX: the original log message contained a stray '}' after '%s'.
    logger.info("Saving aligned trajectory to %s", outfile)
    ref_traj.save(outfile)
    stop_time: float = time.perf_counter()
    dt: float = stop_time - start_time
    struct_time: time.struct_time = time.gmtime(dt)
    if verbose:
        output: str = time.strftime("%H:%M:%S", struct_time)
        # Lazy %-formatting for consistency with the other log calls.
        logger.info("Total execution time: %s", output)
def cuda_reshape(a, shape):
    """ Reshape a GPUArray.
    Parameters:
        a (gpu): GPUArray.
        shape (tuple): Dimension of new reshaped GPUArray.
    Returns:
        gpu: Reshaped GPUArray.
    Examples:
        >>> a = cuda_reshape(cuda_give([[1, 2], [3, 4]]), (4, 1))
        array([[ 1.],
               [ 2.],
               [ 3.],
               [ 4.]])
        >>> type(a)
        <class 'pycuda.gpuarray.GPUArray'>
    """
    # Thin wrapper: GPUArray exposes a numpy-compatible reshape method.
    reshaped = a.reshape(shape)
    return reshaped
async def test_multiple_store_function_race_condition(
    db: sqlalchemy.orm.Session, async_client: httpx.AsyncClient
):
    """
    This is testing the case that the retry_on_conflict decorator is coming to solve, see its docstring for more details
    """
    await tests.api.api.utils.create_project_async(async_client, PROJECT)
    # Make the get function method to return None on the first two calls, and then use the original function
    # -- this forces both concurrent requests down the "create" path so
    # one of them hits the unique-constraint conflict and must retry.
    get_function_mock = tests.conftest.MockSpecificCalls(
        mlrun.api.utils.singletons.db.get_db()._get_class_instance_by_uid, [1, 2], None
    ).mock_function
    mlrun.api.utils.singletons.db.get_db()._get_class_instance_by_uid = (
        unittest.mock.Mock(side_effect=get_function_mock)
    )
    function = {
        "kind": "job",
        "metadata": {
            "name": "function-name",
            "project": "project-name",
            "tag": "latest",
        },
    }
    # Fire two identical store-function requests concurrently.
    request1_task = asyncio.create_task(
        async_client.post(
            f"func/{function['metadata']['project']}/{function['metadata']['name']}",
            json=function,
        )
    )
    request2_task = asyncio.create_task(
        async_client.post(
            f"func/{function['metadata']['project']}/{function['metadata']['name']}",
            json=function,
        )
    )
    response1, response2 = await asyncio.gather(
        request1_task,
        request2_task,
    )
    assert response1.status_code == HTTPStatus.OK.value
    assert response2.status_code == HTTPStatus.OK.value
    # 2 times for two store function requests + 1 time on retry for one of them
    assert (
        mlrun.api.utils.singletons.db.get_db()._get_class_instance_by_uid.call_count
        == 3
    )
def test_ep_basic_equivalence(stateful, state_tuple, limits):
    """
    Test that EpisodeRoller is equivalent to a
    BasicRoller when run on a single environment.
    """
    def env_fn():
        return SimpleEnv(3, (4, 5), 'uint8')
    env = env_fn()
    model = SimpleModel(env.action_space.low.shape,
                        stateful=stateful,
                        state_tuple=state_tuple)
    # Reference rollouts from the plain single-env roller ...
    basic_roller = BasicRoller(env, model, **limits)
    expected = basic_roller.rollouts()
    # ... must match rollouts from a batched roller over the same env.
    batched_env = batched_gym_env([env_fn], sync=True)
    ep_roller = EpisodeRoller(batched_env, model, **limits)
    actual = ep_roller.rollouts()
    _compare_rollout_batch(actual, expected)
def test_styles(patch_click, message, kwargs, supported):
    """Test that the ClickLogger styles messages correctly."""
    # uses click.style to make the expected formatted string
    if supported:
        expected_output = click.style(message, **kwargs)
    else:
        # Unsupported style kwargs should leave the message untouched.
        expected_output = message
    # logs the message with the given kwargs
    logger.info(message, **kwargs)
    # ensure the formatted string is the same as what click would have done
    click.echo.assert_called_with(expected_output)
def _find_crate_root_src(srcs, file_names=["lib.rs"]):
    """Finds the source file for the crate root.

    Args:
        srcs: list of source File objects.
        file_names: candidate crate-root basenames.

    Returns:
        The sole source file, or the first one whose basename matches.
    """
    # A single source is unambiguous regardless of its name.
    if len(srcs) == 1:
        return srcs[0]
    for src in srcs:
        if src.basename in file_names:
            return src
    fail("No %s source file found." % " or ".join(file_names), "srcs")
def decode_check(string):
    """Returns the base58 decoded value, verifying the checksum.

    :param string: The data to decode, as a string.
    :return: the decoded payload without its 4-byte checksum, or None
        when the payload is empty or the checksum does not match.
    """
    number = b58decode(string)
    # Converting to bytes in order to verify the checksum
    payload = number.to_bytes(sizeof(number), 'big')
    if not payload:
        return None
    data, checksum = payload[:-4], payload[-4:]
    # The checksum is the first 4 bytes of the double-SHA256 of the data.
    if sha256d(data)[:4] == checksum:
        return data
    return None
def rotate(origin, point, angle):
    """
    Rotate *point* counterclockwise by *angle* radians about *origin*.

    :param origin: (x, y) pivot of the rotation
    :param point: (x, y) point to rotate
    :param angle: rotation angle in radians
    :return: (qx, qy) rotated point
    """
    ox, oy = origin
    # Work in coordinates relative to the pivot.
    dx = point[0] - ox
    dy = point[1] - oy
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    # Standard 2-D rotation matrix, translated back to the origin frame.
    return ox + cos_a * dx - sin_a * dy, oy + sin_a * dx + cos_a * dy
def get_users_info(token, ids):
    """Return a response from vk api users.get
    :param token: access token
    :param ids: users ids
    :return: dict with users info
    """
    args = {
        'user_ids': ids,
        'fields': 'city,bdate,connections,photo_200',
        'access_token': token,
        'v': settings.api_v
    }
    # send_vk_request returns a sequence; the first element carries the
    # users.get payload.
    return send_vk_request('users.get', **args)[0]
def train_aggressive(batch, kl_coef=1, teacher_forcing=1, max_iter=20):
    """
    Train the encoder part aggressively
    :param batch: *first* batch of data, each entry correspond to a word.
    :param kl_coef: weight of KL divergence term in loss.
    :param teacher_forcing: rate of teacher forcing, default=1.
    :param max_iter: maximum number of encoder updates before giving up.
    :return: None
    """
    # Relies on module-level state: ``vae``, ``enc_opt``, ``opt`` and
    # ``train_batch_list``.
    assert enc_opt is not None, "No encoder optimizer available."
    vae.train()
    burn_pre_loss = math.inf
    burn_cur_loss = 0 # running loss to validate convergence.
    for num_iter in range(1, max_iter+1):
        # update the encoder
        loss, rec_loss, kl_loss = calc_batch_loss(batch, kl_coef, teacher_forcing)
        vae.zero_grad()
        loss.backward()
        clip_grad_norm_(vae.parameters(), opt.clip)
        enc_opt.step()
        # find next batch randomly
        batch = random.choice(train_batch_list)
        # return in converge
        burn_cur_loss += loss.item()
        # Every 5 iterations compare the 5-iteration loss sum against the
        # previous window; stop once it no longer decreases.
        if num_iter % 5 == 0:
            if burn_cur_loss >= burn_pre_loss: # the smaller loss, the better.
                return
            burn_pre_loss = burn_cur_loss
            burn_cur_loss = 0
def _receive_release_rq(event):
    """Standard logging handler for receiving an A-RELEASE-RQ PDU.

    Intentionally a no-op: this event produces no logging output.
    """
    pass
def create_consistencygroup(ctxt,
                            host='test_host@fakedrv#fakepool',
                            name='test_cg',
                            description='this is a test cg',
                            status='available',
                            availability_zone='fake_az',
                            volume_type_id=None,
                            cgsnapshot_id=None,
                            source_cgid=None,
                            **kwargs):
    """Create a consistencygroup object in the DB.

    Test helper: any extra keyword arguments are set as attributes on the
    object before it is persisted.

    :param ctxt: request context supplying user/project ids (with fakes
        substituted when missing)
    :return: the created ConsistencyGroup object
    """
    cg = objects.ConsistencyGroup(ctxt)
    cg.host = host
    cg.user_id = ctxt.user_id or 'fake_user_id'
    cg.project_id = ctxt.project_id or 'fake_project_id'
    cg.status = status
    cg.name = name
    cg.description = description
    cg.availability_zone = availability_zone
    # NOTE(review): volume_type_id is only assigned when truthy, unlike
    # the other optional ids -- confirm intended.
    if volume_type_id:
        cg.volume_type_id = volume_type_id
    cg.cgsnapshot_id = cgsnapshot_id
    cg.source_cgid = source_cgid
    for key in kwargs:
        setattr(cg, key, kwargs[key])
    cg.create()
    return cg
def hex(generator):
    """
    Decorator for transactions' and queries generators.
    Allows preserving the type of binaries for Binary Testing Framework.

    Prints the hex-encoded serialized protobuf prefixed with 'T' when the
    generator's name ends in 'tx', otherwise 'Q'.

    NOTE(review): shadows the builtin ``hex`` -- confirm intentional.
    """
    prefix = 'T' if generator.__name__.lower().endswith('tx') else 'Q'
    print('{}{}'.format(prefix, binascii.hexlify(generator().SerializeToString()).decode('utf-8')))
def test_verify_file(monkeypatch, capsys):
    """
    Verify behaviour of library.verify_file for device paths, local paths
    and missing database records.
    """
    device = Device()
    device.set("device", "/dev", "Device Serial", "ABCDEF", 1)
    file_obj = File()
    file_obj.set_properties("test", "path", "abc")
    file_obj.set_security("644", "owner", "group")
    file_obj.device_name = device.device_name
    file_obj.device = device
    monkeypatch.setattr(db, "get_files", lambda path: [file_obj])
    # Checksum stub: matches ("abc") only for the on-device path, so the
    # local path comparison fails with "def".
    monkeypatch.setattr(
        utility,
        "checksum_file",
        lambda file_path: "abc"
        if file_path == path.join(device.device_path, file_obj.file_name)
        else "def",
    )
    assert library.verify_file(
        file_obj.file_path, True
    ), "Device path verification works"
    assert not library.verify_file(
        file_obj.file_path, False
    ), "Local file path verification fails"
    out = capsys.readouterr()
    assert "Checksum mismatch" in out.out, "Device verifcation failed message printed"
    # No DB record at all -> verification must fail with a message.
    monkeypatch.setattr(db, "get_files", lambda path: [])
    assert not library.verify_file("/test", False), "Nonexistent file should fail"
    out = capsys.readouterr()
    assert "File record not in database" in out.out, "Missing file message prints"
def ndigit(num):
    """Return the number of digits in non-negative ``num`` (0 -> 1 digit).

    Works elementwise on arrays as well as on scalars.
    """
    # BUG FIX: the original applied ``np.maximum(1, ...)`` to the *log*
    # rather than to ``num``, so every num < 10 (including 0..9) reported
    # 2 digits.  Clamp the input to 1 before taking log10 instead.
    with nowarn(): return np.int32(np.floor(np.log10(np.maximum(num, 1))))+1
def data_store_folder_unzip_public(request, pk, pathname):
    """
    Public version of data_store_folder_unzip, incorporating path variables
    :param request: HTTP request
    :param pk: resource id
    :param pathname: relative path of the zip file to extract
    :return HttpResponse:
    """
    # Thin adapter: maps URL path variables onto the internal keyword API.
    return data_store_folder_unzip(request, res_id=pk, zip_with_rel_path=pathname)
def privateDataOffsetLengthTest10():
    """
    Offset doesn't begin immediately after last table.
    >>> doctestFunction1(testPrivateDataOffsetAndLength, privateDataOffsetLengthTest10())
    (None, 'ERROR')
    """
    header = defaultTestData(header=True)
    # Deliberately misplace the private-data offset past the table end so
    # validation reports an error (see doctest above).
    header["privOffset"] = header["length"] + 4
    header["privLength"] = 1
    header["length"] += 2
    return packTestHeader(header)
def meanncov(x, y=[], p=0, norm=True):
    """
    Wrapper to multichannel case of new covariance *ncov*.

    Averages the per-trial (cross-)covariance over all trials.
    Args:
      *x* : numpy.array
          multidimensional data (channels, data points, trials).
      *y* = [] : numpy.array
          multidimensional data. If not given the autocovariance of *x*
          will be calculated.  (Mutable default kept for backward
          compatibility; it is never mutated.)
      *p* = 0: int
          window shift of input data. It can be negative as well.
      *norm* = True: bool
          normalization - if True the result is divided by length of *x*,
          otherwise it is not.
    Returns:
      *mcov* : np.array
          covariance matrix averaged over trials
    """
    chn, N, trls = x.shape
    mcov = None
    for tr in range(trls):
        # Auto-covariance when y is empty, cross-covariance otherwise.
        if not len(y):
            trial_cov = ncov(x[:, :, tr], p=p, norm=norm)
        else:
            trial_cov = ncov(x[:, :, tr], y[:, :, tr], p=p, norm=norm)
        # Accumulate without special-casing the first iteration twice
        # (the original duplicated the branch logic for tr == 0).
        mcov = trial_cov if mcov is None else mcov + trial_cov
    return mcov / trls
def nasnet_6a4032(**kwargs):
    """
    NASNet-A 6@4032 (NASNet-A-Large) model from 'Learning Transferable Architectures for Scalable Image Recognition,'
    https://arxiv.org/abs/1707.07012.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    The constructed NASNet-A-Large model (as returned by get_nasnet).
    """
    # Fixed architecture hyper-parameters for the 6@4032 (Large) variant;
    # everything else (pretrained, root, ...) is forwarded via **kwargs.
    return get_nasnet(
        repeat=6,
        penultimate_filters=4032,
        init_block_channels=96,
        final_pool_size=11,
        extra_padding=False,
        skip_reduction_layer_input=True,
        in_size=(331, 331),
        model_name="nasnet_6a4032",
        **kwargs)
def start_mining():
    """
    Start the mining process.

    Prompts for a search query (falling back to a default finance query when
    the user just presses enter) and runs the extraction.
    """
    # `raw_input` is Python 2 only; `input` is the Python 3 equivalent.
    query = input("input search query, press enter for standard. \n")
    if query == '':
        # all tweets containing one of the words and not 'RT'
        query = "Finance OR Investment OR Economy OR Growth AND -RT"
    language = "no"
    cursor_extraction(query, language, 1000, ".")
def get_character_card(character_id, preston, access_token):
    """Get all the info for the character card.
    Args:
        character_id (int): ID of the character.
        preston (preston): Preston object to make scope-required ESI calls.
        access_token (str): Access token for the scope-required ESI calls.
    Returns:
        json: Character card information, or None when any ESI request fails
        (a flash message is queued in that case).
    """
    # Get character.
    characterPayload = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/characters/{}/?datasource=tranquility".format(str(character_id)))
    if characterPayload.status_code != 200:
        flash('There was an error ({}) when trying to retrieve character with ID {}'.format(str(characterPayload.status_code), str(character_id)), 'danger')
        return None
    characterJSON = characterPayload.json()
    characterJSON['portrait'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/characters/{}/portrait/?datasource=tranquility".format(str(character_id))).json()
    # Get corporation.
    corporationPayload = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/?datasource=tranquility".format(str(characterJSON['corporation_id'])))
    if corporationPayload.status_code != 200:
        flash('There was an error ({}) when trying to retrieve corporation with ID {}'.format(str(corporationPayload.status_code), str(characterJSON['corporation_id'])), 'danger')
        return None
    characterJSON['corporation'] = corporationPayload.json()
    characterJSON['corporation']['logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/corporations/{}/icons/?datasource=tranquility".format(
        str(characterJSON['corporation_id']))).json()
    # Get alliance (optional: only present when the corporation is in one).
    if 'alliance_id' in characterJSON:
        alliancePayload = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/?datasource=tranquility".format(str(characterJSON['alliance_id'])))
        if alliancePayload.status_code != 200:
            flash('There was an error ({}) when trying to retrieve alliance with ID {}'.format(str(alliancePayload.status_code), str(characterJSON['alliance_id'])), 'danger')
            return None
        characterJSON['alliance'] = alliancePayload.json()
        characterJSON['alliance']['logo'] = SharedInfo['util'].make_esi_request("https://esi.tech.ccp.is/latest/alliances/{}/icons/?datasource=tranquility".format(
            str(characterJSON['alliance_id']))).json()
    # Get wallet (requires the read_character_wallet scope; a successful
    # response body is a bare float, anything else is treated as an error).
    walletIsk = SharedInfo['util'].make_esi_request_with_scope(preston, ['esi-wallet.read_character_wallet.v1'],
                                                              "https://esi.tech.ccp.is/latest/characters/{}/wallet/?datasource=tranquility&token={}".format(
                                                                  str(character_id), access_token))
    walletIskJSON = None
    if walletIsk is not None:
        walletIskJSON = walletIsk.json()
    if walletIskJSON is not None and type(walletIskJSON) is not float:
        flash('There was an error ({}) when trying to retrieve wallet for character.'.format(str(walletIsk.status_code)), 'danger')
        return None
    else:
        characterJSON['wallet_isk'] = walletIskJSON
    # Get skillpoints (requires the read_skills scope).
    characterSkills = SharedInfo['util'].make_esi_request_with_scope(preston, ['esi-skills.read_skills.v1'],
                                                                     "https://esi.tech.ccp.is/latest/characters/{}/skills/?datasource=tranquility&token={}".format(
                                                                         str(character_id), access_token))
    characterSkillsJSON = None
    if characterSkills is not None:
        characterSkillsJSON = characterSkills.json()
    if characterSkillsJSON is not None and 'error' in characterSkillsJSON:
        flash('There was an error ({}) when trying to retrieve skills.'.format(str(characterSkills.status_code)), 'danger')
        return None
    else:
        characterJSON['skills'] = characterSkillsJSON
    return characterJSON
def expand_ALL_constant(model, fieldnames):
    """Replace the constant ``__all__`` in *fieldnames* with all concrete
    fields of *model*, preserving the surrounding order.

    Returns *fieldnames* unchanged when ``__all__`` is absent.
    """
    if "__all__" not in fieldnames:
        return fieldnames
    # The original had an if/else on one_to_one/many_to_many whose branches
    # were byte-identical; only `f.concrete` actually mattered.
    concrete_fields = [f.name for f in model._meta.get_fields() if f.concrete]
    i = fieldnames.index("__all__")
    return fieldnames[:i] + concrete_fields + fieldnames[i + 1 :]
def add_image_profile_row(form):
    """
    Generates html component of configuration dropdowns.

    Builds a <tr> holding a "Template:" label and a <select> populated from
    the available image profiles (values numbered from 1), and inserts it
    into the form at position 3.
    """
    label = LABEL(SPAN('Template:', ' ', SPAN('*', _class='fld_required'), ' '))
    select = SELECT(_name='image_profile', _id='request_queue_image_profile')
    # Option values are 1-based indices into the profile list.
    for value, image_profile in enumerate(getImageProfileList(), start=1):
        select.insert(len(select), OPTION(image_profile['type'], _value=value))
    # Assemble the table row: label cell, dropdown, empty trailing cell.
    form[0].insert(3, TR(label, select, TD()))
def generate_all_documents(course, timestamps):
    """A generator for all docs for a given course.
    Args:
        course: models.courses.Course. the course to be indexed.
        timestamps: dict from doc_ids to last indexed datetimes. An empty dict
            indicates that all documents should be generated.
    Yields:
        A sequence of search.Document. If a document is within the freshness
        threshold, no document will be generated. This function does not modify
        timestamps.
    """
    # Maps each external URL to its "distance" from the course: 0 for a
    # lesson's notes URL itself, 1 for links found inside those notes.
    link_dist = {}
    for resource_type, unused_result_type in RESOURCE_TYPES:
        for resource in resource_type.generate_all(course, timestamps):
            if isinstance(resource, LessonResource) and resource.notes:
                link_dist[resource.notes] = 0
                for link in resource.get_links():
                    link_dist[link] = 1
            yield resource.get_document()
    # External links are indexed last, from the distance map built above.
    for resource in ExternalLinkResource.generate_all_from_dist_dict(
            link_dist, timestamps):
        yield resource.get_document()
def server(user, password):
    """A shortcut to use MailServer.
    SMTP:
        server.send_mail([recipient,], mail)
    POP3:
        server.get_mail(which)
        server.get_mails(subject, sender, after, before)
        server.get_latest()
        server.get_info()
        server.stat()
    Parse mail:
        server.show(mail)
        server.get_attachment(mail)

    Args:
        user: account name used to log in.
        password: account password.
    Returns:
        A MailServer instance bound to the given credentials.
    """
    return MailServer(user, password)
def get_rackspace_token(username, apikey):
    """Get Rackspace Identity token.
    Login to Rackspace with cloud account and api key.

    Args:
        username: Rackspace cloud account name.
        apikey: Rackspace API key.
    Returns:
        Tuple ``(token_id, tenant_id)`` — note: a tuple, not a dict.
    Exits the process with an error message when authentication fails.
    """
    auth_params = {
        "auth": {
            "RAX-KSKEY:apiKeyCredentials": {
                "username": username,
                "apiKey": apikey,
            }
        }
    }
    response = requests.post(RS_IDENTITY_URL, json=auth_params)
    if not response.ok:
        # Hard exit: callers cannot proceed without a token.
        sys.exit(RS_AUTH_ERROR.format(response.status_code, response.text))
    identity = response.json()
    return (identity['access']['token']['id'],
            identity['access']['token']['tenant']['id'])
def calculate_hessian(model, data, step_size):
    """
    Computes the mixed derivative using finite differences mathod
    :param model: The imported model module
    :param data: The sampled data in structured form
    :param step_size: The dx time step taken between each
    :returns: mixed derivative
    """
    # NOTE(review): assumes `data` is a pandas DataFrame whose columns form a
    # MultiIndex of (output_name, perturbation label), with a 'core'
    # (unperturbed) column per output — confirm against the sampler.
    hessian = pd.DataFrame(0, index = np.arange(data.shape[0]), columns=pd.MultiIndex.from_product([model.output_names, model.perturbation_feature_pairs + model.feature_names], names=['model.output_names','model.feature_pairs']))
    for output_name in model.output_names:
        # helpers[0..3] are the four column groups of the central-difference
        # stencil: f(+,+), f(+,-), f(-,+), f(-,-).
        hessian_calculation_helpers = create_hessian_calculation_columns(model, output_name)
        # Mixed second derivative: (f(++) - f(+-) - f(-+) + f(--)) / h^2.
        mixed_derivative = (data.loc[:, hessian_calculation_helpers[0]].values - data.loc[:, hessian_calculation_helpers[1]].values - data.loc[:, hessian_calculation_helpers[2]].values + data.loc[:, hessian_calculation_helpers[3]].values) / (step_size * step_size)
        # Sign correction based on the local curvature around the first stencil
        # group — presumably to orient the derivative; TODO confirm intent.
        mixed_derivative *= np.sign(data.loc[:, hessian_calculation_helpers[1]].values + data.loc[:, hessian_calculation_helpers[2]].values - 2 * data.loc[:, hessian_calculation_helpers[0]].values)
        hessian.loc[:, zip([output_name] * len(model.perturbation_feature_pairs), model.perturbation_feature_pairs)] = mixed_derivative
        # First-order forward differences for the single-feature columns.
        hessian.loc[:, zip([output_name] * len(model.feature_names), model.feature_names)] = np.array([(data.loc[:, (output_name,f)] - data.loc[:, (output_name,'core')]) / (step_size) for f in model.feature_names]).T
    return hessian
def get_quarterly_income_statements(symbol):
    """
    Returns quarterly IS for the past 5 yrs.

    Args:
        symbol: ticker symbol to query.
    Returns:
        DataFrame of quarterly income-statement reports, as produced by
        ``query_av`` for the Alpha Vantage INCOME_STATEMENT function.
    """
    df = query_av(function="INCOME_STATEMENT", symbol=symbol, datatype='quarterlyReports')
    return df
def BRepBlend_HCurve2dTool_IsPeriodic(*args):
    """
    Static SWIG wrapper: forwards to BRepBlend_HCurve2dTool::IsPeriodic.

    :param C:
    :type C: Handle_Adaptor2d_HCurve2d &
    :rtype: bool
    """
    # Auto-generated binding — do not add logic here.
    return _BRepBlend.BRepBlend_HCurve2dTool_IsPeriodic(*args)
def moveZeroesB(nums):
    """Move every zero in *nums* to the end, in place, keeping the relative
    order of the non-zero elements.

    :type nums: List[int]
    :rtype: void Do not return anything, modify nums in-place instead.
    """
    kept = [value for value in nums if value != 0]
    zero_count = len(nums) - len(kept)
    # Rewrite only when there is actually something to move.
    if kept and zero_count:
        nums[:] = kept + [0] * zero_count
def plot_signle_affinity_map(name, origin_affinity_map_index_onepatch, softmax=False):
    """Plot the per-position affinity distributions of one patch as a
    kernel x kernel grid of line plots, saved to *name*.

    prob_vect_index: [vc_dict_size, kernel, kernel]

    Args:
        name: output image path passed to plt.savefig.
        origin_affinity_map_index_onepatch: tensor of shape
            (vc_dict_size, kernel, kernel).
        softmax: when True, normalize each spatial position over the
            vc dictionary with a softmax before plotting.
    """
    # NOTE(review): "signle" typo kept in the name for API compatibility.
    vc_dict_size, kernel, kernel = origin_affinity_map_index_onepatch.shape
    # Flatten the spatial grid so each column is one (row, col) position.
    origin_affinity_map_index_onepatch = origin_affinity_map_index_onepatch.view(vc_dict_size, kernel*kernel)
    if softmax:
        origin_affinity_map_index_onepatch = F.softmax(origin_affinity_map_index_onepatch, dim=0)
    origin_affinity_map_index_onepatch = torch.transpose(origin_affinity_map_index_onepatch, 0, 1)
    # Shared y-limits so the subplots are visually comparable.
    min_ = origin_affinity_map_index_onepatch.min()
    max_ = origin_affinity_map_index_onepatch.max()
    plt.clf()
    fig = plt.figure(figsize=(32, 32))
    gs1 = gridspec.GridSpec(ncols=kernel, nrows=kernel)
    axs = []
    for i in range(kernel*kernel):
        axs.append(fig.add_subplot(gs1[i]))
        axs[-1].plot(np.arange(vc_dict_size), np.array(origin_affinity_map_index_onepatch[i]))
        axs[-1].set_ylim([min_, max_])
    plt.savefig(name)
    plt.close()
def ParagraphEditor(TargetDocument, Keyword, Value, OutputFile):
    """
    **TargetDocument: str of the location and file name
    **Keyword: list of the keywords that will be replaced (consumed in place)
    **Value: list of the desired values (consumed in place)
    **OutputFile: str of the location and file name (replaces files with the same name)
    Searches in a document to replace each keyword (once) with its equivalent
    value while preserving the paragraph style.
    P.s. For Paragraphs only (free text not tables) and copies style only
    """
    document = Document(TargetDocument)
    # loop through each paragraph inside the target document (free text only)
    for paragraph in document.paragraphs:
        # Iterate a snapshot: Keyword is popped below, and mutating a list
        # while iterating it skips the element after each removal.
        for key in list(Keyword):
            if key in paragraph.text:
                # copy the current paragraph style before editing the text
                style_holder = paragraph.style
                idx = Keyword.index(key)
                # replace the key with the value provided
                paragraph.text = paragraph.text.replace(key, Value[idx])
                # re-apply the copied style
                paragraph.style = style_holder
                # consume the key/value pair so each is applied only once
                Value.pop(idx)
                Keyword.pop(idx)
    # save the document in the provided directory with the provided name
    document.save(str(OutputFile))
def HLRBRep_SurfaceTool_OffsetValue(*args):
    """
    Static SWIG wrapper: forwards to HLRBRep_SurfaceTool::OffsetValue.

    :param S:
    :type S: Standard_Address
    :rtype: float
    """
    # Auto-generated binding — do not add logic here.
    return _HLRBRep.HLRBRep_SurfaceTool_OffsetValue(*args)
def update_cv_validation_info(test_validation_info, iteration_validation_info):
    """Accumulate one cross-validation iteration into the running summary.

    Appends each metric value from *iteration_validation_info* onto the
    per-metric list in *test_validation_info* (a fresh dict is created when
    the summary is None/empty). Returns the updated summary.
    """
    accumulated = test_validation_info if test_validation_info else {}
    for metric_name, metric_value in iteration_validation_info.items():
        accumulated.setdefault(metric_name, []).append(metric_value)
    return accumulated
def epoch_to_datetime(epoch: str) -> datetime:
"""
:param epoch:
:return:
"""
return datetime.datetime.fromtimestamp(int(epoch) / 1000) | 35,059 |
def calculate_log_odds(
    attr: Tensor,
    k: float,
    replacement_emb: Tensor,
    model,
    input_emb: Tensor,
    attention_mask: Tensor,
    prediction: Tensor,
) -> float:
    """
    Log-odds scoring of an attribution.

    Replaces the top-k% attributed embeddings, re-runs the model, and returns
    the log ratio of the new probability of the originally predicted class to
    the original maximum probability.

    :param attr: Attribution scores for one sentence
    :param k: top-k value (how many embeddings are replaced)
    :param replacement_emb: embedding for one word that should be used as replacement
    :param model: model used to re-run the prediction
    :param input_emb: Embedding of the sentence for which the attribution was computed
    :param attention_mask: Original attention mask for the sentence
    :param prediction: what model outputs for the input (logits)
    """
    # get logits of masked prediction:
    replaced_embed = replace_k_percent(attr, k, replacement_emb, input_emb)
    new_pred = predict(model, replaced_embed, attention_mask)
    # convert logits of (original) prediction and new_prediction to probabilities:
    new_pred = softmax(new_pred, dim=1)
    prediction = softmax(prediction, dim=1)
    # index of the class the *original* model predicted
    pred_i = torch.argmax(prediction).item()
    # NOTE(review): numerator is the new probability of the original class,
    # denominator the original max probability — log ratio, not classic
    # log-odds p/(1-p); confirm this matches the paper being reproduced.
    return torch.log(new_pred[0, pred_i] / torch.max(prediction)).item()
async def wallet_config(
    context: InjectionContext, provision: bool = False
) -> Tuple[Profile, DIDInfo]:
    """Initialize the root profile.

    Provisions or opens the wallet profile, then ensures the public/local DID
    matches any configured ``wallet.seed``, creating or replacing DIDs as
    required.

    Args:
        context: injection context carrying settings and the ProfileManager.
        provision: when True, provision a new profile (and print a summary).
    Returns:
        Tuple of the opened/provisioned Profile and the public DIDInfo
        (None when no public DID exists).
    Raises:
        ConfigError: when a new seed conflicts with the registered public DID
            and replacement was not requested.
    """
    mgr = context.inject(ProfileManager)
    settings = context.settings
    profile_cfg = {}
    # Copy only the recognized wallet.* settings into the profile config.
    for k in CFG_MAP:
        pk = f"wallet.{k}"
        if pk in settings:
            profile_cfg[k] = settings[pk]
    # may be set by `aca-py provision --recreate`
    if settings.get("wallet.recreate"):
        profile_cfg["auto_recreate"] = True
    if provision:
        profile = await mgr.provision(context, profile_cfg)
    else:
        try:
            profile = await mgr.open(context, profile_cfg)
        except ProfileNotFoundError:
            # Fall back to provisioning when explicitly allowed.
            if settings.get("auto_provision", False):
                profile = await mgr.provision(context, profile_cfg)
            else:
                raise
    if provision:
        if profile.created:
            print("Created new profile")
        else:
            print("Opened existing profile")
        print("Profile backend:", profile.backend)
        print("Profile name:", profile.name)
    wallet_seed = context.settings.get("wallet.seed")
    wallet_local_did = context.settings.get("wallet.local_did")
    txn = await profile.transaction()
    wallet = txn.inject(BaseWallet)
    public_did_info = await wallet.get_public_did()
    public_did = None
    if public_did_info:
        public_did = public_did_info.did
        # A seed that derives a different DID either replaces the public DID
        # (when configured) or is a hard configuration error.
        if wallet_seed and seed_to_did(wallet_seed) != public_did:
            if context.settings.get("wallet.replace_public_did"):
                replace_did_info = await wallet.create_local_did(
                    method=DIDMethod.SOV, key_type=KeyType.ED25519, seed=wallet_seed
                )
                public_did = replace_did_info.did
                await wallet.set_public_did(public_did)
                print(f"Created new public DID: {public_did}")
                print(f"Verkey: {replace_did_info.verkey}")
            else:
                # If we already have a registered public did and it doesn't match
                # the one derived from `wallet_seed` then we error out.
                raise ConfigError(
                    "New seed provided which doesn't match the registered"
                    + f" public did {public_did}"
                )
    # wait until ledger config to set public DID endpoint - wallet goes first
    elif wallet_seed:
        if wallet_local_did:
            # Seed creates only a local DID (with optional default endpoint).
            endpoint = context.settings.get("default_endpoint")
            metadata = {"endpoint": endpoint} if endpoint else None
            local_did_info = await wallet.create_local_did(
                method=DIDMethod.SOV,
                key_type=KeyType.ED25519,
                seed=wallet_seed,
                metadata=metadata,
            )
            local_did = local_did_info.did
            if provision:
                print(f"Created new local DID: {local_did}")
                print(f"Verkey: {local_did_info.verkey}")
        else:
            public_did_info = await wallet.create_public_did(
                method=DIDMethod.SOV, key_type=KeyType.ED25519, seed=wallet_seed
            )
            public_did = public_did_info.did
            if provision:
                print(f"Created new public DID: {public_did}")
                print(f"Verkey: {public_did_info.verkey}")
            # wait until ledger config to set public DID endpoint - wallet goes first
    if provision and not wallet_local_did and not public_did:
        print("No public DID")
    # Debug settings
    test_seed = context.settings.get("debug.seed")
    if context.settings.get("debug.enabled"):
        if not test_seed:
            test_seed = "testseed000000000000000000000001"
    if test_seed:
        await wallet.create_local_did(
            method=DIDMethod.SOV,
            key_type=KeyType.ED25519,
            seed=test_seed,
            metadata={"endpoint": "1.2.3.4:8021"},
        )
    await txn.commit()
    return (profile, public_did_info)
def nodePreset(atr="string",ctm="string",delete="[name, string]",ex="[name, string]",ivn="string",ls="name",ld="[name, string]",sv="[name, string]"):
    """
    Save and load preset settings for a node (documentation stub generated
    from the Maya command reference; the body is intentionally empty).

    http://help.autodesk.com/cloudhelp/2019/ENU/Maya-Tech-Docs/CommandsPython/nodePreset.html
    -----------------------------------------
    nodePreset is NOT undoable, NOT queryable, and NOT editable.
    Command to save and load preset settings for a node. This command allows you
    to take a snapshot of the values of all attributes of a node and save it to
    disk as a preset with user specified name. Later the saved preset can be
    loaded and applied onto a different node of the same type. The end result is
    that the node to which the preset is applied takes on the same values as the
    node from which the preset was generated had at the time of the snapshot.
    -----------------------------------------
    Return Value:
    boolean if isValidName or exists is used.
    -----------------------------------------
    Flags:
    -----------------------------------------
    atr : attributes [string] []
        A white space separated string of the named attributes to save to the preset file. If not specified, all attributes will be stored.
    -----------------------------------------
    ctm : custom [string] []
        Specifies a MEL script for custom handling of node attributes that are not handled by the general save preset mechanism (ie. multis, dynamic attributes, or connections). The identifiers #presetName and #nodeName will be expanded before the script is run. The script must return an array of strings which will be saved to the preset file and issued as commands when the preset is applied to another node. The custom script can query #nodeName in determining what should be saved to the preset, or issue commands to query the selected node in deciding how the preset should be applied.
    -----------------------------------------
    delete : delete [[name, string]] []
        Deletes the existing preset for the node specified by the first argument with the name specified by the second argument.
    -----------------------------------------
    ex : exists [[name, string]] []
        Returns true if the node specified by the first argument already has a preset with a name specified by the second argument. This flag can be used to check if the user is about to overwrite an existing preset and thereby provide the user with an opportunity to choose a different name.
    -----------------------------------------
    ivn : isValidName [string] []
        Returns true if the name consists entirely of valid characters for a preset name. Returns false if not. Because the preset name will become part of a file name and part of a MEL procedure name, some characters must be disallowed. Only alphanumeric characters and underscore are valid characters for the preset name.
    -----------------------------------------
    ls : list [name] []
        Lists the names of all presets which can be loaded onto the specified node.
    -----------------------------------------
    ld : load [[name, string]] []
        Sets the settings of the node specified by the first argument according to the preset specified by the second argument. Any attributes on the node which are the destinations of connections or whose children (multi children or compound children) are destinations of connections will not be changed by the preset.
    -----------------------------------------
    sv : save [[name, string]]
        Saves the current settings of the node specified by the first argument to a preset of the name specified by the second argument. If a preset for that node with that name already exists, it will be overwritten with no warning. You can use the -exists flag to check if the preset already exists. If an attribute of the node is the destination of a connection, the value of the attribute will not be written as part of the preset.
    """
def parse_ini(path):
    """Parse *path* as an INI configuration file.

    Returns:
        ``(True, "")`` when the file parses cleanly (a missing file also
        counts: ``ConfigParser.read`` silently ignores unreadable paths), or
        ``(False, slugified_error_message)`` when the file is malformed.
    """
    config = configparser.ConfigParser()
    try:
        config.read(path)
        return True, ""
    except configparser.Error as err:
        # Every configparser exception the original enumerated (parsing,
        # interpolation, duplicate section/option, ...) derives from
        # configparser.Error, so one catch covers them all.
        return False, slugify(err)
def setup_logging(args):
    """Setup logging handlers based on arguments.

    Installs a console handler (level from --debug/--quiet), an optional
    watched logfile handler, and event/traffic handlers that fall back to
    NullHandler when no file was requested.
    """
    if args.debug:
        console_level = logging.DEBUG
    elif args.quiet:
        console_level = logging.WARNING
    else:
        console_level = logging.INFO
    console = logging.StreamHandler()
    console.setFormatter(LOG_FORMAT)
    console.setLevel(console_level)
    logger.addHandler(console)
    if args.logfile:
        file_handler = logging.handlers.WatchedFileHandler(args.logfile)
        file_handler.setFormatter(LOG_FORMAT)
        file_handler.setLevel(logging.DEBUG if args.debug else logging.INFO)
        logger.addHandler(file_handler)
    # Event and traffic logs share the same wiring: a watched file when a
    # path was given, otherwise a NullHandler; both pinned at INFO.
    for path, target_logger in (
        (args.eventlogfile, event_logger),
        (args.trafficfile, traffic_logger),
    ):
        if path:
            handler = logging.handlers.WatchedFileHandler(path)
        else:
            handler = logging.NullHandler()
        handler.setLevel(logging.INFO)
        target_logger.addHandler(handler)
        target_logger.setLevel(logging.INFO)
    logger.setLevel(logging.DEBUG)
def number_of_positional_args(fn):
    """Return the number of positional arguments for a function, or None if
    the number is variable (the function accepts ``*args``).

    Looks inside any decorated functions via ``__wrapped__``. For C builtins
    whose signature is unavailable, falls back to parsing call specs out of
    the docstring.

    Raises:
        NotImplementedError: when neither the signature nor the docstring
            yields an answer.
    """
    try:
        if hasattr(fn, "__wrapped__"):
            return number_of_positional_args(fn.__wrapped__)
        # Compute the signature once (the original called signature() twice).
        params = list(signature(fn).parameters.values())
        if any(p.kind == p.VAR_POSITIONAL for p in params):
            return None
        return sum(
            p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
            for p in params
        )
    except ValueError:
        # signatures don't work for built-in operators, so try to extract from the docstring(!)
        if hasattr(fn, "__doc__") and hasattr(fn, "__name__") and fn.__doc__ is not None:
            specs = re.findall(r"{}\(.*?\)".format(re.escape(fn.__name__)), fn.__doc__)
            # Strip optional-arg brackets and keyword-only tails from each spec.
            specs = [re.sub(r", \*, .*\)", ")", re.sub(r"[[\]]", "", spec)) for spec in specs]
            if any("*" in spec for spec in specs):
                return None
            elif specs:
                return max(0 if spec.endswith("()") else spec.count(",") + 1 for spec in specs)
        # Typo "Bult-in" fixed.
        raise NotImplementedError("Built-in operator {} not supported".format(fn))
def run_tests(args=None):
    """This can be executed from within an "if __name__ == '__main__'" block
    to execute the tests found in that module.

    Args:
        args: optional list of extra command-line arguments, prepended to the
            calling module's file path before being handed to ``main``.

    Note: terminates the interpreter via ``sys.exit`` with ``main``'s result.
    """
    if args is None:
        args = []
    # __import__('__main__').__file__ resolves to the script that invoked us.
    sys.exit(main(list(args) + [__import__('__main__').__file__]))
def complete_charges():
    """
    Update the state of all charges in progress.

    Iterates every Charge still in the CREATED state; retrieve() refreshes
    each one from the payment backend (presumably advancing its state —
    confirm against the Charge model).
    """
    for charge in Charge.objects.filter(state=Charge.CREATED):
        charge.retrieve()
def check_space(space):
    """Check the properties of an environment state or action space.

    Returns a ``(dim, discrete)`` pair: the shape and False for a Box space,
    or the number of actions and True for a Discrete space.

    Raises:
        NotImplementedError: for any other space type.
    """
    if isinstance(space, spaces.Box):
        return space.shape, False
    if isinstance(space, spaces.Discrete):
        return space.n, True
    raise NotImplementedError('This type of space is not supported')
def finish_work(state):
    """Move all running nodes to done and clear the running set."""
    progress = state.progress
    progress.done = progress.done.union(progress.running)
    progress.running = set()
    return state
def show_submit_form(context, task, user, redirect, show_only_source=False):
    """Renders submit form for specified task.

    Populates *context* with task metadata and one form instance per
    submission type the task supports, then returns the context.
    """
    context["task"] = task
    context["competition_ignored"] = user.is_competition_ignored(
        task.round.semester.competition
    )
    context["constants"] = constants
    context["redirect_to"] = redirect
    context["show_only_source"] = show_only_source
    # Instantiate only the forms relevant to this task's submission types.
    if task.has_source:
        context["source_form"] = SourceSubmitForm()
    if task.has_description:
        context["description_form"] = DescriptionSubmitForm()
    if task.has_testablezip:
        context["testablezip_form"] = TestableZipSubmitForm()
    if task.has_text_submit:
        context["text_submit_form"] = TextSubmitForm()
    return context
def getkey(key="foo"):
    """
    Returns the latest version of the key on the hosts specified.

    Runs _getkey on every host, parses "version X, value: Y" lines, and
    prints a host/version/value table (unparseable output shown in red
    with version 0.0).
    """
    pattern = re.compile(r'version ([\d\.]+), value: (.*)', re.I)
    rows = [["Host", "Version", "Value"]]
    for host, line in execute(_getkey, key).items():
        match = pattern.match(line)
        if match is None:
            # Could not parse: show the last output line, highlighted.
            rows.append([host, 0.0, red(line.split("\n")[-1])])
        else:
            version, value = match.groups()
            rows.append([host, float(version), value])
    print(tabulate(rows, tablefmt='simple', headers='firstrow', floatfmt=".2f"))
def index_of_masked_word(sentence, bert):
    """Return index of the masked word in `sentence` using `bert`'s tokenizer.

    We use this function to calculate the linear distance between the target
    and controller as BERT sees it.

    Parameters
    ----------
    sentence : str

    Returns
    -------
    int
        Token index of MASK, or -1 when the sentence contains no mask.
    """
    tokenized = bert.tokenize(sentence)
    if MASK not in tokenized:
        return -1
    return tokenized.index(MASK)
def _get_headers():
    """Get headers for a GitHub API request.

    Adds an Authorization header when a GitHub token is present in the
    environment.

    Returns:
        dict: The headers for a GitHub API request.
    """
    github_token = os.getenv(env.GH_TOKEN, None)
    if github_token is None:
        return {}
    return {'Authorization': 'token ' + github_token}
def vgg19(down=8, bn=False, o_cn=1, final='abs'):
    """VGG 19-layer model (configuration "E")
    model pre-trained on ImageNet.

    NOTE(review): the `bn` parameter is accepted but ignored — make_layers is
    always called with batch_norm=False; confirm whether this is intentional.
    """
    model = VGG(make_layers(cfg['E'], batch_norm=False), down=down, o_cn=o_cn, final=final)
    # strict=False: the custom head's weights are absent from the ImageNet
    # checkpoint, so missing/unexpected keys are tolerated.
    model.load_state_dict(model_zoo.load_url(model_urls['vgg19']), strict=False)
    return model
def get_short_link_from_vk(login: str, password: str, link: str) -> str:
    """
    Obtain a short link for *link* using the vk.com URL-shortener service.

    Logs in with the given credentials, scrapes the shortener hash from the
    vk.com/cc page, posts the link, and returns the shortened URL.
    """
    from vk_auth__requests_re import auth
    session, rs = auth(login, password)
    # This page is needed to obtain the hash required by the shorten request.
    rs = session.get('https://vk.com/cc')
    import re
    match = re.search(r"Shortener\.submitLink\('(.+)'\)", rs.text)
    if match is None:
        raise Exception('Не удалось получить hash для Shortener')
    shortener_hash = match.group(1)
    # Payload of the POST request that creates the short link.
    data = {
        'act': 'shorten',
        'link': link,
        'al': '1',
        'hash': shortener_hash,
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',
        'X-Requested-With': 'XMLHttpRequest',
        'Content-Type': 'application/x-www-form-urlencoded',
    }
    rs = session.post('https://vk.com/cc', headers=headers, data=data)
    print(rs)
    from bs4 import BeautifulSoup
    root = BeautifulSoup(rs.content, 'lxml')
    # The response embeds the shortened URL as the header anchor of the list.
    a_short_link = root.select_one('.shortened_link.shorten_list_header > a[href]')
    return a_short_link['href']
def student_list(request):
    """
    List all students, or create a new student.

    GET: serialize and return every Student.
    POST: validate the payload; on success persist it and return 201 with the
    serialized record, otherwise return 400 with the validation errors.
    """
    if request.method == 'GET':
        students = Student.objects.all()
        serializer = StudentSerializer(students, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        serializer = StudentSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get_model_features(url, chromedriver):
    """For given model url, grab categories and tags for that model.

    Returns:
        (cats, tags): two lists of link texts, or (None, None) when scraping
        fails (a message is printed in that case).
    """
    try:
        BROWSER.get(url)
        time.sleep(5)  # let the page render before querying the DOM
        cats = BROWSER.find_elements_by_xpath("//section[@class='model-meta-row categories']//ul//a")
        cats = [cat.text for cat in cats]
        tags = BROWSER.find_elements_by_xpath("//section[@class='model-meta-row tags']//ul//a")
        tags = [tag.text for tag in tags]
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        print('Difficulty grabbing these features {}'.format(url))
        print('Reload browser and try again')
        cats, tags = None, None
    return cats, tags
def sent2labels(sent):
    """Extract the gold label of every token in a sentence.

    Input: sentence as a list of token rows.
    Output: list with one gold label per token (column 18 of each row).
    """
    GOLD_LABEL_COLUMN = 18
    return [token_row[GOLD_LABEL_COLUMN] for token_row in sent]
def get_course_id_from_capa_module(capa_module):
    """
    Extract a stringified course run key from a CAPA module (aka ProblemBlock).

    This is a bit of a hack. Its intended use is to allow us to pass the course
    id (if available) to `safe_exec`, enabling course-run-specific resource
    limits in the safe execution environment (codejail).

    Arguments:
        capa_module (ProblemBlock|None)
    Returns: str|None
        The stringified course run key of the module, or None when the module
        is missing, lacks the expected attributes, or carries a deprecated
        Old Mongo usage key.
    """
    if not capa_module:
        return None
    try:
        return str(capa_module.scope_ids.usage_id.course_key)
    except (AttributeError, TypeError):
        # AttributeError: missing/unexpected scope ids — fall back to None.
        # TypeError: Old Mongo usage keys lack a 'run' specifier and can fail
        # to serialize into a course run key; tolerable as they are deprecated.
        return None
def find_free_box_id() -> "str | None":
    """
    Return the id of the first available sandbox directory.

    Scans EXEC_PATH for numeric directory names (slots in use) and returns
    the first integer in [0, MAX_CONCURRENT_EXEC) that is free, as a string;
    None when every slot is occupied.

    (The original crashed with IndexError when a directory's numeric name
    was >= MAX_CONCURRENT_EXEC, carried a stray copy-pasted docstring line,
    and was annotated `-> str` despite returning None.)
    """
    # Search for free id in EXEC_PATH
    used_ids = {int(d) for d in os.listdir(EXEC_PATH) if d.isdigit()}
    for candidate in range(MAX_CONCURRENT_EXEC):
        if candidate not in used_ids:
            return str(candidate)
    return None
def _gershgorin_circles_test(expr, var_to_idx):
    """Check convexity by computing Gershgorin circles without building
    the coefficients matrix.

    If every circle lies in the nonnegative (nonpositive) half-line, the
    quadratic's matrix is positive (negative) semidefinite.

    Parameters
    ----------
    expr : QuadraticExpression
        the quadratic expression
    var_to_idx : dict-like
        map a var to a numerical index between 0 and n, where n is the number
        of args of expr

    Returns
    -------
    Convexity if the expression is Convex or Concave, None otherwise.
    """
    n = expr.nargs()
    radii = np.zeros(n)
    diag = np.zeros(n)
    for term in expr.terms:
        i = var_to_idx[term.var1]
        j = var_to_idx[term.var2]
        if i == j:
            diag[i] = term.coefficient
        else:
            # Off-diagonal coefficient is split across (i, j) and (j, i).
            half_coef = np.abs(term.coefficient / 2.0)
            radii[i] += half_coef
            radii[j] += half_coef
    if np.all((diag - radii) >= 0):
        return Convexity.Convex
    if np.all((diag + radii) <= 0):
        return Convexity.Concave
    return None
def generate(env):
    """
    Add Builders and construction variables for CUDA compilers to an Environment.

    Registers actions and emitters so that ``.cu`` files can be built as
    static and shared objects, makes ``.cu`` sources scannable for implicit
    dependencies, and points the compiler variables at clang (not nvcc,
    despite the NVCC naming).
    """
    # create builders that make static & shared objects from .cu files
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
    for suffix in CUDASuffixes:
        # Add this suffix to the list of things buildable by Object
        # NOTE(review): the actions are keyed on the construction variable
        # '$CUDAFILESUFFIX' (expanded later by SCons) rather than on `suffix`,
        # so the same key is re-registered on every iteration, while the
        # emitters/scanner below are keyed per-suffix. This asymmetry matches
        # Thrust's nvcc tool -- confirm it is intended here as well.
        static_obj.add_action('$CUDAFILESUFFIX', '$NVCCCOM')
        shared_obj.add_action('$CUDAFILESUFFIX', '$SHNVCCCOM')
        static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
        shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
        # Add this suffix to the list of things scannable
        SCons.Tool.SourceFileScanner.add_scanner(suffix, CUDAScanner)
    add_common_nvcc_variables(env)
    # set the "CUDA Compiler Command" environment variable
    # windows is picky about getting the full filename of the executable
    if os.name == 'nt':
        env['NVCC'] = 'clang.exe'
        env['SHNVCC'] = 'clang.exe'
    else:
        env['NVCC'] = 'clang'
        env['SHNVCC'] = 'clang'
    # Despite the NVCC variable names, the whole toolchain is clang here.
    env['CC'] = 'clang'
    env['CXX'] = 'clang++'
    env['LINK'] = 'clang'
    # set the include path, and pass both c compiler flags and c++ compiler
    # flags
    env['NVCCFLAGS'] = SCons.Util.CLVar('')
    env['SHNVCCFLAGS'] = SCons.Util.CLVar('') + ' -shared'
    # 'NVCC Command'
    env['NVCCCOM'] = '$NVCC -o $TARGET -c $NVCCFLAGS $_NVCCWRAPCFLAGS $NVCCWRAPCCFLAGS $_NVCCCOMCOM $SOURCES'
    env['SHNVCCCOM'] = '$SHNVCC -o $TARGET -c $SHNVCCFLAGS $_NVCCWRAPSHCFLAGS $_NVCCWRAPSHCCFLAGS $_NVCCCOMCOM $SOURCES'
    # the suffix of CUDA source files is '.cu'
    env['CUDAFILESUFFIX'] = '.cu' | 35,082 |
def make_papers(
    *,
    n_papers: int,
    authors: AuthorList,
    funders: FunderList,
    publishers: PublisherList,
    fields_of_study: List,
    faker: Faker,
    min_title_length: int = 2,
    max_title_length: int = 10,
    min_authors: int = 1,
    max_authors: int = 10,
    min_funders: int = 0,
    max_funders: int = 3,
    min_events: int = 0,
    max_events: int = 100,
    min_fields_of_study: int = 1,
    max_fields_of_study: int = 20,
) -> PaperList:
    """Generate the list of ground truth papers.
    :param n_papers: the number of papers to generate.
    :param authors: the authors list.
    :param funders: the funders list.
    :param publishers: the publishers list.
    :param fields_of_study: the fields of study list.
    :param faker: the faker instance.
    :param min_title_length: the min paper title length.
    :param max_title_length: the max paper title length.
    :param min_authors: the min number of authors for each paper.
    :param max_authors: the max number of authors for each paper.
    :param min_funders: the min number of funders for each paper.
    :param max_funders: the max number of funders for each paper.
    :param min_events: the min number of events per paper.
    :param max_events: the max number of events per paper.
    :param min_fields_of_study: the min fields of study per paper.
    :param max_fields_of_study: the max fields of study per paper.
    :return: the list of papers.
    """
    papers = []
    for i, _ in enumerate(range(n_papers)):  # i becomes the paper id
        # Random title
        n_words_ = random.randint(min_title_length, max_title_length)
        title_ = faker.sentence(nb_words=n_words_)
        # Random date
        published_date_ = pendulum.from_format(faker.date(), "YYYY-MM-DD").date()
        # Re-wrap so downstream code receives a pendulum date, not a stdlib one.
        published_date_ = pendulum.date(year=published_date_.year, month=published_date_.month, day=published_date_.day)
        # Output type
        output_type_ = random.choice(OUTPUT_TYPES)
        # Pick a random list of authors
        n_authors_ = random.randint(min_authors, max_authors)
        authors_ = random.sample(authors, n_authors_)
        # Random funder
        n_funders_ = random.randint(min_funders, max_funders)
        if n_funders_ > 0:
            funders_ = random.sample(funders, n_funders_)
        else:
            funders_ = []
        # Random publisher
        publisher_ = random.choice(publishers)
        # Journal
        journal_ = random.choice(publisher_.journals)
        # Random DOI
        doi_ = make_doi(publisher_.doi_prefix)
        # Random events, spread over the two years up to today
        n_events_ = random.randint(min_events, max_events)
        events_ = []
        today = datetime.now()
        today_ts = int(today.timestamp())
        start_date = datetime(today.year - 2, today.month, today.day)
        start_ts = int(start_date.timestamp())
        for _ in range(n_events_):
            event_date_ = date_between_dates(start_ts=start_ts, end_ts=today_ts)
            events_.append(Event(source=random.choice(EVENT_TYPES), event_date=event_date_))
        # Fields of study
        n_fos_ = random.randint(min_fields_of_study, max_fields_of_study)
        # NOTE(review): assumes the first 199 entries of `fields_of_study`
        # are the level-0 fields, and the extra sample below may duplicate
        # the level-0 pick -- confirm both are intended.
        level_0_index = 199
        fields_of_study_ = [random.choice(fields_of_study[:level_0_index])]
        fields_of_study_.extend(random.sample(fields_of_study, n_fos_))
        # Open access status
        is_free_to_read_at_publisher_ = True
        if journal_.license is not None:
            # Gold
            license_ = journal_.license
        else:
            license_ = random.choice(LICENSES)
        if license_ is None:
            # Bronze: free to read on publisher website but no license
            is_free_to_read_at_publisher_ = bool(random.getrandbits(1))
        # Hybrid: license=True
        # Green: in a 'repository'
        is_in_institutional_repo_ = bool(random.getrandbits(1))
        # Green not bronze: Not free to read at publisher but in a 'repository'
        # Make paper
        paper = Paper(
            i,
            doi=doi_,
            title=title_,
            published_date=published_date_,
            output_type=output_type_,
            authors=authors_,
            funders=funders_,
            journal=journal_,
            publisher=publisher_,
            events=events_,
            fields_of_study=fields_of_study_,
            license=license_,
            is_free_to_read_at_publisher=is_free_to_read_at_publisher_,
            is_in_institutional_repo=is_in_institutional_repo_,
        )
        papers.append(paper)
    # Create paper citations
    # Sort from oldest to newest
    papers.sort(key=lambda p: p.published_date)
    for i, paper in enumerate(papers):
        # Create cited_by: a paper can only be cited by papers published
        # after it, so sample from the strictly-newer tail of the list.
        n_papers_forwards = len(papers) - i
        n_cited_by = random.randint(0, int(n_papers_forwards / 2))
        paper.cited_by = random.sample(papers[i + 1 :], n_cited_by)
    return papers | 35,083 |
def test_pid_downarrow4():
    """
    Regression test: the PID_downarrow decomposition of the `boom`
    bivariate distribution must reproduce the reference atom values below
    (to 1e-4 absolute tolerance).
    """
    d = bivariates['boom']
    pid = PID_downarrow(d)
    # Reference values; presumably ((0,), (1,)) is the redundancy atom,
    # ((0,),) / ((1,),) the unique-information atoms and ((0, 1),) the
    # synergy atom of the PID lattice -- confirm against dit's PID docs.
    assert pid[((0,), (1,))] == pytest.approx(0.20751874963942218, abs=1e-4)
    assert pid[((0,),)] == pytest.approx(0.45914791702724433, abs=1e-4)
    assert pid[((1,),)] == pytest.approx(0.33333333333333348, abs=1e-4)
    assert pid[((0, 1),)] == pytest.approx(0.12581458369391196, abs=1e-4) | 35,084 |
def parse_cypher_file(path: str):
    """Return the list of cypher queries contained in a file.

    Comments (starting with "//") are filtered out, and queries must be
    separated by a semicolon.

    Arguments:
        path {str} -- Path to the cypher file

    Returns:
        [str] -- List of queries (trailing semicolons stripped)
    """

    def chop_comment(line):
        """Remove an inline "//" comment, honouring quoted strings."""
        comment_starter = "//"
        possible_quotes = ["'", '"']
        # a little state machine:
        in_quote = False          # whether we are in a quoted string right now
        quoting_char = None       # the quote character that opened the string
        backslash_escape = False  # true if we just saw a backslash
        comment_init = ""         # partial match of the comment starter
        for i, ch in enumerate(line):
            if in_quote:
                if backslash_escape:
                    # escaped character: can never close the string
                    backslash_escape = False
                elif ch == "\\":
                    backslash_escape = True
                elif ch == quoting_char:
                    # quote is ending
                    in_quote = False
                    quoting_char = None
            else:
                if ch == comment_starter[len(comment_init)]:
                    comment_init += ch
                    if comment_init == comment_starter:
                        # a comment started, return the non-comment prefix
                        return line[: i - (len(comment_starter) - 1)]
                else:
                    # reset comment starter detection
                    comment_init = ""
                if ch in possible_quotes:
                    # quote is starting
                    comment_init = ""
                    quoting_char = ch
                    in_quote = True
        return line

    queries = []
    with open(path) as f:
        query = ""
        for line in f:
            line = chop_comment(line).rstrip()
            if not line:
                # empty line (or a pure comment line)
                continue
            if query:
                # join continuation lines with a newline (the original
                # prepended "\n" before the FIRST line of every query too)
                query += "\n"
            query += line
            if line.endswith(";"):
                queries.append(query.rstrip(";"))
                query = ""
    return queries
def del_method():
    """del: Cleanup an item on destroy."""
    # __del__ runs when the object is reclaimed. Under CPython's reference
    # counting, deleting the only reference triggers it immediately, but in
    # general the exact moment of destruction is hard to predict -- use
    # __del__ with caution.
    message = ""

    class _Destroyable:
        def __del__(self):
            nonlocal message
            message = "burn the lyrics"

    doomed = _Destroyable()
    del doomed
    return message
def DevoilerConversation(vals, elements):
    """Reveal a hidden discussion topic.

    elements[0] is the location id and elements[1] the conversation id
    (either may be an expression). The effect is simply to emit a script
    line that sets the corresponding convo_hidden_* variable to false.
    """
    location_id = elements[0]
    conversation_id = elements[1]
    vals["postcode"] = "set convo_hidden_%s_%s false\n" % (location_id, conversation_id)
def main():
    """
    Main program

    Connects to CovenantSQL, creates a demo `users` table, inserts a sample
    row and prints every row of the table. The connection is always closed,
    even when a statement raises (the original leaked it on error).
    """
    from contextlib import closing  # local import keeps the fix self-contained

    # `closing` guarantees conn.close() runs even if a statement fails.
    with closing(pycovenantsql.connect(**DB_CONFIG)) as conn, conn.cursor() as cursor:
        # Create a new table
        sql_create_table = """
        CREATE TABLE IF NOT EXISTS `users` (
            `id` INTEGER PRIMARY KEY AUTOINCREMENT,
            `created_at` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
            `email` VARCHAR(255) NOT NULL,
            `password` VARCHAR(255) NOT NULL
        );
        """
        print('create table')
        cursor.execute(sql_create_table)
        # Insert some data
        print('insert sample data')
        sql_insert_data = """INSERT INTO `users` (`email`, `password`) VALUES (%s, %s)"""
        affected_rows = cursor.execute(
            sql_insert_data,
            ('Apple', 'appleisdelicious'),
        )
        print('affected rows: %d, lastrowid: %d' % (
            affected_rows,
            cursor.lastrowid,
        ))
        # Query data
        print('select data from the table')
        sql_select_data = """SELECT * FROM `users`"""
        cursor.execute(sql_select_data)
        for row in cursor:
            print(row)
def plot_planar_polar_positions(filename):
    """Plot polar odometry samples from a CSV file as Cartesian x/y points.

    Each CSV row is read as (r, theta) and converted to
    (r*cos(theta), r*sin(theta)) before plotting.
    """
    xs, ys = [], []
    with open(filename) as data_file:
        for row in csv.reader(data_file):
            radius = float(row[0])
            angle = float(row[1])
            xs.append(radius * cos(angle))
            ys.append(radius * sin(angle))
    # plot the points
    plt.plot(xs, ys, 'r*', markersize=2.0)
    plt.legend(['Sensor values'])
    plt.xlabel('x position (m)')
    plt.ylabel('y position (m)')
    plt.show()
def sample_user(email="test@local.com", password="testpass"):
    """Create and return a sample user for use in tests."""
    user_model = get_user_model()
    return user_model.objects.create_user(email, password)
async def test_vec_add(dut):
    """ Test Vector Adder

    Drives three random int16 vectors into the DUT and checks that the
    output equals their element-wise sum (packed as 18-bit words by the
    project helper np2bv -- assumed; confirm np2bv's packing).
    """
    # Create a 10us period clock on port clk
    clock = Clock(dut.clk_i, 10, units="us")
    # NOTE(review): cocotb.fork is deprecated in newer cocotb releases in
    # favour of cocotb.start_soon -- confirm the pinned cocotb version.
    cocotb.fork(clock.start())
    # Reset system
    await FallingEdge(dut.clk_i)
    dut.rst_n_i <= 0
    dut.data0_i <= np2bv(np.zeros(shape=DUT_VECTOR_SIZE, dtype=np.int16), 16)
    dut.data1_i <= np2bv(np.zeros(shape=DUT_VECTOR_SIZE, dtype=np.int16), 16)
    dut.data2_i <= np2bv(np.zeros(shape=DUT_VECTOR_SIZE, dtype=np.int16), 16)
    dut.last0_i <= 0
    dut.last1_i <= 0
    dut.last2_i <= 0
    dut.valid0_i <= 0
    dut.valid1_i <= 0
    dut.valid2_i <= 0
    dut.ready_i <= 0
    # Release reset and mark the downstream consumer as ready.
    await FallingEdge(dut.clk_i)
    dut.rst_n_i <= 1
    dut.data0_i <= np2bv(np.zeros(shape=DUT_VECTOR_SIZE, dtype=np.int16), 16)
    dut.data1_i <= np2bv(np.zeros(shape=DUT_VECTOR_SIZE, dtype=np.int16), 16)
    dut.data2_i <= np2bv(np.zeros(shape=DUT_VECTOR_SIZE, dtype=np.int16), 16)
    dut.ready_i <= 1
    # Generate random values and compare results
    await FallingEdge(dut.clk_i)
    for _ in range(10):
        val0 = np.random.randint(-2 ** 15, 2 ** 15 - 1, size=DUT_VECTOR_SIZE,
                                 dtype=np.int16)
        val1 = np.random.randint(-2 ** 15, 2 ** 15 - 1, size=DUT_VECTOR_SIZE,
                                 dtype=np.int16)
        val2 = np.random.randint(-2 ** 15, 2 ** 15 - 1, size=DUT_VECTOR_SIZE,
                                 dtype=np.int16)
        dut.valid0_i <= 1
        dut.valid1_i <= 1
        dut.valid2_i <= 1
        dut.data0_i <= np2bv(val0, 16)
        dut.data1_i <= np2bv(val1, 16)
        dut.data2_i <= np2bv(val2, 16)
        # Widen to int64 before summing so three int16 values cannot overflow.
        add = val0.astype(np.int64) + val1.astype(np.int64) + val2.astype(np.int64)
        expected = np2bv(add, n_bits=18)
        await FallingEdge(dut.clk_i)
        # NOTE(review): `observed` and `expected` are whole-vector values that
        # do not depend on the loop index j, so the same comparison is
        # repeated DUT_VECTOR_SIZE times (the failure message prints val*[j]
        # next to the full expected/observed words) -- confirm whether a
        # per-element slice was intended here.
        for j in range(DUT_VECTOR_SIZE):
            observed = dut.data_o.value
            assert observed == expected,\
                "data0_i = %d, data1_i = %d, data2_i = %d, expected = %d, observed = %d" %\
                (val0[j], val1[j], val2[j], expected.value, observed) | 35,091 |
def _precedence(match):
    """
    in a dict spec, target-keys may match many
    spec-keys (e.g. 1 will match int, M > 0, and 1);
    therefore we need a precedence for which order to try
    keys in; higher = later
    """
    # Unwrap Required/Optional markers down to the underlying key.
    if type(match) in (Required, Optional):
        match = match.key
    # Composite keys take the precedence of their highest-precedence member
    # (an empty composite defaults to 0, matching the literal case).
    if type(match) in (tuple, frozenset):
        return max((_precedence(sub) for sub in match), default=0)
    if isinstance(match, type):
        return 2
    if hasattr(match, "glomit"):
        return 1
    return 0
def main(argv):
    """
    The main method of the program. It is responsible for:
    - Managing the server and request handler
    - Spawning the GA instance
    - Starts the interface or parses command-line arguments
    And finally returns the exit code

    With no arguments the server runs interactively on the default
    host/port; otherwise the GA is configured from the command line.
    NOTE(review): optparse.OptionParser is deprecated in favour of argparse.
    """
    controller = SATController.instance()
    # Imported here rather than at module top -- presumably to avoid a
    # circular import with RequestHandler; confirm.
    from RequestHandler import decode
    if len(argv) == 0:
        # Start the interface
        controller.server_thread = SATServer(default_host, default_port, decode)
        controller.server_thread.start()
        controller.server_thread.join()
        pass
    else:
        parser = OptionParser()
        parser.add_option("-p", "--port", dest="port", type="int", help="Port number on which the server should run.",
                          metavar="<port>")
        parser.add_option("-f", "--file", dest="file", type="string", help="The CNF source file in DIMACS format.",
                          metavar="<filename>")
        parser.add_option("--tabu-list-length", dest="tabu_list_length", type="int",
                          help="Length of tabu list - a fixed size FIFO queue.",
                          metavar="<tabu list length>")
        parser.add_option("--max-false", dest="max_false", type="int",
                          help='How many times a clause must be false to be considered a "stumble-clause".',
                          metavar="<max false>")
        parser.add_option("--rec", dest="rec", type="int",
                          help="The number of times false clauses induced "
                               "by a flip are forced to become true recursively.", metavar="<rec>")
        parser.add_option("-k", dest="k", type="int", help="How long an atom in a stumble-clause is prevented from "
                                                           "flipping.", metavar="<k>")
        parser.add_option("--max-generations", dest="max_generations", type="int",
                          help="The max number of iterations for the genetic algorithm.", metavar="<max generations>")
        parser.add_option("--population-size", dest="population_size", type="int",
                          help="The number of individuals in the population.", metavar="<population size>")
        parser.add_option("--sub-population-size", dest="sub_population_size", type="int",
                          help="The number of individuals to use in the selection process for parents.",
                          metavar="<sub population size>")
        parser.add_option("--crossover-operator", dest="crossover_operator", type="int",
                          help="0 - Corrective Clause; 1 - Corrective Clause with Truth Maintenance; "
                               "2 - Fluerent and Ferland.",
                          metavar="<crossover operator>")
        parser.add_option("--max-flip", dest="max_flip", type="int",
                          help="The maximum number of flips that can be performed during a tabu search procedure.",
                          metavar="<max flip>")
        parser.add_option("--rvcf", dest="is_rvcf", type="int",
                          help="Refinement of variable choice to flip - "
                               "using weight criterion to choose a better variable to flip - "
                               "0 for False; 1 for True.")
        parser.add_option("--diversification", dest="is_diversification", type="int",
                          help="A mechanism to help flip the last few stubborn false clauses - "
                               "0 for False; 1 for True.")
        (options, args) = parser.parse_args()
        options = vars(options)
        if options["port"] is not None:
            # Port has been specified start server
            controller.server_thread = SATServer(default_host, options["port"], decode)
            controller.server_thread.start()
        # NOTE(review): -f is effectively required here -- open(None) raises
        # TypeError when it is omitted; the file is also never closed.
        f = open(options['file'], "r")
        formula, number_of_variables, number_of_clauses = controller.parse_formula(f.readlines())
        port_number = options['port']
        # Remove CLI-only keys so `options` can be passed to create_ga verbatim.
        del options['port']
        del options['file']
        options['formula'] = formula
        options['number_of_variables'] = number_of_variables
        options['number_of_clauses'] = number_of_clauses
        try:
            controller.create_ga(options)
        except InputError as ie:
            print(ie)
            if port_number is not None:
                # Close Server
                pass
            return
        controller.start_ga() | 35,093 |
def experiment_set_reporting_data(connection, **kwargs):
    """
    Get a snapshot of all experiment sets, their experiments, and files of
    all of the above. Include uuid, accession, status, and md5sum (for files).
    """
    check = CheckResult(connection, 'experiment_set_reporting_data')
    check.status = 'IGNORE'
    report = {}
    base_query = '/search/?type=ExperimentSetReplicate&experimentset_type=replicate&sort=-date_created'
    # First the default search, then a second one for deleted/replaced sets.
    hits = ff_utils.search_metadata(base_query, key=connection.ff_keys, page_limit=20)
    hits += ff_utils.search_metadata(base_query + '&status=deleted&status=replaced',
                                     key=connection.ff_keys, page_limit=20)
    for hit in hits:
        add_to_report(hit, report)
    check.full_output = report
    return check
def display2D(data,show=None,xsize=None,ysize=None,pal=None):
    """display2D(data,show=None,xsize=None,ysize=None) - create color image object
    from 2D list or array data, and the color palette is extracted from 'pal.dat',
    if show=1 specified by default a 300x300 window shows the data image
    xsize, ysize override the default 300 pixel setting
    pal[768] - if specified the color table palette will be used
    """
    # Accept plain 2D lists by converting them to an array first.
    if type(data) == type([]):
        data = array(data)
    # Image width is the number of columns, height the number of rows.
    w,h = data.shape[1],data.shape[0]
    if pal == None:
        file = "pal.dat"
        # NOTE(review): this loads the palette only when 'pal.dat' is
        # MISSING (`isfile(...) == 0`), so when the file exists `pal` stays
        # None and data2pixel is called with p=None; the condition looks
        # inverted -- confirm. `CT` is also assigned but never used here.
        if os.path.isfile(file) == 0:
            CT = readCT()
            pal = readPalette()
    # data2pixel is a project helper: presumably maps data values to a flat
    # list of RGB tuples using palette `p` -- confirm its contract.
    pixel = data2pixel(data,p=pal)
    im = Image.new('RGB',(w,h))
    # Copy pixels one at a time; `ij` is the flat row-major index.
    for j in range(h):
        for i in range(w):
            ij = i+j*w
            im.putpixel((i,j),pixel[ij])
    if show != None:
        if xsize == None:
            xsize = 300
        if ysize == None:
            ysize = 300
        # resizeImage is a project helper; presumably it also displays the
        # resized image -- confirm.
        resizeImage(im,xsize,ysize)
    return im | 35,095 |
def read_port_await_str(expected_response_str):
    """
    It appears that the Shapeoko responds with the string "ok" (or an "err nn" string) when
    a command is processed. Read the shapeoko_port and verify the response string in this routine.
    If the response does not match, a diagnostic message is printed to stdout.
    (Python 2 module: note the print statement below.)
    :param expected_response_str: a string, typically "ok"
    :return: True if the expected response was received, otherwise False
    """
    assert isinstance(expected_response_str, str)
    global __shapeoko_port
    # Read one line from the serial port and strip surrounding whitespace.
    response_str = __shapeoko_port.readline().strip()
    if expected_response_str != response_str:
        # Mismatch: report what was actually received for debugging.
        print "RESPONSE_STR_LEN({0}), RESPONSE_STR({1})".format(len(response_str), response_str)
    return expected_response_str == response_str | 35,096 |
def doify(f, *, name=None, tock=0.0, **opts):
    """
    Return a Doist compatible copy of converted generator function f.

    Each invocation of doify(f) returns a unique copy of f, imbued with the
    attributes used by Doist.enter() or DoDoer.enter() (``done``, ``tock``,
    ``opts``), so multiple doified copies of the same generator function can
    coexist, each with its own attributes.

    Usage:
        def f():
            pass
        c = doify(f, name='c')

    Parameters:
        f is generator function
        name is new function name for returned doified copy. Default is to
            copy f.__name__
        tock is default tock attribute of the doified copy
        opts is dictionary of remaining parameters that becomes .opts
            attribute of the doified copy

    Based on:
    https://stackoverflow.com/questions/972/adding-a-method-to-an-existing-object-instance
    """
    doer = helping.copyfunc(f, name=name)
    doer.done = None        # default done state
    doer.tock = tock        # default tock attribute
    doer.opts = dict(opts)  # own copy of the options
    # A bound method must be re-bound so the copy stays a method of the
    # original f.__self__ instance only.
    if inspect.ismethod(f):
        doer = types.MethodType(doer, f.__self__)
    return doer
def deserialize(config, custom_objects=None):
  """Inverse of the `serialize` function.

  Args:
    config: Optimizer configuration dictionary.
    custom_objects: Optional dictionary mapping names (strings) to custom
      objects (classes and functions) to be considered during deserialization.

  Returns:
    A Keras Optimizer instance.
  """
  # loss_scale_optimizer has a direct dependency of optimizer, import here
  # rather than top to avoid the cyclic dependency.
  from keras.mixed_precision import loss_scale_optimizer  # pylint: disable=g-import-not-at-top
  # Registry of built-in optimizers, keyed by lowercase class name.
  all_classes = {
      'adadelta': adadelta_v2.Adadelta,
      'adagrad': adagrad_v2.Adagrad,
      'adam': adam_v2.Adam,
      'adamax': adamax_v2.Adamax,
      'nadam': nadam_v2.Nadam,
      'rmsprop': rmsprop_v2.RMSprop,
      'sgd': gradient_descent_v2.SGD,
      'ftrl': ftrl.Ftrl,
      'lossscaleoptimizer': loss_scale_optimizer.LossScaleOptimizer,
      # LossScaleOptimizerV1 deserializes into LossScaleOptimizer, as
      # LossScaleOptimizerV1 will be removed soon but deserializing it will
      # still be supported.
      'lossscaleoptimizerv1': loss_scale_optimizer.LossScaleOptimizer,
  }
  # Make deserialization case-insensitive for built-in optimizers.
  if config['class_name'].lower() in all_classes:
    config['class_name'] = config['class_name'].lower()
  # Unknown class names fall through to custom_objects; presumably
  # deserialize_keras_object raises a ValueError if still unresolved.
  return deserialize_keras_object(
      config,
      module_objects=all_classes,
      custom_objects=custom_objects,
      printable_module_name='optimizer') | 35,098 |
def unquote_to_bytes(string):
    """unquote_to_bytes('abc%20def') -> b'abc def'.

    Accepts either str (encoded as UTF-8 first) or bytes. Malformed escapes
    (e.g. '%zz' or a trailing '%') are passed through unchanged.
    """
    # Note: strings are encoded as UTF-8. This is only an issue if it contains
    # unescaped non-ASCII characters, which URIs should not.
    if isinstance(string, str):
        string = string.encode('utf-8')
    parts = string.split(b'%')
    # Everything before the first '%' is literal; each later chunk starts
    # with the two hex digits of an escape when the escape is well-formed.
    # (The original contained a pointless no-op `res[0] = res[0]` here.)
    decoded = [parts[0]]
    for part in parts[1:]:
        try:
            decoded.append(bytes([int(part[:2], 16)]) + part[2:])
        except ValueError:
            # Malformed escape: keep the '%' and the chunk verbatim.
            decoded.append(b'%' + part)
    return b''.join(decoded)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.