content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def aggregate_metrics_by_nodesets(df, nodelists, nodeset_names=None, weightlists=None, level_name="node",
                                  use_metrics=None, print_looptime=True):
    """Aggregates a dataframe by nodes (into nodesets), returning a data frame with same structure
    but with nodesets instead of nodes.
    Accepts different weights for each node.

    Parameters
    ----------
    df : pd.DataFrame
        The big df.
        Signature: df[(strat_name, sim_prefix, exec, node)] -> [metrics]
    nodelists : list or None
        This represents the sets of nodes to aggregate.
        It is a nested list, each one containing the node indexes of each set. Nodes may be in more than
        one set.
    nodeset_names : sequence
        List of names of the nodesets. Used in out_df to label the sets, so they must be unique.
    weightlists : list
        This represents the weights of each node in each nodeset.
        Must have the same nested structure (and lengths) of nodelists.
    level_name : hashable
        Name of the level to be aggregated (i.e., the nodes).
        The method is agnostic to the other levels.
    use_metrics : sequence
        Metrics to use. Must be a subset of the names of columns in df.
    print_looptime : bool
        Whether the method should print the main loop execution time.
        It's quite costly and can be optimized...

    Returns
    -------
    pd.DataFrame
        Possible signature of the output dataframe:
        out_df(strategy, sim_prefix, exec, nodeset)[use_metrics]
    """
    num_nodesets = len(nodelists)
    if nodeset_names is None:
        # Uses simple numbers (0..num_nodesets-1) as labels.
        nodeset_names = list(range(num_nodesets))
    if weightlists is None:
        # Weights not passed - set all to 1 (plain average).
        weightlists = [[1.]*len(l_nodes) for l_nodes in nodelists]
    # Precalculate the normalization of weights in each nodeset.
    weight_sums = [sum(weights) for weights in weightlists]
    if use_metrics is None:
        use_metrics = df.columns
    # This routine designs a new index object, with nodesets instead of nodes.
    # It is agnostic to the other levels; only `level_name` is replaced.
    new_index = df.index.droplevel(level=level_name)  # Multiindex without level 'node'
    df_from_index = new_index.to_frame(index=False)  # Converts multiindex to a frame with each level as a column
    df_from_index.drop_duplicates(inplace=True, ignore_index=True)  # droplevel leaves repeated rows
    new_index = pd.MultiIndex.from_frame(df_from_index)  # Creates an index yet without level "node"
    num_rows = len(df_from_index)  # Number of rows without the "node" level.
    # Stacking a (rows x nodesets) frame of NaNs produces the desired multiindex
    # with one "nodeset" entry per (other-levels) combination.
    tmp_df = pd.DataFrame({name: np.repeat(np.nan, num_rows) for name in nodeset_names}, index=new_index)
    stacked_df = tmp_df.stack(dropna=False)  # Reshapes the previous df, finally making the desired multiindex
    stacked_df.index.set_names("nodeset", level=-1, inplace=True)
    # After everything, allocates the output df.
    # Possible Signature: out_df(strategy, sim_prefix, exec, nodeset)[metrics]
    out_df = pd.DataFrame({metric: np.repeat(np.nan, len(stacked_df)) for metric in use_metrics},
                          index=stacked_df.index)
    # ---------------------------
    # Main loop over executions (agnostic to the df levels, except the one given as level_name).
    levels_to_iterate = list(df.index.names)
    levels_to_iterate.remove(level_name)  # Only removes the node level, agnostic to the others
    loop_t0 = time.time()
    # NOTE(review): this loop is painfully slow; the label-based indexing and
    # cross-sectioning (not the arithmetic) are presumably the bottleneck — profile before optimizing.
    loop_size = len(df.groupby(level=levels_to_iterate))  # Extra groupby pass, only for progress reporting.
    param_t0 = time.time()
    for i_param, (params, exec_df) in enumerate(df.groupby(level=levels_to_iterate)):
        # Local loop over each set of nodes.
        for i, l_nodes in enumerate(nodelists):
            # Weighted average of the required metrics for the current nodeset.
            nset_average = sum(weight * exec_df.loc[(*params, ni)][use_metrics]
                               for (ni, weight) in zip(l_nodes, weightlists[i]))
            nset_average /= weight_sums[i]
            # Put into final data frame.
            out_df.loc[(*params, nodeset_names[i])] = nset_average
        # Iteration time feedback every 100 groups.
        if i_param % 100 == 0:
            param_tf = time.time()
            print("{:0.3f}%: {:6.4}s\n".format(100 * i_param / loop_size, param_tf - param_t0), end=" ")
            sys.stdout.flush()
            param_t0 = param_tf
    # NOTE: a parallel version was tried and abandoned — the slow part is probably not parallelizable.
    print()
    # Execution time feedback.
    loop_tf = time.time()
    if print_looptime:
        print(" - Time calculating nodeset averages: {} ({:0.5f}s)"
              "".format(seconds_to_hhmmss(loop_tf - loop_t0), loop_tf - loop_t0))
    return out_df
def zero_pad(data, window_size):
    """
    Pads the input with ceil(window_size / 2) zeros on each side.

    Args:
        data (numpy.ndarray): data to be padded.
        window_size (int): parameter that controls the size of padding.
    Returns:
        numpy.ndarray: padded data.
    """
    half_window = ceil(window_size / 2)
    return np.pad(data, (half_window, half_window), 'constant',
                  constant_values=(0, 0))
def test_recorder_setup_failure():
    """Recorder must start and join cleanly even when DB setup raises."""
    hass = get_test_home_assistant()
    with patch.object(Recorder, "_setup_connection") as setup, patch(
        "homeassistant.components.recorder.time.sleep"
    ):
        # Simulate a missing DB driver so _setup_connection always fails.
        setup.side_effect = ImportError("driver not found")
        recorder = Recorder(
            hass,
            keep_days=7,
            purge_interval=2,
            commit_interval=1,
            uri="sqlite://",
            db_max_retries=10,
            db_retry_wait=3,
            include={},
            exclude={},
        )
        recorder.start()
        recorder.join()
    hass.stop()
def get_results(url_id):
    """Fetch the scanned results for the URL with the given id."""
    endpoint = 'https://webcookies.org/api2/urls/%s' % url_id
    response = requests.get(endpoint, headers=headers)
    return response.json()
def assert_equal(
    actual: Tuple[numpy.ndarray, numpy.ndarray], desired: List[Union[float, int]]
):
    """
    API-usage stub: records the observed signature of an `assert_equal` call.

    usage.scipy: 1
    """
    # Intentionally unimplemented — this stub exists only for signature/typing data.
    ...
def build_dense_constraint(base_name, v_vars, u_exprs, pos, ap_x):
    """Alias for :func:`same_act`; forwards all arguments unchanged."""
    return same_act(base_name, v_vars, u_exprs, pos, ap_x)
def list_snapshots(client, data_args) -> Tuple[str, dict, Union[list, dict]]:
    """ List all snapshots at the system.
    :type client: ``Client``
    :param client: client which connects to api.
    :type data_args: ``dict``
    :param data_args: request arguments (supports 'limit' and 'offset' paging keys).
    :return: human readable format, context output and the original raw response.
    :rtype: ``tuple``
    """
    # Paging arguments are optional; arg_to_number tolerates missing values.
    limit = arg_to_number(data_args.get('limit'))
    offset = arg_to_number(data_args.get('offset'))
    params = assign_params(limit=limit, offset=offset)
    raw_response = client.do_request(method='GET',
                                     url_suffix='/plugin/products/threat-response/api/v1/snapshot',
                                     params=params)
    snapshots = raw_response.get('snapshots', [])
    # Normalize epoch timestamps to date strings; leave values that don't parse untouched.
    for snapshot in snapshots:
        if created := snapshot.get('created'):
            try:
                snapshot['created'] = timestamp_to_datestring(created)
            except ValueError:
                pass
    context = createContext(snapshots, removeNull=True)
    headers = ['uuid', 'name', 'evidenceType', 'hostname', 'created']
    # Context key dedupes entries by uuid on the platform side.
    outputs = {'Tanium.Snapshot(val.uuid === obj.uuid)': context}
    human_readable = tableToMarkdown('Snapshots:', snapshots, headers=headers,
                                     headerTransform=pascalToSpace, removeNull=True)
    return human_readable, outputs, raw_response
def data_context_path_computation_context_pathuuid_linktopology_uuidlink_uuid_get(uuid, topology_uuid, link_uuid):  # noqa: E501
    """Generated server stub: look up a link reference for a computed path.

    returns tapi.topology.LinkRef  # noqa: E501

    :param uuid: Id of path
    :type uuid: str
    :param topology_uuid: Id of link
    :type topology_uuid: str
    :param link_uuid: Id of link
    :type link_uuid: str
    :rtype: TapiTopologyLinkRef
    """
    # Placeholder body emitted by the code generator — real logic goes here.
    return 'do some magic!'
def ipfs_qm_hash_to_32_bytes(ipfs_qm: str) -> str:
    """
    Transform IPFS base58 Qm... hash to a 32 bytes string (without the 2 heading
    multihash bytes), returned as a 0x-prefixed hex string.

    :param ipfs_qm: IPFS base58 Qm... hash.
    :return: 32 bytes hex string (without the 2 heading bytes).
    """
    # b58decode yields 34 bytes; [4:] skips the 2-byte multihash prefix (4 hex chars).
    return f"0x{b58decode(ipfs_qm).hex()[4:]}"
def get_service_state(scheduler):
    """Return the current state of the job service with HTTP status 200."""
    body = {"state": get_service_state_str(scheduler)}
    return body, 200
def test_create_branch_with_bad_start_ref(mock_repo):
    """
    GIVEN GitRepo is initialized with a path and repo
    WHEN branch.create is called with a valid name and invalid start_ref
    THEN a ReferenceNotFoundException is raised
    """
    repo = GitRepo(repo=mock_repo)
    # Force git's ref resolution to fail so create() hits its error path.
    with patch('git.repo.fun.name_to_object') as mock_name_to_object:
        mock_name_to_object.side_effect = git.exc.BadName()
        with pytest.raises(exceptions.ReferenceNotFoundException):
            assert repo.branch.create("test", "badref")
def tst_insert_read_left1(dut):
    """ expected output : 5 6 7 8 9 895
    Test reading when only one element is left in the FIFO:
    tready is deasserted for 10 clock cycles before getting the last element.
    """
    # Free-running clock for the DUT; cocotb coroutine test (pre-async syntax).
    cocotb.fork(Clock(dut.clk, 6.4, 'ns').start())
    tb = axistream_fifo_TB(dut)
    yield tb.async_rst()
    # Queue 5 beats starting at value 5, then a final beat (895) with TLAST set.
    tb.insertContinuousBatch(5, 5)
    tb.stream_in.append(895, tlast=1)
    # Hold off the consumer, then drain until the second-to-last value (9) appears.
    dut.stream_out_tready <= 0
    yield ClockCycles(dut.clk, 10)
    dut.stream_out_tready <= 1
    while dut.stream_out_tdata != 9:
        yield ClockCycles(dut.clk, 1)
    # Stall again with exactly one element (895) left, then pop it.
    dut.stream_out_tready <= 0
    yield ClockCycles(dut.clk, 10)
    dut.stream_out_tready <= 1
    yield ClockCycles(dut.clk, 1)
    # Final idle window; FIFO should now be empty.
    dut.stream_out_tready <= 0
    yield ClockCycles(dut.clk, 10)
def determine_file_type(filename):
    """Classify a filename by its extension.

    :param filename: str
    :rtype: FileType
    """
    # Ordered (suffix, type) table; endswith also accepts a tuple of suffixes.
    suffix_table = (
        ('.cls', FileType.CLS),
        ('.java', FileType.JAVA),
        ('.js', FileType.JAVASCRIPT),
        ('.php', FileType.PHP),
        ('.py', FileType.PYTHON),
        (('.yaml', '.yml'), FileType.YAML),
    )
    for suffix, file_type in suffix_table:
        if filename.endswith(suffix):
            return file_type
    return FileType.OTHER
def sanitise_text(text):
    """Normalise line endings and guarantee a trailing newline.

    All CR/LF pairs become LF and any remaining CRs are removed, so saved
    files get consistent line endings regardless of the source platform.

    A trailing empty line is also required: code that ends inside an
    indented block (e.g. a method whose last line is indented) is not fully
    defined by some consumers unless the text ends with a newline
    (thanks to Emiel v. IJsseldijk for reproducing!).
    """
    cleaned = text.replace('\r\n', '\n').replace('\r', '')
    # Append the terminating newline only for non-empty text lacking one.
    if cleaned and not cleaned.endswith('\n'):
        cleaned += '\n'
    return cleaned
def create_regression(
    n_samples=settings["make_regression"]["n_samples"]
) -> pd.DataFrame:
    """Creates a fake regression dataset with 20 features.

    Parameters
    ----------
    n_samples : int
        number of samples to generate

    Returns
    -------
    pd.DataFrame of features and targets:
        feature names are lowercase letters, targets are in the column "target"
    """
    X, y = make_regression(n_samples=n_samples, n_features=20, n_informative=5)
    # One lowercase letter per feature column ('a'..'t' for 20 features).
    letters = list(string.ascii_lowercase[: X.shape[1]])
    feature_df = pd.DataFrame(X, columns=letters)
    target_col = pd.Series(y, name="target")
    return feature_df.join(target_col)
def _divide_no_nan(x, y, epsilon=1e-8):
    """Equivalent to tf.math.divide_no_nan but supports bfloat16."""
    def _near_zero(t):
        # Elementwise |t| <= epsilon.
        return tf.logical_and(tf.greater_equal(t, -epsilon),
                              tf.less_equal(t, epsilon))
    # Substitute 1 for (near-)zero denominators so the division never NaNs.
    safe_y = tf.where(_near_zero(y), tf.ones_like(y), y)
    # Broadcast manually to x's shape, then zero the quotient where y ~ 0.
    y_full = tf.broadcast_to(y, x.get_shape())
    return tf.where(_near_zero(y_full), tf.zeros_like(x), x / safe_y)
def write_json_file(compositions, filename: str, path: str = None):
    """
    Write one or more `Compositions <Composition>` and associated objects to file in the `general JSON format
    <JSON_Model_Specification>`

    .. _JSON_Write_Multiple_Compositions_Note:
    .. note::
       At present, if more than one Composition is specified, all must be fully disjoint; that is, they must not
       share any `Components <Component>` (e.g., `Mechanism`, `Projections` etc.). This limitation will be
       addressed in a future update.

    Arguments
    ---------
    compositions : Composition or list
        specifies `Composition` or list of ones to be written to **filename**
    filename : str
        specifies name of file in which to write JSON specification of `Composition(s) <Composition>`
        and associated objects.
    path : str : default None
        specifies path of file for JSON specification; if it is not specified then the current directory is used.
    """
    compositions = convert_to_list(compositions)
    # Validate every item up front; import is local to avoid a circular import.
    for c in compositions:
        from psyneulink.core.compositions.composition import Composition
        if not isinstance(c, Composition):
            raise PNLJSONError(f'Item in compositions arg of write_to_json_file() is not a Composition: {c}.')
    if path:
        if path[-1] != '/':
            path += '/'
        filename = path + filename
    # Merge each composition's dict summary; once the composition list key exists,
    # subsequent compositions extend it instead of overwriting.
    merged_dict_summary = {}
    for c in compositions:
        try:
            merged_dict_summary[MODEL_SPEC_ID_COMPOSITION].extend(
                c._dict_summary[MODEL_SPEC_ID_COMPOSITION]
            )
        except KeyError:
            merged_dict_summary.update(c._dict_summary)
    with open(filename, 'w') as json_file:
        json_file.write(_dump_pnl_json_from_dict(merged_dict_summary))
def make_release(t, **params_or_funcs):
    """Create a particle release table to be used for testing.

    ``t`` gives release offsets in minutes from a fixed start date.  Each
    keyword is either a constant or a callable ``f(index, t)``; values are
    broadcast against ``t`` and become a column of the result.
    """
    t = np.array(t)
    row_index = np.arange(len(t))
    columns = {}
    for name, spec in params_or_funcs.items():
        base = spec(row_index, t) if callable(spec) else spec
        # Adding zeros_like(t) broadcasts scalars/arrays to t's shape.
        columns[name] = base + np.zeros_like(t)
    start_date = np.datetime64("2000-01-02T03")
    one_minute = np.timedelta64(60, "s")
    release_times = (start_date + np.array(t) * one_minute).astype(str)
    return pd.DataFrame(data={"release_time": release_times, **columns})
def upgrade_v21_to_v22(db: 'DBHandler') -> None:
    """Upgrades the DB from v21 to v22

    Changes the ETH2 deposit table to properly name the deposit index column
    and deletes all old data so they can be populated again.
    """
    cursor = db.conn.cursor()
    # Drop the stale table and forget its query ranges so data is re-fetched.
    for statement in (
            'DROP TABLE IF EXISTS eth2_deposits;',
            'DELETE from used_query_ranges WHERE name LIKE "eth2_deposits_%";',
    ):
        cursor.execute(statement)
    # Recreate with the properly named deposit_index column.
    cursor.execute("""
    CREATE TABLE IF NOT EXISTS eth2_deposits (
    tx_hash VARCHAR[42] NOT NULL,
    log_index INTEGER NOT NULL,
    from_address VARCHAR[42] NOT NULL,
    timestamp INTEGER NOT NULL,
    pubkey TEXT NOT NULL,
    withdrawal_credentials TEXT NOT NULL,
    amount TEXT NOT NULL,
    usd_value TEXT NOT NULL,
    deposit_index INTEGER NOT NULL,
    PRIMARY KEY (tx_hash, log_index)
    );""")
    db.conn.commit()
def get_apartment_divs(driver):
    """Scrapes the url the driver is pointing at and extracts
    any divs with "listitem" classes. Those divs are used as
    apartment objects at Immowelt.

    Args:
        driver (Webdriver): A Webdriver instance.
    Returns:
        set: the deduplicated div tags whose class matches
        'listitem.*relative js-listitem' (note: a set, not a list).
    """
    source = get_list_source(driver)
    regex = re.compile('listitem.*relative js-listitem')
    # set() drops duplicate tags that findAll may return.
    return set(source.findAll("div", regex))
def smart_apply(tensor, static_fn, dynamic_fn):
    """
    Apply transformation on `tensor`, with either `static_fn` for static
    tensors (e.g., Numpy arrays, numbers) or `dynamic_fn` for dynamic
    tensors.

    Args:
        tensor: The tensor to be transformed.
        static_fn: Static transformation function.
        dynamic_fn: Dynamic transformation function.

    Returns:
        Tensor: The transformed tensor.
    """
    dynamic_types = (tf.Tensor, tf.Variable, StochasticTensor,
                     zs.StochasticTensor)
    transform = dynamic_fn if isinstance(tensor, dynamic_types) else static_fn
    return transform(tensor)
def new_scan(host, publish="off", start_new="on", all="done", ignoreMismatch="on"):
    """Request SSL Labs to run a new scan for the target domain and poll
    until the scan finishes.

    Note: `all` shadows the builtin, but it is kept because it mirrors the
    SSL Labs API parameter name and is part of this function's interface.

    :param host: domain to scan (IP addresses are rejected by SSL Labs)
    :return: the final JSON results once status is READY or ERROR
    """
    if helpers.is_ip(host):
        # Fixed typo in user-facing message ("onyl" -> "only").
        print(red("[!] Your target host must be a domain, not an IP address! "
                  "SSL Labs will only scan domains."))
        exit()
    path = "analyze"
    payload = {'host': host, 'publish': publish, 'start_new': start_new,
               'all': all, 'ignoreMismatch': ignoreMismatch}
    results = request_api(path, payload)
    # Drop start_new so the polling requests below don't kick off new scans.
    payload.pop('start_new')
    while results['status'] not in ('READY', 'ERROR'):
        print("Scan in progress, please wait for the results.")
        time.sleep(30)
        results = request_api(path, payload)
    return results
def removeBubbles(I, kernelSize=(11, 11)):
    """Remove bright spots (mostly bubbles) in retardance images by replacing
    them with a morphologically opened background. Need to add a size filter.

    Parameters
    ----------
    I : numpy.ndarray
        Input retardance image.
    kernelSize : tuple of int
        Elliptical structuring-element size for the background opening.

    Returns
    -------
    numpy.ndarray
        Copy of ``I`` with detected bubble pixels replaced by background values.
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernelSize)
    # Morphological opening estimates the bubble-free background.
    Bg = cv2.morphologyEx(I, cv2.MORPH_OPEN, kernel)
    I8bit = I/np.nanmax(I[:])*255  # rescale to 8 bit as OpenCV only takes 8 bit (REALLY????)
    I8bit = I8bit.astype(np.uint8, copy=False)  # convert to 8 bit
    # Adaptive threshold (negative C) marks locally bright pixels as candidates.
    ITh = cv2.adaptiveThreshold(I8bit, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 201, -1)
    kernelSize = (3, 3)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernelSize)
    IThBig = cv2.morphologyEx(ITh, cv2.MORPH_CLOSE, kernel)
    kernelSize = (21, 21)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernelSize)
    IThBig = cv2.morphologyEx(IThBig, cv2.MORPH_OPEN, kernel)
    # Subtracting the large connected regions keeps only small bright spots (bubbles).
    ITh = ITh-IThBig
    IBi = ITh.astype(np.bool_, copy=True)  # boolean bubble mask
    INoBub = np.copy(I)
    INoBub[IBi] = Bg[IBi]
    # Diagnostic 2x2 figure of the intermediate images.
    # NOTE(review): the subplot titles below look copy-pasted and mismatched
    # (e.g. IThBig labelled 'Orientation (MM)') — confirm intended labels.
    figSize = (8, 8)
    fig = plt.figure(figsize=figSize)
    a = fig.add_subplot(2, 2, 1)
    plt.tick_params(labelbottom='off', labelleft='off')  # labels along the bottom edge are off
    plt.imshow(imadjust(I), cmap='gray')
    plt.title('Retardance (MM)')
    plt.show()
    a = fig.add_subplot(2, 2, 2)
    plt.tick_params(labelbottom='off', labelleft='off')  # labels along the bottom edge are off
    plt.imshow(IThBig, cmap='gray')
    plt.title('Orientation (MM)')
    plt.show()
    a = fig.add_subplot(2, 2, 3)
    plt.tick_params(labelbottom='off', labelleft='off')  # labels along the bottom edge are off
    plt.imshow(ITh, cmap='gray')
    plt.title('Retardance (Py)')
    plt.show()
    a = fig.add_subplot(2, 2, 4)
    plt.tick_params(labelbottom='off', labelleft='off')  # labels along the bottom edge are off
    plt.imshow(imadjust(INoBub), cmap='gray')
    plt.title('Orientation (Py)')
    plt.show()
    return INoBub
def test_categories_type_in_kwargs(df_categorical):
    """
    Raise TypeError if the wrong argument is supplied to
    the `categories` parameter in kwargs.
    """
    # A dict is not a valid `categories` spec for col1 — must raise TypeError.
    with pytest.raises(TypeError):
        df_categorical.encode_categorical(col1=({1: 2, 3: 3}, None))
def setchances():
    """ Sets the number of ``chances`` the player gets per question. """
    global chances
    chances = input("How many chances would you like per question: ")
    # Re-prompt until the user enters a positive integer.
    while not chances.isdigit() or int(chances) < 1:
        chances = input("Please enter a number that is greater than 0: ")
    chances = int(chances)
    print("")
def assert_json_response(response, status_code, body, headers=None, body_cmp=operator.eq):
    """Assert JSON response has the expected status_code, body, and headers.

    Asserts that the response's content-type is application/json.

    body_cmp is a callable that takes the JSON-decoded response body and
    expected body and returns a boolean stating whether the comparison
    succeeds:
        body_cmp(json.loads(response.data.decode('utf-8')), body)
    """
    expected_headers = dict(headers or {})
    expected_headers['Content-Type'] = 'application/json'

    def json_cmp(response_body, expected_body):
        decoded = json.loads(response_body.decode('utf-8'))
        return body_cmp(decoded, expected_body)

    assert_response(response, status_code, body, expected_headers, json_cmp)
def test_frame_attribute_descriptor():
    """ Unit tests of the Attribute descriptor """
    from astropy.coordinates.attributes import Attribute

    class TestAttributes(metaclass=OrderedDescriptorContainer):
        attr_none = Attribute()
        attr_2 = Attribute(default=2)
        attr_3_attr2 = Attribute(default=3, secondary_attribute='attr_2')
        attr_none_attr2 = Attribute(default=None, secondary_attribute='attr_2')
        attr_none_nonexist = Attribute(default=None, secondary_attribute='nonexist')

    t = TestAttributes()
    # Defaults: with default=None, the secondary attribute's value is used instead.
    assert t.attr_none is None
    assert t.attr_2 == 2
    assert t.attr_3_attr2 == 3
    assert t.attr_none_attr2 == t.attr_2
    assert t.attr_none_nonexist is None  # No default and non-existent secondary attr
    # Setting values via '_'-prefixed internal vars (as would normally be done in __init__)
    t._attr_none = 10
    assert t.attr_none == 10
    t._attr_2 = 20
    assert t.attr_2 == 20
    assert t.attr_3_attr2 == 3
    assert t.attr_none_attr2 == t.attr_2
    t._attr_none_attr2 = 40
    assert t.attr_none_attr2 == 40
    # Make sure setting values via public attribute fails (descriptor is read-only).
    with pytest.raises(AttributeError) as err:
        t.attr_none = 5
    assert 'Cannot set frame attribute' in str(err.value)
def variable(value, dtype=None, name=None, constraint=None):
    """Instantiates a variable and returns it.

    # Arguments
        value: Numpy array, initial value of the tensor.
        dtype: Tensor type.
        name: Optional name string for the tensor.
        constraint: Optional projection function to be
            applied to the variable after an optimizer update
            (not supported by this backend — raises NotImplementedError).

    # Returns
        A variable instance (with Keras metadata included).

    # Examples
    ```python
        >>> from keras import backend as K
        >>> val = np.array([[1, 2], [3, 4]])
        >>> kvar = K.variable(value=val, dtype='float64', name='example_var')
        >>> K.dtype(kvar)
        'float64'
        >>> print(kvar)
        example_var
        >>> K.eval(kvar)
        array([[ 1.,  2.],
               [ 3.,  4.]])
    ```
    """
    if name is None:
        # Auto-generate a unique name when none is given.
        name = 'variable' + str(get_uid('variable'))
    if constraint is not None:
        raise NotImplementedError('Constraints are not supported')
    if is_tensor(value):
        # Backend tensor: wrap it directly as a variable node.
        variable = av.variable_from_node(name, value)
    else:
        if dtype is None:
            value = np.array(value)
            # Downcast 64-bit values: the backend works in int32 / floatx.
            if value.dtype == 'int64':
                value = np.array(value, dtype='int32')
                dtype = 'int32'
            elif value.dtype == 'float64':
                dtype = floatx()
                value = np.array(value, dtype=floatx())
            else:
                dtype = value.dtype.name
        else:
            value = np.array(value, dtype=dtype)
        variable = av.variable(
            name, value.shape, avalanche_dtype(dtype),
            av.value_initializer(value))
    # Keras bookkeeping metadata expected by the framework.
    # NOTE(review): in the tensor branch, `value.shape` is the tensor's shape
    # attribute rather than a numpy shape — confirm downstream consumers accept it.
    variable._uses_learning_phase = False
    variable._keras_shape = value.shape
    variable._is_variable = True
    return variable
def autoprotocol_protocol(protocol_id):
    """Get autoprotocol-python representation of a protocol.

    Looks up the protocol by id, enforces access control (owner or public),
    converts the stored JSON protocol into autoprotocol-python source, and
    returns it as a downloadable .py attachment.
    """
    current_protocol = Protocol.query.filter_by(id=protocol_id).first()
    if not current_protocol:
        flash('No such specification!', 'danger')
        return redirect('.')
    # Removed leftover debug prints ("PUBLIC"/"NOT PUBLIC") that leaked to stdout.
    # Only the owner may access a non-public protocol.
    if current_protocol.user != current_user and not current_protocol.public:
        flash('Not your project!', 'danger')
        return redirect('.')
    if not current_protocol.protocol:
        return ""
    protocol_object = json.loads(current_protocol.protocol)
    converter = AutoProtocol()
    resp = make_response(converter.convert(protocol_object, current_protocol.name,
                                           current_protocol.description))
    resp.headers['Content-Type'] = "text"
    resp.headers['Content-Disposition'] = "attachment; filename=" + current_protocol.name + "-autoprotocol.py"
    return resp
def test_block_quotes_213b():
    """
    Test case 213b: variation of 213 with an extra list line
    and all three lines in the block quote
    """
    # Arrange
    source_markdown = """> - foo
> - bar
> - bar"""
    # Expected parser token stream (exact strings — do not reformat).
    expected_tokens = [
        "[block-quote(1,1)::> \n> \n> ]",
        "[ulist(1,3):-::4:  ]",
        "[para(1,5):]",
        "[text(1,5):foo:]",
        "[end-para:::True]",
        "[li(2,3):4:  :]",
        "[para(2,5):]",
        "[text(2,5):bar:]",
        "[end-para:::True]",
        "[li(3,3):4:  :]",
        "[para(3,5):]",
        "[text(3,5):bar:]",
        "[end-para:::True]",
        "[end-ulist:::True]",
        "[end-block-quote:::True]",
    ]
    expected_gfm = """<blockquote>
<ul>
<li>foo</li>
<li>bar</li>
<li>bar</li>
</ul>
</blockquote>"""
    # Act & Assert
    act_and_assert(source_markdown, expected_gfm, expected_tokens)
def tensor_to_P(tensor, wig3j=None):
    """
    Transform an arbitrary SO(3) tensor into real P which transforms under the
    irreducible representation with l = 1. Wigner-3j symbols can be provided or
    calculated on the fly for faster evaluation. If provided, wig3j should be
    an array with indexing [l1, l2, m, m1, m2].
    """
    P = []
    n_rad, n_l = get_max(tensor)
    lam = 1  # target angular momentum of the irreducible representation
    # It is faster to pre-evaluate the wigner-3j symbol, even faster if it is passed.
    if not isinstance(wig3j, np.ndarray):
        wig3j = np.zeros([n_l, n_l, 2*n_l+1, 2*n_l+1, 2*n_l+1])
        wig3j = wig3j.astype(np.complex128)
        for l1 in range(n_l):
            for l2 in range(n_l):
                for m in range(-lam, lam+1):
                    for m1 in range(-n_l, n_l+1):
                        for m2 in range(-n_l, n_l+1):
                            # N(...) forces sympy's exact result to a number.
                            wig3j[l2, l1, m, m1, m2] = N(wigner_3j(lam, l2, l1, m, m1, m2))
    for mu in range(-lam, lam + 1):
        P.append([])
        for n1 in range(n_rad):
            for n2 in range(n_rad):
                for l1 in range(n_l):
                    for l2 in range(n_l):
                        # Parity selection rule: only odd l1+l2 contributes for lam = 1.
                        if (l1 + l2) % 2 == 0: continue
                        p = 0
                        for m in range(-n_l, n_l+1):
                            wig = wig3j[l2, l1, mu, (m-mu), -m]
                            if wig != 0:
                                p += tensor['{},{},{}'.format(n1, l1, m)]*tensor['{},{},{}'.format(n2, l2, m-mu)].conj() *\
                                    (-1)**m * wig
                        p *= (-1)**(lam-l2)
                        P[mu+lam].append(p)
    # Convert from the complex spherical basis to real components;
    # T is presumably the module-level complex->real transformation matrix,
    # and [[2, 0, 1]] reorders components — TODO(review): confirm convention.
    p_real = []
    for pi in np.array(P).T:
        p_real.append((T.dot(pi))[[2, 0, 1]])
    P = np.array(p_real).T
    # Sanity check: the result must be purely real after the basis change.
    if not np.allclose(P.imag, np.zeros_like(P)):
        raise Exception('Ooops, something went wrong. P not purely real.')
    return P.real.T
def _print_projects():
    """
    Print the list of projects (uses the folder names)
    """
    project_dir = projects_path()
    # A folder counts as a project only if it contains an .aeriscloud.yml file.
    project_names = [
        entry for entry in os.listdir(project_dir)
        if os.path.exists(os.path.join(project_dir, entry, '.aeriscloud.yml'))
    ]
    print(' '.join(['aeriscloud'] + project_names))
def createNotificationMail(request, *args, **kwargs):
    """Appengine task that sends mail to the subscribed users.

    Expects the following to be present in the POST dict:
      comment_key: Specifies the comment id for which to send the notifications
      task_key: Specifies the task key name for which the comment belongs to
      subscriber_start_index: (optional) offset into the subscriber list,
        used when the work is split across chained tasks

    Args:
      request: Django Request object
    """
    from soc.modules.ghop.logic.helper import notifications as ghop_notifications
    from soc.modules.ghop.logic.models import comment as ghop_comment_logic
    from soc.modules.ghop.logic.models import task_subscription as \
        ghop_task_subscription_logic
    # set default batch size
    batch_size = 10
    post_dict = request.POST
    comment_key = post_dict.get('comment_key')
    task_key = post_dict.get('task_key')
    if not (comment_key and task_key):
        # invalid task data, log and return OK so the task queue does not retry
        return error_handler.logErrorAndReturnOK(
            'Invalid createNotificationMail data: %s' % post_dict)
    comment_key = long(comment_key)
    # get the task entity under which the specified comment was made
    task_entity = ghop_task_logic.logic.getFromKeyName(task_key)
    # get the comment for the given id
    comment_entity = ghop_comment_logic.logic.getFromID(
        comment_key, task_entity)
    if not comment_entity:
        # invalid comment specified, log and return OK
        return error_handler.logErrorAndReturnOK(
            'Invalid comment specified: %s/%s' % (comment_key, task_key))
    # check and retrieve the subscriber_start_index that has been done last.
    # BUGFIX: POST values are strings — the original used the raw string as a
    # slice bound, which raises TypeError; coerce to int.
    if 'subscriber_start_index' in post_dict:
        subscriber_start_index = int(post_dict['subscriber_start_index'])
    else:
        subscriber_start_index = 0
    # get all subscribers to GHOP task
    fields = {
        'task': task_entity,
    }
    ts_entity = ghop_task_subscription_logic.logic.getForFields(
        fields, unique=True)
    subscribers = db.get(ts_entity.subscribers[
        subscriber_start_index:subscriber_start_index+batch_size])
    task_url = "http://%(host)s%(task)s" % {
        'host': system.getHostname(),
        'task': redirects.getPublicRedirect(
            task_entity, {'url_name': 'ghop/task'}),
    }
    # create the data for the mail to be sent
    message_properties = {
        'task_url': task_url,
        'redirect_url': "%(task_url)s#c%(cid)d" % {
            'task_url': task_url,
            'cid': comment_entity.key().id_or_name()
        },
        'comment_entity': comment_entity,
        'task_entity': task_entity,
    }
    subject = DEF_TASK_UPDATE_SUBJECT_FMT % {
        'title': task_entity.title,
    }
    for subscriber in subscribers:
        # BUGFIX: the original passed an undefined name `entity` (NameError on
        # first iteration); the notification target is the current subscriber.
        # TODO(review): confirm against sendTaskUpdate's signature.
        ghop_notifications.sendTaskUpdate(subscriber, subject, message_properties)
    if len(subscribers) == batch_size:
        # spawn task for sending out notifications to next set of subscribers
        next_start = subscriber_start_index + batch_size
        task_params = {
            'comment_key': comment_key,
            'task_key': task_key,
            'subscriber_start_index': next_start
        }
        task_url = '/tasks/ghop/task/mail/create'
        new_task = taskqueue.Task(params=task_params, url=task_url)
        new_task.add('mail')
    # return OK
    return http.HttpResponse()
def mergediscnodes(tree):
    """Reverse transformation of ``splitdiscnodes()``.

    Re-joins nodes whose labels carry a split marker (matched by
    SPLITLABELRE) back into single constituents, in place; returns ``tree``.
    """
    treeclass = tree.__class__
    for node in tree.subtrees():
        merge = defaultdict(list)  # a series of queues of nodes
        # e.g. merge['VP_2*'] = [Tree('VP_2', []), ...]
        # when origin is present (index after *), the node is moved to where
        # the next one is expected, e.g., VP_2*1 after VP_2*0 is added.
        nodes = list(node)  # the original, unmerged children
        node[:] = []  # the new, merged children
        for child in nodes:
            if not isinstance(child, Tree):
                # Leaves pass through unchanged.
                node.append(child)
                continue
            match = SPLITLABELRE.search(child.label)
            if not match:
                # Unsplit nodes pass through unchanged.
                node.append(child)
                continue
            label, part, _ = match.groups()
            grandchildren = list(child)
            child[:] = []
            if not merge[child.label]:
                # First fragment with this label: create the merged node and
                # attach it to the parent in the current position.
                merge[child.label].append(treeclass(label, []))
                node.append(merge[child.label][0])
            # Fold this fragment's children into the merged node.
            merge[child.label][0].extend(grandchildren)
            if part:
                # Indexed fragment: hand the merged node to the queue slot
                # where the next fragment (index + 1) will look for it.
                nextlabel = '%s*%d' % (label, int(part) + 1)
                merge[nextlabel].append(merge[child.label].pop(0))
    return tree
def main():
    # type: () -> typing.Any
    """Parse the command line options and launch the requested command.

    If the command is 'help' then print the help message for the subcommand; if
    no subcommand is given, print the standard help message.
    """
    # Strip ANSI colors when stdout is not a terminal.
    colorama.init(strip=not sys.stdout.isatty())
    doc = usage.get_primary_command_usage()
    # options_first lets docopt pass through args belonging to a subcommand.
    allow_subcommands = "<command>" in doc
    args = docopt(
        doc, version=settings.version, options_first=allow_subcommands
    )
    # Install the custom excepthook only if nothing else replaced it already.
    if sys.excepthook is sys.__excepthook__:
        sys.excepthook = log.excepthook
    try:
        log.enable_logging(log.get_log_level(args))
        # Skip the program name, and the subcommand name if one was given.
        default_args = sys.argv[2 if args.get("<command>") else 1:]
        if (
            args.get("<command>") == "help"
            and None not in settings.subcommands
        ):
            subcommand = next(iter(args.get("<args>", default_args)), None)
            return usage.get_help_usage(subcommand)
        argv = [args.get("<command>")] + args.get("<args>", default_args)
        return _run_command(argv)
    except exc.InvalidCliValueError as e:
        # User-facing CLI error: return the message instead of a traceback.
        return str(e)
def count_go_nogo_trials(eventcode):
    """
    :param eventcode: list of event codes from operant conditioning file
    :return: number of go and no go trials in the go/no go tasks
    """
    lever_events = get_events_indices(eventcode, ['RLeverOn', 'LLeverOn'])
    go_trials = 0
    nogo_trials = 0
    for idx in lever_events:
        # A light cue immediately after lever presentation marks a no-go trial.
        if eventcode[idx + 1] in ('LightOn1', 'LightOn2'):
            nogo_trials += 1
        else:
            go_trials += 1
    return go_trials, nogo_trials
def getScriptExecutionContext():
    """
    Returns the repository description instance and
    the set of items selected on script action execution.

    @return: Script execution context, or None when no script is bound.
    @rtype: L{ScriptExecutionContext<datafinder.gui.user.script_api.ScriptExecutionContext>}
    """
    scriptExecutionContext = None
    # Idiom fix: `x is not None` instead of `not x is None`.
    if _context.scriptController.boundScriptExecutionContext is not None:
        repository, items = _context.scriptController.boundScriptExecutionContext
        itemPaths = [item.path for item in items]
        scriptExecutionContext = ScriptExecutionContext(RepositoryDescription(repository), itemPaths)
    return scriptExecutionContext
def electrondensity_spin(ccdata, volume, mocoeffslist):
    """Calculate the magnitude of the electron density at every point in a volume for either up or down spin

    Inputs:
        ccdata -- ccData object
        volume -- Volume object (will not be altered)
        mocoeffslist -- list of molecular orbital to calculate electron density from;
                        i.e. [ccdata.mocoeffs[0][1:2]]
    Output:
        Volume object with wavefunction at each grid point stored in data attribute
    Attributes:
        coords -- the coordinates of the atoms
        mocoeffs -- mocoeffs for all of the occupied eigenvalues
        gbasis -- gbasis from a parser object
        volume -- a template Volume object (will not be altered)
    Note: mocoeffs is a list of NumPy arrays. The list will be of length 1.
    """
    assert len(mocoeffslist) == 1, "mocoeffslist input to the function should have length of 1."
    bfs = getbfs(ccdata)
    # Work on a copy so the caller's volume is untouched; "d" = float64 grid.
    density = copy.copy(volume)
    density.data = numpy.zeros(density.data.shape, "d")
    x, y, z = getGrid(density)
    # For occupied orbitals
    # `mocoeff` and `gbasis` in ccdata object is ordered in a way `homos` can specify which orbital
    # is the highest lying occupied orbital in mocoeff and gbasis.
    for mocoeffs in mocoeffslist:
        for mocoeff in mocoeffs:
            # Accumulate the orbital wavefunction over all basis functions.
            wavefn = numpy.zeros(density.data.shape, "d")
            for bs in range(len(bfs)):
                # Evaluate basis function `bs` on every grid point, one z-row
                # at a time (pyamp gives the amplitude at a single point).
                data = numpy.zeros(density.data.shape, "d")
                for i, xval in enumerate(x):
                    for j, yval in enumerate(y):
                        tmp = []
                        for zval in z:
                            tmp.append(pyamp(bfs, bs, xval, yval, zval))
                        data[i, j, :] = tmp
                # Weight the basis-function amplitudes by the MO coefficient.
                data *= mocoeff[bs]
                wavefn += data
            # Density contribution is the squared wavefunction.
            density.data += wavefn ** 2
    return density | 32,937
def create_missing_dataframe(nrows, ncols, density=.9, random_state=None, index_type=None, freq=None):
    """Create a Pandas dataframe with random missingness.

    Parameters
    ----------
    nrows : int
        Number of rows
    ncols : int
        Number of columns
    density: float
        Fraction of entries that remain available (non-missing)
    random_state: int, optional
        Random seed, forwarded to the missing-index generator.
    index_type: str, optional
        Accepts the following values: "dt" for timestamp, "int" for integer.
    freq: string, optional
        Sampling frequency. This option is only available if index_type is "dt".

    Returns
    -------
    df : pandas.DataFrame
        Pandas dataframe containing sample data with random missing entries.

    Raises
    ------
    ValueError
        If index_type is given but is neither "dt" nor "int".
    """
    # Create a nrows x ncols matrix of uniform samples in [0, 100).
    data = np.random.uniform(100, size=(nrows, ncols))
    df = pd.DataFrame(data)
    if index_type:
        if index_type == "dt":
            if freq is None:
                freq = 'h'
            idx = _makeDatetimeIndex(nrows, freq=freq)
            df = df.set_index(idx)
        elif index_type == "int":
            # The default RangeIndex is already integer-valued; nothing to do.
            # (Previously this branch returned None, silently discarding df.)
            pass
        else:
            raise ValueError("Can't recognize index_type. Try the following values: 'dt', 'int'.")
    # Knock out a random subset of cells to reach the requested density.
    i_idx, j_idx = _create_missing_idx(nrows, ncols, density, random_state)
    df.values[i_idx, j_idx] = None
    return df
def __check_waiting_time(waiting_time):
    """
    Validate the stream-window configuration option: waiting time.

    :param waiting_time: waiting time in seconds
    :raises StreamWindowConfigCheckError: if the value is not one of the
        supported waiting times
    """
    # Validate the waiting time (unit: seconds) against the supported set.
    if waiting_time not in [0, 10, 30, 60, 180, 300, 600]:
        raise StreamWindowConfigCheckError(_("属性[%s] 目前只支持 %s") % ("waiting_time", "[0, 10, 30, 60, 180, 300, 600]")) | 32,939
def lint(session):
    """Run yapf in diff mode over the whole tree.

    Fails the session if yapf would reformat any file.

    NOTE(review): the original docstring said "Run flake8", but the commands
    below install and run yapf — confirm which tool is actually intended.
    """
    session.install('yapf')
    session.run('python3', '-m', 'yapf', '--diff', '-r', '.') | 32,940
def console(endpoint, admin_secret):
    """
    Opens the Hasura console.

    Note: requires installing the Hasura CLI. See https://docs.hasura.io/graphql/manual/hasura-cli/install-hasura-cli.html

    :param endpoint: Hasura GraphQL endpoint URL
    :param admin_secret: admin secret used to authenticate the CLI
    :raises Exception: if the console could not be opened
    """
    try:
        cmd = shlex.split(
            f"hasura console --endpoint {endpoint} --admin-secret {admin_secret} --skip-update-check"
        )
        subprocess.check_output(cmd, cwd=HASURA_DIR)
    except Exception:
        click.secho("\nCould not open console!", bg="red", bold=True)
        # Re-raise a fresh exception while suppressing the original context.
        raise Exception from None
def decode_name_value_pairs(buffer):
    """
    Decode a name-value pair list from a buffer.

    Lengths follow the FastCGI encoding: a single byte when the high bit is
    clear, otherwise four big-endian bytes with the high bit masked off.

    :param bytearray buffer: a buffer containing a FastCGI name-value pair list
    :raise ProtocolError: if the buffer contains incomplete data
    :return: a list of (name, value) tuples where both elements are unicode strings
    :rtype: list
    """
    index = 0
    pairs = []
    while index < len(buffer):
        # Decode the name length (1-byte form if the high bit is clear).
        if buffer[index] & 0x80 == 0:
            name_length = buffer[index]
            index += 1
        elif len(buffer) - index >= 4:
            # was "> 4": an exactly-4-byte tail is a complete length field
            name_length = length4_struct.unpack_from(buffer, index)[0] & 0x7fffffff
            index += 4
        else:
            raise ProtocolError('not enough data to decode name length in name-value pair')
        # Decode the value length the same way.
        if len(buffer) - index >= 1 and buffer[index] & 0x80 == 0:
            # was "> 1": a single remaining byte is a complete 1-byte length
            value_length = buffer[index]
            index += 1
        elif len(buffer) - index >= 4:
            value_length = length4_struct.unpack_from(buffer, index)[0] & 0x7fffffff
            index += 4
        else:
            raise ProtocolError('not enough data to decode value length in name-value pair')
        if len(buffer) - index >= name_length + value_length:
            name = buffer[index:index + name_length].decode('ascii')
            value = buffer[index + name_length:index + name_length + value_length].decode('utf-8')
            pairs.append((name, value))
            index += name_length + value_length
        else:
            raise ProtocolError('name/value data missing from buffer')
    return pairs
def host_is_local(host: str) -> bool:
    """
    Tells whether given host is local.

    :param host: host name or address
    :return: True if host is local otherwise False
    """
    local_names = {
        "localhost",
        "127.0.0.1",
    }
    # Exact membership test: the previous substring check wrongly matched
    # hosts such as "notlocalhost" or "127.0.0.10".
    return host in local_names
def find_in_path(name, path):
    """Search a list of directories for a binary.

    Args:
        name: the filename to search for
        path: the directories to search, e.g. ['./', './path/to/stuff']

    Returns:
        The abspath to the file or None if not found.
    """
    # "directory" instead of "dir" to avoid shadowing the builtin.
    for directory in path:
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return os.path.abspath(candidate)
    return None
def sigma_function(coeff_matU, coeff_matX, order, V_slack):
    """
    Compute the sigma complex value for every bus from voltage coefficient series.

    :param coeff_matU: array with voltage coefficients
    :param coeff_matX: array with inverse conjugated voltage coefficients
    :param order: should be prof - 1
    :param V_slack: slack bus voltage vector. Must contain only 1 slack bus
    :return: sigma complex value per bus (1-D complex array of length nbus)
    """
    if len(V_slack) > 1:
        print('Sigma values may not be correct')
    V0 = V_slack[0]
    # Normalize both coefficient series by the slack-bus voltage.
    coeff_matU = coeff_matU / V0
    coeff_matX = coeff_matX / V0
    nbus = coeff_matU.shape[1]
    complex_type = nb.complex128
    sigmes = np.zeros(nbus, dtype=complex_type)
    # M is the half-order used to split the series into numerator/denominator
    # coefficients (one fewer for even orders).
    if order % 2 == 0:
        M = int(order / 2) - 1
    else:
        M = int(order / 2)
    for d in range(nbus):
        # Build and solve the (2M+1)x(2M+1) linear system from the series of
        # bus d; lhs holds the rational-approximation coefficients
        # (Padé-style construction — confirm against the HELM sigma reference).
        a = coeff_matU[1:2 * M + 2, d]
        b = coeff_matX[0:2 * M + 1, d]
        C = np.zeros((2 * M + 1, 2 * M + 1), dtype=complex_type)
        for i in range(2 * M + 1):
            if i < M:
                C[1 + i:, i] = a[:2 * M - i]
            else:
                C[i - M:, i] = - b[:3 * M - i + 1]
        lhs = np.linalg.solve(C, -a)
        # Sigma is the ratio of the two coefficient-group sums.
        sigmes[d] = np.sum(lhs[M:]) / (np.sum(lhs[:M]) + 1)
    return sigmes | 32,945
def get_logger(verbose=0):
    """Set up logging according to the verbose level given on the
    command line and return the cached module logger.

    verbose == 0 -> WARNING, verbose == 1 -> INFO,
    verbose > 1  -> DEBUG with a timestamped stderr format.
    Output goes to both stderr and syslog (/dev/log). The logger is built
    only once and cached in the global LOGGER.
    """
    global LOGGER
    if LOGGER is None:
        LOGGER = logging.getLogger(sys.argv[0])
        stderr = logging.StreamHandler()
        level = logging.WARNING
        if verbose == 1:
            level = logging.INFO
        elif verbose > 1:
            # Fully verbose: add timestamps and levels to the stderr output.
            stderr.setFormatter(logging.Formatter("%(asctime)s: %(levelname)s: %(message)s"))
            level = logging.DEBUG
        LOGGER.setLevel(level)
        LOGGER.addHandler(stderr)
        syslog = logging.handlers.SysLogHandler("/dev/log")
        syslog.setFormatter(logging.Formatter("%(name)s: %(message)s"))
        LOGGER.addHandler(syslog)
        # Lazy %-style args instead of eager string formatting.
        LOGGER.debug("Setting verbose to %s", verbose)
    return LOGGER
def get_dbs(db_names: List[str], db_file: str = "./db_info.pub.json") -> List:
    """Read the db_file and get the databases corresponding to ``db_names``.

    Args:
        db_names (List[str]): A list of names of the databases we want
        db_file (str): The db_file we are reading from

    Returns:
        List: the store entries, in the same order as ``db_names``

    Raises:
        ValueError: if any requested name is missing from the db_file
    """
    db_dict = loadfn(db_file)
    stores = []
    for j_name in db_names:
        if j_name not in db_dict:
            raise ValueError(
                f"The store named {j_name} is missing from the db_file")
        stores.append(db_dict[j_name])
    return stores
def knn_threshold(data, column, threshold=15, k=3):
    """
    Cluster rare samples in data[column] with frequency less than
    threshold with one of k-nearest clusters

    Args:
        data - pandas.DataFrame containing colums: latitude, longitude, column
        column - the name of the column to threshold
        threshold - the minimum sample frequency
        k - the number of k-neighbors to explore when selecting cluster partner
    """
    from sklearn import neighbors
    def ids_centers_sizes(data):
        # One row per distinct cluster id: (id, mean lat, mean lon, count).
        dat = np.array([(i, data.latitude[data[column]==i].mean(),
                         data.longitude[data[column]==i].mean(),
                         (data[column]==i).sum())
                        for i in set(list(data[column]))])
        return dat[:,0], dat[:,1:-1].astype(float), dat[:,-1].astype(int)
    knn = neighbors.NearestNeighbors(n_neighbors=k)
    # Repeat until the smallest cluster reaches the frequency threshold.
    while True:
        ids, centers, sizes = ids_centers_sizes(data)
        asrt = np.argsort(sizes)
        if sizes[asrt[0]] >= threshold:
            break
        cids = np.copy(ids)
        knn.fit(centers)
        # Merge every under-threshold cluster into its smallest near neighbor.
        for i in asrt:
            if sizes[i] < threshold:
                nearest = knn.kneighbors(centers[i])[1].flatten()
                nearest = nearest[nearest != i]
                sel = nearest[np.argmin(sizes[nearest])]
                total_size = sizes[sel] + sizes[i]
                # NOTE(review): chained indexing assignment — pandas may warn
                # and, on copies, silently not write; confirm with .loc usage.
                data[column][data[column]==cids[i]] = cids[sel]
                cids[cids==i] = cids[sel]
                sizes[i] = total_size
                sizes[sel] = total_size
    return data | 32,948
def _render_helper(scene, spp=None, sensor_index=0):
    """
    Internally used function: render the specified Mitsuba scene and return a
    floating point array containing RGB values and AOVs, if applicable

    :param scene: the Mitsuba scene to render
    :param spp: samples per pixel; defaults to the sampler's configured count
    :param sensor_index: index of the sensor in the scene to render from
    """
    from mitsuba.core import (Float, UInt32, UInt64, Vector2f,
                              is_monochromatic, is_rgb, is_polarized, DEBUG)
    from mitsuba.render import ImageBlock
    sensor = scene.sensors()[sensor_index]
    film = sensor.film()
    sampler = sensor.sampler()
    film_size = film.crop_size()
    if spp is None:
        spp = sampler.sample_count()
    total_sample_count = ek.hprod(film_size) * spp
    # Reseed only when the sampler wavefront does not match the sample count.
    if sampler.wavefront_size() != total_sample_count:
        sampler.seed(ek.arange(UInt64, total_sample_count))
    # Map each linear sample index to its pixel (spp consecutive samples per
    # pixel), then jitter within the pixel.
    pos = ek.arange(UInt32, total_sample_count)
    pos //= spp
    scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
    pos = Vector2f(Float(pos % int(film_size[0])),
                   Float(pos // int(film_size[0])))
    pos += sampler.next_2d()
    rays, weights = sensor.sample_ray_differential(
        time=0,
        sample1=sampler.next_1d(),
        sample2=pos * scale,
        sample3=0
    )
    spec, mask, aovs = scene.integrator().sample(scene, sampler, rays)
    spec *= weights
    del mask
    if is_polarized:
        from mitsuba.core import depolarize
        spec = depolarize(spec)
    if is_monochromatic:
        rgb = [spec[0]]
    elif is_rgb:
        rgb = spec
    else:
        # Spectral mode: convert sampled spectrum to sRGB via XYZ.
        from mitsuba.core import spectrum_to_xyz, xyz_to_srgb
        xyz = spectrum_to_xyz(spec, rays.wavelengths)
        rgb = xyz_to_srgb(xyz)
        del xyz
    # Channel layout written to the block: [weight, RGB..., remaining AOVs].
    aovs.insert(0, Float(1.0))
    for i in range(len(rgb)):
        aovs.insert(i + 1, rgb[i])
    del rgb, spec, weights, rays
    block = ImageBlock(
        size=film.crop_size(),
        channel_count=len(aovs),
        filter=film.reconstruction_filter(),
        warn_negative=False,
        warn_invalid=DEBUG,
        border=False
    )
    block.clear()
    block.put(pos, aovs)
    del pos
    del aovs
    data = block.data()
    ch = block.channel_count()
    # Gather value channels and divide by the filter-weight channel.
    i = UInt32.arange(ek.hprod(block.size()) * (ch - 1))
    weight_idx = i // (ch - 1) * ch
    values_idx = (i * ch) // (ch - 1) + 1
    weight = ek.gather(data, weight_idx)
    values = ek.gather(data, values_idx)
    # Epsilon avoids division by zero for pixels with zero accumulated weight.
    return values / (weight + 1e-8) | 32,949
def format_formula(formula):
    """Converts str of chemical formula into latex format for labelling purposes.

    Every maximal run of digits becomes a LaTeX subscript, e.g.
    "C12H22O11" -> "$C_{12}H_{22}O_{11}$".

    Parameters
    ----------
    formula: str
        Chemical formula

    Returns
    -------
    str
        LaTeX math string suitable for plot labels
    """
    formatted_formula = ""
    number_format = ""
    for i, s in enumerate(formula):
        if s.isdigit():
            # First digit of a run opens a subscript group.
            if not number_format:
                number_format = "_{"
            number_format += s
            # Close the group if the formula ends on this digit.
            if i == len(formula) - 1:
                number_format += "}"
                formatted_formula += number_format
        else:
            # A non-digit flushes any pending subscript group.
            if number_format:
                number_format += "}"
                formatted_formula += number_format
                number_format = ""
            formatted_formula += s
    return r"$%s$" % (formatted_formula)
def test_get_run_no_actions(subject: RunStore) -> None:
    """It can get a previously stored run entry."""
    # Minimal run resource with no protocol and no actions attached.
    run = RunResource(
        run_id="run-id",
        protocol_id=None,
        created_at=datetime(year=2021, month=1, day=1, tzinfo=timezone.utc),
        actions=[],
        is_current=False,
    )
    subject.insert(run)
    # Fetching by id should round-trip the exact resource.
    result = subject.get(run_id="run-id")
    assert result == run | 32,951
def ParseQuery(query):
    """Parses the entire query.

    Arguments:
        query: The command the user sent that needs to be parsed.

    Returns:
        Dictionary mapping clause names to their arguments.

    Raises:
        bigquery_client.BigqueryInvalidQueryError: When invalid query is given.
    """
    # 'AS' and 'WITHIN' collect keyword mappings; every other clause collects
    # a positional argument list. The parser fills this dict in place.
    clause_arguments = {
        'SELECT': [],
        'AS': {},
        'WITHIN': {},
        'FROM': [],
        'JOIN': [],
        'WHERE': [],
        'GROUP BY': [],
        'HAVING': [],
        'ORDER BY': [],
        'LIMIT': [],
    }
    try:
        _EBQParser(clause_arguments).parseString(query)
    except ValueError as e:
        raise bigquery_client.BigqueryInvalidQueryError(e, None, None, None)
    return clause_arguments
def _bundle_assets(assets_directory, zip_file):
    """Bundle the assets directory.

    Only a 'uris' subdirectory is permitted inside the assets directory;
    any other entry is a packaging error.

    :param assets_directory: path to the assets directory
    :type assets_directory: str
    :param zip_file: zip file object
    :type zip_file: zipfile.ZipFile
    :raises DCOSException: if an unexpected file is present
    :rtype: None
    """
    for filename in sorted(os.listdir(assets_directory)):
        fullpath = os.path.join(assets_directory, filename)
        if filename == 'uris' and os.path.isdir(fullpath):
            _bundle_uris(fullpath, zip_file)
        else:
            # anything else is an error
            raise DCOSException(
                ('Error bundling package. Extra file in package '
                 'directory [{}]').format(fullpath))
def check_special_status(char, combat_handler, winner = None):
    """
    Checks whether the combatant was trying to rescue, flee, or shift targets
    at the time, and processes that accordingly with the round.

    :param char: character whose pending special actions are resolved
    :param combat_handler: combat handler script coordinating the fight
    :param winner: truthy if the character won the contested roll
        (used by the rescue branch)
    """
    # Positioning costs of each special action.
    RESCUE_COST = 2
    FLEE_COST = 3
    SHIFT_COST = 2
    scripts = char.scripts.all()
    dbref = char.id
    string = ""
    for script in scripts:
        if script.key == "flee":
            flee_counter = combat_handler.db.flee_count[dbref]
            if flee_counter > 1:
                # Use the named FLEE_COST constant instead of a bare 3.
                if char.ndb.pos < FLEE_COST:
                    string = "{RFLEE:{R {M%s{n doesn't have the footing" % \
                        (char.db.sdesc) + \
                        " to flee, and they are back in the thick of the battle!"
                    combat_handler.del_flee_char(char)
                else:
                    # subtract positioning
                    pos = char.ndb.pos - FLEE_COST
                    char.set_pos(pos)
                    # NOTE: added the missing space before "successfully".
                    string = "{RFLEE:{n {M%s{n has" % (char.db.sdesc) + \
                        " successfully fled from combat!"
                    combat_handler.del_flee_char(char)
                    combat_handler.remove_character(char)
            else:
                # NOTE: added the missing space before "trying".
                string = "{RFLEE:{n {M%s{n is still" % (char.db.sdesc) + \
                    " trying to flee from combat!"
                combat_handler.add_flee_count(char)
        if script.key == "rescue":
            rescuee = combat_handler.db.rescue[dbref]
            attacker = combat_handler.db.pairs[rescuee]
            if winner:
                combat_handler.rescue_tgt(char, rescuee)
                string = "{RRESCUE:{n {M%s{n" % (rescuee.db.sdesc) + \
                    " has been rescued by {M%s{n!" % (char.db.sdesc) + \
                    " {M%s{n must now fight with {M%s{n." % \
                    (attacker.db.sdesc, char.db.sdesc)
            else:
                # NOTE: the original passed three values to a two-%s format
                # (TypeError at runtime); the rescuer is already named in the
                # prefix, so only rescuee and attacker are formatted. Also
                # added the missing space before "failed".
                string = "{RRESCUE:{n {M%s{n has" % (char.db.sdesc) + \
                    " failed to rescue {M%s{n from {M%s{n." % \
                    (rescuee.db.sdesc, attacker.db.sdesc)
            combat_handler.del_rescue(char)
            # subtract the positioning cost even if they fail
            # if they have the cowardice trait, they spend twice the
            # positioning
            if char.scripts.get("Cowardice"):
                RESCUE_COST *= 2
            elif char.scripts.get("Relentless Cunning"):
                RESCUE_COST = 0
            pos = char.ndb.pos - RESCUE_COST
            char.set_pos(pos)
        if script.key == "shift target":
            if char.scripts.get("Relentless Cunning"):
                SHIFT_COST = 0
            # subtract positioning
            pos = char.ndb.pos - SHIFT_COST
            char.set_pos(pos)
            new_target = combat_handler.db.shifting[dbref]
            old_target = combat_handler.db.pairs[char]
            combat_handler.switch_target(char, new_target)
            string = "{M%s{n has successfully" % (char.db.sdesc) + \
                " shifted targets from {M%s{n to {M%s{n!" % \
                (old_target.db.sdesc, new_target.db.sdesc)
    if string:
        combat_handler.msg_all(string)
def primary_astigmatism_00(rho, phi):
    """Zernike primary astigmatism 0°.

    :param rho: radial coordinate
    :param phi: azimuthal angle in radians
    :return: rho**2 * cos(2*phi), evaluated with the active math engine `e`
    """
    return rho**2 * e.cos(2 * phi)
def lpt_prototype(mesh,
                  nc=FLAGS.nc,
                  bs=FLAGS.box_size,
                  batch_size=FLAGS.batch_size,
                  a0=FLAGS.a0,
                  a=FLAGS.af,
                  nsteps=FLAGS.nsteps):
  """
  Prototype of function computing LPT deplacement.

  Returns output tensorflow and mesh tensorflow tensors

  :param mesh: mesh-tensorflow Mesh on which tensors are laid out
  :param nc: grid size per dimension
  :param bs: box size
  :param batch_size: simulation batch size
  :param a0: initial scale factor
  :param a: final scale factor
  :param nsteps: number of time steps between a0 and a
  """
  # Load the linear matter power spectrum and build an interpolator over k.
  klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
  plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
  ipklin = iuspline(klin, plin)
  stages = np.linspace(a0, a, nsteps, endpoint=True)
  # Define the named dimensions
  # Parameters of the small scales decomposition
  n_block_x = FLAGS.nx
  n_block_y = FLAGS.ny
  n_block_z = 1
  halo_size = FLAGS.hsize
  # Clamp the halo so that two halos never overlap within a block.
  if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):
    new_size = int(0.5 *
                   min(nc // n_block_x, nc // n_block_y, nc // n_block_z))
    print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))
    halo_size = new_size
  # Parameters of the large scales decomposition
  downsampling_factor = 0
  lnc = nc // 2**downsampling_factor
  #
  # Full-resolution, transposed, and low-resolution named dimensions.
  fx_dim = mtf.Dimension("nx", nc)
  fy_dim = mtf.Dimension("ny", nc)
  fz_dim = mtf.Dimension("nz", nc)
  tfx_dim = mtf.Dimension("tx", nc)
  tfy_dim = mtf.Dimension("ty", nc)
  tfz_dim = mtf.Dimension("tz", nc)
  tx_dim = mtf.Dimension("tx_lr", nc)
  ty_dim = mtf.Dimension("ty_lr", nc)
  tz_dim = mtf.Dimension("tz_lr", nc)
  nx_dim = mtf.Dimension('nx_block', n_block_x)
  ny_dim = mtf.Dimension('ny_block', n_block_y)
  nz_dim = mtf.Dimension('nz_block', n_block_z)
  sx_dim = mtf.Dimension('sx_block', nc // n_block_x)
  sy_dim = mtf.Dimension('sy_block', nc // n_block_y)
  sz_dim = mtf.Dimension('sz_block', nc // n_block_z)
  k_dims = [tx_dim, ty_dim, tz_dim]
  batch_dim = mtf.Dimension("batch", batch_size)
  pk_dim = mtf.Dimension("npk", len(plin))
  pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])
  # Compute necessary Fourier kernels
  kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)
  kx = mtf.import_tf_tensor(mesh,
                            kvec[0].squeeze().astype('float32'),
                            shape=[tfx_dim])
  ky = mtf.import_tf_tensor(mesh,
                            kvec[1].squeeze().astype('float32'),
                            shape=[tfy_dim])
  kz = mtf.import_tf_tensor(mesh,
                            kvec[2].squeeze().astype('float32'),
                            shape=[tfz_dim])
  kv = [ky, kz, kx]
  # kvec for low resolution grid
  kvec_lr = flowpm.kernels.fftk([nc, nc, nc], symmetric=False)
  kx_lr = mtf.import_tf_tensor(mesh,
                               kvec_lr[0].squeeze().astype('float32'),
                               shape=[tx_dim])
  ky_lr = mtf.import_tf_tensor(mesh,
                               kvec_lr[1].squeeze().astype('float32'),
                               shape=[ty_dim])
  kz_lr = mtf.import_tf_tensor(mesh,
                               kvec_lr[2].squeeze().astype('float32'),
                               shape=[tz_dim])
  kv_lr = [ky_lr, kz_lr, kx_lr]
  shape = [batch_dim, fx_dim, fy_dim, fz_dim]
  lr_shape = [batch_dim, fx_dim, fy_dim, fz_dim]
  hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]
  part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]
  # Begin simulation
  # Generate the Gaussian initial conditions field from the power spectrum.
  initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)
  # # Reshaping array into high resolution mesh
  # field = mtf.slicewise(lambda x:tf.expand_dims(tf.expand_dims(tf.expand_dims(x, axis=1),axis=1),axis=1),
  #                       [initc],
  #                       output_dtype=tf.float32,
  #                       output_shape=hr_shape,
  #                       name='my_reshape',
  #                       splittable_dims=lr_shape[:-1]+hr_shape[1:4]+part_shape[1:3])
  #
  state = mtfpm.lpt_init_single(
      initc,
      a0,
      kv_lr,
      halo_size,
      lr_shape,
      hr_shape,
      part_shape[1:],
      antialias=True,
  )
  # Here we can run our nbody
  final_state = state  #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)
  # paint the field
  final_field = mtf.zeros(mesh, shape=hr_shape)
  for block_size_dim in hr_shape[-3:]:
    final_field = mtf.pad(final_field, [halo_size, halo_size],
                          block_size_dim.name)
  final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)
  # Halo exchange
  for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):
    final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,
                                  halo_size)
  # Remove borders
  for block_size_dim in hr_shape[-3:]:
    final_field = mtf.slice(final_field, halo_size, block_size_dim.size,
                            block_size_dim.name)
  #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])
  # Hack usisng custom reshape because mesh is pretty dumb
  final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],
                              output_dtype=tf.float32,
                              output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],
                              name='my_dumb_reshape',
                              splittable_dims=part_shape[:-1] + hr_shape[:4])
  return initc, final_field
## | 32,956
def in_collision(box1: OrientedBox, box2: OrientedBox) -> bool:
    """
    Check for collision between two boxes. First do a quick check by approximating each box with a circle,
    if there is an overlap, check for the exact intersection using geometry Polygon

    :param box1: Oriented box (e.g., of ego)
    :param box2: Oriented box (e.g., of other tracks)
    :return True if there is a collision between the two boxes.
    """
    # Cheap circle-overlap rejection first; exact polygon test only if needed.
    if not collision_by_radius_check(box1, box2):
        return False
    return bool(box1.geometry.intersects(box2.geometry))
def process_embedded_query_expr(input_string):
    """
    This function scans through the given script and identify any path/metadata
    expressions. For each expression found, an unique python variable name will
    be generated. The expression is then substituted by the variable name.

    Expressions have the form ``${ ... }`` and are only recognized outside of
    quotes and comments. Identical expressions share one generated variable.

    :param str input_string: The input script
    :return: A 2-element tuple of the substituted string and a dict of substitutions
    :rtype: (str, dict)
    """
    keep = []
    # Scanner state: one of STATE_IDLE/STATE_EMBEDDED_QUERY/STATE_COMMENT,
    # or the active quote character while inside a quoted string.
    state = ''
    idx_char = idx_var = 0
    substitutions = {}  # keyed by query expression
    query_expr = []
    while idx_char < len(input_string):
        c = input_string[idx_char]
        if state == STATE_EMBEDDED_QUERY:
            if c == '}':
                # End of ${...}: emit (or reuse) the generated variable name.
                state = STATE_IDLE
                s = ''.join(query_expr).strip()
                query_expr = []
                if s not in substitutions:
                    varname = 'PBK_{}'.format(idx_var)
                    idx_var += 1
                    substitutions[s] = varname
                else:
                    varname = substitutions[s]
                keep.append(varname)
            else:
                query_expr.append(c)
        elif (c == "'" or c == '"') and state != STATE_EMBEDDED_QUERY:
            if state == c:  # quoting pair found, pop it
                state = STATE_IDLE
            elif state == '':  # new quote begins
                state = c
            keep.append(c)
        elif c == '$' and state == STATE_IDLE:  # an unquoted $
            if idx_char + 1 < len(input_string) and input_string[idx_char + 1] == '{':
                state = STATE_EMBEDDED_QUERY
                # Once it enters the embedded query state, any pond,
                # double/single quotes will be ignored
                idx_char += 1
            else:
                keep.append(c)
        elif c == '#' and state == STATE_IDLE:
            # Start of a comment: text is kept verbatim until end of line.
            state = STATE_COMMENT
            keep.append(c)
        elif c == '\n' and state == STATE_COMMENT:
            state = STATE_IDLE
            keep.append(c)
        else:
            keep.append(c)
        idx_char += 1
    return ''.join(keep), substitutions | 32,958
def format_elemwise(vars_):
    """Formats all the elementwise cones for the solver.

    Parameters
    ----------
    vars_ : list
        A list of the LinOp expressions in the elementwise cones.

    Returns
    -------
    list
        A list of LinLeqConstr that represent all the elementwise cones.
    """
    # Create matrices Ai such that 0 <= A0*x0 + ... + An*xn
    # gives the format for the elementwise cone constraints.
    spacing = len(vars_)
    # Stacked product size: one interleaved row per variable per entry.
    prod_size = (spacing*vars_[0].size[0], vars_[0].size[1])
    # Matrix spaces out columns of the LinOp expressions.
    mat_size = (spacing*vars_[0].size[0], vars_[0].size[0])
    terms = []
    for i, var in enumerate(vars_):
        # Each variable gets its own interleaving offset i.
        mat = get_spacing_matrix(mat_size, spacing, i)
        terms.append(lu.mul_expr(mat, var, prod_size))
    return [lu.create_geq(lu.sum_expr(terms))] | 32,959
def extract_stars(image, noise_threshold):
    """
    Extract all star from the given image

    Returns a list of rectangular images

    :param image: 2-D intensity array (not modified; a copy is taken)
    :param noise_threshold: pixel values below this are treated as background
    :return: (N x 4 array of [i, j, w, h] ROIs, list of cropped star images)
    """
    roi_list = []
    image_list = []
    # Threshold to remove background noise
    image = image.copy()
    image[image < noise_threshold] = 0.0
    # Create binary image by thresholding
    binary = image.copy()
    binary[binary > 0] = 1
    # Find the next white pixel in the image
    i, j = find_next_while_pixel(binary)
    while i is not None and j is not None:
        # Construct the ROI around the pixel
        i, j, w, h = construct_roi(binary, i, j)
        # Save ROI to list or roi
        roi_list.append([i, j, w, h])
        # Erase ROI from image
        binary[i:i+h, j:j+w] = 0
        # Extract image region
        image_list.append(np.array(image[i:i+h, j:j+w]))
        # Find the next white pixel and repeat
        i, j = find_next_while_pixel(binary)
    return np.array(roi_list), image_list | 32,960
def lovasz_hinge(logits, labels):
    """
    Binary Lovasz hinge loss
    logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
    labels: [P] Tensor, binary ground truth labels (0 or 1)

    NOTE(review): uses the legacy torch.autograd.Variable API — presumably
    written for pre-0.4 PyTorch; confirm before upgrading.
    """
    if len(labels) == 0:
        # only void pixels, the gradients should be 0
        return logits.sum() * 0.
    # Map labels {0, 1} to signs {-1, +1}.
    signs = 2. * labels.float() - 1.
    # Hinge errors, sorted descending for the Lovasz extension gradient.
    errors = (1. - logits * Variable(signs))
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    perm = perm.data
    gt_sorted = labels[perm]
    grad = lovasz_grad(gt_sorted)
    # Loss is the dot product of clamped errors and the Lovasz gradient.
    loss = torch.dot(F.relu(errors_sorted), Variable(grad))
    return loss | 32,961
def extract_metadata(url: str, body: BeautifulSoup) -> Website:
    """
    Extract metadata from a site and put it into a `Website object`.

    Falls back when tags are missing: the URL stands in for a missing
    <title>, a text excerpt for a missing meta description, and
    /favicon.ico for a missing icon link.
    """
    try:
        name = body.title.get_text().strip()
    except AttributeError:
        name = url
    try:
        description = (
            body.find(attrs={"name": "description"}).get("content").strip()
        )
    except AttributeError:
        # No meta description: use the first 400 characters of page text.
        description = extract_text(body)[:400] + "..."
    try:
        icon = urljoin(url, body.find("link", rel="icon").get("href"))
    except AttributeError:
        # As Browsers do, if the html doesn't specify an icon we will just try
        # the default path
        icon = urljoin(url, "/favicon.ico")
    return Website(
        url,
        name,
        description,
        icon,
    ) | 32,962
def markContinuing(key, idea, oldest_idea_id, oldest_idea_detect_time, accum):
    """
    Mark IDEA as continuing event.

    When the idea is present and is not itself the oldest event, record
    {key: (ID, DetectTime)} in the accumulator and tag the idea with the id
    of the event it continues.

    :return: marked key (first three components), IDEA
    """
    # If idea is present
    if idea:
        # Equality of ID's in tuple and idea, if true mark will be added
        if oldest_idea_id != idea.id:
            # Add {key: (ID, DetectTime)} to accumulator (dict literal
            # instead of dict([(k, v)])).
            accum.add({key: (oldest_idea_id, oldest_idea_detect_time)})
            # Add id mark for continuing event
            idea.aida_continuing = oldest_idea_id
    # Return tuple: key for next deduplication phase and IDEA
    return (key[0:3], idea)
def box1_in_box2(corners1: torch.Tensor, corners2: torch.Tensor):
    """check if corners of box1 lie in box2
    Convention: if a corner is exactly on the edge of the other box, it's also a valid point

    Args:
        corners1 (torch.Tensor): (B, N, 4, 2)
        corners2 (torch.Tensor): (B, N, 4, 2)

    Returns:
        c1_in_2: (B, N, 4) Bool
    """
    # Project each corner of box1 onto the two edge directions (ab, ad) of
    # box2; a point is inside iff both normalized projections lie in [0, 1].
    a = corners2[:, :, 0:1, :]  # (B, N, 1, 2)
    b = corners2[:, :, 1:2, :]  # (B, N, 1, 2)
    d = corners2[:, :, 3:4, :]  # (B, N, 1, 2)
    ab = b - a  # (B, N, 1, 2)
    am = corners1 - a  # (B, N, 4, 2)
    ad = d - a  # (B, N, 1, 2)
    p_ab = torch.sum(ab * am, dim=-1)  # (B, N, 4)
    norm_ab = torch.sum(ab * ab, dim=-1)  # (B, N, 1)
    p_ad = torch.sum(ad * am, dim=-1)  # (B, N, 4)
    norm_ad = torch.sum(ad * ad, dim=-1)  # (B, N, 1)
    # NOTE: the expression looks ugly but is stable if the two boxes are exactly the same
    # also stable with different scale of bboxes
    cond1 = (p_ab / norm_ab > - 1e-6) * (p_ab / norm_ab < 1 + 1e-6)  # (B, N, 4)
    cond2 = (p_ad / norm_ad > - 1e-6) * (p_ad / norm_ad < 1 + 1e-6)  # (B, N, 4)
    return cond1 * cond2
def in_line_rate(line, container_line):
    """Return the fraction of ``line`` that overlaps ``container_line``.

    :param line: (start, end) segment
    :param container_line: (start, end) containing segment
    :return: overlap length divided by the total length of ``line``
    """
    inter = intersection_line(line, container_line)
    return inter / (line[1] - line[0])
def f2():
    """A simple function that sleeps for a short period of time."""
    time.sleep(0.1)
def test_arbitrary_loader_module_not_found():
    """Raise when loader not found."""
    # Running a pipeline with a non-importable loader name must raise
    # PyModuleNotFoundError; the cache is cleared so no stale pipeline is hit.
    with pytest.raises(PyModuleNotFoundError):
        pipeline_cache.clear()
        pypyr.moduleloader.set_working_directory('arb/dir')
        pypyr.pipelinerunner.load_and_run_pipeline(
            pipeline_name='arb pipe',
            pipeline_context_input='arb context input',
            loader='not_found_loader'
        ) | 32,967
def test_cli_requires():
    """Test to ensure your can add requirements to a CLI"""
    # A requires-hook that always fails, so the CLI path must surface it.
    def requires_fail(**kwargs):
        return {'requirements': 'not met'}
    @hug.cli(output=str, requires=requires_fail)
    def cli_command(name: str, value: int):
        return (name, value)
    # Direct call bypasses the requirement check; hug.test.cli enforces it.
    assert cli_command('Testing', 1) == ('Testing', 1)
    assert hug.test.cli(cli_command, 'Testing', 1) == {'requirements': 'not met'} | 32,968
def tab_printer(args):
    """
    Function to print the logs in a nice tabular format.

    :param args: Parameters used for the model (an argparse.Namespace).
    """
    args = vars(args)
    t = Texttable()
    # Python 3: items() instead of the Python-2-only iteritems(), and
    # print() as a function instead of the print statement.
    t.add_rows([["Parameter", "Value"]] + [[k.replace("_", " ").capitalize(), v] for k, v in args.items()])
    print(t.draw())
def comm_for_pid(pid):
    """Retrieve the process name for a given process id.

    :param pid: numeric process id
    :return: contents of /proc/<pid>/comm, or None if the file cannot be read
    """
    try:
        return slurp('/proc/%d/comm' % pid)
    except IOError:
        return None
def get_machine_type_from_run_num(run_num):
    """Map a run number to its machine type.

    These are the values to be used in config for machine dependent
    settings. The machine id is the part of ``run_num`` before the first
    dash, e.g. 'MS001-170523' -> 'miseq'.

    :param run_num: run number string such as 'HS004-...'
    :return: machine type string
    :raise KeyError: if the machine id is unknown (logged as critical)
    """
    id_to_machine = {
        'MS001': 'miseq',
        'NS001': 'nextseq',
        'HS001': 'hiseq 2500 rapid',
        'HS002': 'hiseq 2500',
        'HS003': 'hiseq 2500',
        'HS004': 'hiseq 2500',
        'HS005': 'macrogen',
        'HS006': 'hiseq 4000',
        'HS007': 'hiseq 4000',
        'HS008': 'hiseq 4000',
        'NG001': 'novogene hiseq x5',
        'NG002': 'novogene hiseq x5',
        'NG003': 'novogene hiseq x5',
        'NG004': 'novogene hiseq x5',
        'NG005': 'novogene hiseq x5',
    }
    machine_id = run_num.split('-')[0]
    try:
        machine_type = id_to_machine[machine_id]
    except KeyError:
        logger.critical("Unknown machine id %s", machine_id)
        raise
    return machine_type
def crawler(address_list):
    """
    A list of addresses is provided to this method and it goes and downloads those articles. E.g an Element of the given list can be this:
    https://www.jyi.org/2019-march/2019/3/1/the-implication-of-the-corticotropin-releasing-factor-in-nicotine-dependence-and-significance-for-pharmacotherapy-in-smoking-cessation

    :param address_list: site-relative article paths (prefixed with
        https://www.jyi.org when fetched)
    :return: (cosine-similarity array of all documents, list of article texts)
    """
    print("Downloading Articles.. Please wait.")
    #Whole Corpus which will contain every article from the input excel file.
    corpus = []
    #Setting browser options which will be running in the background.
    options = ChromeOptions()
    options.add_argument('headless')
    options.add_argument('--log-level=3')
    #options.add_argument('--disable-extensions')
    browser = webdriver.Chrome('chromedriver', chrome_options=options)
    #Visiting website and Downloading the text article data.
    for address in address_list:
        try:
            #Download the whole webpage.
            site = 'https://www.jyi.org' + address
            browser.get(site)
        except:
            print("Can't connect to URL. Check your internet connection.")
            sys.exit()
        #Parse the webpage as HTML.
        soup = bs(browser.page_source, 'html.parser')
        #soup.prettify()
        #This list will contain whole the article.
        document = []
        try:
            #Finding the components which contain textual research data from the soup.
            parents = soup.find('div', 'entry-content e-content')
            parents = parents.find_all('div', 'sqs-block html-block sqs-block-html')
        except:
            print("Website structure is changed. OR Internet connection isn't smooth.")
            sys.exit()
        for parent in parents:
            #Since there are many components in a single webpage which contain textual research data.
            #We go through them one by one extracting that text.
            parent = parent.find('div', 'sqs-block-content')
            #Deleting some unwanted components from the soup.
            disposable = parent.find_all('ol')
            if disposable != None:
                for all in disposable:
                    all.decompose()
            #Each paragraph will be an element in our document list.
            Children = parent.find_all('p')
            for each in Children:
                document.append(each.text)
        #we join all the paragraphs into one single passage.
        doc_str = " ".join(document)
        #Removing unwanted unicode characters.
        doc_str = doc_str.replace(u'\xa0', u' ')
        #A single document is appended to whole corpus. 1 element of corpus list
        #is a complete downloaded document.
        corpus.append(doc_str)
    print("All articles downloaded successfully.")
    #Finding cosine similarity between all documents.
    array = cosine_sim(corpus)
    browser.quit()
    return array, corpus | 32,972
def login():
    """Log user in.

    GET renders the login form; POST validates the credentials against the
    users table and stores the user id in the session on success.
    """
    # Forget any user_id
    session.clear()
    # User reached route via POST (as by submitting a form via POST)
    if request.method == "POST":
        # Ensure username was submitted
        if not request.form.get("username"):
            return apology("must provide username", 403)
        # Ensure password was submitted
        elif not request.form.get("password"):
            return apology("must provide password", 403)
        # Query database for username
        rows = db.execute("SELECT * FROM users WHERE username = :username",
                          username=request.form.get("username"))
        # Ensure username exists and password is correct
        if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
            return apology("invalid username and/or password", 403)
        # Remember which user has logged in
        session["user_id"] = rows[0]["id"]
        # Redirect user to home page
        return redirect("/personal")
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        return render_template("login.html") | 32,973
def twisted_sleep(time):
    """
    Return a deferred that will be triggered after the specified amount of
    time passes.

    :param time: delay in seconds. NOTE: the parameter name shadows the
        stdlib ``time`` module inside this function body.
    :return: a twisted ``Deferred`` that fires with ``None`` after the delay.
    """
    return task.deferLater(reactor, time, lambda: None)
def setup():
    """
    extension: setup the archive/experiment directory.
    input: None
    output: None
    notes: creates an empty directory for storing data.
        Idempotent: a second call is ignored (guarded by ``finished_setup``).
        When the target directory already exists and ``defn.overwrite`` is
        False, a unique sibling directory is derived from ``defn.extension``
        by substituting '<E>' (experiment name) and '<I>' (counter).
        Otherwise the existing directory contents are deleted.
    """
    global finished_setup
    global archive_path
    if finished_setup is True:
        logger.warn( 'archive setup called again. Ignoring' )
        return None
    path = os.path.join( defn.archive_path, defn.experiment )
    if os.path.isdir( path ) is True:
        logger.warn( '{} already exists.'.format( path ) )
        if (defn.overwrite is False):
            #need to generate new archive path name
            extn = defn.extension
            # Ensure both template tokens are present so the substitutions
            # below always produce a distinct candidate name.
            if '<E>' not in extn:
                extn = '<E>' + extn
            if '<I>' not in extn:
                extn = extn + '<I>'
            template = extn.replace( '<E>', defn.experiment )
            i = 1
            unique = False
            # Probe archive_<1>, archive_<2>, ... until an unused name is found.
            while unique is False:
                path = os.path.join( defn.archive_path, template.replace('<I>',str(i)) )
                unique = not os.path.isdir( path )
                i += 1
            logger.warn( 'moved archive to {}'.format( path ) )
        else:
            # overwrite=True: empty_dir below clears the existing directory.
            logger.warn( 'deleted old archive.' )
    archive_path = path
    file_handle.empty_dir( archive_path )
    if defn.desc_name != '':
        #add description to archive as text file.
        with open( os.path.join( archive_path, defn.desc_name ), 'w' ) as desc_file:
            desc_file.write( defn.description )
    finished_setup = True
async def async_load_cache(
    filename: str,
) -> dict[str, str | dict[str, dict[str, dict[str, dict[str, str]]]]]:
    """Load cache from file.

    Reads the whole file asynchronously and unpickles it.

    NOTE(review): ``pickle.loads`` executes arbitrary code when fed untrusted
    data — safe only if this cache file is always written by this application.
    """
    async with aiofiles.open(filename, "rb") as file:
        pickled_foo = await file.read()
    return pickle.loads(pickled_foo)
def do_scrape():
    """
    Runs the craigslist scraper, and posts data to slack.

    Side effects: fetches listings for every area in ``settings.AREAS``,
    prints progress to stdout, and posts each result to Slack.
    """
    # Create a slack client.
    sc = SlackClient(settings.SLACK_TOKEN)
    # Get all the results from craigslist.
    all_results = []
    for area in settings.AREAS:
        # BUG FIX: was `print area` (Python 2 statement syntax), which is a
        # SyntaxError under Python 3; the rest of this function already uses
        # print() calls.
        print(area)
        all_results += scrape_area(area)
    print("{}: Got {} results".format(time.ctime(), len(all_results)))
    # Post each result to slack.
    for result in all_results:
        post_listing_to_slack(sc, result)
def make_function(function, name, arity):
    """Make a function node, a representation of a mathematical relationship.

    This factory function creates a function node, one of the core nodes in any
    program. The resulting object is able to be called with NumPy vectorized
    arguments and return a resulting vector based on a mathematical
    relationship.

    Parameters
    ----------
    function : callable
        A function with signature `function(x1, *args)` that returns a Numpy
        array of the same shape as its arguments.
    name : str
        The name for the function as it should be represented in the program
        and its visualizations.
    arity : int
        The number of arguments that the `function` takes.

    Returns
    -------
    _Function
        The validated function node.

    Raises
    ------
    ValueError
        If any argument has the wrong type, the arity does not match, or the
        function violates shape/closure requirements.
    """
    if not isinstance(arity, int):
        raise ValueError('arity must be an int, got %s' % type(arity))
    if not isinstance(function, np.ufunc):
        # np.ufunc objects expose no inspectable code object; only check
        # the argument count for plain Python callables.
        if six.get_function_code(function).co_argcount != arity:
            raise ValueError('arity %d does not match required number of '
                             'function arguments of %d.'
                             % (arity,
                                six.get_function_code(function).co_argcount))
    if not isinstance(name, six.string_types):
        raise ValueError('name must be a string, got %s' % type(name))
    # Check output shape. Evaluate once and reuse the result instead of
    # calling `function` three times as the original did (also avoids
    # triggering any side effects repeatedly).
    args = [np.ones(10) for _ in range(arity)]
    try:
        result = function(*args)
    except ValueError:
        raise ValueError('supplied function %s does not support arity of %d.'
                         % (name, arity))
    if not hasattr(result, 'shape'):
        raise ValueError('supplied function %s does not return a numpy array.'
                         % name)
    if result.shape != (10,):
        raise ValueError('supplied function %s does not return same shape as '
                         'input vectors.' % name)
    # Check closure (finite output) for zero & negative input arguments.
    args = [np.zeros(10) for _ in range(arity)]
    if not np.all(np.isfinite(function(*args))):
        raise ValueError('supplied function %s does not have closure against '
                         'zeros in argument vectors.' % name)
    args = [-1 * np.ones(10) for _ in range(arity)]
    if not np.all(np.isfinite(function(*args))):
        raise ValueError('supplied function %s does not have closure against '
                         'negatives in argument vectors.' % name)
    return _Function(function, name, arity)
def data_type_validator(type_name='data type'):
    """
    Makes sure that the field refers to a valid data type, whether complex or primitive.
    Used with the :func:`field_validator` decorator for the ``type`` fields in
    :class:`PropertyDefinition`, :class:`AttributeDefinition`, :class:`ParameterDefinition`,
    and :class:`EntrySchema`.
    Extra behavior beyond validation: generated function returns true if field is a complex data
    type.

    :param type_name: human-readable label used in issue reports.
    :return: a validator callable with signature (field, presentation, context).
    """
    def validator(field, presentation, context):
        # Run the field's standard validation first.
        field.default_validate(presentation, context)
        value = getattr(presentation, field.name)
        if value is not None:
            # Test for circular definitions: a data type must not name its
            # own container as its type.
            container_data_type = get_container_data_type(presentation)
            if (container_data_type is not None) and (container_data_type._name == value):
                context.validation.report(
                    'type of property "%s" creates a circular value hierarchy: %s'
                    % (presentation._fullname, safe_repr(value)),
                    locator=presentation._get_child_locator('type'), level=Issue.BETWEEN_TYPES)
            # Can be a complex data type -> report True to the caller.
            if get_type_by_full_or_shorthand_name(context, value, 'data_types') is not None:
                return True
            # Can be a primitive data type; anything else is unknown.
            if get_primitive_data_type(value) is None:
                report_issue_for_unknown_type(context, presentation, type_name, field.name)
        # None value or primitive/unknown type: not a complex data type.
        return False
    return validator
def list_challenge_topics(account_name, challenge_name):  # noqa: E501
    """List stargazers
    Lists the challenge topics. # noqa: E501
    :param account_name: The name of the account that owns the challenge
    :type account_name: str
    :param challenge_name: The name of the challenge
    :type challenge_name: str
    :rtype: ArrayOfTopics

    Returns an (ArrayOfTopics, 200) pair on success, (Error, 404) when the
    account or challenge does not exist, and (Error, 500) on any other
    failure.
    """
    try:
        # Resolve the owning account first; challenges are keyed by ownerId.
        account = DbAccount.objects.get(login=account_name)
        account_id = account.to_dict().get("id")
        db_challenge = DbChallenge.objects.get(
            ownerId=account_id, name=challenge_name
        )  # noqa: E501
        res = ArrayOfTopics(topics=db_challenge.to_dict().get("topics"))
        status = 200
    except DoesNotExist:
        # Either the account or the challenge lookup failed.
        status = 404
        res = Error("The specified resource was not found", status)
    except Exception as error:
        status = 500
        res = Error("Internal error", status, str(error))
    return res, status
def ee_reg2(x_des, quat_des, sim, ee_index, kp=None, kv=None, ndof=12):
    """
    same as ee_regulation, but now also accepting quat_des.

    PD regulation of the end-effector pose: computes joint torques that push
    the end effector toward position ``x_des`` and orientation ``quat_des``
    (w, x, y, z order — converted below), with gravity/bias compensation.

    :param x_des: desired Cartesian position of the end effector.
    :param quat_des: desired orientation quaternion in (w, x, y, z) order.
    :param sim: MuJoCo sim object providing body poses, qvel and bias forces.
    :param ee_index: body index of the end effector.
    :param kp: position gain matrix (default 10*I).
    :param kv: velocity gain matrix (default 1*I).
    :param ndof: number of actuated joints to command.
    :return: torque vector of length ``ndof``.
    """
    kp = np.eye(len(sim.data.body_xpos[ee_index]))*10 if kp is None else kp
    kv = np.eye(len(sim.data.body_xpos[ee_index]))*1 if kv is None else kv
    jacp,jacr=jac(sim, ee_index, ndof)
    # % compute position error terms as before
    xdot = np.matmul(jacp, sim.data.qvel[:ndof])
    error_vel = xdot
    error_pos = x_des - sim.data.body_xpos[ee_index]
    pos_term = np.matmul(kp,error_pos)
    vel_term = np.matmul(kv,error_vel)
    # % compute orientation error terms
    # MuJoCo stores quaternions as (w, x, y, z); scipy expects (x, y, z, w),
    # hence the component reordering below.
    current_ee_quat = copy.deepcopy(sim.data.body_xquat[ee_index])
    current_ee_rotmat = R.from_quat([current_ee_quat[1],
                                     current_ee_quat[2],
                                     current_ee_quat[3],
                                     current_ee_quat[0]])
    target_ee_rotmat = R.from_quat([quat_des[1],
                                    quat_des[2],
                                    quat_des[3],
                                    quat_des[0]])
    # NOTE(review): Rotation.as_dcm() was removed in newer SciPy in favor of
    # as_matrix() — this code presumably targets SciPy < 1.6; confirm.
    ori_error = calculate_orientation_error(target_ee_rotmat.as_dcm(), current_ee_rotmat.as_dcm())
    euler_dot = np.matmul(jacr, sim.data.qvel[:ndof])
    ori_pos_term = np.matmul(kp, ori_error)
    ori_vel_term = np.matmul(kv, euler_dot)
    # % commanding ee pose only
    # PD law: proportional on pose error, damping on end-effector velocity.
    F_pos = pos_term - vel_term
    F_ori = ori_pos_term - ori_vel_term
    # Map the Cartesian wrench to joint torques via the stacked Jacobian,
    # then add the bias (gravity/Coriolis) compensation term.
    J_full = np.concatenate([jacp, jacr])
    F_full = np.concatenate([F_pos, F_ori])
    torques = np.matmul(J_full.T, F_full) + sim.data.qfrc_bias[:ndof]
    return torques
def test_tc1():
    """ Test T of parity distributions.

    For each n in 3..6, checks T(n_mod_m(n, 2)) is (almost) 1.0.
    NOTE(review): this is a nose-style yield test; modern pytest no longer
    collects yield tests — confirm the project's test runner before changing.
    """
    for n in range(3, 7):
        d = n_mod_m(n, 2)
        yield assert_almost_equal, T(d), 1.0
def _default_clipping(
    inner_factory: factory.AggregationFactory) -> factory.AggregationFactory:
    """The default adaptive clipping wrapper.

    Wraps ``inner_factory`` with adaptive norm clipping whose clipping norm
    tracks the 0.8 quantile of client update norms (no noise added to the
    quantile estimate).
    """
    # Adapts relatively quickly to a moderately high norm.
    clipping_norm = quantile_estimation.PrivateQuantileEstimationProcess.no_noise(
        initial_estimate=1.0, target_quantile=0.8, learning_rate=0.2)
    return robust.clipping_factory(clipping_norm, inner_factory)
def make_pred_multilabel(data_transforms, model, PATH_TO_IMAGES, epoch_loss, CHROMOSOME):
    """
    Gives predictions for test fold and calculates AUCs using previously trained model
    Args:
        data_transforms: torchvision transforms to preprocess raw images; same as validation transforms
        model: densenet-121 from torchvision previously fine tuned to training data
        PATH_TO_IMAGES: path at which NIH images can be found
    Returns:
        pred_df: dataframe containing individual predictions and ground truth for each test image
        auc_df: dataframe containing aggregate AUCs by train/test tuples

    NOTE(review): the body does not match this docstring — ``return BATCH_SIZE``
    sits inside the dataloader loop, so the function returns the constant 32
    after processing only the first batch, and ``pred_df``/``true_df`` are
    created but never filled or returned. The function looks truncated;
    recover the missing tail before relying on it.
    """
    # calc preds in batches of 16, can reduce if your GPU has less RAM
    BATCH_SIZE = 32
    # set model to eval mode; required for proper predictions given use of batchnorm
    model.train(False)
    # create dataloader
    dataset = CXR.CXRDataset(
        path_to_images=PATH_TO_IMAGES,
        fold="test",
        transform=data_transforms['val'])
    dataloader = torch.utils.data.DataLoader(
        dataset, BATCH_SIZE, shuffle=False, num_workers=0)
    size = len(dataset)
    # create empty dfs
    pred_df = pd.DataFrame(columns=["Image Index"])
    true_df = pd.DataFrame(columns=["Image Index"])
    # iterate over dataloader
    for i, data in enumerate(dataloader):
        inputs, labels, _ = data
        inputs, labels = Variable(inputs.cuda()), Variable(labels.cuda())
        true_labels = labels.cpu().data.numpy()
        batch_size = true_labels.shape
        outputs = model(inputs)
        probs = outputs.cpu().data.numpy()
        return BATCH_SIZE
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up a config entry for solarlog.

    Forwards the entry to the sensor platform and reports success.
    NOTE(review): newer Home Assistant deprecates the singular
    ``async_forward_entry_setup`` in favor of ``async_forward_entry_setups``
    — confirm against the HA version this targets.
    """
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, "sensor")
    )
    return True
def test_board_group_board_by_unknown() -> None:
    """Test that the boards property throws an exception with unknown indices.

    Integer and dict indices must raise TypeError (only string serials are
    valid keys); unknown string serials must raise KeyError.
    """
    board_group = BoardGroup.get_board_group(MockBoard, OneBoardMockBackend)
    with pytest.raises(TypeError):
        board_group[0]  # type: ignore
    with pytest.raises(KeyError):
        board_group[""]
    with pytest.raises(TypeError):
        board_group[{}]  # type: ignore
    with pytest.raises(KeyError):
        board_group["ARGHHHJ"]
def SqueezeNet_v1(include_top=True,
                  input_tensor=None, input_shape=None,
                  classes=10):
    """Instantiates the SqueezeNet architecture.

    :param include_top: passed to input-shape validation (the classification
        head below is always built).
    :param input_tensor: optional existing Keras tensor to use as input.
    :param input_shape: optional input shape tuple (defaults resolved for
        32x32 inputs).
    :param classes: number of output classes for the final softmax layer.
    :return: a Keras ``Model`` named 'squeezenet'.
    """
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=32,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)
    # Build (or adopt) the input tensor.
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Stem convolution.
    x = Conv2D(96, (3, 3), padding='same', name='conv1')(img_input)
    x = Activation('relu', name='relu_conv1')(x)
    # x = MaxPooling2D(pool_size=(2, 2), name='pool1')(x)
    # Stacked fire modules with progressively wider squeeze/expand filters,
    # downsampled twice by max pooling.
    x = fire_module(x, fire_id=2, squeeze=16, expand=64)
    x = fire_module(x, fire_id=3, squeeze=16, expand=64)
    x = fire_module(x, fire_id=4, squeeze=32, expand=128)
    x = MaxPooling2D(pool_size=(2, 2), name='pool4')(x)
    x = fire_module(x, fire_id=5, squeeze=32, expand=128)
    x = fire_module(x, fire_id=6, squeeze=48, expand=192)
    x = fire_module(x, fire_id=7, squeeze=48, expand=192)
    x = fire_module(x, fire_id=8, squeeze=64, expand=256)
    x = MaxPooling2D(pool_size=(2, 2), name='pool8')(x)
    x = fire_module(x, fire_id=9, squeeze=64, expand=256)
    x = BatchNormalization()(x)
    # x = Dropout(0.5, name='drop9')(x)
    # x = Convolution2D(1000, (1, 1), padding='valid', name='conv10')(x)
    # Classification head: global average pooling into a dense softmax.
    x = Activation('relu', name='relu_10')(x)
    x = GlobalAveragePooling2D(name="avgpool10")(x)
    x = Dense(classes, activation='softmax', name="softmax-10")(x)
    # x = Activation('softmax', name='softmax')(x)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = Model(inputs, x, name='squeezenet')
    return model
def sigterm_hndlr(args, sigterm_def, signum, frame):
    """Signal wrapper for the shutdown function.

    Optionally logs the caught signal, runs ``shutdown``, then either chains
    to the previously installed handler or exits with ``EXIT_OK`` when the
    previous handler was the default one.
    """
    verbose_enough = args.verbose >= INFO
    if verbose_enough:
        print()
        print(f'Signal {repr(signum)} caught.')
    shutdown(args)
    if sigterm_def == signal.SIG_DFL:
        sys.exit(EXIT_OK)
    sigterm_def(signum, frame)
def xcafdoc_ColorRefGUID(*args):
    """
    * Return GUIDs for TreeNode representing specified types of colors
    :param type:
    :type type: XCAFDoc_ColorType
    :rtype: Standard_GUID
    """
    # Thin SWIG-generated wrapper: delegates directly to the native binding.
    return _XCAFDoc.xcafdoc_ColorRefGUID(*args)
def green_on_yellow(string, *funcs, **additional):
    """Text color - green on background color - yellow. (see _combine()).

    NOTE(review): ``**additional`` is accepted but never forwarded to
    ``_combine`` — keyword arguments are silently dropped. Confirm whether
    they should be passed through like the sibling color helpers do.
    """
    return _combine(string, code.GREEN, *funcs, attributes=(code.BG_YELLOW,))
def _register_models(format_str, cls, forward=True):
    """Registers reward models of type cls under key formatted by format_str.

    Registers the four combinations {Forward, Backward} x {WithCtrl, NoCtrl}
    in ``reward_serialize.reward_registry``.

    :param format_str: format string with one ``{}`` slot for the variant name.
    :param cls: reward-model class to build loaders for.
    :param forward: base direction; "Forward" keys use it, "Backward" keys
        use its negation.
    :return: an empty dict. NOTE(review): ``res`` is never populated —
        presumably registration is the only intended effect; confirm callers
        don't expect the built loaders back.
    """
    forwards = {"Forward": {"forward": forward}, "Backward": {"forward": not forward}}
    control = {"WithCtrl": {}, "NoCtrl": {"ctrl_coef": 0.0}}
    res = {}
    for k1, cfg1 in forwards.items():
        for k2, cfg2 in control.items():
            fn = registry.build_loader_fn_require_space(cls, **cfg1, **cfg2)
            key = format_str.format(k1 + k2)
            reward_serialize.reward_registry.register(key=key, value=fn)
    return res
def configure_pseudolabeler(pseudolabel: bool, pseudolabeler_builder, pseudolabeler_builder_args):
    """Pass in a class that can build a pseudolabeler (implementing __call__) or a builder function
    that returns a pseudolabeling function.

    When ``pseudolabel`` is falsy, no labeler is configured and None is
    returned. Otherwise ``pseudolabeler_builder`` is looked up by name in
    this module's globals and invoked with the given arguments.
    """
    if not pseudolabel:
        return None
    builder = globals()[pseudolabeler_builder]
    return builder(*pseudolabeler_builder_args)
def get_encoders(filename=None):
    """Get an ordered list of all encoders.

    If a `filename` is provided, encoders registered for that file's
    extension come first; all remaining known encoders follow in their
    registration order.
    """
    preferred = []
    if filename:
        ext = os.path.splitext(filename)[1].lower()
        preferred = list(_encoder_extensions.get(ext, []))
    # Append every encoder not already listed for the extension.
    remaining = [enc for enc in _encoders if enc not in preferred]
    return preferred + remaining
def _is_id_in_allowable_range(
nodes_or_links: str,
project_name: str,
subject_id: int,
range_in_use: dict,
):
"""
Checks if the new node or link id is in the allowable range defined in the config file
Args:
nodes_or_links (str): "node" or "link", which is used in error message
project_name (str): project name, which is used in error message
subject_id (int): the proposed new node or link id number
range_in_use (dict): a dictionary defining the id range with a bool indicating if the id number is used in the base network
Raises:
ValueError: informs the user of the disconnect between config file and the Project Card
"""
if subject_id not in range_in_use:
msg = (
"New {} id ({}) in project '{}' is not in the base networks allowable range"
"({} to {}) as defined in the configuration file.".format(
nodes_or_links,
project_name,
min(range_in_use.keys()),
max(range_in_use.keys()),
)
)
raise ValueError(msg) | 32,994 |
def test_compute_timeseries():
    """
    test the compute_timeseries function

    Feeds 100 random reduced potentials and checks the uncorrelated subset
    (element 3 of the result) is no longer than the input.
    """
    reduced_potentials = np.random.rand(100)
    data = compute_timeseries(reduced_potentials)
    assert len(data[3]) <= len(reduced_potentials), f"the length of uncorrelated data is at most the length of the raw data"
def flip_dict(dict, unique_items=False, force_list_values=False):
    """Swap keys and values in a dictionary.

    Parameters
    ----------
    dict: dictionary
        dictionary object to flip
    unique_items: bool
        whether to assume that all items in dict are unique, potential speedup
        but repeated items will be lost (the last key wins)
    force_list_values: bool
        whether to force all items in the result to be lists or to let unique
        items have unwrapped values. Doesn't apply if unique_items is true.
    """
    # Fast path: one key per value, later duplicates simply overwrite.
    if unique_items:
        return {value: key for key, value in dict.items()}

    # Uniform path: every value maps to a list of keys.
    if force_list_values:
        grouped = {}
        for key, value in dict.items():
            grouped.setdefault(value, []).append(key)
        return grouped

    # Mixed path: unique values map straight to their key; repeated values
    # accumulate into a list.
    flipped = {}
    for key, value in dict.items():
        if value not in flipped:
            flipped[value] = key
        elif isinstance(flipped[value], list):
            flipped[value].append(key)
        else:
            flipped[value] = [flipped[value], key]
    return flipped
def randn(N, R, var = 1.0, dtype = tn.float64, device = None):
    """
    A torchtt.TT tensor of shape N = [N1 x ... x Nd] and rank R is returned.
    The entries of the full tensor are almost normally distributed with
    variance ``var``.

    Args:
        N (list[int]): the shape (entries may be (row, col) tuples for TT-matrices).
        R (list[int]): the rank.
        var (float, optional): the variance. Defaults to 1.0.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.
    Returns:
        torchtt.TT: the result.
    """
    d = len(N)
    # Spread the target variance evenly across the d cores.
    per_core_var = (var / np.prod(R)) ** (1 / d)
    cores = []
    for mode in range(d):
        if isinstance(N[mode], tuple):
            # TT-matrix core: rank x rows x cols x rank.
            shape = [R[mode], N[mode][0], N[mode][1], R[mode + 1]]
        else:
            # TT-vector core: rank x mode-size x rank.
            shape = [R[mode], N[mode], R[mode + 1]]
        core = tn.randn(shape, dtype=dtype, device=device) * np.sqrt(per_core_var)
        cores.append(core)
    return TT(cores)
def test_invalid_repository(invalid_repository, change_dir_main_fixtures):
    """Validate correct response if `cookiecutter.json` file not exist.

    Uses the `invalid_repository` fixture (a repo without a tackle file) and
    asserts the detector reports it as missing.
    """
    assert not repository_has_tackle_file(invalid_repository)
def flatten_all_dimensions_but_first(a):
    """
    Flattens all dimensions but the first of a multidimensional array.

    Parameters
    ----------
    a : ndarray
        Array to be flattened.

    Returns
    -------
    b : ndarray
        Result of flattening, two-dimensional with shape
        (a.shape[0], product of remaining dimensions).
    """
    shape = a.shape
    # Collapse every trailing axis into one.
    return a.reshape(shape[0], np.prod(shape[1:]))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.