| content (string, lengths 22–815k) | id (int64, 0–4.91M) |
|---|---|
def add_users_to_groups_based_on_users_permissions(apps, schema_editor):
"""Add every user to group with "user_permissions" if exists, else create new one.
For each user, if the group with the exact scope of permissions exists,
add the user to it, else create a new group with this scope of permissions
and add the user to it.
"""
User = apps.get_model("account", "User")
Group = apps.get_model("auth", "Group")
groups = Group.objects.all().prefetch_related("permissions")
counter = get_counter_value(Group)
mapping = create_permissions_mapping(User)
for perms, users in mapping.items():
group = get_group_with_given_permissions(perms, groups)
if group:
group.user_set.add(*users)
continue
group = create_group_with_given_permissions(perms, counter, Group)
group.user_set.add(*users)
counter += 1
| 19,900
|
def enforce(*types):
"""
decorator function enforcing, and converting, argument data types
"""
def decorator(fn):
def new_function(*args, **kwargs):
# convert args into something mutable, list in this case
newargs = []
            for original_argument, type_to_convert in zip(args, types):
                newargs.append(type_to_convert(original_argument))
            # pass through any positional arguments beyond the declared types unchanged
            newargs.extend(args[len(types):])
            return fn(*newargs, **kwargs)
return new_function
return decorator
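A quick usage sketch of the decorator above (the scale function is purely illustrative, not part of the original snippet):

@enforce(int, float)
def scale(count, factor):
    """Multiply count by factor; arguments arrive already converted."""
    return count * factor

print(scale("3", "2.5"))  # the strings are coerced to int and float -> 7.5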
| 19,901
|
def modify_repr(_cls: Type[Any]) -> None:
"""Improved dataclass repr function.
Only show non-default non-internal values, and summarize containers.
"""
# let classes still create their own
if _cls.__repr__ is not object.__repr__:
return
def new_repr(self: Any) -> str:
name = self.__class__.__qualname__
lines = []
for f in sorted(fields(self), key=lambda f: f.name not in ("name", "id")):
if f.name.endswith("_"):
continue
# https://github.com/python/mypy/issues/6910
if f.default_factory is not MISSING: # type: ignore
default = f.default_factory() # type: ignore
else:
default = f.default
current = getattr(self, f.name)
if current != default:
if isinstance(current, Sequence) and not isinstance(current, str):
rep = f"[<{len(current)} {f.name.title()}>]"
elif isinstance(current, Enum):
rep = repr(current.value)
elif isinstance(current, datetime):
rep = f"datetime.fromisoformat({current.isoformat()!r})"
else:
rep = repr(current)
lines.append(f"{f.name}={rep},")
if len(lines) == 1:
body = lines[-1].rstrip(",")
elif lines:
body = "\n" + indent("\n".join(lines), " ") + "\n"
else:
body = ""
out = f"{name}({body})"
return out
setattr(_cls, "__repr__", new_repr)
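A minimal sketch of how this hook is applied, assuming the dataclasses, textwrap.indent, typing and datetime imports the function above relies on; the Track class is purely illustrative:

from dataclasses import dataclass, field
from typing import List

@dataclass(repr=False)  # repr=False so modify_repr does not bail out early
class Track:
    name: str = ""
    points: List[float] = field(default_factory=list)

modify_repr(Track)
print(repr(Track(name="t1", points=[1.0, 2.0, 3.0])))
# prints something like: Track(name='t1', points=[<3 Points>],) with the container summarized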
| 19,902
|
def _is_binary(path):
"""Checks if the file at |path| is an ELF executable.
    This is done by inspecting its 4-byte ELF magic number.
"""
with open(path, 'rb') as f:
file_tag = f.read(4)
    return file_tag == b'\x7fELF'
| 19,903
|
def test_cwd():
"""Test the generator cwd."""
with cwd(".") as dir:
# nothing is yielded
assert dir is None
| 19,904
|
def rotationNcropping(folder_in,folder_out):
""" Function to rotate a set of 3D images such a a way the struts of the scaffold
the scaffols are alligned with x and y directions """
for filename in os.listdir(folder_in):
imp =IJ.openImage(os.path.join(folder_in,filename))
IJ.run(imp, "TransformJ Rotate", "z-angle=9 y-angle=-6 x-angle=0.0 interpolation=Linear background=0.0")
imp = IJ.getImage()
stack = imp.getImageStack()
stackcropped = stack.crop(130,64,77,1356,1296,1540)
imp = ImagePlus("2",stackcropped)
output = "nrrd=["+folder_out+filename+"]"
IJ.run(imp, "Nrrd ... ", output)
imp.close()
imp = None
stack = None
stackcropped = None
gc.collect()
time.sleep(15)
gc.collect()
IJ.run("Collect Garbage", "")
IJ.run("Collect Garbage", "")
IJ.getImage().close()
| 19,905
|
def update_item(*, table: str, hash_key: str, sort_key: Optional[str] = None, update_expression: Optional[str],
expression_attribute_values: typing.Dict, return_values: str = 'ALL_NEW'):
"""
    Update an item in a DynamoDB table.
Will determine the type of db this is being called on by the number of keys provided (omit
sort_key to UPDATE from a db with only 1 primary key).
NOTE:
https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html
:param table: Name of the table in AWS.
:param str hash_key: 1st primary key that can be used to fetch associated sort_keys and values.
:param str sort_key: 2nd primary key, used with hash_key to fetch a specific value.
        Note: If not specified, the update targets the item identified by hash_key alone.
:param str update_expression: Expression used to update value, needs action to be performed and new value
:param str expression_attribute_values: attribute values to use from the expression
:param str return_values: return values to get back from the dynamodb API, defaults to 'ALL_NEW'
which provides all item attributes after the update.
    :return: The updated item attributes returned by DynamoDB (per return_values), formatted by _format_ddb_response.
"""
query = {'TableName': table,
'Key': _format_item(hash_key=hash_key, sort_key=sort_key, value=None)}
if update_expression:
query['UpdateExpression'] = update_expression
query['ExpressionAttributeValues'] = expression_attribute_values
query['ReturnValues'] = return_values
resp = db.update_item(**query)
return _format_ddb_response(resp.get('Attributes'))
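A hypothetical call, assuming `db` is the module-level boto3 DynamoDB client and `_format_item` the key formatter this helper relies on; the table and attribute names are made up:

updated = update_item(
    table='user-profiles',
    hash_key='user#123',
    sort_key='profile',
    update_expression='SET display_name = :n',
    expression_attribute_values={':n': {'S': 'Ada'}},
)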
| 19,906
|
def get_configuration(configuration_path=DEFAULT_CONFIGURATION_FILE):
"""
Return a dict containing configuration values.
:param str configuration_path: path to parse yaml from.
:return: dict
"""
global _configuration
if _configuration is None:
LOGGER.debug('Loading configuration: %s', configuration_path)
parser = YAML(typ='rt')
try:
with open(configuration_path) as configuration_file:
_configuration = parser.load(configuration_file)
except FileNotFoundError:
raise RuntimeError(f'Cannot find configuration file {configuration_path}')
except YAMLParserError as ype:
raise RuntimeError(f'Cannot parse configuration file {configuration_path}, see {ype.problem_mark}')
return _configuration
| 19,907
|
def scrap_docs_pages():
"""
Scrape the documentation pages one-by-one while filling the clipboard every 10k characters for a grammar check-up.
"""
url = "https://help.close.com/docs/welcome"
response = requests.get(url)
document = BeautifulSoup(response.text, 'html.parser')
all_text = '' # Store content of multiple pages before pasting into Grammarly - instead doing it page-by-page
    for link in document.select('li a'):  # Main menu items
        attributes = link.attrs
if 'ui-sref' not in attributes:
continue
if attributes['ui-sref'].startswith("docs.show({'doc'"):
# Extract the URL (can't extract 'href' due to web-page JavaScript not being executed)
ui_sref = attributes['ui-sref']
ui_sref = ui_sref.replace('docs.show(', '')[:-1].replace("'", '"') # Clean the JSON
ui_sref_json = json.loads(ui_sref)
slug = ui_sref_json['doc']
page_text = scrap_page(slug)
if not page_text:
continue
if (len(page_text) + len(all_text)) > GRAMMARLY_CHARACTER_LIMIT:
pyperclip.copy(all_text) # Copy to clipboard
print()
print('Character limit reached. Text copied to clipboard!')
input('Paste the text into Grammarly, and press Enter to continue...')
all_text = ''
all_text += f'\n{url}\n'
all_text += page_text
| 19,908
|
def load_and_process(event_id, stations, st, min_magnitude=7, event_client="USGS", event_et=3600,
stat_client="IRIS", inv=None, save_raw=True, save_processed=True,
process_d=True, sampling_rate=40.0, gen_plot=True, gen_audio=True,
folder_name="default_folder", split=1, audio_params=None, plot_params=None):
"""
    NOTE: Function is incomplete; edit it later to load and process downloaded data.
Use this function if the data is already downloaded and some additional processing is
required. This function assumes that the processed/trimmed data is already available.
Inventory will be re-downloaded if inv object is not provided.
Args:
event_id: Ideally should be time stamp YYYY-MM-DDTHH:MM:SS.000
stations (list of lists): Each entry of the list is a list of form [network, station,
location, channel]
min_magnitude (float): Events >= min_magnitude will be retrieved
event_client (str): Client to use for retrieving events
event_et (int): Specifies how long the event range should be from start time (in seconds)
stat_client (str): Client to use for retrieving waveforms and inventory info
save_raw (bool): If true, raw data will also be saved (takes a lot of space)
        save_processed (bool): If true, processed (untrimmed) data will be saved (space heavy)
process_d (bool): If true, the raw data is also processed
sampling_rate (int): Sampling rate in Hz
gen_plot (bool): If true, plots are generated
gen_audio (bool): If true, audio is generated from the waveforms
folder_name (str): Name of the folder in which the data gets saved
split (int): A split > 1 specifies that the data needs to be broken down into that many
parts. For example, if split = 2, the downloaded signal will be processed
and saved into two different halves
audio_params (dict): Audio specific parameters to pass into gen_audio func
plot_params (dict): Plotting/trimming specific params to pass into gen_plot func
Returns:
"""
# Download event information with mag >= min_magnitude
e_client = Client(event_client)
event_id_utc = UTCDateTime(event_id)
event_cat = e_client.get_events(starttime=event_id_utc - 100, endtime=event_id_utc + event_et,
minmagnitude=min_magnitude)
# For each event, find local earthquakes
s_client = Client(stat_client)
print(len(event_cat))
print(event_cat)
if len(event_cat) > 0:
event = event_cat[0]
origin = event.preferred_origin()
start_time = origin.time
end_time = origin.time + 2 * 3600
if not inv:
inv = Inventory(networks=[], source="")
# Download inventory info based on the station client and station names
for net, sta, loc, cha in stations:
try:
inv += s_client.get_stations(network=net, station=sta, location=loc,
channel=cha, level="response")
except FDSNException:
print("Failed to download inventory information {} {}".format(net, sta))
print("Inventory downloaded")
| 19,909
|
def test_save_load_replay_buffer(tmp_path, recwarn, online_sampling, truncate_last_trajectory):
"""
Test if 'save_replay_buffer' and 'load_replay_buffer' works correctly
"""
# remove gym warnings
warnings.filterwarnings(action="ignore", category=DeprecationWarning)
warnings.filterwarnings(action="ignore", category=UserWarning, module="gym")
path = pathlib.Path(tmp_path / "replay_buffer.pkl")
path.parent.mkdir(exist_ok=True, parents=True) # to not raise a warning
env = BitFlippingEnv(n_bits=4, continuous=True)
model = SAC(
"MultiInputPolicy",
env,
replay_buffer_class=HerReplayBuffer,
replay_buffer_kwargs=dict(
n_sampled_goal=2, goal_selection_strategy="future", online_sampling=online_sampling, max_episode_length=4,
),
gradient_steps=1,
train_freq=4,
buffer_size=int(2e4),
policy_kwargs=dict(net_arch=[64]),
seed=1,
)
model.learn(200)
if online_sampling:
old_replay_buffer = deepcopy(model.replay_buffer)
else:
old_replay_buffer = deepcopy(model.replay_buffer.replay_buffer)
model.save_replay_buffer(path)
del model.replay_buffer
with pytest.raises(AttributeError):
model.replay_buffer
# Check that there is no warning
assert len(recwarn) == 0
model.load_replay_buffer(path, truncate_last_traj=truncate_last_trajectory)
if truncate_last_trajectory:
assert len(recwarn) == 1
warning = recwarn.pop(UserWarning)
assert "The last trajectory in the replay buffer will be truncated" in str(warning.message)
else:
assert len(recwarn) == 0
if online_sampling:
n_episodes_stored = model.replay_buffer.n_episodes_stored
assert np.allclose(
old_replay_buffer._buffer["observation"][:n_episodes_stored],
model.replay_buffer._buffer["observation"][:n_episodes_stored],
)
assert np.allclose(
old_replay_buffer._buffer["next_obs"][:n_episodes_stored],
model.replay_buffer._buffer["next_obs"][:n_episodes_stored],
)
assert np.allclose(
old_replay_buffer._buffer["action"][:n_episodes_stored], model.replay_buffer._buffer["action"][:n_episodes_stored],
)
assert np.allclose(
old_replay_buffer._buffer["reward"][:n_episodes_stored], model.replay_buffer._buffer["reward"][:n_episodes_stored],
)
# we might change the last done of the last trajectory so we don't compare it
assert np.allclose(
old_replay_buffer._buffer["done"][: n_episodes_stored - 1],
model.replay_buffer._buffer["done"][: n_episodes_stored - 1],
)
else:
replay_buffer = model.replay_buffer.replay_buffer
assert np.allclose(old_replay_buffer.observations["observation"], replay_buffer.observations["observation"])
assert np.allclose(old_replay_buffer.observations["desired_goal"], replay_buffer.observations["desired_goal"])
assert np.allclose(old_replay_buffer.actions, replay_buffer.actions)
assert np.allclose(old_replay_buffer.rewards, replay_buffer.rewards)
assert np.allclose(old_replay_buffer.dones, replay_buffer.dones)
# test if continuing training works properly
    reset_num_timesteps = bool(truncate_last_trajectory)
model.learn(200, reset_num_timesteps=reset_num_timesteps)
| 19,910
|
def test_get_method_edit_customer_view(client, authenticated_user, create_customer):
"""test the get method for the edit customer view"""
customer = Customer.objects.all()
response = client.get(
reverse("customer_update", kwargs={"customerid": customer[0].id})
)
assert response.status_code == 200
    assertTemplateUsed(response, "event/update_customer_form.html")
| 19,911
|
def google_login_required(fn):
"""Return 403 unless the user is logged in from a @google.com domain."""
def wrapper(self, *args, **kwargs):
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url(self.request.uri))
return
email_match = re.match('^(.*)@(.*)$', user.email())
if email_match:
_, domain = email_match.groups()
if domain == 'google.com':
return fn(self, *args, **kwargs)
        self.error(403)  # Unrecognized email or unauthorized domain.
        self.response.out.write('unauthorized email %s' % user.user_id())
return wrapper
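A hypothetical handler showing where the decorator sits, assuming the webapp2-style RequestHandler and google.appengine.api.users imports the decorator relies on:

class AdminDashboard(webapp2.RequestHandler):
    @google_login_required
    def get(self):
        self.response.out.write('Internal dashboard')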
| 19,912
|
def init_res_fig(n_subplots, max_sess=None, modif=False):
"""
init_res_fig(n_subplots)
Initializes a figure in which to plot summary results.
Required args:
- n_subplots (int): number of subplots
Optional args:
- max_sess (int): maximum number of sessions plotted
default: None
- modif (bool) : if True, plots are made in a modified (simplified
way)
default: False
Returns:
- fig (plt Fig): figure
- ax (plt Axis): axis
"""
subplot_hei = 14
subplot_wid = 7.5
if max_sess is not None:
subplot_wid *= max_sess/4.0
if modif:
sess_plot_util.update_plt_linpla()
figpar_init = sess_plot_util.fig_init_linpla(sharey=True)["init"]
fig, ax = plot_util.init_fig(n_subplots, **figpar_init)
else:
fig, ax = plot_util.init_fig(n_subplots, 2, sharey=True,
subplot_hei=subplot_hei, subplot_wid=subplot_wid)
return fig, ax
| 19,913
|
def setup() -> None:
"""
Call all 'setup_*' methods.
"""
setup_colorblind()
setup_latex_fonts()
| 19,914
|
def change_personal_data_settings(request):
"""
Creates a question with summarized data to be changed
:param request: POST request from "Change personal data settings" Dialogflow intent
:return: JSON with summarized data to be changed
"""
language = request.data['queryResult']['languageCode']
response_spoken_pl = "Nie mogę zmienić żadnych ustawień, ponieważ nie posiadasz jeszcze konta. Jeśli chcesz " \
"założyć konto w best transport Polska, wybierz poniższą opcję Zarejestruj się"
display_spoken_pl = "Nie mogę zmienić żadnych ustawień. Załóż konto przez wybranie poniższej opcji Zarejestruj się"
response_spoken_en = "I can't change any settings, because you don't have an account yet. If you want to create a best" \
" transport Poland account, select the option \"Sign up\" below"
display_spoken_en = "I can't change any settings. Create an account by selecting the option below \"Sign up\""
    user_payload = request.data['originalDetectIntentRequest']['payload']['user']
    access_token = user_payload.get('accessToken')
if access_token:
account_exist = check_token(access_token, language, response_spoken_pl, display_spoken_pl, response_spoken_en,
display_spoken_en)
if account_exist == "token exist":
with open('api/response.json') as json_file:
response = json.load(json_file)
part_to_modify = response['payload']['google']['richResponse']
parameters_from_request = request.data["queryResult"]["parameters"]
if language == "pl":
entities_pl = {'First_name': 'imię', 'Surname': "nazwisko", 'Email': 'email', 'telephone-number': 'numer telefonu',
'geo-city': 'miejsce zamieszkania', 'post-code': 'kod pocztowy','geo-country': 'kraj',
'tax_number': "numer płatnika"}
response_pl = "Czy na pewno chcesz zmienić "
for k,v in parameters_from_request.items():
if v != "" and k in entities_pl:
response_pl += entities_pl[k] + " na " + v + ", "
response_pl = response_pl[:-2]
response_pl += "?"
suggestions_pl = [{"title": "Tak"}, {"title": "Nie"}]
part_to_modify['items'][0]['simpleResponse']['textToSpeech'] = response_pl
part_to_modify['items'][0]['simpleResponse']['displayText'] = response_pl
part_to_modify['suggestions'] = suggestions_pl
elif language == "en":
entities_en = {'First_name': 'name', 'Surname': "surname", 'Email': 'email',
'telephone-number': 'phone number', 'geo-city': 'residence place', 'post-code': 'post code',
'geo-country': 'country', 'tax_number': "tax number"}
response_en = "Are you sure you want to change "
for k, v in parameters_from_request.items():
if v != "" and k in entities_en:
response_en += entities_en[k] + " to " + v + ", "
response_en = response_en[:-2]
response_en += "?"
suggestions_en = [{"title": "Yes"}, {"title": "No"}]
part_to_modify['items'][0]['simpleResponse']['textToSpeech'] = response_en
part_to_modify['items'][0]['simpleResponse']['displayText'] = response_en
part_to_modify['suggestions'] = suggestions_en
response['payload']['google']['richResponse'] = part_to_modify
return response
else:
return account_exist
else:
access_token = "There is no"
account_exist = check_token(access_token, language, response_spoken_pl, display_spoken_pl, response_spoken_en,
display_spoken_en)
return account_exist
| 19,915
|
def test_filter_invalid_items():
"""
filters the item which is not valid. it should return a list equal to input list.
"""
schema = ResultSchema(depth=5)
results = [{1, 2, 3}, {4, 5, 6}, {7, 8, 9}]
filtered = schema.filter(results)
assert len(filtered) == len(results)
assert filtered == results
assert all(isinstance(item, set) for item in filtered)
| 19,916
|
def get_selected_cells(mesh, startpos, endpos):
"""
Return a list of cells contained in the startpos-endpos rectangle
"""
xstart, ystart = startpos
xend, yend = endpos
selected_cells = set()
vertex_coords = mesh.coordinates()
for cell in dolfin.cells(mesh):
cell_vertices = cell.entities(0)
for vid in cell_vertices:
x, y = vertex_coords[vid]
if xstart <= x <= xend and ystart <= y <= yend:
selected_cells.add(cell.index())
break
return selected_cells
| 19,917
|
def flip_axis(array, axis):
"""
    Flip the given axis of an array. Note that the axis numbering follows the
    numpy convention: flipping axis 0 reverses the rows (a vertical flip for a
    2D image), while flipping axis 1 reverses the columns (a horizontal flip).
:param array: The array to be flipped.
:type array: `ndarray`
:param axis: The axis to be flipped.
:type axis: `int`
:returns: The flipped array.
:rtype: `ndarray`
"""
# Rearrange the array so that the axis of interest is first.
array = np.asarray(array).swapaxes(axis, 0)
# Reverse the elements along the first axis.
array = array[::-1, ...]
# Put the array back and return.
return array.swapaxes(0, axis)
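A small numeric check of the axis convention described in the docstring:

import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6]])
print(flip_axis(a, 0))  # rows reversed:    [[4 5 6], [1 2 3]]
print(flip_axis(a, 1))  # columns reversed: [[3 2 1], [6 5 4]]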
| 19,918
|
def fizz_buzz_tree(input_tree):
""" traverses a tree and performs fizz buzz on each element, agumenting the val """
input_tree.in_order_trav(lambda x: fizzbuzz(x))
return input_tree
| 19,919
|
def scan_to_headerword(serial_input, maximum_bytes=9999, header_magic=HeaderWord.MAGIC_MASK):
"""
Consume bytes until header magic is found in a word
:param header_magic:
:param maximum_bytes:
:param serial_input:
:rtype : MTS.Header.Header
"""
headerword = 0x0000
bytecount = 0
while headerword & header_magic != header_magic:
# BlockingIOError
# Read a single byte
nextbyte = serial_input.read(1)
if len(nextbyte) == 0:
raise BufferError("Reached end of stream")
bytecount += 1
        # Keep the low byte, shift it high, and OR in the new byte
nextint = ord(nextbyte)
# if DEBUG: print('0x{byte:02X} {byte:08b}'.format(byte=nextint))
headerword = ((headerword & 0x00FF) << 8) | nextint
if 0 < maximum_bytes <= bytecount:
raise BufferError("Failed to detect header word in serial stream")
try:
h = MTS.Header.Header(word=headerword)
# if DEBUG: print("Found header word. 0x{:04X}".format(h.word))
return h
except ValueError as e:
print("Invalid header word 0x{:04X}".format(headerword))
raise e
| 19,920
|
def _c2_set_instrument_driver_parameters(reference_designator, data):
""" Set one or more instrument driver parameters, return status.
Accepts the following urlencoded parameters:
resource: JSON-encoded dictionary of parameter:value pairs
timeout: in milliseconds, default value is 60000
Sample: localhost:12572/instrument/api/reference_designator/resource [POST]
The UI sends all READ_WRITE parameters in data; so data should never be empty.
"""
debug = False
response_status = {}
response_status['status_code'] = 200
response_status['message'] = ""
response_status['range_errors'] = ""
response_status['display_parameters'] = {}
insufficient_data = 'Insufficient data, or bad data format.'
valid_args = [ 'resource', 'timeout']
try:
if not reference_designator:
message = insufficient_data + ' (reference_designator is None or empty)'
raise Exception(message)
if not data:
message = insufficient_data + ' (data is None or empty)'
raise Exception(message)
try:
payload = convert(data)
except Exception as err:
message = 'Failed to process request data; %s' % str(err.message)
raise Exception(message)
if debug: print '\n debug --- Original payload: ', json.dumps(payload, indent=4, sort_keys=True)
# Validate arguments required for uframe are provided.
for arg in valid_args:
if arg not in payload:
raise Exception(insufficient_data)
# Get instrument status.
_status = get_instrument_status(reference_designator)
if _status is None:
message = 'Failed to retrieve instrument (%s) status.' % reference_designator
raise Exception(message)
# Verify payload['resource'] is not empty or None
if payload['resource'] is None or not payload['resource']:
message = 'The payload [resource] element is None or empty.'
raise Exception(message)
# Get dict of parameters and range values
parameter_dict, key_dict_ranges = get_range_dictionary(payload['resource'], _status, reference_designator)
# Scrub data and determine if range errors
result, error_result = scrub_ui_request_data(payload['resource'], parameter_dict, key_dict_ranges)
# If range error messages, return error dictionary
if error_result:
# Create dictionary with response data and return.
result = {}
response_status['message'] = 'Range Error(s)'
response_status['range_errors'] = error_result
response_status['status_code'] = 400
result['response'] = response_status
if debug: print '\n debug ***** RANGE Error(s): %s' % json.dumps(result, indent=4, sort_keys=True)
return result
# If no errors and result is empty or None, raise exception
elif result is None or not result:
message = 'Unable to process resource payload (result is None or empty).'
raise Exception(message)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Process parameter set request in uframe
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Update value of resource in payload.
payload['resource'] = json.dumps(result)
if 'CAMDS' in reference_designator:
            payload['timeout'] = 200000  # 200 seconds (200,000 ms)
if debug: print '\n debug --- payload: ', json.dumps(payload, indent=4, sort_keys=True)
# Send request and payload to instrument/api and process result
try:
response = _uframe_post_instrument_driver_set(reference_designator, 'resource', payload)
except Exception as err:
message = str(err.message)
raise Exception(message)
if response.status_code != 200:
message = '(%s) Failed to execute instrument driver set.' % str(response.status_code)
raise Exception(message)
if response.content:
try:
response_data = json.loads(response.content)
except:
message = 'Malformed data; not in valid json format. (C2 instrument driver set)'
raise Exception(message)
# Evaluate response content for error (review 'value' list in response_data)
if response_data:
status_code, status_type, status_message = _eval_POST_response_data(response_data, None)
response_status['status_code'] = status_code
response_status['message'] = status_message
else:
message = 'No response.content returned from _uframe_post_instrument_driver_set.'
raise Exception(message)
# Add response attribute information to result
result['response'] = response_status
# Get current over_all status, return in attribute 'status' of result
try:
status = _c2_get_instrument_driver_status(reference_designator)
except Exception:
status = {}
result['status'] = status
return result
except Exception:
raise
| 19,921
|
def generate_key(keysize=KEY_SIZE):
"""Generate a RSA key pair
Keyword Arguments:
keysize {int} -- Key (default: {KEY_SIZE})
Returns:
bytes -- Secret key
bytes -- Public key
"""
key = RSA.generate(keysize)
public_key = key.publickey().exportKey()
secret_key = key.exportKey(passphrase=None)
return secret_key, public_key
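Simple usage of the helper above (assumes the Crypto.PublicKey.RSA import from PyCryptodome/PyCrypto that the function uses):

secret_key, public_key = generate_key(2048)
print(public_key.decode().splitlines()[0])  # -----BEGIN PUBLIC KEY-----
print(secret_key.decode().splitlines()[0])  # -----BEGIN RSA PRIVATE KEY-----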
| 19,922
|
def main():
"""Test the linear actuator protocol for two actuators."""
parser = argparse.ArgumentParser(
description='Test the linear actuator protocol for two actuators.'
)
add_argparser_transport_selector(parser)
args = parser.parse_args()
transport_loop = parse_argparser_transport_selector(args)
batch = Batch(transport_loop)
batch.messaging_stack.run()
| 19,923
|
def get_api_endpoint(func):
"""Register a GET endpoint."""
@json_api.route(f"/{func.__name__}", methods=["GET"])
@functools.wraps(func)
def _wrapper(*args, **kwargs):
return jsonify({"success": True, "data": func(*args, **kwargs)})
return _wrapper
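A hypothetical endpoint registered through the decorator, assuming `json_api` is a Flask Blueprint mounted on the application:

@get_api_endpoint
def ping():
    return {"pong": True}
# GET /ping -> {"success": true, "data": {"pong": true}}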
| 19,924
|
def scrape_radio_koeln():
"""
Fetch the currently playing song for Radio Köln.
:return: A Song, if scraping went without error. Return None otherwise.
"""
url = 'http://www.radiokoeln.de/'
tag = get_tag(url, '//div[@id="playlist_title"]')[0]
artist = tag.xpath('.//div/b/text()')
title = tag.xpath('.//div/text()')
tmp = title
title = []
for item in tmp:
s = item.strip()
if s:
title.append(s)
if artist and title:
artist = artist[0]
title = title[-1]
return Song(artist, title)
# else
sys.stderr.write("ERROR in radiokoeln: "+str(artist)+" "+str(title)+"\n")
return None
| 19,925
|
def get_mt4(alias=DEFAULT_MT4_NAME):
"""
Notes:
return mt4 object which is initialized.
Args:
alias(string): mt4 object alias name. default value is DEFAULT_MT4_NAME
Returns:
mt4 object(metatrader.backtest.MT4): instantiated mt4 object
"""
global _mt4s
if alias in _mt4s:
return _mt4s[alias]
else:
raise RuntimeError('mt4[%s] is not initialized.' % alias)
| 19,926
|
def get_log_probability_function(model=None):
"""
    Builds a theano function from a PyMC3 model which takes a numpy array of
    shape ``(n_parameters)`` as an input and returns the total log
    probability of the model. This function takes the **transformed** random
    variables defined within the model context, which is a different behaviour
    from :func:`caustic.utils.get_log_likelihood_function`. The ordering of the
    parameters in the input array should match the ordering of the RVs in model
    context. The purpose of this function is to be able to use external
    samplers with PyMC3 models.
Parameters
----------
model : pymc3.Model
PyMC3 model object.
Returns
-------
    callable
        A function mapping an array of transformed parameter values to the
        total log probability of the model.
"""
model = pm.modelcontext(model)
    if any(
        t in str(model.vars)
        for t in ("_interval__", "_log__", "_lowerbound__", "_upperbound__")
    ):
warnings.warn(
"""Your model contains transformed variables. Keep in mind,
that the compiled log probability function expects the,
transformed variables as an input.""",
)
f = theano.function(model.vars, [model.logpt])
def log_prob(params):
dct = model.bijection.rmap(params[::-1])
args = (dct[k.name] for k in model.vars)
results = f(*args)
return tuple(results)[0]
return log_prob
| 19,927
|
def get_health(check_celery=True):
"""
Gets the health of the all the external services.
:return: dictionary with
key: service name like etcd, celery, elasticsearch
value: dictionary of health status
:rtype: dict
"""
health_status = {
'etcd': _check_etcd(),
'store': _check_store()
}
if check_celery:
health_status['celery'] = _check_celery()
return health_status
| 19,928
|
def add_quotation_items(quotation_items_data):
"""
    Add quotation item information.
    :param quotation_items_data:
    :return: None, or the value of the new record's id
:except:
"""
return db_instance.add(QuotationItems, quotation_items_data)
| 19,929
|
def load_cifar10_human_readable(path: str, img_nums: list) -> np.array:
"""
Loads the Cifar10 images in human readable format.
Args:
path:
            The path to the folder with the CIFAR10 images.
img_nums:
A list with the numbers of the images we want to load.
Returns:
The images as a Mx3x32x32 numpy array.
"""
return load_img(path, img_nums, (3, 32, 32))
| 19,930
|
def identity(target_ftrs, identity_ftrs, output_name=None, output_folder=None, cluster_tolerance="",
problem_fields={}, full_out_path=""):
""" perform identity analysis on target feature class with identity
feature class """
try:
output_location = IN_MEMORY
out_ftrs = os.path.basename(str(identity_ftrs))
if output_folder:
output_location = output_folder
out_ftrs = arcpy.CreateUniqueName(out_ftrs, output_location)
else:
out_ftrs = os.path.join(output_location, out_ftrs)
# add 'identity' in output feature class name if not present
if out_ftrs.find('_identity') == -1:
out_ftrs += '_identity'
out_ftrs = check_name_length(out_ftrs)
# identity operation to combine attributes
cnt = int(arcpy.GetCount_management(identity_ftrs)[0])
if cnt > 0:
arcpy.Identity_analysis(target_ftrs, identity_ftrs, out_ftrs,
"NO_FID", cluster_tolerance)
feature_name = check_name_length("sp" + os.path.basename(str(out_ftrs)))
if output_name:
feature_name = output_name
# convert multiparts to single part, if any
return out_ftrs
return target_ftrs
except Exception as e:
arcpy.AddError(str(e))
| 19,931
|
def get_project_linked_to_object(object_id: int) -> typing.Optional[Project]:
"""
Return the project linked to a given object, or None.
:param object_id: the ID of an existing object
:return: the linked project or None
:raise errors.ObjectDoesNotExistError: if no object with the given ID
exists
"""
association = projects.ProjectObjectAssociation.query.filter_by(
object_id=object_id
).first()
if association is None:
# make sure the object exists
objects.get_object(object_id)
return None
return get_project(association.project_id)
| 19,932
|
def main():
"""
Input 'images/poppy.png' and assign it to 'original', then show it.
Make 'shrink()' function to scale image and redistribute color values, then show it.
"""
original = SimpleImage("images/poppy.png")
original.show()
after_shrink = shrink(original) # scale image and redistribute color values
after_shrink.show()
| 19,933
|
def country_all_year (select_year, country_id):
""" Prints a list of UK countries population for selected year, in descending order.
:param select_year: (str) input year within dataset to analyse.
:param country_id: (str) column containing countries unique ID
:return: Returns a list of countries total population in descending order for selected yea. An example of the
output:
The below table list counties in descending population order for the year 2006.
CTRY20NM
England 50709444
Scotland 4975870
Wales 2985668
Northern Ireland 1743113
"""
select_year = str(select_year)
sum_country_total = country_population.groupby(country_population[country_id])[select_year].sum()
    print('The table below lists the countries in order of descending population for the year ' + select_year + '.')
print(sum_country_total.sort_values(ascending=[False]))
| 19,934
|
def processor(template: Union[str, Path] = None, format_name: str = None) -> Union[None, RecordProcessor]:
"""
Configures the record level processor for either the template or for the format_name
Args:
template: path to template or template as string
format_name: one of the valid registered formatter names
Returns:
        RecordProcessor if a valid template or format_name is provided, None otherwise
Raises:
SpecException when format_name is not registered or if both template and format specified
Examples:
>>> import datacraft
>>> engine = datacraft.outputs.processor(template='/path/to/template.jinja')
>>> engine = datacraft.outputs.processor(template='{{ Inline: {{ variable }}')
>>> formatter = datacraft.outputs.processor(format_name='json')
>>> formatter = datacraft.outputs.processor(format_name='my_custom_registered_format')
"""
if template and format_name:
raise SpecException('Only one of template or format_name should be supplied')
# so name doesn't shadow
_processor = None
if template:
_log.debug('Using template: %s', template)
if os.path.exists(template):
_processor = template_engines.for_file(template)
elif '{{' in template: # type: ignore
_processor = template_engines.string(template) # type: ignore
else:
raise SpecException(f'Unable to determine how to handle template {template}, with type: {type(template)}')
elif format_name:
_log.debug('Using %s formatter for output', format_name)
_processor = _for_format(format_name)
return _processor
| 19,935
|
def calc_distances_for_everyon_in_frame(everyone_in_frame, people_graph, too_far_distance, minimum_distance_change):
"""
:param everyone_in_frame: [PersonPath]
:type everyone_in_frame: list
:param people_graph:
:type people_graph: Graph
:param too_far_distance:
:param minimum_distance_change:
:return:
:rtype: Graph
"""
points = [[person_path.world_frame_coordinates_list.current_frame_coord().x, person_path.world_frame_coordinates_list.current_frame_coord().y] for person_path in everyone_in_frame] # all points of everyone in this frame
points = np.array(points)
ids = [person_path.id_number for person_path in everyone_in_frame]
for index, person_path in enumerate(everyone_in_frame):
x, y = person_path.world_frame_coordinates_list.current_frame_coord_xy()
point = np.array([x, y])
all_euclidean_distances = np.linalg.norm(points - point, axis=1) # calculate all euclidean distances
closer = deque()
further = deque()
for i in range(len(ids)):
id_number = ids[i]
if id_number == person_path.id_number: # if it's the same id as the person_path's id
continue
distance = all_euclidean_distances[i]
people_graph.add_edge(person_path.id_number, id_number, distance)
if distance < too_far_distance: # if it's not too far
event = distance_event(person_path.id_number, id_number, people_graph, minimum_distance_change)
if event == DistanceEvent.CLOSER:
closer.append(id_number)
elif event == DistanceEvent.FURTHER:
further.append(id_number)
if closer:
print('%3d is getting CLOSER to' % person_path.id_number, list(closer))
if further:
print('%3d is getting FURTHER from' % person_path.id_number, list(further))
return people_graph
| 19,936
|
def conv2d(input: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor = None,
stride=1,
padding=0,
dilation=1,
groups=1,
mode=None):
"""Standard conv2d. Returns the input if weight=None."""
if weight is None:
return input
ind = None
if mode is not None:
if padding != 0:
raise ValueError('Cannot input both padding and mode.')
if mode == 'same':
padding = (weight.shape[2] // 2, weight.shape[3] // 2)
if weight.shape[2] % 2 == 0 or weight.shape[3] % 2 == 0:
ind = (slice(-1) if weight.shape[2] % 2 == 0 else slice(None),
slice(-1) if weight.shape[3] % 2 == 0 else slice(None))
elif mode == 'valid':
padding = (0, 0)
elif mode == 'full':
padding = (weight.shape[2] - 1, weight.shape[3] - 1)
else:
raise ValueError('Unknown mode for padding.')
out = F.conv2d(input,
weight,
bias=bias,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups)
if ind is None:
return out
return out[:, :, ind[0], ind[1]]
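A quick shape check of the 'same' mode, assuming the torch / torch.nn.functional imports the function above already uses:

import torch

x = torch.randn(1, 3, 32, 32)  # one RGB 32x32 image
w = torch.randn(8, 3, 3, 3)    # 8 output channels, 3x3 kernels
y = conv2d(x, w, mode='same')
print(y.shape)                 # torch.Size([1, 8, 32, 32]) -- spatial size preserved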
| 19,937
|
def infer(model, loader_test):
"""
Returns the prediction of a model in a dataset.
Parameters
----------
model: PyTorch model
loader_test: PyTorch DataLoader.
Returns
-------
tuple
y_true and y_pred
"""
model.eval()
ys, ys_hat = [], []
for ids, masks, y_true in tqdm(loader_test):
ids = ids.to(device)
masks = masks.to(device)
y_true = y_true.to(device)
y_hat = model(ids, masks)
loss = F.cross_entropy(y_hat, y_true)
y_pred = torch.argmax(y_hat, dim=1)
ys.extend(y_true.cpu().numpy().tolist() )
ys_hat.extend(y_pred.cpu().numpy().tolist())
return ys, ys_hat
| 19,938
|
def validate_project(project_name):
"""
Check the defined project name against keywords, builtins and existing
modules to avoid name clashing
"""
if not project_name_rx.search(project_name):
return None
if keyword.iskeyword(project_name):
return None
if project_name in dir(__builtins__):
return None
try:
__import__(project_name)
return None
except ImportError:
return project_name
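An illustration of the clash checks, assuming the module-level project_name_rx identifier regex used above accepts these names:

print(validate_project("class"))      # None: Python keyword
print(validate_project("json"))       # None: a module of that name is importable
print(validate_project("mysite_42"))  # 'mysite_42', provided no such module exists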
| 19,939
|
def test_port_range_scan():
"""should return result of 'masscan -p1-80 54.250.0.0/16 --rate=500 -oG output.txt'"""
config = ReadConfig.get_all_settings()
output_file_name = "masscan-test_port_range_scan.txt"
try:
thread_holder(config["MASSCAN_TIMEOUT_SECONDS"], config["LOGGER"])
# Design
input_full_path = os.path.join(config["MASSCAN_OUTPUT_FILE_PATH"], "masscan-do_port_range_scan-default_output.txt")
command = '{} -p{} {} --rate=500 -oG {}'.format(config["MASSCAN_TOOL_PATH"], config["MASSCAN_TARGETED_PORT_RANGE"], config["MASSCAN_TARGETED_IP_RANGE"], input_full_path)
config["LOGGER"].info("'{}' is executed!".format(command))
# Execution and dumping
FileOperation.dump_all(output_file_name, OSOperations.execute_shell_command(command))
config["LOGGER"].info("Result dumped into '{}'".format(output_file_name))
# Merging
assert True
except Exception as e:
config["LOGGER"].error("masscan-test_port_range_scan -> {}".format(str(e)))
| 19,940
|
def test_backends_database_es_database_instantiation_with_forbidden_op_type(es):
"""Tests the ES backend instantiation with an op_type that is not allowed."""
# pylint: disable=invalid-name,unused-argument,protected-access
with pytest.raises(BackendParameterException):
ESDatabase(hosts=ES_TEST_HOSTS, index=ES_TEST_INDEX, op_type="foo")
| 19,941
|
def _print_available_filters(supported_filters):
"""Prints information on available filters and their thresholds."""
widths = (20, 40, 20)
data = [("Filter", "Description", "Threshold Values"),
("------", "-----------", "----------------")]
# this is stupid
for f, (d, t, c) in supported_filters.items():
data.append((f, d, t))
print
for row in data:
i = 1
nextline = "\n"
for col, width in zip(row, widths):
print col[:width] + " " * max(0, width - len(col)),
if not i == 2:
i += 1
continue
mycol = col[width:]
mybgn = width + 1
while len(mycol) > 1:
nextline += " " * 21
nextline += mycol[:width]
nextline += " " * (width - len(mycol))
nextline += "\n"
mycol = mycol[width:]
mybgn += width
i += 1
print nextline,
print
return 0
| 19,942
|
def _get_dates(i, *args, **kwargs):
"""
Get dates from arguments
"""
    try:
        start_date = kwargs['start_date']
    except KeyError:
        try:
            start_date = args[i]
        except IndexError:
            start_date = None
    try:
        end_date = kwargs['end_date']
    except KeyError:
        try:
            end_date = args[i + 1]
        except IndexError:
            end_date = None
    start_date, end_date = _sanitize_dates(start_date, end_date)
    return start_date, end_date
| 19,943
|
def _execute(filepath=""):
"""
load pmd file to context.
"""
# load pmd
bl.progress_set('load %s' % filepath, 0.0)
model=reader.read_from_file(filepath)
if not model:
bl.message("fail to load %s" % filepath)
return
bl.progress_set('loaded', 0.1)
# create root object
model_name=model.english_name.decode('cp932')
if len(model_name)==0:
model_name=model.name.decode('cp932')
root=bl.object.createEmpty(model_name)
root[bl.MMD_MB_NAME]=model.name.decode('cp932')
root[bl.MMD_MB_COMMENT]=model.comment.decode('cp932')
root[bl.MMD_COMMENT]=model.english_comment.decode('cp932')
# toon textures
tex_dir=os.path.dirname(filepath)
toonTextures, toonMaterial=__importToonTextures(model, tex_dir)
bl.object.makeParent(root, toonTextures)
# import mesh
mesh_objects=__importMaterialAndMesh(model, tex_dir, toonMaterial)
for o in mesh_objects:
bl.object.makeParent(root, o)
# import armature
armature_object=__importArmature(model)
if armature_object:
bl.object.makeParent(root, armature_object)
armature = bl.object.getData(armature_object)
# add armature modifier
for o in mesh_objects:
bl.modifier.addArmature(o, armature_object)
# Limitation
for n, b in bl.object.getPose(armature_object).bones.items():
poseBoneLimit(n, b)
# import rigid bodies
rigidBodies=__importRigidBodies(model)
if rigidBodies:
bl.object.makeParent(root, rigidBodies)
# import constraints
constraints=__importConstraints(model)
if constraints:
bl.object.makeParent(root, constraints)
bl.object.activate(root)
| 19,944
|
def home_page():
"""Shows home page"""
html = """
<html>
<body>
<h1>Home Page</h1>
<p>Welcome to my simple app!</p>
<a href='/hello'>Go to hello page</a>
</body>
</html>
"""
return html
| 19,945
|
def test_foca_api():
"""Ensure foca() returns a Connexion app instance; valid api field"""
temp_file = create_temporary_copy(
API_CONF,
PATH_SPECS_2_YAML_ORIGINAL,
)
app = foca(temp_file)
assert isinstance(app, App)
os.remove(temp_file)
| 19,946
|
def GetPDFHexString(s, i, iend):
"""Convert and return pdf hex string starting at s[i],
ending at s[iend-1]."""
j = i + 1
v = []
c = ''
jend = iend - 1
while j < jend:
p = _re_pswhitespaceandcomments.match(s, j)
if p:
j = p.end()
d = chr(ordat(s, j))
if c != '':
v.append(FromHexPair(c, d))
c = ''
else:
c = d
j += 1
if c != '':
v.append(FromHexPair(c, '0'))
return ((OSTRING, ''.join(v)), iend)
| 19,947
|
def infer() -> None:
"""Perform inference on an input image."""
# Get the command line arguments, and config from the config.yaml file.
# This config file is also used for training and contains all the relevant
# information regarding the data, model, train and inference details.
args = get_args()
config = get_configurable_parameters(model_config_path=args.model_config_path)
# Get the inferencer. We use .ckpt extension for Torch models and (onnx, bin)
# for the openvino models.
extension = args.weight_path.suffix
inference: Inferencer
if extension in (".ckpt"):
module = import_module("anomalib.deploy.inferencers.torch")
TorchInferencer = getattr(module, "TorchInferencer") # pylint: disable=invalid-name
inference = TorchInferencer(config=config, model_source=args.weight_path, meta_data_path=args.meta_data)
elif extension in (".onnx", ".bin", ".xml"):
module = import_module("anomalib.deploy.inferencers.openvino")
OpenVINOInferencer = getattr(module, "OpenVINOInferencer") # pylint: disable=invalid-name
inference = OpenVINOInferencer(config=config, path=args.weight_path, meta_data_path=args.meta_data)
else:
raise ValueError(
f"Model extension is not supported. Torch Inferencer exptects a .ckpt file,"
f"OpenVINO Inferencer expects either .onnx, .bin or .xml file. Got {extension}"
)
# Perform inference for the given image or image path. if image
# path is provided, `predict` method will read the image from
# file for convenience. We set the superimpose flag to True
# to overlay the predicted anomaly map on top of the input image.
output = inference.predict(image=args.image_path, superimpose=True)
    # In case both anomaly map and score are returned, add the score to the image.
if isinstance(output, tuple):
anomaly_map, score = output
output = add_label(anomaly_map, score)
# Show or save the output image, depending on what's provided as
# the command line argument.
output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
if args.save_path is None:
cv2.imshow("Anomaly Map", output)
else:
cv2.imwrite(filename=str(args.save_path), img=output)
| 19,948
|
def get_edge_syslog_info(edge_id):
"""Get syslog information for specific edge id"""
nsxv = get_nsxv_client()
syslog_info = nsxv.get_edge_syslog(edge_id)[1]
if not syslog_info['enabled']:
return 'Disabled'
output = ""
if 'protocol' in syslog_info:
output += syslog_info['protocol']
if 'serverAddresses' in syslog_info:
for server_address in syslog_info['serverAddresses']['ipAddress']:
output += "\n" + server_address
return output
| 19,949
|
def _get_rec_suffix(operations:List[str]) -> str:
""" finished, checked,
Parameters
----------
operations: list of str,
names of operations to perform (or has performed),
Returns
-------
suffix: str,
suffix of the filename of the preprocessed ecg signal
"""
suffix = "-".join(sorted([item.lower() for item in operations]))
return suffix
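The suffix is lower-cased and order-insensitive, e.g.:

print(_get_rec_suffix(["Bandpass", "Rpeaks"]))  # bandpass-rpeaks
print(_get_rec_suffix(["Rpeaks", "Bandpass"]))  # bandpass-rpeaks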
| 19,950
|
def get_A_text(params, func_type=None):
"""
Get text associated with the fit of A(s)
"""
line1 = r'$A(s|r)$ is assumed to take the form:'
line2 = (r'$A(s|r) = s^{-1}\bigg{(}\frac{s}{\Sigma(r)}\bigg{)}^a '
r'exp\bigg{(}{-\bigg{(}\frac{s}{\Sigma(r)}\bigg{)}^b}\bigg{)}$')
a, b = params['a'], params['b']
line3 = r'where a = {:.4f} and b = {:.4f}'.format(a, b)
text = '\n'.join([line1, line2, line3])
return text
| 19,951
|
def test_cifar10_basic():
"""
Validate CIFAR10
"""
logger.info("Test Cifar10Dataset Op")
# case 0: test loading the whole dataset
data0 = ds.Cifar10Dataset(DATA_DIR_10)
num_iter0 = 0
for _ in data0.create_dict_iterator(num_epochs=1):
num_iter0 += 1
assert num_iter0 == 10000
# case 1: test num_samples
data1 = ds.Cifar10Dataset(DATA_DIR_10, num_samples=100)
num_iter1 = 0
for _ in data1.create_dict_iterator(num_epochs=1):
num_iter1 += 1
assert num_iter1 == 100
# case 2: test num_parallel_workers
data2 = ds.Cifar10Dataset(DATA_DIR_10, num_samples=50, num_parallel_workers=1)
num_iter2 = 0
for _ in data2.create_dict_iterator(num_epochs=1):
num_iter2 += 1
assert num_iter2 == 50
# case 3: test repeat
data3 = ds.Cifar10Dataset(DATA_DIR_10, num_samples=100)
data3 = data3.repeat(3)
num_iter3 = 0
for _ in data3.create_dict_iterator(num_epochs=1):
num_iter3 += 1
assert num_iter3 == 300
# case 4: test batch with drop_remainder=False
data4 = ds.Cifar10Dataset(DATA_DIR_10, num_samples=100)
assert data4.get_dataset_size() == 100
assert data4.get_batch_size() == 1
data4 = data4.batch(batch_size=7) # drop_remainder is default to be False
assert data4.get_dataset_size() == 15
assert data4.get_batch_size() == 7
num_iter4 = 0
for _ in data4.create_dict_iterator(num_epochs=1):
num_iter4 += 1
assert num_iter4 == 15
# case 5: test batch with drop_remainder=True
data5 = ds.Cifar10Dataset(DATA_DIR_10, num_samples=100)
assert data5.get_dataset_size() == 100
assert data5.get_batch_size() == 1
data5 = data5.batch(batch_size=7, drop_remainder=True) # the rest of incomplete batch will be dropped
assert data5.get_dataset_size() == 14
assert data5.get_batch_size() == 7
num_iter5 = 0
for _ in data5.create_dict_iterator(num_epochs=1):
num_iter5 += 1
assert num_iter5 == 14
| 19,952
|
def transform_to_dict(closest_list: list) -> dict:
"""
Returns dict {(latitude, longitude): {film1, film2, ...}, ...} from
closest_list [[film1, (latitude, longitude)], ...], where film1,
film2 are titles of films, (latitude, longitude) is a coordinates of
a place where those films were shoot.
>>> transform_to_dict([["film1", (49, 24)]])
{(49, 24): {'film1'}}
"""
closest_dict = {}
for film, coord in closest_list:
if coord in closest_dict:
closest_dict[coord].add(film)
else:
closest_dict[coord] = {film}
return closest_dict
| 19,953
|
def _enrich_errors(
errors: Iterable[ErrorDetails], tag: Tag, loc: Any = NO_ERROR_PATH
) -> Iterable[ErrorDetails]:
"""
Enrich the stream of errors with tag and location information.
Tags are useful for determining which specs were evaluated to produce the error.
Location information can help callers pinpoint exactly where in their data structure
the error occurred. If no location information is relevant for the error (perhaps
for a scalar spec type), then the default ``NO_ERROR_PATH`` should be used.
"""
for error in errors:
yield error.with_details(tag, loc=loc)
| 19,954
|
def fixed_params(control_file):
"""
Adds fixed parameters to the control file for the second chromEvol run
:param control_file: file handler of control file
:return: NA
"""
control_file.write("_logFile log.txt\n")
control_file.write("_maxChrNum -10\n")
control_file.write("_minChrNum -1\n")
control_file.write("_branchMul 999\n")
control_file.write("_simulationsNum 1000\n")
control_file.write("_logValue 6\n")
control_file.write("_maxOptimizationIterations 5\n")
control_file.write("_epsilonLLimprovement 0.01\n")
control_file.write("_optimizePointsNum 10,2,1\n")
control_file.write("_optimizeIterNum 0,1,3\n")
control_file.write("_gainConstR 1\n")
control_file.write("_lossConstR 1\n")
control_file.write("_baseNumberR 1\n")
control_file.write("_bOptBaseNumber 1\n")
| 19,955
|
def test_count_rows_in_2d_arrays_with_nans():
"""Test that nan-containinr rows in 2d arrays are counted correctly."""
data_1_row = np.array([[1, 2, 3]])
data_2_rows = np.array([[1, 2, 3], [1, 2, 3], [np.nan, 2, 3], [1, np.nan, 3]])
data_3_rows = np.array(
[[1, 2, 3], [np.nan, 2, 3], [1, np.nan, 3], [np.nan, np.nan, np.nan]]
)
assert count_rows_with_nans(data_1_row) == 0
assert count_rows_with_nans(data_2_rows) == 2
assert count_rows_with_nans(data_3_rows) == 3
| 19,956
|
def ECGDataQuality(datastream: DataStream,
windowsize: float = 5.0,
bufferLength: int = 3,
acceptableOutlierPercent: int = 50,
outlierThresholdHigh: int = 4500,
outlierThresholdLow: int = 20,
badSegmentThreshod: int = 2,
ecgBandLooseThreshold: int = 47) -> AnnotationStream:
"""
:param datastream:
:param windowsize:
:param bufferLength:
:param acceptableOutlierPercent:
:param outlierThresholdHigh:
:param outlierThresholdLow:
:param badSegmentThreshod:
:param ecgBandLooseThreshold:
:return:
"""
# windows = window(datastream.datapoints, window_size=windowsize)
# TODO: Do something with windows here
result = DataStream.from_datastream(input_streams=[datastream])
# Do something here for data quality
# ecgQuality = []
# for i in range(1, 10):
# ecgQuality.append(Span(result.getID(),
# starttime=datetime.now(),
# endtime=datetime.now(),
# label=DataQuality.GOOD))
#
# result.set_spans(ecgQuality)
return result
| 19,957
|
def deprecation_mark(deprecation_note: str) -> None:
"""
Function used to mark something deprecated in this package
:param deprecation_note: What and when was deprecated message,
that will be collected by loggers
"""
warn(deprecation_note, DeprecationWarning)
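A typical call site; old_api and new_api below are just illustrative stand-ins:

def old_api():
    deprecation_mark("old_api() is deprecated; use new_api() instead")
    ...  # delegate to the replacement implementation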
| 19,958
|
def plot_class_activation_map_widget(model, time_series, labels, fs):
"""
Launch interactive plotting widget.
Parameters
----------
model : object
Active model with live session
time_series : np.array([m, length])
image array
labels : np.array([m,])
a 1D array of length m training examples containing class labels
fs : int
sample frequency
"""
# Launch interactive widget
interact(
plot_class_activation_map,
model=fixed(model),
index=(0, labels.shape[0]-1, 1),
time_series=fixed(time_series),
labels=fixed(labels),
fs=fixed(fs)
)
| 19,959
|
def get_numpy_val_from_form_input(input_name):
"""Get a NumPy-compatible numerical value from the request object"""
return get_numpy_val(input_name, request.form[input_name])
| 19,960
|
def load_model(model, path):
"""Load a the model parameters from a file and set them.
Parameters
----------
model : a :class:`Layer` instance
The model with unset parameters.
path : string
The file with the model parameters.
Returns
-------
a :class:`Layer` instance
The given model with set parameters.
"""
with numpy.load(path) as fobj:
values = [fobj['arr_%d' % i] for i in range(len(fobj.files))]
set_all_param_values(model, values)
return model
| 19,961
|
def transform_table(path, df_mean, df_stabw, properties):
"""
    This function creates a DataFrame with mean values
    and errors, including the units, for every feature and calls the plot
    function for them.
Args:
path (string): path to store the plots
        df_mean (pandas.DataFrame): DataFrame with means of all features
        df_stabw (pandas.DataFrame): DataFrame with standard deviations of all features
properties (dictionary): dictionary with parameters for processing
"""
params = df_mean.T.index.tolist()
# creating dataframe with means and errors
for param in params:
        # check whether the parameter name includes a unit
        try:
            unit = param.split()[1]
        except IndexError:
            unit = '[]'
mean = df_mean[param]
stabw = df_stabw[param]
df_plot = pd.DataFrame({'mean': mean,'stabw': stabw})
# calling plot function
plot_mean(path, df_plot, param, unit, properties)
| 19,962
|
def node_id_at_cells(shape):
"""Node ID at each cell.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
ndarray :
ID of node associated with each cell.
Examples
--------
>>> from landlab.grid.structured_quad.cells import node_id_at_cells
>>> node_id_at_cells((3, 4))
array([[5, 6]])
"""
node_ids = nodes.node_ids(shape)
return node_ids[1:-1, 1:-1].copy().reshape(shape_of_cells(shape))
| 19,963
|
def search_media(search_queries, media, ignore_likes=True):
"""Return a list of media matching a queary that searches for a match in the comments, likes, and tags in a list of media"""
# Initialize update message
update_message = print_update_message(len(media))
update_message.send(None)
# Initialize result data
if type(search_queries) is not list: search_queries = [search_queries]
matches = [ [] for _ in range(len(search_queries))]
# Iterate through media looking for matches to search_queries
for idx0, medium in enumerate(media):
results = search_medium(search_queries, medium, ignore_likes=ignore_likes)
for idx1, result in enumerate(results):
if result:
matches[idx1].append(medium)
# Send update message
message = Template(
'Found {} matches in {} media out of {}. {} api calls remaining'.format(
repr([len(x) for x in matches]), idx0+1, len(media),
_api.last_used_api.x_ratelimit_remaining) )
update_message.send( (idx0, message) )
return matches
| 19,964
|
def ensure_experiment_lock(obj: Union[Experiment, Scan], user: User) -> None:
"""
Raise an exception if the given user does not possess the experiment lock.
This should be called by all REST `create` methods that correspond to an object
within a Experiment, as new object creation policies are not handled by the
`UserHoldsExperimentLock` class.
"""
experiment: Experiment = obj if isinstance(obj, Experiment) else obj.experiment
if experiment.lock_owner is None:
raise NotLocked()
if experiment.lock_owner != user:
raise LockContention()
| 19,965
|
def emu_getuid(mu: Uc, emu_env: EmulatorEnv):
"""
Emulates getuid syscall functionality.
"""
uid = os.getuid()
mu.reg_write(UC_X86_REG_RAX, uid)
| 19,966
|
def validation_plot_thesis(show_plot=True, results_2010=None, results_2011=None, model_run="cosumnes_michigan_bar"):
"""
Hardcoded items because they're for my thesis, not meant for more general use.
:return:
"""
if results_2010 is None:
results_2010 = validate_flow_methods("{}_2010".format(model_run), show_plot=False)
if results_2011 is None:
results_2011 = validate_flow_methods("{}_2011".format(model_run), show_plot=False)
# Creates two subplots and unpacks the output array immediately
fig = plt.figure()
plt.margins(0)
full_plot = fig.add_subplot(1, 1, 1) # The big subplot
full_plot.set_xlabel("Percent of Available Flow")
full_plot.set_ylabel("Environmental Benefit", labelpad=20) # move it off the tick values
# Turn off axis lines and ticks of the big subplot
full_plot.spines['top'].set_color('none')
full_plot.spines['bottom'].set_color('none')
full_plot.spines['left'].set_color('none')
full_plot.spines['right'].set_color('none')
full_plot.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
left_plot = fig.add_subplot(1, 2, 1) # The big subplot
left_plot.plot(results_2010["x"], results_2010["y"])
left_plot.set_title('2010')
right_plot = fig.add_subplot(1, 2, 2, sharey=left_plot) # The big subplot
right_plot.plot(results_2011["x"], results_2011["y"])
right_plot.set_title('2011')
# remove the axis values on the left to make space
right_plot.tick_params(left=True, labelleft=False, )
plt.savefig(os.path.join(settings.BASE_DIR, "data", "results", "validation_plot_thesis.png"), dpi=300)
if show_plot:
plt.show()
plt.close()
return results_2010, results_2011
| 19,967
|
def get_produced_messages(func):
"""Returns a list of message fqn and channel pairs.
Args:
func (Function): function object
Returns:
list
"""
result = []
for msg, channel in func.produces:
result.append((_build_msg_fqn(msg), channel))
return result
| 19,968
|
def find_coverage_files(src_path: Path) -> Sequence:
"""
Find the coverage files within the specified src_path.
Parameters:
src_path (Path): The path in which to look for the .coverage files.
Returns:
(Sequence) The set of .coverage files within the specified folder.
"""
    return sorted(Path(src_path).glob("**/*.coverage"))  # materialize so the result matches the Sequence return type
| 19,969
|
def checkrunsh(filename):
"""
write a temporary (run.sh) file and than checks it againts the run.sh file already there
This is used to double check that the pipeline is not being called with different options
"""
tempdir = tempfile.mkdtemp()
tmprunsh = os.path.join(tempdir,os.path.basename(filename))
makeCIVETrunsh(tmprunsh)
if filecmp.cmp(filename, tmprunsh):
if DEBUG: print("{} already written - using it".format(filename))
else:
# If the two files differ - then we use difflib package to print differences to screen
print('#############################################################\n')
print('# Found differences in {} these are marked with (+) '.format(filename))
print('#############################################################')
with open(filename) as f1, open(tmprunsh) as f2:
differ = difflib.Differ()
print(''.join(differ.compare(f1.readlines(), f2.readlines())))
sys.exit("\nOld {} doesn't match parameters of this run....Exiting".format(filename))
shutil.rmtree(tempdir)
| 19,970
|
def get_exploration_summary_from_model(exp_summary_model):
"""Returns an ExplorationSummary domain object.
Args:
exp_summary_model: ExplorationSummary. An ExplorationSummary model
instance.
Returns:
        ExplorationSummary. The summary domain object corresponding to the
given exploration summary model.
"""
return exp_domain.ExplorationSummary(
exp_summary_model.id, exp_summary_model.title,
exp_summary_model.category, exp_summary_model.objective,
exp_summary_model.language_code, exp_summary_model.tags,
exp_summary_model.ratings, exp_summary_model.scaled_average_rating,
exp_summary_model.status, exp_summary_model.community_owned,
exp_summary_model.owner_ids, exp_summary_model.editor_ids,
exp_summary_model.voice_artist_ids, exp_summary_model.viewer_ids,
exp_summary_model.contributor_ids,
exp_summary_model.contributors_summary, exp_summary_model.version,
exp_summary_model.exploration_model_created_on,
exp_summary_model.exploration_model_last_updated,
exp_summary_model.first_published_msec
)
| 19,971
|
def get_service(api_name, api_version, scope, key_file_location,
service_account_email):
"""Get a service that communicates to a Google API.
Args:
api_name: The name of the api to connect to.
api_version: The api version to connect to.
        scope: A list of auth scopes to authorize for the application.
key_file_location: The path to a valid service account p12 key file.
service_account_email: The service account email address.
Returns:
A service that is connected to the specified API.
"""
    credentials = ServiceAccountCredentials.from_p12_keyfile(
        service_account_email, key_file_location, scopes=scope)
http = credentials.authorize(httplib2.Http())
# Build the service object.
service = build(api_name, api_version, http=http)
return service
# Insert user email here
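# Hedged usage sketch (all values below are placeholders, not real credentials):
#
#     analytics = get_service(
#         api_name='analytics',
#         api_version='v3',
#         scope=['https://www.googleapis.com/auth/analytics.readonly'],
#         key_file_location='client_secrets.p12',
#         service_account_email='service-account@example.iam.gserviceaccount.com')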
| 19,972
|
def model_map(lon, lat, alt, comp, binsize = 0.1, nmax = 134, a = 3393.5):
"""
    Calculates a map of one component of the crustal magnetic field model for a given altitude.
Parameters:
lon: array
The longitude range, in degrees. Ex.: [20., 50.].
lat: array
The latitude range, in degrees.
alt: float
        The altitude at which the map will be computed, in km.
comp: string
The desired magnetic field component, in spherical coordinates. Options are 'Br', 'Btheta', 'Bphi', and 'Bt'.
binsize: float, list, optional
The resolution of the grid. If a float, apply the same binsize for longitude and latitude.
If a list, the first value represents the longitude binsize and the second, the latitude binsize.
nmax: integer, optional
The maximum degree and order of the functions.
a: float, optional
The radius of the planet. Default is the Mars' radius.
Returns:
A lon X lat array containing the magnetic field component.
"""
# Raise an AssertionError if arguments are invalid
assert comp == 'Br' or comp == 'Btheta' or comp == 'Bphi' or comp == 'Bt', "Check argument for comp"
assert type(binsize) is float or type(binsize) is list, "Argument for binsize should be a float or a list"
# Import the coefficient files
from IsabelaFunctions.langlais_coeff import glm as g
from IsabelaFunctions.langlais_coeff import hlm as h
# Calculate r, theta, phi, and the Legendre functions
r = a + alt
if type(binsize) is float:
binsize = [binsize, binsize]
lat_len = int(round((lat[1] - lat[0]) / binsize[1] + 1.0))
lon_len = int(round((lon[1] - lon[0]) / binsize[0] + 1.0))
longitude = np.deg2rad(np.linspace(lon[0], lon[1], lon_len))
latitude = np.linspace(lat[0], lat[1], lat_len)
P = np.empty((nmax+1, nmax+1, lat_len)) * np.nan
dP = np.empty_like(P) * np.nan
for theta in range(lat_len):
P[:, :, theta], dP[:, :, theta] = legendre_schmidt_Pyshtools(latitude[theta])
cos = np.empty((nmax+1, lon_len)) * np.nan
sen = np.empty_like(cos) * np.nan
for phi in range(lon_len):
for m in range(nmax+1):
cos[m, phi] = np.cos(m * longitude[phi])
sen[m, phi] = np.sin(m * longitude[phi])
a_over_r = np.empty((nmax+1)) * np.nan
for n in range(nmax+1):
a_over_r[n] = (a/r)**(n+2)
if comp == 'Bt':
Br = np.zeros((lon_len, lat_len))
Btheta = np.zeros((lon_len, lat_len))
Bphi = np.zeros((lon_len, lat_len))
sen_theta = np.sin(np.deg2rad(90.0 - latitude))
for n in range(1, nmax+1):
for m in range(n+1):
tmp1 = g[n, m] * cos[m, :] + h[n, m] * sen[m, :]
tmp2 = np.outer(tmp1, P[n, m, :])
tmp3 = tmp2 * (n+1) * a_over_r[n]
Br += tmp3
tmp2 = np.outer(tmp1, dP[n, m, :] * sen_theta)
tmp3 = tmp2 * a_over_r[n]
Btheta += tmp3
tmp1 = g[n, m] * sen[m, :] + h[n, m] * cos[m, :]
tmp2 = np.outer(tmp1, P[n, m, :])
tmp3 = tmp2 * m * a_over_r[n]
Bphi += tmp3
for theta in range(lat_len):
Bphi[:, theta] /= sen_theta[theta]
B = np.sqrt(Br**2 + Btheta**2 + Bphi**2)
else:
B = np.zeros((lon_len, lat_len))
if comp == 'Br':
for n in tqdm(range(1, nmax+1)):
for m in range(n+1):
tmp1 = g[n, m] * cos[m, :] + h[n, m] * sen[m, :]
tmp2 = np.outer(tmp1, P[n, m, :])
tmp3 = tmp2 * (n+1) * a_over_r[n]
B += tmp3
elif comp == 'Btheta':
sen_theta = np.sin(np.deg2rad(90.0 - latitude))
for n in tqdm(range(1, nmax+1)):
for m in range(n+1):
tmp1 = g[n, m] * cos[m, :] + h[n, m] * sen[m, :]
tmp2 = np.outer(tmp1, dP[n, m, :] * sen_theta)
tmp3 = tmp2 * a_over_r[n]
B += tmp3
else:
sen_theta = np.sin(np.deg2rad(90.0 - latitude))
for n in tqdm(range(1, nmax+1)):
for m in range(n+1):
tmp1 = g[n, m] * sen[m, :] + h[n, m] * cos[m, :]
tmp2 = np.outer(tmp1, P[n, m, :])
tmp3 = tmp2 * m * a_over_r[n]
B += tmp3
for theta in range(lat_len):
B[:, theta] /= sen_theta[theta]
return B.T
| 19,973
|
def parse_command(incoming_text):
"""
incoming_text: A text string to parse for docker commands
returns: a fully validated docker command
"""
docker_action = ''
parse1 = re.compile(r"(?<=\bdocker\s)(\w+)")
match_obj = parse1.search(incoming_text)
if match_obj:
docker_action = match_obj.group()
print("Got docker action %s" % (docker_action,))
if docker_action and docker_action in DOCKER_SUPPORTED:
# Use this type of code if we want to limit the docker commands
#parse2 = re.compile(r"(?<=\b%s\s)(\w+)" % docker_action)
#match_obj = parse2.search(incoming_text)
#if match_obj:
# docker_subcommand = match_obj.group()
# if docker_subcommand in SUBCOMMAND_SUPPORTED:
# return "docker %s %s" % (docker_action, docker_subcommand)
# Otherwise let it fly and return help if it pumps mud.
print "returning docker %s%s" % (docker_action, incoming_text[match_obj.end():])
return "docker %s%s" % (docker_action, incoming_text[match_obj.end():])
return docker_usage_message()
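# Illustrative behaviour, assuming "ps" is listed in the DOCKER_SUPPORTED
# constant defined elsewhere in this module and "destroy" is not:
#
#     parse_command("hey bot, docker ps -a")      # -> "docker ps -a"
#     parse_command("docker destroy everything")  # -> docker_usage_message()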
| 19,974
|
def deploy_heroku_ui(ctx, token, app="striker-vn-ui"):
"""
Deploy UI docker image on heroku
"""
ctx.run(docker("login",
f"--username=_",
f"--password={token}",
"registry.heroku.com"))
with ctx.cd(get_ui_directory()):
ctx.run(heroku("container:push web", f"--app={app}"))
ctx.run(heroku("container:release web", f"--app={app}"))
| 19,975
|
def crt_fill_parameters(config_path, overwrite_flag=False, debug_flag=False):
"""Calculate GSFLOW CRT Fill Parameters
Args:
config_file (str): Project config file path
ovewrite_flag (bool): if True, overwrite existing files
debug_flag (bool): if True, enable debug level logging
Returns:
None
"""
# Initialize hru_parameters class
hru = support.HRUParameters(config_path)
# Open input parameter config file
inputs_cfg = ConfigParser.ConfigParser()
try:
inputs_cfg.readfp(open(config_path))
except Exception as e:
logging.error(
'\nERROR: Config file could not be read, '
'is not an input file, or does not exist\n'
' config_file = {}\n'
' Exception: {}\n'.format(config_path, e))
sys.exit()
# Log DEBUG to file
log_file_name = 'crt_fill_parameters_log.txt'
log_console = logging.FileHandler(
filename=os.path.join(hru.log_ws, log_file_name), mode='w')
log_console.setLevel(logging.DEBUG)
log_console.setFormatter(logging.Formatter('%(message)s'))
logging.getLogger('').addHandler(log_console)
logging.info('\nGSFLOW CRT Fill Parameters')
# Parameters
exit_seg = 0
# CRT Parameters
try:
use_crt_fill_flag = inputs_cfg.getboolean(
'INPUTS', 'use_crt_fill_flag')
except ConfigParser.NoOptionError:
use_crt_fill_flag = False
logging.info(
' Missing INI parameter, setting {} = {}'.format(
'use_crt_fill_flag', use_crt_fill_flag))
try:
crt_hruflg = inputs_cfg.getint('INPUTS', 'crt_hruflg')
except ConfigParser.NoOptionError:
crt_hruflg = 0
logging.info(
' Missing INI parameter, setting {} = {}'.format(
'crt_hruflg', crt_hruflg))
try:
crt_flowflg = inputs_cfg.getint('INPUTS', 'crt_flowflg')
except ConfigParser.NoOptionError:
crt_flowflg = 1
logging.info(
' Missing INI parameter, setting {} = {}'.format(
'crt_flowflg', crt_flowflg))
try:
crt_dpit = inputs_cfg.getfloat('INPUTS', 'crt_dpit')
except ConfigParser.NoOptionError:
crt_dpit = 0.01
logging.info(
' Missing INI parameter, setting {} = {}'.format(
'crt_dpit', crt_dpit))
try:
crt_outitmax = inputs_cfg.getint('INPUTS', 'crt_outitmax')
except ConfigParser.NoOptionError:
crt_outitmax = 100000
logging.info(
' Missing INI parameter, setting {} = {}'.format(
'crt_outitmax', crt_outitmax))
# Intentionally not allowing user to change this value
crt_iprn = 1
# CRT Fill Parameters
fill_ws_name = 'fill_work'
fill_strmflg = 0
fill_visflg = 0
fill_ifill = 1
# CRT Executable
crt_exe_path = inputs_cfg.get('INPUTS', 'crt_exe_path')
output_name = 'outputstat.txt'
# Check input paths
if not arcpy.Exists(hru.polygon_path):
logging.error(
'\nERROR: Fishnet ({}) does not exist\n'.format(
hru.polygon_path))
sys.exit()
# Check that input fields exist and have data
# Fields generated by hru_parameters
for f in [hru.type_field, hru.row_field, hru.col_field]:
if not arcpy.ListFields(hru.polygon_path, f):
logging.error(
'\nERROR: Input field {} is not present in fishnet'
'\nERROR: Try re-running hru_parameters.py\n'.format(f))
sys.exit()
elif support.field_stat_func(hru.polygon_path, f, 'MAXIMUM') == 0:
logging.error(
'\nERROR: Input field {} contains only 0'
'\nERROR: Try re-running hru_parameters.py\n'.format(f))
sys.exit()
# Fields generated by dem_2_streams
for f in [hru.irunbound_field, hru.iseg_field, hru.flow_dir_field,
hru.outflow_field, hru.subbasin_field]:
if not arcpy.ListFields(hru.polygon_path, f):
logging.error(
'\nERROR: Input field {} is not present in fishnet'
'\nERROR: Try re-running dem_2_streams.py\n'.format(f))
sys.exit()
elif support.field_stat_func(hru.polygon_path, f, 'MAXIMUM') == 0:
logging.error(
'\nERROR: Input field {} contains only 0'
'\nERROR: Try re-running dem_2_streams.py\n'.format(f))
sys.exit()
# Build output folder if necessary
fill_ws = os.path.join(hru.param_ws, fill_ws_name)
if not os.path.isdir(fill_ws):
os.makedirs(fill_ws)
# Copy CRT executable if necessary
crt_exe_name = os.path.basename(crt_exe_path)
if not os.path.isfile(os.path.join(fill_ws, crt_exe_name)):
shutil.copy(crt_exe_path, fill_ws)
if not os.path.isfile(os.path.join(fill_ws, crt_exe_name)):
logging.error(
'\nERROR: CRT executable ({}) does not exist\n'.format(
os.path.join(fill_ws, crt_exe_name)))
sys.exit()
# Fill files
fill_hru_casc_path = os.path.join(fill_ws, 'HRU_CASC.DAT')
fill_outflow_hru_path = os.path.join(fill_ws, 'OUTFLOW_HRU.DAT')
fill_land_elev_path = os.path.join(fill_ws, 'LAND_ELEV.DAT')
fill_xy_path = os.path.join(fill_ws, 'XY.DAT')
# Output names
# dem_adj_raster_name = 'dem_adj'
# hru_type_raster_name = 'hru_type'
# lakes_raster_name = 'lakes'
# streams_raster_name = 'streams'
# iseg_raster_name = 'iseg'
# irunbound_raster_name = 'irunbound'
# Output raster paths
# dem_adj_raster = os.path.join(fill_ws, dem_adj_raster_name + '.img')
# hru_type_raster = os.path.join(fill_ws, hru_type_raster_name + '.img')
# Output ascii paths
# a_fmt = '{}_ascii.txt'
# dem_adj_ascii = os.path.join(fill_ws, a_fmt.format(dem_adj_raster_name))
# hru_type_ascii = os.path.join(fill_ws, a_fmt.format(hru_type_raster_name))
# Set ArcGIS environment variables
arcpy.CheckOutExtension('Spatial')
env.overwriteOutput = True
# env.pyramid = 'PYRAMIDS -1'
env.pyramid = 'PYRAMIDS 0'
env.workspace = fill_ws
env.scratchWorkspace = hru.scratch_ws
# Add fields if necessary
logging.info('\nAdding fields if necessary')
support.add_field_func(hru.polygon_path, hru.krch_field, 'LONG')
support.add_field_func(hru.polygon_path, hru.irch_field, 'LONG')
support.add_field_func(hru.polygon_path, hru.jrch_field, 'LONG')
support.add_field_func(hru.polygon_path, hru.iseg_field, 'LONG')
support.add_field_func(hru.polygon_path, hru.reach_field, 'LONG')
# add_field_func(hru.polygon_path, hru.rchlen_field, 'LONG')
support.add_field_func(hru.polygon_path, hru.maxreach_field, 'LONG')
support.add_field_func(hru.polygon_path, hru.outseg_field, 'LONG')
support.add_field_func(hru.polygon_path, hru.irunbound_field, 'LONG')
support.add_field_func(hru.polygon_path, hru.crt_elev_field, 'DOUBLE')
support.add_field_func(hru.polygon_path, hru.crt_fill_field, 'DOUBLE')
# Calculate KRCH, IRCH, JRCH for stream segments
logging.info('\nKRCH, IRCH, & JRCH for streams')
fields = [
hru.type_field, hru.iseg_field, hru.row_field, hru.col_field,
hru.krch_field, hru.irch_field, hru.jrch_field]
with arcpy.da.UpdateCursor(hru.polygon_path, fields) as update_c:
for row in update_c:
if (int(row[0]) in [1, 3] and int(row[1]) > 0):
row[4], row[5], row[6] = 1, int(row[2]), int(row[3])
else:
row[4], row[5], row[6] = 0, 0, 0
update_c.updateRow(row)
# Get list of segments and downstream cell for each stream/lake cell
    # Downstream is calculated from flow direction
# Use IRUNBOUND instead of ISEG, since ISEG will be zeroed for lakes
logging.info('Cell out-flow dictionary')
cell_dict = dict()
fields = [
hru.type_field, hru.krch_field, hru.lake_id_field, hru.iseg_field,
hru.irunbound_field, hru.dem_adj_field, hru.flow_dir_field,
hru.col_field, hru.row_field, hru.id_field]
for row in arcpy.da.SearchCursor(hru.polygon_path, fields):
# Skip inactive cells
if int(row[0]) == 0:
continue
# Skip non-lake and non-stream cells
if (int(row[1]) == 0 and int(row[2]) == 0):
continue
# Read in parameters
cell = (int(row[7]), int(row[8]))
# support.next_row_col(FLOW_DIR, CELL)
# HRU_ID, ISEG, NEXT_CELL, DEM_ADJ, X, X, X
cell_dict[cell] = [
int(row[9]), int(row[4]), support.next_row_col(int(row[6]), cell),
float(row[5]), 0, 0, 0]
del cell
# Build list of unique segments
iseg_list = sorted(list(set([v[1] for v in cell_dict.values()])))
# Calculate IREACH and OUTSEG
logging.info('Calculate {} and {}'.format(
hru.reach_field, hru.outseg_field))
outseg_dict = dict()
for iseg in iseg_list:
# logging.debug(' Segment: {}'.format(iseg))
# Subset of cell_dict for current iseg
iseg_dict = dict(
[(k, v) for k, v in cell_dict.items() if v[1] == iseg])
# List of all cells in current iseg
iseg_cells = iseg_dict.keys()
# List of out_cells for all cells in current iseg
out_cells = [value[2] for value in iseg_dict.values()]
# Every iseg will (should?) have one out_cell
out_cell = list(set(out_cells) - set(iseg_cells))[0]
        # If there is no output cell, assume the edge of the domain
try:
outseg = cell_dict[out_cell][1]
except KeyError:
outseg = exit_seg
# Track sub-basin outseg
outseg_dict[iseg] = outseg
if iseg > 0:
# Calculate reach number for each cell
reach_dict = dict()
start_cell = list(set(iseg_cells) - set(out_cells))[0]
for i in xrange(len(out_cells)):
# logging.debug(' Reach: {} Cell: {}'.format(i+1, start_cell))
reach_dict[start_cell] = i + 1
start_cell = iseg_dict[start_cell][2]
# For each cell in iseg, save outseg, reach, & maxreach
for iseg_cell in iseg_cells:
cell_dict[iseg_cell][4:] = [
outseg, reach_dict[iseg_cell], len(iseg_cells)]
del reach_dict, start_cell
else:
# For each lake segment cell, only save outseg
# All lake cells are routed directly to the outseg
for iseg_cell in iseg_cells:
cell_dict[iseg_cell][4:] = [outseg, 0, 0]
del iseg_dict, iseg_cells, iseg
del out_cells, out_cell, outseg
# Saving ireach and outseg
logging.info('Save {} and {}'.format(hru.reach_field, hru.outseg_field))
fields = [
hru.type_field, hru.iseg_field, hru.col_field, hru.row_field,
hru.outseg_field, hru.reach_field, hru.maxreach_field]
with arcpy.da.UpdateCursor(hru.polygon_path, fields) as update_c:
for row in update_c:
# if (int(row[0]) > 0 and int(row[1]) > 0):
# #DEADBEEF - I'm not sure why only iseg > 0 in above line
# DEADBEEF - This should set outseg for streams and lakes
if (int(row[0]) > 0 and int(row[1]) != 0):
row[4:] = cell_dict[(int(row[2]), int(row[3]))][4:]
else:
row[4:] = [0, 0, 0]
update_c.updateRow(row)
# Set all lake iseg to 0
logging.info('Lake {}'.format(hru.iseg_field))
update_rows = arcpy.UpdateCursor(hru.polygon_path)
for row in update_rows:
if int(row.getValue(hru.type_field)) != 2:
continue
iseg = int(row.getValue(hru.iseg_field))
if iseg < 0:
row.setValue(hru.iseg_field, 0)
update_rows.updateRow(row)
del row, iseg
del update_rows
# Set environment parameters
env.extent = hru.extent
env.cellsize = hru.cs
env.outputCoordinateSystem = hru.sr
# # Build rasters
# logging.info('\nOutput model grid rasters')
# arcpy.PolygonToRaster_conversion(
# hru.polygon_path, hru.type_field, hru_type_raster,
# 'CELL_CENTER', '', hru.cs)
# arcpy.PolygonToRaster_conversion(
# hru.polygon_path, hru.dem_adj_field, dem_adj_raster,
# 'CELL_CENTER', '', hru.cs)
#
# # Build rasters
# logging.info('Output model grid ascii')
# arcpy.RasterToASCII_conversion(hru_type_raster, hru_type_ascii)
# arcpy.RasterToASCII_conversion(dem_adj_raster, dem_adj_ascii)
logging.debug('\nRemoving existing CRT fill files')
if os.path.isfile(fill_outflow_hru_path):
os.remove(fill_outflow_hru_path)
if os.path.isfile(fill_hru_casc_path):
os.remove(fill_hru_casc_path)
if os.path.isfile(fill_land_elev_path):
os.remove(fill_land_elev_path)
if os.path.isfile(fill_xy_path):
os.remove(fill_xy_path)
# Input parameters files for Cascade Routing Tool (CRT)
logging.info('\nBuilding output CRT fill files')
# Generate OUTFLOW_HRU.DAT for CRT
# Outflow cells exit the model to inactive cells or out of the domain
# Outflow field is set in dem_2_streams
logging.info(' {}'.format(os.path.basename(fill_outflow_hru_path)))
outflow_hru_list = []
fields = [
hru.type_field, hru.outflow_field, hru.subbasin_field,
hru.row_field, hru.col_field]
for row in arcpy.da.SearchCursor(hru.polygon_path, fields):
if int(row[0]) != 0 and int(row[1]) == 1:
outflow_hru_list.append([int(row[3]), int(row[4])])
if outflow_hru_list:
with open(fill_outflow_hru_path, 'w+') as f:
f.write('{} NUMOUTFLOWHRU\n'.format(
len(outflow_hru_list)))
for i, outflow_hru in enumerate(outflow_hru_list):
f.write('{} {} {} OUTFLOW_ID ROW COL\n'.format(
i + 1, outflow_hru[0], outflow_hru[1]))
f.close()
else:
logging.error('\nERROR: No OUTFLOWHRU points, exiting')
sys.exit()
del outflow_hru_list
# # DEADBEEF - Old method for setting OUTFLOW_HRU.DAT
# # Only streams that flow to real gauges are used
# # Generate OUTFLOW_HRU.DAT for CRT
# logging.info(' {}'.format(
# os.path.basename(fill_outflow_hru_path)))
# outflow_hru_list = []
# fields = [
# hru.type_field, hru.iseg_field, hru.outseg_field, hru.reach_field,
# hru.maxreach_field, hru.col_field, hru.row_field]
# for row in arcpy.da.SearchCursor(hru.polygon_path, fields):
# if int(row[0]) != 1 or int(row[1]) == 0:
# continue
# if int(row[2]) == 0 and int(row[3]) == int(row[4]):
# outflow_hru_list.append([int(row[6]), int(row[5])])
# if outflow_hru_list:
# with open(fill_outflow_hru_path, 'w+') as f:
# f.write('{} NUMOUTFLOWHRU\n'.format(
# len(outflow_hru_list)))
# for i, outflow_hru in enumerate(outflow_hru_list):
# f.write('{} {} {} OUTFLOW_ID ROW COL\n'.format(
# i+1, outflow_hru[0], outflow_hru[1]))
# f.close()
# del outflow_hru_list
# Generate HRU_CASC.DAT for CRT from hru_polygon
logging.info(' {}'.format(os.path.basename(fill_hru_casc_path)))
hru_type_dict = defaultdict(dict)
for row in sorted(arcpy.da.SearchCursor(
hru.polygon_path,
[hru.row_field, hru.col_field, hru.type_field, hru.dem_adj_field])):
# Calculate CRT fill for all non-lake and non-ocean (elev > 0) cells
# if row[3] > 0 and row[2] == 0:
# hru_type_dict[int(row[0])][int(row[1])] = 1
# else: hru_type_dict[int(row[0])][int(row[1])] = row[2]
# Calculate CRT fill for all active cells
hru_type_dict[int(row[0])][int(row[1])] = row[2]
hru_casc_header = (
'{} {} {} {} {} {} {} {} '
'HRUFLG STRMFLG FLOWFLG VISFLG IPRN IFILL DPIT OUTITMAX\n').format(
crt_hruflg, fill_strmflg, crt_flowflg, fill_visflg,
crt_iprn, fill_ifill, crt_dpit, crt_outitmax)
with open(fill_hru_casc_path, 'w+') as f:
f.write(hru_casc_header)
for row, col_data in sorted(hru_type_dict.items()):
f.write(
' '.join([str(t) for c, t in sorted(col_data.items())]) +
'\n')
f.close()
del hru_casc_header, hru_type_dict
# # Generate HRU_CASC.DATA for CRT from raster/ascii
# with open(hru_type_ascii, 'r') as f: ascii_data = f.readlines()
# f.close()
# hru_casc_header = (
# '{} {} {} {} {} {} {} {} ' +
# 'HRUFLG STRMFLG FLOWFLG VISFLG ' +
# 'IPRN IFILL DPIT OUTITMAX\n').format(
# crt_hruflg, fill_strmflg, crt_flowflg, fill_visflg,
# crt_iprn, fill_ifill, crt_dpit, crt_outitmax)
# with open(fill_hru_casc_path, 'w+') as f:
# f.write(hru_casc_header)
# for ascii_line in ascii_data[6:]: f.write(ascii_line)
# f.close()
# del hru_casc_header, ascii_data
# Generate LAND_ELEV.DAT for CRT from hru_polygon
logging.info(' {}'.format(os.path.basename(fill_land_elev_path)))
dem_adj_dict = defaultdict(dict)
for row in sorted(arcpy.da.SearchCursor(
hru.polygon_path, [hru.row_field, hru.col_field, hru.dem_adj_field])):
dem_adj_dict[int(row[0])][int(row[1])] = row[2]
with open(fill_land_elev_path, 'w+') as f:
row_first = dem_adj_dict.keys()[0]
f.write('{} {} NROW NCOL\n'.format(
len(dem_adj_dict.keys()), len(dem_adj_dict[row_first])))
for row, col_data in sorted(dem_adj_dict.items()):
f.write(
' '.join(['{:10.6f}'.format(t) for c, t in sorted(col_data.items())]) +
'\n')
f.close()
del dem_adj_dict
# # Generate LAND_ELEV.DAT for CRT from raster/ascii
# logging.info(' {}'.format(os.path.basename(fill_land_elev_path)))
# with open(dem_adj_ascii, 'r') as f: ascii_data = f.readlines()
# f.close()
# with open(fill_land_elev_path, 'w+') as f:
# f.write('{} {} NROW NCOL\n'.format(
# ascii_data[1].split()[1], ascii_data[0].split()[1]))
# for ascii_line in ascii_data[6:]: f.write(ascii_line)
# f.close()
# del ascii_data
# Generate XY.DAT for CRT
logging.info(' {}'.format(os.path.basename(fill_xy_path)))
xy_list = [
map(int, row)
for row in sorted(arcpy.da.SearchCursor(
hru.polygon_path, [hru.id_field, hru.x_field, hru.y_field]))]
with open(fill_xy_path, 'w+') as f:
for line in sorted(xy_list):
f.write(' '.join(map(str, line)) + '\n')
f.close()
del xy_list
# Run CRT
logging.info('\nRunning CRT')
subprocess.check_output(crt_exe_name, cwd=fill_ws, shell=True)
# Read in outputstat.txt and get filled DEM
logging.info('\nReading CRT {}'.format(output_name))
output_path = os.path.join(fill_ws, output_name)
with open(output_path, 'r') as f:
output_data = [l.strip() for l in f.readlines()]
f.close()
# Determine where filled data is in the file
try:
crt_dem_i = output_data.index(
'CRT FILLED LAND SURFACE MODEL USED TO GENERATE CASCADES')
crt_fill_i = output_data.index(
'DIFFERENCES BETWEEN FILLED AND UNFILLED LAND SURFACE MODELS')
except ValueError:
logging.error(
'\nERROR: CRT didn\'t completely run\n' +
' Check the CRT outputstat.txt file\n')
sys.exit()
logging.info(' Break indices: {}, {}'.format(
crt_dem_i, crt_fill_i))
crt_dem_data = [
r.split() for r in output_data[crt_dem_i+1: crt_dem_i+hru.rows+1]]
crt_fill_data = [
r.split() for r in output_data[crt_fill_i+1: crt_fill_i+hru.rows+1]]
logging.info(' ROWS/COLS: {}/{}'.format(
len(crt_dem_data), len(crt_dem_data[0])))
logging.info(' ROWS/COLS: {}/{}'.format(
len(crt_fill_data), len(crt_fill_data[0])))
# crt_type_i = crt_fill_i + (crt_fill_i - crt_dem_i)
# crt_dem_data = [
# r.split() for r in output_data[crt_dem_i+1: crt_dem_i+hru.rows+1]]
# crt_fill_data = [
# r.split() for r in output_data[crt_fill_i+1: crt_type_i-1]]
# Build dictionaries of the CRT data
crt_dem_dict = defaultdict(dict)
crt_fill_dict = defaultdict(dict)
for i, r in enumerate(crt_dem_data):
crt_dem_dict[i + 1] = dict(
[(j + 1, c) for j, c in enumerate(crt_dem_data[i])])
for i, r in enumerate(crt_fill_data):
crt_fill_dict[i + 1] = dict(
[(j + 1, c) for j, c in enumerate(crt_fill_data[i])])
# Write CRT values to hru_polygon
logging.info('Writing CRT data to fishnet')
logging.debug(' {:<4s} {:<4s} {:>7s}'.format('ROW', 'COL', 'FILL'))
fields = [
hru.row_field, hru.col_field, hru.crt_elev_field, hru.crt_fill_field,
hru.dem_adj_field]
with arcpy.da.UpdateCursor(hru.polygon_path, fields) as update_c:
for row in update_c:
# If DEM values are too large for CRT, they may be symbols that will be skipped
if support.is_number(crt_dem_dict[int(row[0])][int(row[1])]):
row[2] = float(crt_dem_dict[int(row[0])][int(row[1])])
row[3] = float(crt_fill_dict[int(row[0])][int(row[1])])
if float(row[3]) > 0:
logging.debug(' {:>4d} {:>4d} {:>7.2f}'.format(
row[0], row[1], float(row[3])))
if use_crt_fill_flag and float(row[3]) > 0:
row[4] = row[2]
update_c.updateRow(row)
| 19,976
|
def get_entities_from_tags(query, tags):
"""From a set of joint IOB tags, parse the app and system entities.
This performs the reverse operation of get_tags_from_entities.
Args:
query (Query): Any query instance.
tags (list of str): Joint app and system tags, like those
created by get_tags_from_entities.
Returns:
(list of QueryEntity) The tuple containing the list of entities.
"""
normalized_tokens = query.normalized_tokens
entities = []
def _is_system_entity(entity_type):
if entity_type.split('_')[0] == 'sys':
return True
return False
def _append_entity(token_start, entity_type, tokens):
prefix = ' '.join(normalized_tokens[:token_start])
# If there is a prefix, we have to add one for the whitespace
start = len(prefix) + 1 if len(prefix) else 0
end = start - 1 + len(' '.join(tokens))
norm_span = Span(start, end)
entity = QueryEntity.from_query(query, normalized_span=norm_span, entity_type=entity_type)
entities.append(entity)
logger.debug("Appended %s.", entity)
def _append_system_entity(token_start, token_end, entity_type):
msg = "Looking for '%s' between %s and %s."
logger.debug(msg, entity_type, token_start, token_end)
prefix = ' '.join(normalized_tokens[:token_start])
# If there is a prefix, we have to add one for the whitespace
start = len(prefix) + 1 if len(prefix) else 0
end = start - 1 + len(' '.join(normalized_tokens[token_start:token_end]))
norm_span = Span(start, end)
span = query.transform_span(norm_span, TEXT_FORM_NORMALIZED, TEXT_FORM_RAW)
try:
entity = resolve_system_entity(query, entity_type, span)
entities.append(entity)
logger.debug("Appended system entity %s.", entity)
except SystemEntityResolutionError:
msg = "Found no matching system entity {}-{}, {!r}".format(
token_start, token_end, entity_type)
logger.debug(msg)
entity_tokens = []
entity_start = None
prev_ent_type = ''
for tag_idx, tag in enumerate(tags):
iob, ent_type = tag.split('|')
# Close entity and reset if the tag indicates a new entity
if (entity_start is not None and
(iob in (O_TAG, B_TAG, S_TAG) or ent_type != prev_ent_type)):
logger.debug("Entity closed at prev")
if _is_system_entity(prev_ent_type):
_append_system_entity(entity_start, tag_idx, prev_ent_type)
else:
_append_entity(entity_start, prev_ent_type, entity_tokens)
entity_start = None
prev_ent_type = ''
entity_tokens = []
# Check if an entity has started
if iob in (B_TAG, S_TAG) or ent_type not in ('', prev_ent_type):
entity_start = tag_idx
if _is_system_entity(ent_type):
# During predict time, we construct sys_candidates for the input query.
# These candidates are "global" sys_candidates, in that the entire query
# is sent to Duckling to extract sys_candidates and not just a span range
# within the query. When we append system entities for a given token,
# we pick among candidates with start_span equivalent to the token's tag_idx.
picked_by_existing_system_entity_candidates = False
sys_entities = query.get_system_entity_candidates(ent_type)
if ent_type == 'sys_time':
sys_entities = _sort_by_lowest_time_grain(sys_entities)
for sys_candidate in sys_entities:
start_span = sys_candidate.normalized_token_span.start
end_span = sys_candidate.normalized_token_span.end
if start_span == tag_idx and tag_idx <= end_span:
# We currently don't prioritize any sys_candidate if there are
# multiple candidates that meet this conditional.
entity_start = sys_candidate.normalized_token_span.start
picked_by_existing_system_entity_candidates = True
if not picked_by_existing_system_entity_candidates:
entity_start = tag_idx
# Append the current token to the current entity, if applicable.
if iob != O_TAG and entity_start is not None and not _is_system_entity(ent_type):
entity_tokens.append(normalized_tokens[tag_idx])
# Close the entity if the tag indicates it closed
if entity_start is not None and iob in (E_TAG, S_TAG):
logger.debug("Entity closed here")
if _is_system_entity(ent_type):
_append_system_entity(entity_start, tag_idx+1, ent_type)
else:
_append_entity(entity_start, ent_type, entity_tokens)
entity_start = None
ent_type = ''
entity_tokens = []
prev_ent_type = ent_type
# Handle entities that end with the end of the query
if entity_start is not None:
logger.debug("Entity closed at end")
if _is_system_entity(prev_ent_type):
_append_system_entity(entity_start, len(tags), prev_ent_type)
else:
_append_entity(entity_start, prev_ent_type, entity_tokens)
else:
logger.debug("Entity did not end: %s.", entity_start)
return tuple(entities)
| 19,977
|
async def test_3(pc):
"""setting direction should change transceiver.direction independent of transceiver.currentDirection"""
init = webrtc.RtpTransceiverInit(direction=webrtc.TransceiverDirection.recvonly)
transceiver = pc.add_transceiver(webrtc.MediaType.audio, init)
assert transceiver.direction == webrtc.TransceiverDirection.recvonly
assert transceiver.current_direction is None
offer = await pc.create_offer()
await pc.set_local_description(offer)
await pc.set_remote_description(await generate_answer(offer))
assert transceiver.current_direction == webrtc.TransceiverDirection.inactive
transceiver.direction = webrtc.TransceiverDirection.sendrecv
assert transceiver.direction == webrtc.TransceiverDirection.sendrecv
assert transceiver.current_direction == webrtc.TransceiverDirection.inactive
| 19,978
|
def angle_connectivity(ibonds):
"""Given the bonds, get the indices of the atoms defining all the bond
angles
A 'bond angle' is defined as any set of 3 atoms, `i`, `j`, `k` such that
atom `i` is bonded to `j` and `j` is bonded to `k`
Parameters
----------
ibonds : np.ndarray, shape=[n_bonds, 2], dtype=int
        Each row in `ibonds` is a pair of indices `i`, `j`, indicating that
atoms `i` and `j` are bonded
Returns
-------
iangles : np.ndarray, shape[n_angles, 3], dtype=int
n_angles x 3 array of indices, where each row is the index of three
atoms m,n,o such that n is bonded to both m and o.
"""
graph = nx.from_edgelist(ibonds)
iangles = []
for i in graph.nodes():
for (m, n) in combinations(graph.neighbors(i), 2):
            # so now there is a bond angle m-i-n
iangles.append((m, i, n))
return np.array(iangles)
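if __name__ == "__main__":
    # Quick self-check (relies on the same np / nx imports the function above
    # uses): bonds 0-1, 1-2 and 1-3 give the three angles centred on atom 1,
    # i.e. (0, 1, 2), (0, 1, 3) and (2, 1, 3), though the row order may vary.
    example_bonds = np.array([[0, 1], [1, 2], [1, 3]])
    print(angle_connectivity(example_bonds))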
| 19,979
|
def BOPDS_PassKeyMapHasher_IsEqual(*args):
"""
:param aPKey1:
:type aPKey1: BOPDS_PassKey &
:param aPKey2:
:type aPKey2: BOPDS_PassKey &
:rtype: bool
"""
return _BOPDS.BOPDS_PassKeyMapHasher_IsEqual(*args)
| 19,980
|
def replaceToSantizeURL(url_str):
"""
    Take an arbitrary string, search for URLs that contain a user and password,
    and replace them with sanitized URLs.
"""
def _repUrl(matchObj):
return matchObj.group(1) + matchObj.group(4)
# TODO: won't catch every case (But is it good enough (trade off to performance)?)
    urlRegExpr = r'(?i)\b((http|https|ftp|mysql|oracle|sqlite)+://)([^:]+:[^@]+@)(\S+)\b'
return re.sub(urlRegExpr, _repUrl, url_str)
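# Example behaviour (URL is made up): the credentials between '://' and '@' are
# stripped, everything else is left untouched.
#
#     replaceToSantizeURL("mysql://scott:tiger@db.example.com:3306/prod")
#     # -> "mysql://db.example.com:3306/prod"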
| 19,981
|
def dms2dd(s):
"""convert lat and long to decimal degrees"""
direction = s[-1]
degrees = s[0:4]
dd = float(degrees)
if direction in ('S','W'):
dd*= -1
return dd
| 19,982
|
def setna(self, value, na=np.nan, inplace=False):
""" set a value as missing
Parameters
----------
value : the values to set to na
na : the replacement value (default np.nan)
Examples
--------
>>> from dimarray import DimArray
>>> a = DimArray([1,2,-99])
>>> a.setna(-99)
dimarray: 2 non-null elements (1 null)
0 / x0 (3): 0 to 2
array([ 1., 2., nan])
>>> a.setna([-99, 2]) # sequence
dimarray: 1 non-null elements (2 null)
0 / x0 (3): 0 to 2
array([ 1., nan, nan])
>>> a.setna(a > 1) # boolean
dimarray: 2 non-null elements (1 null)
0 / x0 (3): 0 to 2
array([ 1., nan, -99.])
>>> a = DimArray([[1,2,-99]]) # multi-dim
>>> a.setna([-99, a>1]) # boolean
dimarray: 1 non-null elements (2 null)
0 / x0 (1): 0 to 0
1 / x1 (3): 0 to 2
array([[ 1., nan, nan]])
"""
return self.put(_matches(self.values, value), na, cast=True, inplace=inplace)
| 19,983
|
def hbox(*items, **config):
""" Create a DeferredConstraints object composed of horizontal
abutments for a given sequence of items.
"""
return LinearBoxHelper('horizontal', *items, **config)
| 19,984
|
def find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
visitors.traverse(clause, {}, {'column':cols.add})
return cols
| 19,985
|
def _preprocess_data(smiles, labels, batchsize = 100):
"""
prepares all input batches to train/test the GDNN fingerprints implementation
"""
N = len(smiles)
batches = []
num_bond_features = 6
for i in range(int(np.ceil(N*1./batchsize))):
array_rep = utils.array_rep_from_smiles(smiles[i*batchsize:min(N,(i+1)*batchsize)])
labels_b = labels[i*batchsize:min(N,(i+1)*batchsize)]
atom_features = array_rep['atom_features']
summed_bond_features_by_degree = extract_bondfeatures_of_neighbors_by_degree(array_rep)
batch_dict = {'input_atom_features':atom_features} # (num_atoms, num_atom_features)
missing_degrees = []
for degree in degrees:
atom_neighbors_list = array_rep[('atom_neighbors', degree)]
if len(atom_neighbors_list)==0:
missing_degrees.append(degree)
continue
# this matrix is used by every layer to match and sum all neighboring updated atom features to the atoms
atom_neighbor_matching_matrix = connectivity_to_Matrix(atom_neighbors_list, atom_features.shape[0])
atom_batch_matching_matrix = connectivity_to_Matrix(array_rep['atom_list'], atom_features.shape[0]).T
assert np.all(atom_batch_matching_matrix.sum(1).mean()==1)
assert np.all(atom_batch_matching_matrix.sum(0).mean()>1),'Error: looks like a single-atom molecule?'
batch_dict['bond_features_degree_'+str(degree)] = summed_bond_features_by_degree[degree]
batch_dict['atom_neighbors_indices_degree_'+str(degree)] = atom_neighbors_list
batch_dict['atom_features_selector_matrix_degree_'+str(degree)] = atom_neighbor_matching_matrix
batch_dict['atom_batch_matching_matrix_degree_'+str(degree)] = atom_batch_matching_matrix.T # (batchsize, num_atoms)
if degree==0:
                print('degree 0 bond?')
                print(smiles[i*batchsize:min(N,(i+1)*batchsize)])
return
# input_atom_features (292L, 62L)
# bond_features_degree_ 1 (70L, 6L)
# atom_neighbors_indices_degree_ 1 (70L, 1L)
# bond_features_degree_ 2 (134L, 6L)
# atom_neighbors_indices_degree_ 2 (134L, 2L)
# bond_features_degree_ 3 (78L, 6L)
# atom_neighbors_indices_degree_ 3 (78L, 3L)
# bond_features_degree_ 4 (10L, 6L)
# atom_neighbors_indices_degree_ 4 (10L, 4L)
num_bond_features = batch_dict['bond_features_degree_'+str(degree)].shape[1]
num_atoms = atom_neighbor_matching_matrix.shape[1]
for missing_degree in missing_degrees:
batch_dict['atom_neighbors_indices_degree_'+str(missing_degree)] = np.zeros((0, missing_degree),'int32')
batch_dict['bond_features_degree_'+str(missing_degree)] = np.zeros((0, num_bond_features),'float32')
batch_dict['atom_features_selector_matrix_degree_'+str(missing_degree)] = np.zeros((0, num_atoms),'float32')
batch_dict['atom_batch_matching_matrix_degree_'+str(missing_degree)] = atom_batch_matching_matrix.T
batches.append((batch_dict,labels_b))
return batches
| 19,986
|
def test_makedep_error(cc):
"""Check missing header error."""
src = make_source('foo.c', '#include "foo.h"')
with capture_output() as (out, err), pytest.raises(CallError):
check_makedep(cc, src, 1)
stdout = out.getvalue()
stderr = err.getvalue()
print(stdout)
print(stderr)
| 19,987
|
def linear_activation_forward(A_prev, W, b, activation, keep_prob=1):
"""
Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
"""
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
# START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
### END CODE HERE ###
Dt = np.random.rand(A.shape[0], A.shape[1])
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
# START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = relu(Z)
# Dropout
# Step 1: initialize matrix D2 = np.random.rand(..., ...)
Dt = np.random.rand(A.shape[0], A.shape[1])
# Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the
# threshold)
Dt = Dt < keep_prob
# Step 3: shut down some neurons of A2
A = A * Dt
A = A / keep_prob
### END CODE HERE ###
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache, Dt)
return A, cache
| 19,988
|
def get_version(file, name="__version__"):
"""Get the version of the package from the given file by
executing it and extracting the given `name`.
"""
path = os.path.realpath(file)
version_ns = {}
with io.open(path, encoding="utf8") as f:
exec(f.read(), {}, version_ns)
return version_ns[name]
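# Hedged usage sketch, assuming a conventional version module on disk:
#
#     # mypackage/_version.py contains:  __version__ = "1.2.3"
#     get_version("mypackage/_version.py")                       # -> "1.2.3"
#     get_version("mypackage/_version.py", name="__version__")   # same thing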
| 19,989
|
def load_image(image_path, size):
"""
Load an image as a Numpy array.
:param image_path: Path of the image
:param size: Target size
:return Image array, normalized between 0 and 1
"""
image = img_to_array(load_img(image_path, target_size=size)) / 255.
return image
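# Hedged usage sketch (file name is illustrative): load and normalize an RGB
# image to a (224, 224, 3) float array with values in [0, 1].
#
#     img = load_image("samples/cat.jpg", size=(224, 224))
#     assert img.shape == (224, 224, 3) and img.max() <= 1.0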
| 19,990
|
def species__filesystem(argt):
""" fill in species guess geometries
"""
call_task(
argt,
task.species.filesystem,
specs=(
specifier(
al.SPECIES_CSV, inp=True,
),
specifier(
al.SPECIES_CSV, out=True, opt_char=SPC_CSV_CHAR.upper(),
extra_kwargs=(('default', SPC_CSV_DEF),),
),
specifier(
al.STEREO_HANDLING, opt_char=STEREO_HANDLING_CHAR,
allowed_values=task.species.VALS.FILESYSTEM.STEREO_HANDLING,
extra_kwargs=(
('default', task.species.DEFS.FILESYSTEM.STEREO_HANDLING),)
),
specifier(
al.FILESYSTEM_PREFIX, out=True,
opt_char=FILESYSTEM_PREFIX_CHAR.upper(),
extra_kwargs=(('default', FILESYSTEM_PREFIX_DEF),),
),
)
)
| 19,991
|
def _signal_exit_code(signum: signal.Signals) -> int:
"""
Return the exit code corresponding to a received signal.
Conventionally, when a program exits due to a signal its exit code is 128
plus the signal number.
"""
return 128 + int(signum)
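if __name__ == "__main__":
    # On POSIX systems, a process killed by SIGTERM (15) conventionally exits
    # with 143 and one killed by SIGKILL (9) with 137.
    assert _signal_exit_code(signal.SIGTERM) == 143
    assert _signal_exit_code(signal.SIGKILL) == 137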
| 19,992
|
def make_template_matrix(msigdb_file, blacklist, checkblacklist=True):
"""
Retrieve all genes and pathways from given msigdb .gmt file
Output:
        A sorted gene-by-pathway pandas DataFrame; entries of 1 indicate membership.
"""
all_db_pathways = []
all_db_genes = []
# Get a set of all genes and all pathways in MSigDB (not blacklisted)
with open(msigdb_file, 'r') as msigdb_fh:
msigdb_reader = csv.reader(msigdb_fh, delimiter='\t')
for row in msigdb_reader:
signature_name = row[0]
signature_genes = row[2:]
if checkblacklist:
if signature_name.startswith(blacklist):
continue
all_db_pathways.append(signature_name)
all_db_genes += signature_genes
big_msigdb_df = pd.DataFrame(0, index=set(all_db_genes), columns=all_db_pathways)
big_msigdb_df = big_msigdb_df.sort_index()
big_msigdb_df = big_msigdb_df.T.sort_index().T
# Loop through file again to populate dataframe. This is a fast implementation
with open(msigdb_file, 'r') as msigdb_fh:
msigdb_reader = csv.reader(msigdb_fh, delimiter='\t')
for row in msigdb_reader:
signature_name = row[0]
signature_genes = row[2:]
if checkblacklist:
if signature_name.startswith(blacklist):
continue
for gene in signature_genes:
big_msigdb_df.at[gene, signature_name] = 1
return big_msigdb_df
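# Illustrative call (file name and blacklist prefix are hypothetical). Each row
# of a .gmt file is tab-separated as: <signature name> <description> <gene> ...
#
#     membership = make_template_matrix("msigdb.v7.symbols.gmt",
#                                       blacklist=("KEGG",))
#     membership.loc["TP53"].sum()   # number of retained pathways containing TP53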
| 19,993
|
def test_deep_segmentation_spinalcord(params):
"""High level segmentation API"""
fname_im = sct_test_path('t2', 't2.nii.gz')
fname_centerline_manual = sct_test_path('t2', 't2_centerline-manual.nii.gz')
# Call segmentation function
im_seg, _, _ = sct.deepseg_sc.core.deep_segmentation_spinalcord(
Image(fname_im), params['contrast'], ctr_algo='file', ctr_file=fname_centerline_manual, brain_bool=False,
kernel_size=params['kernel'], threshold_seg=0.5)
assert im_seg.data.dtype == np.dtype('uint8')
# Compare with ground-truth segmentation
assert np.all(im_seg.data == Image(params['fname_seg_manual']).data)
| 19,994
|
def unwrap_key(
security_control: SecurityControlField, wrapping_key: bytes, wrapped_key: bytes
):
"""
Simple function to unwrap a key received.
"""
validate_key(security_control.security_suite, wrapping_key)
validate_key(security_control.security_suite, wrapped_key)
unwrapped_key = aes_key_unwrap(wrapping_key, wrapped_key)
return unwrapped_key
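# Minimal round-trip sketch using the underlying RFC 3394 primitives directly
# (this bypasses validate_key / SecurityControlField, which are project-specific):
#
#     from cryptography.hazmat.primitives.keywrap import aes_key_wrap
#
#     kek = bytes(range(16))              # 128-bit key-encryption key
#     gkey = bytes(range(16, 32))         # key to transport
#     wrapped = aes_key_wrap(kek, gkey)   # 24 bytes on the wire
#     assert aes_key_unwrap(kek, wrapped) == gkey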
| 19,995
|
def checkOwnership(obj, login_session):
"""
    This function checks whether the currently logged-in user
    is the creator of the given category or item.
    It returns True if the current user owns the object,
    otherwise it returns False.
    """
    # the user has logged in at this moment
    userID = getUserID(login_session["email"])
    # comparing user_id is a better approach,
    # because different users can still have the same username
    return obj.user_id == userID
| 19,996
|
def move() -> str:
"""Move a file."""
if not g.ledger.options["documents"]:
raise FavaAPIException("You need to set a documents folder.")
account = request.args.get("account")
new_name = request.args.get("newName")
filename = request.args.get("filename")
if not account:
raise FavaAPIException("No account specified.")
if not filename:
raise FavaAPIException("No filename specified.")
if not new_name:
raise FavaAPIException("No new filename given.")
new_path = filepath_in_document_folder(
g.ledger.options["documents"][0], account, new_name, g.ledger
)
if not path.isfile(filename):
raise FavaAPIException(f"Not a file: '{filename}'")
if path.exists(new_path):
raise FavaAPIException(f"Target file exists: '{new_path}'")
if not path.exists(path.dirname(new_path)):
os.makedirs(path.dirname(new_path), exist_ok=True)
shutil.move(filename, new_path)
return f"Moved {filename} to {new_path}."
| 19,997
|
def transform_file_name(original_file_name):
"""
Now, this is just whatever I felt like. Whee.
So in this function I could have just used 0 and 1 as my indices directly when I look at the different parts of
the file name, but it's generally better to name these sorts of things, so people know *why* they're 0 and 1.
Another benefit is that you now know exactly why these particular things are 0 and 1 without having to guess,
and you know that these usages of 0 or 1 are different for other usages. For example, I have 2 usages of the
value 1 in this function, but they serve different purposes.
"""
# So script constants are in all caps. But when we're using constants inside a specific function or class or
# something along those lines, then we do something a little different. These values are meant to be used
# inside the function, but they're not meant to be used outside of it, returned, or anything like that. The leading
# underscore is a signal to anyone else who uses this script to indicate that.
_file_name_location = 0
_file_type_ending_location = 1
logging.info("Original file name: {}".format(original_file_name))
# Split the original filename into parts once, based on the specified separator, exactly one time.
# Also, do this by searching for the separator starting from the right-hand side of the string.
file_name_parts = original_file_name.rsplit(
# I don't want this line to be too long, so I've added line breaks here to keep things from getting too wide.
ScriptConstants.FILE_EXTENSION_SEPARATOR,
ScriptConstants.NUM_FILE_EXTENSIONS_IN_FILE_NAME
)
file_ending = file_name_parts[_file_type_ending_location]
file_name = file_name_parts[_file_name_location]
# I forget whether I mentioned this before, but when you add strings together, Python interprets it as
# an instruction to concatenate the strings together (with no separator).
new_file_name = file_name + '_derp_i_moved_this_thing' + ScriptConstants.FILE_EXTENSION_SEPARATOR + file_ending
logging.info('New file name: {}'.format(new_file_name))
return new_file_name
| 19,998
|
def remove_overlapping_cells(graph):
"""
Takes in a graph in which each node is a cell and edges connect cells that
    overlap each other in space. Removes overlapping cells, preferentially
eliminating the cell that overlaps the most cells (i.e. if cell A overlaps
cells B, C, and D, whereas cell B only overlaps cell A, cell C only overlaps
cell A, and cell D only overlaps cell A, then cell A will be removed,
leaving cells B, C, and D remaining because there is no more overlap
within this group of cells).
Args:
graph: An undirected graph, in which each node is a cell and each
edge connects overlapping cells. nodes are expected to have
the following attributes: originalFOV, assignedFOV
Returns:
A pandas dataframe containing the feature ID of all cells after removing
all instances of overlap. There are columns for cell_id, originalFOV,
and assignedFOV
"""
connectedComponents = list(nx.connected_components(graph))
cleanedCells = []
connectedComponents = [list(x) for x in connectedComponents]
for component in connectedComponents:
if len(component) == 1:
originalFOV = graph.nodes[component[0]]['originalFOV']
assignedFOV = graph.nodes[component[0]]['assignedFOV']
cleanedCells.append([component[0], originalFOV, assignedFOV])
if len(component) > 1:
sg = nx.subgraph(graph, component)
verts = list(nx.articulation_points(sg))
if len(verts) > 0:
sg = nx.subgraph(graph,
[x for x in component if x not in verts])
allEdges = [[k, v] for k, v in nx.degree(sg)]
sortedEdges = sorted(allEdges, key=lambda x: x[1], reverse=True)
maxEdges = sortedEdges[0][1]
while maxEdges > 0:
sg = nx.subgraph(graph, [x[0] for x in sortedEdges[1:]])
allEdges = [[k, v] for k, v in nx.degree(sg)]
sortedEdges = sorted(allEdges, key=lambda x: x[1],
reverse=True)
maxEdges = sortedEdges[0][1]
keptComponents = list(sg.nodes())
cellIDs = []
originalFOVs = []
assignedFOVs = []
for c in keptComponents:
cellIDs.append(c)
originalFOVs.append(graph.nodes[c]['originalFOV'])
assignedFOVs.append(graph.nodes[c]['assignedFOV'])
listOfLists = list(zip(cellIDs, originalFOVs, assignedFOVs))
listOfLists = [list(x) for x in listOfLists]
cleanedCells = cleanedCells + listOfLists
cleanedCellsDF = pandas.DataFrame(cleanedCells,
columns=['cell_id', 'originalFOV',
'assignedFOV'])
return cleanedCellsDF
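if __name__ == "__main__":
    # Small worked example mirroring the docstring (relies on the same nx and
    # pandas imports as the function above): cell 'A' overlaps 'B', 'C' and 'D',
    # so only 'A' is removed.
    g = nx.Graph()
    for node in "ABCD":
        g.add_node(node, originalFOV=0, assignedFOV=0)
    g.add_edges_from([("A", "B"), ("A", "C"), ("A", "D")])
    kept = remove_overlapping_cells(g)
    print(sorted(kept["cell_id"]))  # ['B', 'C', 'D']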
| 19,999