content stringlengths 35 762k | sha1 stringlengths 40 40 | id int64 0 3.66M |
|---|---|---|
def get_rating(comment):
    """Extract the rating value from a review element.

    Args:
        comment: An element supporting ``.xpath`` (presumably an lxml
            node — confirm) containing schema.org review markup.

    Returns:
        The ``content`` attribute of the first
        ``reviewRating``/``ratingValue`` meta tag, or None if that
        attribute is absent.

    Raises:
        IndexError: If no matching rating node exists in *comment*.
    """
    return comment.xpath(
        ".//div[@itemprop=\"reviewRating\"]/meta[@itemprop=\"ratingValue\"]"
    )[0].attrib.get("content")
def creat_netmiko_connection(username, password, host, port) -> object:
    """Log into a device via netmiko and return a connection object.

    On failure a short error-description string is returned instead of a
    connection object (callers distinguish the two by type/content).

    Args:
        username: Login username.
        password: Login password.
        host: Device hostname or IP address.
        port: SSH port number.

    Returns:
        A netmiko connection object on success, otherwise an error string.
    """
    credentials = {
        'device_type': 'cisco_ios',
        'host': host,
        'username': username,
        'password': password,
        'port': port,
        'session_log': 'my_file.out'}
    try:
        device_connect = ConnectHandler(**credentials)
    except ssh_exception.AuthenticationException:
        device_connect = "ssh_exception"
    except EOFError:
        # Fixed typo in the returned message ("Authenitcation").
        device_connect = "Authentication Error"
    except ssh_exception.NetmikoTimeoutException:
        device_connect = 'Connection Timeout'
    except ValueError:
        device_connect = 'Connection Issue'
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; fixed typo ("Occured").
        device_connect = 'An Error Occurred'
    return device_connect
from typing import List
import argparse
import os
def parse_arguments(args: List[str] = None):
    """
    Parse arguments with argparse.ArgumentParser

    Args:
        args: List of arguments from cmdline (None means sys.argv is used)

    Returns: Parsed arguments

    Raises:
        Exception: On generic failure
    """
    parser = argparse.ArgumentParser(description='Xbox SmartGlass client')

    # Common arguments for logging.
    # NOTE: these section markers were bare string literals before; those
    # are executed expression statements, not comments, so they are now
    # plain comments.
    logging_args = argparse.ArgumentParser(add_help=False)
    logging_args.add_argument(
        '--logfile',
        help="Path for logfile")
    logging_args.add_argument(
        '-v', '--verbose', action='count', default=0,
        help='Set logging level\n'
             '( -v: INFO,\n'
             ' -vv: DEBUG,\n'
             '-vvv: DEBUG_INCL_PACKETS)')

    # Common arguments for authenticated console connection.
    xbl_token_args = argparse.ArgumentParser(add_help=False)
    xbl_token_args.add_argument(
        '--tokens', '-t', type=str, default=TOKENS_FILE,
        help='Tokenfile to load')
    xbl_token_args.add_argument(
        "--client-id",
        "-cid",
        default=os.environ.get("CLIENT_ID", CLIENT_ID),
        help="OAuth2 Client ID",
    )
    xbl_token_args.add_argument(
        "--client-secret",
        "-cs",
        default=os.environ.get("CLIENT_SECRET", CLIENT_SECRET),
        help="OAuth2 Client Secret",
    )
    xbl_token_args.add_argument(
        "--redirect-uri",
        "-ru",
        default=os.environ.get("REDIRECT_URI", REDIRECT_URI),
        help="OAuth2 Redirect URI",
    )
    xbl_token_args.add_argument(
        '--refresh', '-r', action='store_true',
        help="Refresh xbox live tokens in provided token file")

    # Common argument for console connection.
    connection_arg = argparse.ArgumentParser(add_help=False)
    connection_arg.add_argument(
        '--address', '-a', type=str, default=None,
        help="IP address of console")
    connection_arg.add_argument(
        '--liveid', '-l',
        help='LiveID to poweron')

    # Common argument for interactively choosing the console to handle.
    interactive_arg = argparse.ArgumentParser(add_help=False)
    interactive_arg.add_argument(
        '--interactive', '-i', action='store_true',
        help="Interactively choose console to connect to")

    # Define commands.
    subparsers = parser.add_subparsers(help='Available commands')
    # NOTE: Setting dest and required here for py3.6 compat
    subparsers.dest = 'command'
    subparsers.required = True

    # Discover
    subparsers.add_parser(Commands.Discover,
                          help='Discover console',
                          parents=[logging_args,
                                   connection_arg])
    # Power on
    subparsers.add_parser(
        Commands.PowerOn,
        help='Power on console',
        parents=[logging_args, connection_arg])
    # Power off
    poweroff_cmd = subparsers.add_parser(
        Commands.PowerOff,
        help='Power off console',
        parents=[logging_args, xbl_token_args,
                 interactive_arg, connection_arg])
    poweroff_cmd.add_argument(
        '--all', action='store_true',
        help="Power off all consoles")
    # Local REPL
    subparsers.add_parser(
        Commands.REPL,
        help='Local REPL (interactive console)',
        parents=[logging_args, xbl_token_args,
                 interactive_arg, connection_arg])
    # REPL server
    repl_server_cmd = subparsers.add_parser(
        Commands.REPLServer,
        help='REPL server (interactive console)',
        parents=[logging_args, xbl_token_args,
                 interactive_arg, connection_arg])
    repl_server_cmd.add_argument(
        '--bind', '-b', default='127.0.0.1',
        help='Interface address to bind the server')
    repl_server_cmd.add_argument(
        '--port', '-p', type=int, default=REPL_DEFAULT_SERVER_PORT,
        help=f'Port to bind to, default: {REPL_DEFAULT_SERVER_PORT}')
    # Fallout relay
    subparsers.add_parser(
        Commands.FalloutRelay,
        help='Fallout 4 Pip boy relay',
        parents=[logging_args, xbl_token_args,
                 interactive_arg, connection_arg])
    # Controller input
    subparsers.add_parser(
        Commands.GamepadInput,
        help='Send controller input to dashboard / apps',
        parents=[logging_args, xbl_token_args,
                 interactive_arg, connection_arg])
    # Text input
    subparsers.add_parser(
        Commands.TextInput,
        help='Client to use Text input functionality',
        parents=[logging_args, xbl_token_args,
                 interactive_arg, connection_arg])
    # TUI
    tui_cmd = subparsers.add_parser(
        Commands.TUI,
        help='TUI client - fancy :)',
        parents=[logging_args, xbl_token_args,
                 connection_arg])
    tui_cmd.add_argument(
        '--consoles', '-c', default=CONSOLES_FILE,
        help="Previously discovered consoles (json)")
    return parser.parse_args(args)
def get_requirements():
    """Read requirements.txt and return its lines as a list.

    Returns:
        list[str]: One requirement specifier per line, with surrounding
        whitespace of the file stripped before splitting.
    """
    requirements = read("requirements.txt")
    # splitlines() already produces a list; the previous element-by-element
    # identity comprehension was a redundant copy.
    return requirements.strip().splitlines()
def handler(fmt, station, issued):
    """Return the most recent TAF for *station* issued at/before *issued*.

    Args:
        fmt: Output format — "txt" (CSV text) or "json" (table-oriented
            JSON); any other value returns None.
        station: TAF station identifier.
        issued: Issuance datetime of interest; None means "now" (UTC).
            Naive datetimes are assumed to be UTC.

    Returns:
        str or None: CSV text, JSON text, or None for an unknown *fmt*.
    """
    pgconn = get_dbconn("asos")
    if issued is None:
        issued = utc()
    if issued.tzinfo is None:
        # Treat naive timestamps as UTC.
        issued = issued.replace(tzinfo=timezone.utc)
    # Only ISO (presumably a module-level timestamp-format constant —
    # confirm) and issued.year (an int) are interpolated into the SQL;
    # all user-supplied values go through bound parameters.
    df = read_sql(
        f"""
    WITH forecast as (
        select id from taf where station = %s and
        valid > %s - '24 hours'::interval and valid <= %s
        ORDER by valid DESC LIMIT 1)
    select
        to_char(t.valid at time zone 'UTC', '{ISO}') as utc_valid,
        raw,
        is_tempo,
        to_char(t.end_valid at time zone 'UTC', '{ISO}') as utc_end_valid,
        sknt,
        drct,
        gust,
        visibility,
        presentwx,
        skyc,
        skyl,
        ws_level,
        ws_drct,
        ws_sknt
    from taf{issued.year} t JOIN forecast f on
    (t.taf_id = f.id) ORDER by valid ASC
    """,
        pgconn,
        params=(station, issued, issued),
        index_col=None,
    )
    if fmt == "txt":
        # Array-valued columns are flattened to space-separated strings
        # so they serialize cleanly to CSV.
        for col in ["presentwx", "skyc", "skyl"]:
            df[col] = [" ".join(map(str, item)) for item in df[col]]
        return df.to_csv(index=False)
    if fmt == "json":
        return df.to_json(orient="table", index=False)
def index_count(index_file=config.vdb_bin_index):
    """
    Method to return the number of indexed items

    :param index_file: Index DB file
    :return: Count of the index
    """
    entries = storage.stream_read(index_file)
    return len(entries)
import sys
import traceback
def strexc():
    """Format the currently handled exception as a single log-friendly line.

    Returns an empty string when no exception is active. Reports the last
    frame belonging to this script (or line 0 when the traceback never
    enters it).
    """
    exc_type, exc_value, tb = sys.exc_info()
    try:
        if exc_type is None:
            return ""
        lineno, func = 0, ""
        # Walk the traceback, remembering frames until we leave this file.
        for frame in traceback.extract_tb(tb):
            if frame.filename != __file__:
                break
            lineno, func = frame.lineno, frame.name
        return f"exception in {func} line {lineno} ({exc_type.__name__}: {exc_value})"
    finally:
        # Break the reference cycle through the traceback object.
        del tb
def asc_to_dict(filename: str) -> dict:
    """
    Load an asc file into a dict object.

    :param filename: The file to load.
    :return dict: A dict object containing data.
    """
    rows = asc_to_list(filename)
    return list_to_dict(rows)
def merge_sort(array):
    """Sort *array* in ascending order with recursive merge sort.

    Returns a new list for inputs longer than one element (the input
    object itself is returned unchanged for len <= 1).
    Complexity: O(N log N); the sort is stable.
    """
    if len(array) <= 1:
        return array
    mid = len(array) // 2
    left = merge_sort(array[:mid])
    right = merge_sort(array[mid:])
    merged = []
    l1 = l2 = 0
    while l1 < len(left) and l2 < len(right):
        # `<=` keeps the sort stable (ties take the left element first);
        # the original `<` reversed the order of equal keys.
        if left[l1] <= right[l2]:
            merged.append(left[l1])
            l1 += 1
        else:
            merged.append(right[l2])
            l2 += 1
    # One side is exhausted; bulk-append the remainder of the other
    # instead of draining it element by element.
    merged.extend(left[l1:])
    merged.extend(right[l2:])
    return merged
def semi_major_axis(P, Mtotal):
    """Semi-major axis via Kepler's third law.

    Args:
        P (float): Orbital period [days]
        Mtotal (float): Total mass [Msun]

    Returns:
        float or array: semi-major axis in AU
    """
    # Convert inputs to arrays so they broadcast and work with units.
    period_s = (np.array(P) * u.d).to(u.second).value
    mass_kg = np.array(Mtotal) * c.M_sun.value
    # a^3 = G * M * P^2 / (4 * pi^2)
    a_m = ((period_s ** 2) * c.G.value * mass_kg / (4 * (np.pi) ** 2)) ** (1 / 3.)
    return a_m / c.au.value
import re
from re import T
from pathlib import Path
def parse_dependency_string(value: str) -> Dependency:
    """
    Convert *value* to a representation as a #Dependency subclass.

    * In addition to the [PEP 508][] dependency specification, the function supports a `--hash` option as is also
      supported by Pip. Hashes in URL fragments are also parsed into #Dependency.hashes.
    * URL formatted specifications can also be Git repository URLs or paths (must be in Posix format as absolute
      path, or an explicit relative path, i.e. begin with curdir or pardir).

    Args:
        value: The dependency specification.
    Raises:
        ValueError: If the string cannot be parsed into a #Dependency.
    !!! note A URL or Git dependency must still contain a package name (i.e. be of the form `<name> @ <url>`). If
        a URL or Git repository URL is encountered without a package name, a #ValueError is raised.
    """
    value = value.strip()
    if value.startswith("http://") or value.startswith("https://") or value.startswith("git+"):
        raise ValueError(f"A plain URL or Git repository URL must be prefixed with a package name: {value!r}")
    # Extract trailing options from the dependency.
    hashes: list[str] = []
    def handle_option(match: re.Match) -> str:
        # Recognized options are removed from the string (replacement "");
        # currently only `--hash=<value>` is collected.
        if match.group(1) == "hash":
            hashes.append(match.group(2))
        return ""
    value = re.sub(r"\s--(\w+)=(.*)(\s|$)", handle_option, value)
    # Check if it's a dependency of the form `<name> @ <package>`. This can be either a #UrlDependency or #GitDependency.
    if "@" in value:
        markers: str | None
        name, url = value.partition("@")[::2]
        name, extras = split_package_name_with_extras(name)
        url, markers = url.partition(";")[::2]
        markers = markers.strip() or None
        urlparts = urlparse(url.strip())
        # Remove the fragments from the URL.
        url = urlunparse((urlparts.scheme, urlparts.netloc, urlparts.path, urlparts.params, urlparts.query, None))
        # Parse it as a Git URL.
        if url.startswith("git+"):
            # NOTE(review): the `t.Sequence[T]` annotation below is evaluated
            # when this branch runs; it relies on module-level `t` (typing)
            # and a TypeVar `T` not visible here — the file-level
            # `from re import T` looks wrong; confirm the intended imports.
            def unpack(val: t.Sequence[T] | None) -> T | None:
                return val[0] if val else None
            options = parse_qs(urlparts.fragment)
            return GitDependency(
                name=name,
                url=url[4:],
                rev=unpack(options.get("rev")),
                branch=unpack(options.get("branch")),
                tag=unpack(options.get("tag")),
                extras=extras,
                markers=markers,
                hashes=hashes or None,
            )
        # Parse it as a path.
        elif url.startswith("/") or url.startswith("./") or url.startswith("../"):
            options = parse_qs(urlparts.fragment)
            return PathDependency(
                name=name,
                path=Path(url),
                develop="develop" in options,
                link="link" in options,
                extras=extras,
                markers=markers,
                hashes=hashes or None,
            )
        elif urlparts.scheme:
            # Treat all fragments as hash options.
            hashes += [f"{item[0]}:{item[1]}" for item in parse_qsl(urlparts.fragment)]
            return UrlDependency(
                name=name,
                url=url,
                extras=extras,
                markers=markers,
                hashes=hashes or None,
            )
        else:
            raise ValueError(f"invalid URL-formatted dependency: {value!r}")
    # TODO (@NiklasRosenstein): Support parsing path dependencies.
    dependency = PypiDependency.parse(value)
    dependency.hashes = hashes or None
    return dependency
def _check_df_load(df):
"""Check if `df` is already loaded in, if not, load from file."""
if isinstance(df, str):
if df.lower().endswith('json'):
return _check_gdf_load(df)
else:
return pd.read_csv(df)
elif isinstance(df, pd.DataFrame):
return df
else:
raise ValueError("{} is not an accepted DataFrame format.".format(df)) | 7245341c8fa58e2aea20761d6832be09e948b0e3 | 28,411 |
import sys
def confirm(question, assume_yes=True):
    """
    Ask user a yes/no question and return their response as a boolean.

    ``question`` should be a simple, grammatically complete question such as
    "Do you wish to continue?", and will have a string similar to ``" [Y/n] "``
    appended automatically. This function will *not* append a question mark for
    you.

    By default, when the user presses Enter without typing anything, "yes" is
    assumed. This can be changed by specifying ``assume_yes=False``.

    .. note::
        If the user does not supply input that is (case-insensitively) equal
        to "y", "yes", "n" or "no", they will be re-prompted until they do.

    :param str question: The question part of the input.
    :param bool assume_yes:
        Whether to assume the affirmative answer by default. Default value:
        ``True``.
    :returns: A `bool`.
    """
    # Set up the prompt suffix; the capitalized letter marks the default.
    if assume_yes:
        suffix = 'Y/n'
    else:
        suffix = 'y/N'
    # Loop till we get something we like
    # TODO: maybe don't do this? It can be annoying. Turn into 'q'-for-quit?
    while True:
        # TODO: ensure that this is Ctrl-C friendly, ISTR issues with
        # raw_input/input on some Python versions blocking KeyboardInterrupt.
        response = input('{0} [{1}] '.format(question, suffix))
        response = response.lower().strip()  # Normalize
        # Empty answer -> the default
        if not response:
            return assume_yes
        # Yes
        if response in ['y', 'yes']:
            return True
        # No
        if response in ['n', 'no']:
            return False
        # Didn't get empty, yes or no, so complain and loop
        err = "I didn't understand you. Please specify '(y)es' or '(n)o'."
        print(err, file=sys.stderr)
def ping(host, timeout=False, return_boolean=False):
    """
    Performs an ICMP ping to a host
    .. versionchanged:: 2015.8.0
        Added support for SunOS
    CLI Example:
    .. code-block:: bash
        salt '*' network.ping archlinux.org
    .. versionadded:: 2015.5.0
    Return a True or False instead of ping output.
    .. code-block:: bash
        salt '*' network.ping archlinux.org return_boolean=True
    Set the time to wait for a response in seconds.
    .. code-block:: bash
        salt '*' network.ping archlinux.org timeout=3
    """
    # NOTE(review): timeout=0 is falsy and therefore treated as "no
    # timeout" — confirm that is the intended semantics.
    if timeout:
        # SunOS ping takes the timeout as a trailing operand; other
        # platforms use the `-W` flag.
        if __grains__["kernel"] == "SunOS":
            cmd = "ping -c 4 {} {}".format(
                __utils__["network.sanitize_host"](host), timeout
            )
        else:
            cmd = "ping -W {} -c 4 {}".format(
                timeout, __utils__["network.sanitize_host"](host)
            )
    else:
        cmd = "ping -c 4 {}".format(__utils__["network.sanitize_host"](host))
    if return_boolean:
        # Simplified the retcode if/else into a direct comparison.
        ret = __salt__["cmd.run_all"](cmd)
        return ret["retcode"] == 0
    return __salt__["cmd.run"](cmd)
def comp_wind_sym(wind_mat):
    """Computes the winding pattern periodicity and symmetries

    Parameters
    ----------
    wind_mat : numpy.ndarray
        Matrix of the Winding; 4D, with slots on axis 2 and phases on
        axis 3 (the first two axes are layer dimensions that get summed)

    Returns
    -------
    Nperw: int
        Number of electrical period of the winding
    is_asym_wind: bool
        True when the pattern is anti-periodic (second half is the
        negative of the first), which doubles Nperw
    """
    assert len(wind_mat.shape) == 4, "dim 4 expected for wind_mat"
    # Summing on all the layers (Nlay_r and Nlay_theta)
    wind_mat2 = squeeze(np_sum(np_sum(wind_mat, axis=1), axis=0))
    qs = wind_mat.shape[3]  # Number of phase
    Zs = wind_mat.shape[2]  # Number of Slot
    Nperw = 1  # Number of electrical period of the winding
    Nperslot = 1  # Periodicity of the winding in number of slots
    # Looking for the periodicity of each phase
    for q in range(0, qs):
        k = 1
        is_sym = False
        while k <= Zs and not is_sym:
            # We shift the array around the slots and check if it's the same;
            # the smallest such k is this phase's period in slots.
            if array_equal(wind_mat2[:, q], roll(wind_mat2[:, q], shift=k)):
                is_sym = True
            else:
                k += 1
        # least common multiple to find common periodicity between different phase
        Nperslot = lcm(Nperslot, k)
    # If Nperslot > Zs no symmetry
    if Nperslot > 0 and Nperslot < Zs:
        # nb of periods of the winding (2 means 180°)
        Nperw = Zs / float(Nperslot)
        # if Zs cannot be divided by Nperslot (non integer)
        if Nperw % 1 != 0:
            Nperw = 1
    # Check for anti symmetries in the elementary winding pattern:
    # first half slots must cancel the second half exactly.
    if (
        Nperslot % 2 == 0
        and norm(
            wind_mat2[0 : Nperslot // 2, :] + wind_mat2[Nperslot // 2 : Nperslot, :]
        )
        == 0
    ):
        is_asym_wind = True
        Nperw = Nperw * 2
    else:
        is_asym_wind = False
    return int(Nperw), is_asym_wind
import os
def FindRepoDir(path=None):
    """Returns the nearest higher-level repo dir from the specified path.

    Args:
        path: The path to use. Defaults to cwd.
    """
    # The docstring always promised a cwd default but the parameter was
    # required; make the signature match (existing positional callers are
    # unaffected).
    if path is None:
        path = os.getcwd()
    return osutils.FindInPathParents(
        '.repo', path, test_func=os.path.isdir)
def row(data, widths="auto", spacing=3, aligns=None):
    """Format data as a table row.

    data (iterable): The individual columns to format.
    widths (iterable or 'auto'): Column widths in order. If "auto", widths
        will be calculated automatically based on the largest value.
    spacing (int): Spacing between columns, in spaces.
    aligns (iterable / unicode): Column alignments in order. 'l' (left,
        default), 'r' (right) or 'c' (center). If a string, value is used
        for all columns.
    RETURNS (unicode): The formatted row.
    """
    cols = []
    # The old guard `hasattr(aligns, '__hash__')` was always true (lists
    # have the attribute too, set to None), so an unhashable `aligns`
    # crashed on the dict membership test. Per the docstring, a plain
    # string means "use this alignment for every column".
    if isinstance(aligns, str) and aligns in ALIGN_MAP:
        aligns = [aligns for _ in data]
    for i, col in enumerate(data):
        # Fall back to left alignment when no alignment is given for column i.
        align = ALIGN_MAP.get(aligns[i] if aligns and i < len(aligns) else "l")
        col_width = len(col) if widths == "auto" else widths[i]
        tpl = "{:%s%d}" % (align, col_width)
        cols.append(tpl.format(to_string(col)))
    return (" " * spacing).join(cols)
from datetime import datetime
import calendar
def _increment_date(date, grain):
"""
Creates a range of dates where the starting date is the given date and the
ending date is the given date incremented for 1 unit of the given grain
(year, month or day).
:param date: the starting date in string format 'YYYY-MM-DD'
:param grain: the grain of increment 'year', 'month' or 'day'
:return: a dictionary with starting and ending date
"""
result = {'from': date}
date_from = datetime.datetime.strptime(date, '%Y-%m-%d')
if grain == 'year':
date_to = datetime.date(date_from.year + 1, date_from.month, date_from.day)
elif grain == 'month':
days_in_month = calendar.monthrange(date_from.year, date_from.month)[1]
date_to = date_from + datetime.timedelta(days=days_in_month)
else:
date_to = date_from + datetime.timedelta(days=1)
result['to'] = str(date_to)[:10] # format 'YYYY-MM-DD'
return result | 53626ad40cdf5a2352a6129fb15ed91ede60838e | 28,417 |
def ErrorCorrect(val, fEC):
    """Return the error correction parameter lambda_EC.

    Defined in Sec. IV of [1]; a typical value for *val* is 1.16.

    Parameters
    ----------
    val : float
        Error correction factor.
    fEC : float
        Error correction efficiency.

    Returns
    -------
    float
        Error correction parameter.
    """
    lambda_ec = fEC * val
    return lambda_ec
def BytesGt(left: Expr, right: Expr) -> BinaryExpr:
    """Greater than expression with bytes as arguments.

    Checks if left > right, with both operands interpreted as big-endian
    unsigned integers. Arguments must not exceed 64 bytes.

    Requires TEAL version 4 or higher.

    Args:
        left: Must evaluate to bytes.
        right: Must evaluate to bytes.
    """
    expr = BinaryExpr(Op.b_gt, TealType.bytes, TealType.uint64, left, right)
    return expr
def balanceOf(account):
    """
    Can be invoked at every shard. If invoked at a non-root shard, the shard
    must have received a xshard transfer before; otherwise the invocation
    throws an exception.

    :param account: user address (must be exactly 20 bytes)
    :return: the token balance of account
    """
    if len(account) == 20:
        return Invoke(SHARD_VERSION, XSHARD_ASSET_ADDR, 'oep4BalanceOf', account)
    raise Exception("address length error")
def archive_entry(title):
    """Move blog entries whose pretty_title matches *title* to the archive.

    Copies every row of ``entries`` whose pretty_title contains *title*
    into ``archived_entries``, deletes the originals, flashes a message
    and redirects to the entry listing. Aborts with 401 when the session
    is not logged in.
    """
    if not session.get('logged_in'):
        abort(401)
    db = get_db()
    # Archive it: copy matching rows first, then delete them.
    # NOTE(review): the LIKE pattern is parameterized (no SQL injection),
    # but '%'/'_' wildcards inside *title* are honored — presumably
    # intentional substring matching; confirm.
    stmt = '''
    insert into archived_entries select * from entries
    where pretty_title like ?
    '''
    db.execute(stmt,
               ('%' + title + '%',))
    db.execute('delete from entries where pretty_title like ?',
               ('%' + title + '%',))
    db.commit()
    flash('Archived page: ' + title)
    return redirect(url_for('show_entries'))
def build_census_df(projection_admits: pd.DataFrame, parameters) -> pd.DataFrame:
    """Compute the daily census (occupied beds) per COVID-19 care category.

    Census on a day is cumulative admissions minus the cohort that has
    already stayed its average length of stay (ALOS).

    Args:
        projection_admits: Daily admissions with "Hospitalized", "ICU"
            and "Ventilated" columns.
        parameters: Object exposing ``lengths_of_stay`` as a
            (hospitalized, icu, ventilated) tuple of day counts.

    Returns:
        DataFrame with "day", "Hospitalized", "ICU", "Ventilated"
        columns, truncated to the projection horizon.
    """
    n_days = np.shape(projection_admits)[0]
    hosp_los, icu_los, vent_los = parameters.lengths_of_stay
    los_dict = {
        "Hospitalized": hosp_los,
        "ICU": icu_los,
        "Ventilated": vent_los,
    }
    census_dict = dict()
    for k, los in los_dict.items():
        # Occupancy = admits-to-date minus the discharges, i.e. the
        # cumulative sum shifted by the length of stay.
        census = (
            projection_admits.cumsum().iloc[:-los, :]
            - projection_admits.cumsum().shift(los).fillna(0)
        ).apply(np.ceil)
        census_dict[k] = census[k]
    census_df = pd.DataFrame(census_dict)
    census_df["day"] = census_df.index
    census_df = census_df[["day", "Hospitalized", "ICU", "Ventilated"]]
    # Removed a rename() that mapped every column name to itself (no-op).
    return census_df.head(n_days)
def indented_kv(key: str, value: str, indent=1, separator="=", suffix=""):
    """Render a key-value pair, aligning continuation lines under the value.

    Useful for implementations of `str` and `repr`: any newline in *value*
    is padded so later lines start where the value began.

    Args:
        key (str): Key.
        value (str): Value.
        indent (int, optional): Number of leading spaces. Defaults to 1.
        separator (str, optional): Separator between key and value.
            Defaults to "=".
        suffix (str, optional): Extra text appended at the end, e.g. ",\n"
            or ">". Defaults to no suffix.

    Returns
        str: Key-value representation with proper indentation.
    """
    prefix = " " * indent + key + separator
    continuation_pad = "\n" + " " * len(prefix)
    body = value.strip().replace("\n", continuation_pad)
    return f"{prefix}{body}{suffix}"
def are_in_file(file_path, strs_to_find):
    """Return True if every string in *strs_to_find* occurs in the file.

    Each string only needs to appear on some line (not all on the same
    line). Returns True when *strs_to_find* is empty.

    Note: *strs_to_find* is mutated — strings are removed as they are
    found (pre-existing, documented behavior).

    Args:
        file_path: Path of the text file to scan.
        strs_to_find: List of substrings to look for.

    Returns:
        bool: True when every string was found.
    """
    # `with` guarantees the handle is closed; the original leaked it.
    with open(file_path) as infile:
        for line in infile:
            if len(strs_to_find) == 0:
                return True
            index = 0
            while index < len(strs_to_find):
                if strs_to_find[index] in line:
                    del strs_to_find[index]
                else:
                    index = index + 1
    return len(strs_to_find) == 0
def load_data(filename: str):
    """
    Load house prices dataset and preprocess data.

    Parameters
    ----------
    filename: str
        Path to house prices dataset

    Returns
    -------
    Design matrix and response vector (prices) - either as a single
    DataFrame or a Tuple[DataFrame, Series]
    """
    df = pd.read_csv(filename)
    df.drop_duplicates(inplace=True)
    df.dropna(inplace=True)
    # Keep only rows passing the dataset sanity filters (helper defined elsewhere).
    df = df.loc[filters(df)]
    df.index = np.arange(len(df))
    df = df.loc[df.zipcode.notnull()]
    # Derived features from zipcode and sale date (helpers defined elsewhere).
    df = split_zipcode(df)
    df = split_date(df)
    # Drop identifiers and raw columns replaced by the derived features.
    df.drop(["id", "date", "zipcode", "sqft_living", "lat", "long"], inplace=True, axis=1)
    # df = df.loc[filters(df)]
    df = df.loc[df.sqft_above / df.floors <= df.sqft_lot]  # Another filter to apply, we need floors > 0 first.
    # Effective renovation year: the later of build year and renovation year.
    df["last_renovated"] = np.maximum(df.yr_built, df.yr_renovated)
    # df.dropna(inplace=True)
    return df.drop(["price"], axis=1), df.price
import keyring
import os
import configparser
def ask_user_for_secrets(credo, source=None):
    """Ask the user for access_key and secret_key.

    Offers every credential source that looks available (environment
    variables, ~/.aws/config, ~/.boto, SAML providers on *credo*) and
    returns ``(typ, credentials)``: either ``("amazon", (access_key,
    secret_key))`` or ``("saml", <idp host>)``.

    Args:
        credo: Object whose ``providers`` attribute lists SAML providers.
        source: Optional pre-selected source (a key or value of
            ``secret_sources``); when falsy, the user is prompted.

    Raises:
        BadCredentialSource: Unknown source, or the source lacks the
            required values/files.
        BadConfigFile: The amazon config file contains no usable sections.
        ProgrammerError: Control reached a branch that should be impossible.
    """
    typ = "amazon"
    choices = []
    access_key_name = "AWS_ACCESS_KEY_ID"
    secret_key_name = "AWS_SECRET_ACCESS_KEY"
    environment = os.environ
    if access_key_name in environment and secret_key_name in environment:
        choices.append(secret_sources["environment"])
    if os.path.exists(os.path.expanduser("~/.aws/config")):
        choices.append(secret_sources["aws_config"])
    if os.path.exists(os.path.expanduser("~/.boto")):
        choices.append(secret_sources["boto_config"])
    if credo.providers:
        choices.extend(["Saml provider '{0}'".format(provider) for provider in credo.providers])
    val = None
    if not source:
        if choices:
            val = ask_for_choice("Method of getting credentials", choices + [secret_sources["specified"]])
        else:
            val = secret_sources["specified"]
    else:
        if source not in secret_sources and source not in secret_sources.values():
            raise BadCredentialSource("Unknown credential source", source=source)
        if source in secret_sources:
            source = secret_sources[source]
        log.info("Getting credentials from %s", source)
    if secret_sources["specified"] in (val, source):
        access_key = get_response(prompt="Access key: ")
        secret_key = get_response(prompt="Secret key: ")
    elif secret_sources["environment"] in (val, source):
        if access_key_name not in environment or secret_key_name not in environment:
            raise BadCredentialSource("Couldn't find environment variables for {0} and {1}".format(access_key_name, secret_key_name))
        access_key = environment[access_key_name]
        secret_key = environment[secret_key_name]
    elif secret_sources["boto_config"] in (val, source) or secret_sources["aws_config"] in (val, source):
        # SafeConfigParser is a deprecated alias removed in Python 3.12;
        # ConfigParser behaves identically here.
        parser = configparser.ConfigParser()
        aws_location = os.path.expanduser("~/.aws/config")
        boto_location = os.path.expanduser("~/.boto")
        if source == secret_sources["aws_config"] and not os.path.exists(aws_location):
            raise BadCredentialSource("Couldn't find the aws config", location=aws_location)
        if source == secret_sources["boto_config"] and not os.path.exists(boto_location):
            raise BadCredentialSource("Couldn't find the boto config", location=boto_location)
        if secret_sources["boto_config"] in (val, source):
            location = boto_location
        else:
            location = aws_location
        # Read it in
        parser.read(location)
        # Find possible credential sections.
        # BUGFIX: this previously iterated boto.config.sections() — the
        # global boto configuration — instead of the file just parsed.
        sections = []
        for section in parser.sections():
            if section in ("Credentials", "default"):
                sections.append(section)
            elif section.startswith("profile "):
                sections.append(section)
        # Get sections that definitely have secrets.
        # NOTE(review): sections_with_secrets is computed but never used —
        # the choice below likely should be restricted to it; left as-is
        # to preserve behavior.
        sections_with_secrets = []
        for section in sections:
            if parser.has_option(section, "aws_access_key_id") and (parser.has_option(section, "aws_secret_access_key") or parser.has_option(section, "keyring")):
                sections_with_secrets.append(section)
        if not sections:
            raise BadConfigFile("No secrets to be found in the amazon config file", location=location)
        elif len(sections) == 1:
            section = sections[0]
        else:
            section = ask_for_choice("Which section to use?", sections)
        access_key = parser.get(section, "aws_access_key_id")
        if parser.has_option(section, "aws_secret_access_key"):
            secret_key = parser.get(section, "aws_secret_access_key")
        else:
            # The secret lives in the system keyring under the named service.
            keyring_name = parser.get(section, 'keyring')
            secret_key = keyring.get_password(keyring_name, access_key)
    elif secret_sources["saml_provider"] in (val, source) or "Saml provider" in val:
        return "saml", "idp.realestate.com.au"
    else:
        raise ProgrammerError("Not possible to reach this point", source=source)
    return typ, (access_key, secret_key)
import unicodedata
import re
def slugify(value):
    """
    Unicode version of standard slugify.

    Converts spaces to hyphens. Removes characters that
    aren't unicode letters, underscores, or hyphens. Converts to lowercase.
    Also replaces whitespace with hyphens and
    strips leading and trailing hyphens.

    :param value: String to slugify.
    :type value: str
    :returns: Slugified value (marked safe for template output).
    :rtype: str
    """
    value = unicodedata.normalize('NFKC', value)
    # Dropped the Python 2 `unicode()` call (a NameError on Python 3 —
    # str is already unicode) and made the regexes raw strings.
    value = re.sub(r'(?u)[^\w\s-]+', '', value).strip().lower()
    return mark_safe(re.sub(r'[-\s]+', '-', value).strip('-'))
import sys
def _generate_Wl(step, evt_type, energy, evtnumber):
    """
    Add the settings for single-W production with a leptonic decay to the
    process (the W decays to e nu, mu nu or tau nu depending on *evt_type*).
    The energy parameter is not used.

    Args:
        step: Unused here; kept for the common generator interface.
        evt_type: "WE", "WM" or "WT" — enables the electron, muon or tau
            decay channel respectively.
        energy: Unused.
        evtnumber: Unused.

    Returns:
        A configured Pythia6GeneratorFilter EDFilter.
    """
    func_id=mod_id+"["+sys._getframe().f_code.co_name+"]"
    common.log( func_id+" Entering... ")
    # Choose between electron, muon or tau decay of the W
    electron_flag = "0"
    muon_flag = "0"
    tau_flag = "0"
    if evt_type == "WE":
        electron_flag = "1"
    elif evt_type == "WM":
        muon_flag = "1"
    elif evt_type == "WT":
        tau_flag = "1"
    # Build the process source: W production with all hadronic decay
    # channels disabled and exactly one leptonic channel enabled.
    generator = cms.EDFilter('Pythia6GeneratorFilter',
                    pythiaPylistVerbosity=cms.untracked.int32(0),
                    pythiaHepMCVerbosity=cms.untracked.bool(False),
                    maxEventsToPrint = cms.untracked.int32(0),
                    filterEfficiency = cms.untracked.double(1),
                    PythiaParameters = cms.PSet\
                    (parameterSets = cms.vstring('PythiaUESettings','processParameters'),
                     PythiaUESettings=user_pythia_ue_settings(),
                     processParameters=cms.vstring('MSEL=0 !User defined processes',
                                                   'MSUB(2) = 1',# !W production
                                                   'MDME(190,1) = 0',# !W decay into dbar u
                                                   'MDME(191,1) = 0',# !W decay into dbar c
                                                   'MDME(192,1) = 0',# !W decay into dbar t
                                                   'MDME(194,1) = 0',# !W decay into sbar u
                                                   'MDME(195,1) = 0',# !W decay into sbar c
                                                   'MDME(196,1) = 0',# !W decay into sbar t
                                                   'MDME(198,1) = 0',# !W decay into bbar u
                                                   'MDME(199,1) = 0',# !W decay into bbar c
                                                   'MDME(200,1) = 0',# !W decay into bbar t
                                                   'MDME(205,1) = 0',# !W decay into bbar tp
                                                   'MDME(206,1) = %s' %electron_flag,# !W decay into e+ nu_e
                                                   'MDME(207,1) = %s' %muon_flag,# !W decay into mu+ nu_mu
                                                   'MDME(208,1) = %s' %tau_flag,# !W decay into tau+ nu_tau
                                                   )
                     )
                    )
    common.log(func_id+" Returning Generator...")
    return generator
from math import factorial as f
def binomial_coefficient(n: int, m: int) -> int:
    """Binomial coefficient "n choose m".

    Returns n!/(m!(n-m)!) as an exact int. The original used true
    division, returning a float — contradicting the annotation and losing
    precision for large n; the division here is always exact.

    Args:
        n: Total number of items (n >= 0).
        m: Number of chosen items (0 <= m <= n).

    Returns:
        The exact integer binomial coefficient.
    """
    return f(n) // (f(m) * f(n - m))
def joint(waypoints):
    """
    Calculate a trajectory by a joint operation.

    Builds a single quadratic program (solved with OSQP) covering every
    segment at once.  Each segment has 4th-degree polynomials for X, Y
    and Z (5 coefficients each) plus a 2nd-degree polynomial for Yaw
    (3 coefficients), i.e. 18 coefficients per segment.  Each waypoint is
    expected to expose ``.x``, ``.y``, ``.z``, ``.yaw`` and ``.time``.

    Returns:
        list: one 18-element coefficient array per segment.
    """
    # total number of segments
    numSegments = len(waypoints) - 1
    # every segment has its own polynomial of 4th degree for X,Y and Z and a polynomial of 2nd degree for Yaw
    numCoefficients = numSegments * (3*5+3)
    # list of calculated trajectory coefficients
    trajectory = []
    # start + end X,Y,Z,Yaw position for every segment: 8
    # rendezvous X,Y,Z,Yaw velocity: 4
    # absolute start + end X,Y,Z (+ start Yaw) velocity: 7
    numConstraints = numSegments * 8 + (numSegments - 1) * 4 + 7
    # Quadratic cost: only the squared leading coefficients are penalized,
    # i.e. snap for X/Y/Z and acceleration for Yaw.
    P_numpy = zeros((numCoefficients, numCoefficients))
    for i in range(numSegments):
        P_numpy[0 + i * 18, 0 + i * 18] = 1 # minimize snap for X
        # P_numpy[2 + i * 18, 2 + i * 18] = 100 # minimize acceleration for X
        P_numpy[5 + i * 18, 5 + i * 18] = 1 # minimize snap for Y
        # P_numpy[7 + i * 18, 7 + i * 18] = 100 # minimize acceleration for Y
        P_numpy[10 + i * 18, 10 + i * 18] = 1 # minimize snap for Z
        # P_numpy[12 + i * 18, 12 + i * 18] = 100 # minimize acceleration for Z
        P_numpy[15 + i * 18, 15 + i * 18] = 1 # minimize acceleration for Yaw
    P = csc_matrix(P_numpy) # convert to CSC for performance
    # =============================
    # Gradient vector (linear terms), we have none
    # =============================
    q = zeros((numCoefficients, 1))
    q = hstack(q) # convert to hstack for performance
    # =============================
    # Inequality matrix (left side), we have none
    # =============================
    G = zeros((numConstraints, numCoefficients))
    # =============================
    # Inequality vector (right side), we have none
    # =============================
    h = zeros((numConstraints, 1))
    h = hstack(h) # convert to hstack for performance
    # =============================
    # Equality matrix (left side)
    # =============================
    A = zeros((numConstraints, numCoefficients))
    # =============================
    # Equality vector (right side)
    # =============================
    b = zeros((numConstraints, 1))
    # =============================
    # Set up of Equality Constraints
    # =============================
    cc = -1 # Current Constraint
    for i in range(numSegments):
        # "start of segment" position constraints
        cc += 1 # X Position
        A[cc, 0 + i * 18] = waypoints[i].time ** 4
        A[cc, 1 + i * 18] = waypoints[i].time ** 3
        A[cc, 2 + i * 18] = waypoints[i].time ** 2
        A[cc, 3 + i * 18] = waypoints[i].time
        A[cc, 4 + i * 18] = 1
        b[cc, 0] = waypoints[i].x
        cc += 1 # Y Position
        A[cc, 5 + i * 18] = waypoints[i].time ** 4
        A[cc, 6 + i * 18] = waypoints[i].time ** 3
        A[cc, 7 + i * 18] = waypoints[i].time ** 2
        A[cc, 8 + i * 18] = waypoints[i].time
        A[cc, 9 + i * 18] = 1
        b[cc, 0] = waypoints[i].y
        cc += 1 # Z Position
        A[cc, 10 + i * 18] = waypoints[i].time ** 4
        A[cc, 11 + i * 18] = waypoints[i].time ** 3
        A[cc, 12 + i * 18] = waypoints[i].time ** 2
        A[cc, 13 + i * 18] = waypoints[i].time
        A[cc, 14 + i * 18] = 1
        b[cc, 0] = waypoints[i].z
        cc += 1 # Yaw Angle
        A[cc, 15 + i * 18] = waypoints[i].time ** 2
        A[cc, 16 + i * 18] = waypoints[i].time
        A[cc, 17 + i * 18] = 1
        b[cc, 0] = waypoints[i].yaw
        # "end of segment" position constraints
        cc += 1 # X Position
        A[cc, 0 + i * 18] = waypoints[i + 1].time ** 4
        A[cc, 1 + i * 18] = waypoints[i + 1].time ** 3
        A[cc, 2 + i * 18] = waypoints[i + 1].time ** 2
        A[cc, 3 + i * 18] = waypoints[i + 1].time
        A[cc, 4 + i * 18] = 1
        b[cc, 0] = waypoints[i + 1].x
        cc += 1 # Y Position
        A[cc, 5 + i * 18] = waypoints[i + 1].time ** 4
        A[cc, 6 + i * 18] = waypoints[i + 1].time ** 3
        A[cc, 7 + i * 18] = waypoints[i + 1].time ** 2
        A[cc, 8 + i * 18] = waypoints[i + 1].time
        A[cc, 9 + i * 18] = 1
        b[cc, 0] = waypoints[i + 1].y
        cc += 1 # Z Position
        A[cc, 10 + i * 18] = waypoints[i + 1].time ** 4
        A[cc, 11 + i * 18] = waypoints[i + 1].time ** 3
        A[cc, 12 + i * 18] = waypoints[i + 1].time ** 2
        A[cc, 13 + i * 18] = waypoints[i + 1].time
        A[cc, 14 + i * 18] = 1
        b[cc, 0] = waypoints[i + 1].z
        cc += 1 # Yaw Angle
        A[cc, 15 + i * 18] = waypoints[i + 1].time ** 2
        A[cc, 16 + i * 18] = waypoints[i + 1].time
        A[cc, 17 + i * 18] = 1
        b[cc, 0] = waypoints[i + 1].yaw
        # segment rendezvous constraints
        # (velocity continuity between segment i-1 and i; nothing to tie
        # together before the first interior waypoint)
        if i == 0:
            continue
        cc += 1 # X Velocity Rendezvous
        A[cc, 0 + i * 18] = 4 * waypoints[i].time ** 3
        A[cc, 1 + i * 18] = 3 * waypoints[i].time ** 2
        A[cc, 2 + i * 18] = 2 * waypoints[i].time
        A[cc, 3 + i * 18] = 1
        A[cc, 0 + i * 18 - 18] = -1 * A[cc, 0 + i * 18]
        A[cc, 1 + i * 18 - 18] = -1 * A[cc, 1 + i * 18]
        A[cc, 2 + i * 18 - 18] = -1 * A[cc, 2 + i * 18]
        A[cc, 3 + i * 18 - 18] = -1 * A[cc, 3 + i * 18]
        cc += 1 # Y Velocity Rendezvous
        A[cc, 5 + i * 18] = 4 * waypoints[i].time ** 3
        A[cc, 6 + i * 18] = 3 * waypoints[i].time ** 2
        A[cc, 7 + i * 18] = 2 * waypoints[i].time
        A[cc, 8 + i * 18] = 1
        A[cc, 5 + i * 18 - 18] = -1 * A[cc, 5 + i * 18]
        A[cc, 6 + i * 18 - 18] = -1 * A[cc, 6 + i * 18]
        A[cc, 7 + i * 18 - 18] = -1 * A[cc, 7 + i * 18]
        A[cc, 8 + i * 18 - 18] = -1 * A[cc, 8 + i * 18]
        cc += 1 # Z Velocity Rendezvous
        A[cc, 10 + i * 18] = 4 * waypoints[i].time ** 3
        A[cc, 11 + i * 18] = 3 * waypoints[i].time ** 2
        A[cc, 12 + i * 18] = 2 * waypoints[i].time
        A[cc, 13 + i * 18] = 1
        A[cc, 10 + i * 18 - 18] = -1 * A[cc, 10 + i * 18]
        A[cc, 11 + i * 18 - 18] = -1 * A[cc, 11 + i * 18]
        A[cc, 12 + i * 18 - 18] = -1 * A[cc, 12 + i * 18]
        A[cc, 13 + i * 18 - 18] = -1 * A[cc, 13 + i * 18]
        cc += 1 # Yaw Velocity Rendezvous
        A[cc, 15 + i * 18] = 2 * waypoints[i].time
        A[cc, 16 + i * 18] = 1
        A[cc, 15 + i * 18 - 18] = -1 * A[cc, 15 + i * 18]
        A[cc, 16 + i * 18 - 18] = -1 * A[cc, 16 + i * 18]
        # cc += 1 # X Acceleration Rendezvous
        # A[cc, 0 + i * 18] = 12 * waypoints[0].time ** 2
        # A[cc, 1 + i * 18] = 6 * waypoints[0].time
        # A[cc, 2 + i * 18] = 2
        # A[cc, 0 + i * 18 - 18] = -1 * A[cc, 0 + i * 18]
        # A[cc, 1 + i * 18 - 18] = -1 * A[cc, 1 + i * 18]
        # A[cc, 2 + i * 18 - 18] = -1 * A[cc, 2 + i * 18]
        # cc += 1 # Y Acceleration Rendezvous
        # A[cc, 5 + i * 18] = 12 * waypoints[0].time ** 2
        # A[cc, 6 + i * 18] = 6 * waypoints[0].time
        # A[cc, 7 + i * 18] = 2
        # A[cc, 5 + i * 18 - 18] = -1 * A[cc, 5 + i * 18]
        # A[cc, 6 + i * 18 - 18] = -1 * A[cc, 6 + i * 18]
        # A[cc, 7 + i * 18 - 18] = -1 * A[cc, 7 + i * 18]
        # cc += 1 # Z Acceleration Rendezvous
        # A[cc, 10 + i * 18] = 12 * waypoints[0].time ** 2
        # A[cc, 11 + i * 18] = 6 * waypoints[0].time
        # A[cc, 12 + i * 18] = 2
        # A[cc, 10 + i * 18 - 18] = -1 * A[cc, 10 + i * 18]
        # A[cc, 11 + i * 18 - 18] = -1 * A[cc, 11 + i * 18]
        # A[cc, 12 + i * 18 - 18] = -1 * A[cc, 12 + i * 18]
        # cc += 1 # Yaw Acceleration Rendezvous
        # A[cc, 15 + i * 18] = 2
        # A[cc, 15 + i * 18 - 18] = -1 * A[cc, 15 + i * 18]
        # cc += 1 # X Jerk Rendezvous
        # A[cc, 0] = 24 * waypoints[0].time
        # A[cc, 1] = 6
        # A[cc, 0 + i * 18 - 18] = -1 * A[cc, 0 + i * 18]
        # A[cc, 1 + i * 18 - 18] = -1 * A[cc, 1 + i * 18]
        # cc += 1 # Y Jerk Rendezvous
        # A[cc, 5] = 24 * waypoints[0].time
        # A[cc, 6] = 6
        # A[cc, 5 + i * 18 - 18] = -1 * A[cc, 5 + i * 18]
        # A[cc, 6 + i * 18 - 18] = -1 * A[cc, 6 + i * 18]
        # cc += 1 # Z Jerk Rendezvous
        # A[cc, 10] = 24 * waypoints[0].time
        # A[cc, 11] = 6
        # A[cc, 10 + i * 18 - 18] = -1 * A[cc, 10 + i * 18]
        # A[cc, 11 + i * 18 - 18] = -1 * A[cc, 11 + i * 18]
        #
        # cc += 1 # X Snap Rendezvous
        # A[cc, 0] = 24
        # A[cc, 0 + i * 18 - 18] = -1 * A[cc, 0 + i * 18]
        # cc += 1 # Y Snap Rendezvous
        # A[cc, 5] = 24
        # A[cc, 5 + i * 18 - 18] = -1 * A[cc, 5 + i * 18]
        # cc += 1 # Z Snap Rendezvous
        # A[cc, 10] = 24
        # A[cc, 10 + i * 18 - 18] = -1 * A[cc, 10 + i * 18]
    # Boundary conditions: the velocity rows below have b[cc, 0] = 0, so
    # they pin the corresponding derivatives to zero.
    cc += 1 # absolute start X velocity
    A[cc, 0] = 4 * waypoints[0].time ** 3
    A[cc, 1] = 3 * waypoints[0].time ** 2
    A[cc, 2] = 2 * waypoints[0].time
    A[cc, 3] = 1
    cc += 1 # absolute start Y velocity
    A[cc, 5] = 4 * waypoints[0].time ** 3
    A[cc, 6] = 3 * waypoints[0].time ** 2
    A[cc, 7] = 2 * waypoints[0].time
    A[cc, 8] = 1
    cc += 1 # absolute start Z velocity
    A[cc, 10] = 4 * waypoints[0].time ** 3
    A[cc, 11] = 3 * waypoints[0].time ** 2
    A[cc, 12] = 2 * waypoints[0].time
    A[cc, 13] = 1
    cc += 1 # absolute start Yaw velocity
    A[cc, 15] = 2 * waypoints[0].time
    A[cc, 16] = 1
    cc += 1 # absolute end X velocity
    A[cc, numCoefficients - 18 + 0] = 4 * waypoints[-1].time ** 3
    A[cc, numCoefficients - 18 + 1] = 3 * waypoints[-1].time ** 2
    A[cc, numCoefficients - 18 + 2] = 2 * waypoints[-1].time
    A[cc, numCoefficients - 18 + 3] = 1
    cc += 1 # absolute end Y velocity
    A[cc, numCoefficients - 18 + 5] = 4 * waypoints[-1].time ** 3
    A[cc, numCoefficients - 18 + 6] = 3 * waypoints[-1].time ** 2
    A[cc, numCoefficients - 18 + 7] = 2 * waypoints[-1].time
    A[cc, numCoefficients - 18 + 8] = 1
    cc += 1 # absolute end Z velocity
    A[cc, numCoefficients - 18 + 10] = 4 * waypoints[-1].time ** 3
    A[cc, numCoefficients - 18 + 11] = 3 * waypoints[-1].time ** 2
    A[cc, numCoefficients - 18 + 12] = 2 * waypoints[-1].time
    A[cc, numCoefficients - 18 + 13] = 1
    #cc += 1 # absolute end Yaw velocity
    #A[cc, numCoefficients - 18 + 15] = 2 * waypoints[-1].time
    #A[cc, numCoefficients - 18 + 16] = 1
    #cc += 1 # absolute start X acceleration
    # A[c, 0] = 12 * waypoints[0].time ** 2
    # A[c, 1] = 6 * waypoints[0].time
    # A[c, 2] = 2
    #cc += 1 # absolute start Y acceleration
    # A[c, 5] = 12 * waypoints[0].time ** 2
    # A[c, 6] = 6 * waypoints[0].time
    # A[c, 7] = 2
    #cc += 1 # absolute start Z acceleration
    # A[cc, 10] = 12 * waypoints[0].time ** 2
    # A[cc, 11] = 6 * waypoints[0].time
    # A[cc, 12] = 2
    #cc += 1 # absolute start Yaw acceleration
    # A[cc, 15] = 2
    #cc += 1 # absolute end X acceleration
    # A[cc, numCoefficients - 18 + 0] = 12 * waypoints[-1].time ** 2
    # A[cc, numCoefficients - 18 + 1] = 6 * waypoints[-1].time
    # A[cc, numCoefficients - 18 + 2] = 2
    #cc += 1 # absolute end Y acceleration
    # A[cc, numCoefficients - 18 + 5] = 12 * waypoints[-1].time ** 2
    # A[cc, numCoefficients - 18 + 6] = 6 * waypoints[-1].time
    # A[cc, numCoefficients - 18 + 7] = 2
    #cc += 1 # absolute end Z acceleration
    # A[cc, numCoefficients - 18 + 10] = 12 * waypoints[-1].time ** 2
    # A[cc, numCoefficients - 18 + 11] = 6 * waypoints[-1].time
    # A[cc, numCoefficients - 18 + 12] = 2
    #cc += 1 # absolute end Yaw acceleration
    # A[cc, numCoefficients - 18 + 15] = 2
    #cc += 1 # absolute start X jerk
    # A[cc, 0] = 24 * waypoints[0].time
    # A[cc, 1] = 6
    #cc += 1 # absolute start Y jerk
    # A[cc, 5] = 24 * waypoints[0].time
    # A[cc, 6] = 6
    #cc += 1 # absolute start Z jerk
    # A[cc, 10] = 24 * waypoints[0].time
    # A[cc, 11] = 6
    #cc += 1 # absolute end X jerk
    # A[cc, numCoefficients - 18 + 0] = 24 * waypoints[-1].time
    # A[cc, numCoefficients - 18 + 1] = 6
    #cc += 1 # absolute end Y jerk
    # A[cc, numCoefficients - 18 + 5] = 24 * waypoints[-1].time
    # A[cc, numCoefficients - 18 + 6] = 6
    #cc += 1 # absolute end Z jerk
    # A[cc, numCoefficients - 18 + 10] = 24 * waypoints[-1].time
    # A[cc, numCoefficients - 18 + 11] = 6
    #cc += 1 # absolute start X snap
    # A[cc, 0] = 24
    #cc += 1 # absolute start Y snap
    # A[cc, 5] = 24
    #cc += 1 # absolute start Z snap
    # A[cc, 10] = 24
    #cc += 1 # absolute end X snap
    # A[cc, numCoefficients - 18 + 0] = 24
    #cc += 1 # absolute end Y snap
    # A[cc, numCoefficients - 18 + 5] = 24
    #cc += 1 # absolute end Z snap
    # A[cc, numCoefficients - 18 + 10] = 24
    # =============================
    # Solver Setup
    # =============================
    # OSQP needs:
    # P = quadratic terms
    # q = linear terms
    # A = constraint matrix of ALL constraints (inequality & equality)
    # l = lower constraints
    # u = upper constraints
    P = csc_matrix(P)
    q = hstack(q)
    h = hstack(h)
    b = hstack(b)
    # NOTE(review): G and h are all-zero, so the stacked inequality rows
    # (-inf <= 0*x <= 0) are trivially satisfied; only the equality rows
    # (b <= A_eq*x <= b) constrain the solution.
    A = vstack([G, A])
    A = csc_matrix(A)
    l = -inf * ones(len(h))
    l = hstack([l, b])
    u = hstack([h, b])
    # setup solver and solve
    m = osqp.OSQP()
    m.setup(P=P, q=q, A=A, l=l, u=u) # extra solver variables can be set here
    res = m.solve()
    # save to trajectory variable
    # Slice the flat solution vector into 18-coefficient chunks, one per
    # segment.
    for i in range(0, size(res.x), 18):
        segment = res.x[i:i + 18]
        trajectory.append(segment)
    print("QP solution Number following: ", res.x)
    return trajectory
import math
def logistic(x: float):
    """Numerically stable logistic (sigmoid) function.

    Computes 1 / (1 + exp(-x)).  For large negative ``x`` the naive
    formula raises OverflowError because ``math.exp(-x)`` overflows a C
    double; branching on the sign keeps the exponent non-positive, so
    ``exp`` never overflows.

    Args:
        x: Input value.

    Returns:
        The sigmoid of ``x`` in [0, 1] (the extremes are reached only by
        floating-point saturation for very large |x|).
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    # exp(x) <= 1 here; algebraically identical to 1 / (1 + exp(-x)).
    z = math.exp(x)
    return z / (1 + z)
def highlight_deleted(obj):
    """
    Render *obj* as escaped text, wrapping it in a "deleted" span (shown
    in red via CSS) when the object carries a truthy ``deleted`` attribute.
    """
    rendered = conditional_escape(text_type(obj))
    if getattr(obj, 'deleted', False):
        return '<span class="deleted">{0}</span>'.format(rendered)
    return rendered
def range_(stop):
    """:yaql:range
    Returns an iterator over values from 0 up to stop, not including
    stop, i.e. [0, stop).
    :signature: range(stop)
    :arg stop: right bound for generated list numbers
    :argType stop: integer
    :returnType: iterator
    .. code::
        yaql> range(3)
        [0, 1, 2]
    """
    values = range(stop)
    return iter(values)
import argparse
import sys
import os
def my_argument_parser(epilog=None):
    """
    Create a parser with some common arguments used by detectron2 users.

    Args:
        epilog (str): epilog passed to ArgumentParser describing the usage.
            When omitted, a default usage text showing single-machine and
            multi-machine invocations of the current script is used.

    Returns:
        argparse.ArgumentParser: parser with config/distributed-training
        options and a trailing ``opts`` catch-all for config overrides.
    """
    parser = argparse.ArgumentParser(
        epilog=epilog
        or f"""
Examples:
Run on single machine:
    $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
Change some config options:
    $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
Run on multiple machines:
    (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags]
    (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags]
""",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
    parser.add_argument("--config-det", default="", metavar="FILE", help="path to config file of detector")
    parser.add_argument(
        "--resume",
        action="store_true",
        help="Whether to attempt to resume from the checkpoint directory. "
        "See documentation of `DefaultTrainer.resume_or_load()` for what it means.",
    )
    parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
    parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
    parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
    parser.add_argument(
        "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
    )
    # PyTorch still may leave orphan processes in multi-gpu training.
    # Therefore we use a deterministic way to obtain port,
    # so that users are aware of orphan processes by seeing the port occupied.
    # (os.getuid() is unavailable on Windows, hence the platform check.)
    port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
    parser.add_argument(
        "--dist-url",
        default="tcp://127.0.0.1:{}".format(port),
        help="initialization URL for pytorch distributed backend. See "
        "https://pytorch.org/docs/stable/distributed.html for details.",
    )
    parser.add_argument(
        "opts",
        help="""
Modify config options at the end of the command. For Yacs configs, use
space-separated "PATH.KEY VALUE" pairs.
For python-based LazyConfig, use "path.key=value".
        """.strip(),
        default=None,
        nargs=argparse.REMAINDER,
    )
    return parser
def post(filename: str, files: dict, output_type: str):
    """Build the deliver-service URL for *output_type* and POST *files* to it.

    Retryable transport failures (retry exhaustion, connection problems)
    are re-raised as RetryableError; otherwise the HTTP response object
    is returned.
    """
    url = f"http://{CONFIG.DELIVER_SERVICE_URL}/deliver/{output_type}"
    logger.info(f"Calling {url}")
    try:
        return session.post(url, params={"filename": filename}, files=files)
    except MaxRetryError:
        logger.error("Max retries exceeded", request_url=url)
        raise RetryableError("Max retries exceeded")
    except ConnectionError:
        logger.error("Connection error", request_url=url)
        raise RetryableError("Connection error")
import fnmatch
import os
import sys
import glob
def list_files_recursively(root_dir, basename, suffix='y?ml'):
    """Search *root_dir* recursively for files matching the glob pattern
    ``{basename}.{suffix}`` and return their paths relative to *root_dir*.
    """
    # Guarantee a trailing separator so stripping the prefix below yields
    # clean relative paths.
    root_dir = os.path.join(root_dir, "")
    prefix_len = len(root_dir)
    pattern = '{0}.{1}'.format(basename, suffix)
    # TODO - implement skip
    if sys.version_info >= (3, 5):
        matches = glob.iglob(root_dir + '**/' + pattern, recursive=True)
        return [match[prefix_len:] for match in matches]
    # Fallback for Python < 3.5, where glob has no "**" support.
    found = []
    for current_dir, _subdirs, names in os.walk(root_dir):
        for name in fnmatch.filter(names, pattern):
            found.append(os.path.join(current_dir, name)[prefix_len:])
    return found
def is_off(*args):
    """
    is_off(F, n) -> bool
    is offset?

    Thin SWIG wrapper: forwards all arguments unchanged to the native
    ``_ida_bytes.is_off`` implementation, which tests whether operand
    ``n`` of the flags ``F`` is an offset.

    @param F (C++: flags_t)
    @param n (C++: int)
    """
    return _ida_bytes.is_off(*args)
def get_libdcgm_path():
    """
    Returns relative path to libdcgm.so.2
    """
    library_name = "libdcgm.so.2"
    return "../../lib/" + library_name
def method(modelclass, **kwargs):
    """Decorate a ProtoRPC method for use by the endpoints model passed in.

    Requires exactly one positional argument and passes the rest of the keyword
    arguments to the classmethod "method" on the given class.

    Args:
        modelclass: An Endpoints model class that can create a method.
        **kwargs: Keyword arguments forwarded to ``modelclass.method``.

    Returns:
        A decorator that will use the endpoint metadata to decorate an endpoints
        method.
    """
    # Delegates to the shared factory; the 'method' string presumably
    # selects which classmethod on modelclass builds the decorator.
    return _GetEndpointsMethodDecorator('method', modelclass, **kwargs)
def detect_on(window, index=3, threshold=5):  # threshold value is important: power(watts)
    """Event detector for a power trace.

    input: np array of readings (rows = samples)
    Listens for a change in the monitored feature that exceeds *threshold*
    relative to the mean of the three preceding samples.
    (Can use Active/real (P), Apparent (S), and Reactive (Q) (worst..high SNR).)
    index = index of feature to detect. Used P_real @ index 3
    returns: boolean for event detection
    """
    baseline = np.average([window[-2][index], window[-3][index], window[-4][index]])
    jump = window[-1][index] - baseline
    return bool(jump > threshold)
def calculate_target_as_one_column(df:pd.DataFrame, feature_cols:list, target_cols:list):
    """Explode the one-hot *target_cols* into one row per owned product.

    For every (row, target column) pair whose value is 1, a row is
    emitted carrying the product (column) name in a new ``y`` column.
    This is done for the train set.

    Args:
        df: Input frame; its index is used as the merge key.
        feature_cols: Feature column names to keep in the output.
        target_cols: One-hot encoded target columns (values 0/1).

    Returns:
        DataFrame with the feature columns, any ``*_s`` (shifted)
        columns, ``month_int``, ``id`` and the new ``y`` target column.
    """
    targets = df[target_cols]
    # stack() drops the NaNs left by the ==1 mask, leaving one row per
    # (index, product) pair; the stacked value column (named 0) is
    # redundant.  drop(columns=0) replaces the old positional
    # drop(0, 1), whose positional "axis" argument was removed in
    # pandas 2.0.
    pairs = targets[targets == 1].stack().reset_index().drop(columns=0)
    df = pd.merge(df, pairs, left_on=df.index, right_on='level_0')
    df.rename(columns={'level_1': "y"}, inplace=True)
    keep_cols = feature_cols.copy()
    keep_cols += [col for col in df if col[-2:] == '_s']  # keep also shifted columns
    keep_cols.append('month_int')
    keep_cols.append('id')  # keep id
    keep_cols.append('y')  # keep target var
    return df[keep_cols]
def find_features_with_dtypes(df, dtypes):
    """
    Return (as a numpy array) the names of *df*'s columns whose dtype is
    one of *dtypes*.

    df: DataFrame
    dtypes: data types (defined in numpy) to look for,
        e.g. categorical features usually have dtypes np.object, np.bool
        and some of them have np.int (with a limited number of unique items)
    """
    selected = []
    for fname, ftype in df.dtypes.to_dict().items():
        if ftype in dtypes:
            selected.append(fname)
    return np.asarray(selected)
def DeConv2d(net, n_out_channel = 32, filter_size=(3, 3),
        out_size = (30, 30), strides = (2, 2), padding = 'SAME', batch_size = None, act = None,
        W_init = tf.truncated_normal_initializer(stddev=0.02), b_init = tf.constant_initializer(value=0.0),
        W_init_args = None, b_init_args = None, name ='decnn2d'):
    """Wrapper for :class:`DeConv2dLayer`, if you don't understand how to use :class:`DeConv2dLayer`, this function may be easier.

    Parameters
    ----------
    net : TensorLayer layer.
    n_out_channel : int, number of output channel.
    filter_size : tuple of (height, width) for filter size.
    out_size : tuple of (height, width) of output.
    batch_size : int or None, batch_size. If None, try to find the batch_size from the first dim of net.outputs (you should tell the batch_size when define the input placeholder).
    strides : tuple of (height, width) for strides.
    act : None or activation function.
    W_init_args : dict or None, keyword args for the W initializer (a fresh empty dict when None).
    b_init_args : dict or None, keyword args for the b initializer (a fresh empty dict when None).
    others : see :class:`Conv2dLayer`.
    """
    # The old defaults were mutable dicts ({}) shared across every call;
    # default to None and create a fresh dict per invocation instead.
    if W_init_args is None:
        W_init_args = {}
    if b_init_args is None:
        b_init_args = {}
    if act is None:
        act = tf.identity
    if batch_size is None:
        # Fall back to the dynamic batch dimension of the input tensor.
        batch_size = tf.shape(net.outputs)[0]
    net = DeConv2dLayer(layer = net,
                        act = act,
                        shape = [filter_size[0], filter_size[1], n_out_channel, int(net.outputs._shape[-1])],
                        output_shape = [batch_size, out_size[0], out_size[1], n_out_channel],
                        strides = [1, strides[0], strides[1], 1],
                        padding = padding,
                        W_init = W_init,
                        b_init = b_init,
                        W_init_args = W_init_args,
                        b_init_args = b_init_args,
                        name = name)
    return net
from typing import List
import re
def parse_release(base: str, path: str) -> List[str]:
    """Extracts built images from the release.yaml at path

    Args:
        base: The built images will be expected to start with this string,
            other images will be ignored
        path: The path to the file (release.yaml) that will contain the
            built images

    Returns:
        list of the images parsed from the file
    """
    # Compile the pattern once instead of re-matching the raw string on
    # every line; behavior is identical.
    pattern = re.compile(base + ".*" + DIGEST_MARKER + ":[0-9a-f]*")
    found = []
    with open(path) as release_file:
        for line in release_file:
            hit = pattern.search(line)
            if hit is not None:
                found.append(hit.group(0))
    return found
def build_config(config_file=None):
    """
    Construct the config object from necessary elements.

    Args:
        config_file: Path to the configuration file.  Defaults to the
            system config directory, resolved at call time (the old
            default-argument form evaluated it once at import time).

    Returns:
        A populated Config instance.  Executable versions found on the
        system PATH are added only when the config file does not already
        define them, so file entries take precedence.
    """
    if config_file is None:
        config_file = get_system_config_directory()
    config = Config(config_file, allow_no_value=True)
    application_versions = find_applications_on_system()
    # Add found versions to config if they don't exist. Versions found
    # in the config file takes precedence over versions found in PATH.
    # (.items() replaces the Python-2-only .iteritems(), which raises
    # AttributeError on Python 3.)
    for exe_name, exe_version in application_versions.items():
        if not config.has_option(Config.EXECUTABLES, exe_name):
            config.set(Config.EXECUTABLES, exe_name, exe_version)
    return config
import re
def preProcess(column):
    """
    Do a little bit of data cleaning with the help of Unidecode and Regex.
    Things like casing, extra spaces, quotes and new lines can be ignored.
    """
    column = unidecode(column)
    # Apply the cleanup substitutions in order; collapsing repeated
    # spaces must run last.
    substitutions = (
        ('\n', ' '),
        ('-', ''),
        ('/', ' '),
        ("'", ''),
        (",", ''),
        (":", ' '),
        (' +', ' '),
    )
    for pattern, replacement in substitutions:
        column = re.sub(pattern, replacement, column)
    column = column.strip().strip('"').strip("'").lower().strip()
    # An empty result is normalized to None.
    return column if column else None
def size_to_pnts(size) -> np.ndarray:
    """
    Return the four corner points of an image of the given (width,
    height) size as a (4, 2) array, ordered clockwise from the origin.
    """
    w, h = size[0], size[1]
    corners = [[0, 0], [w, 0], [w, h], [0, h]]
    return np.array(corners)
def create_scenario_mms_datasets(variable_name,
                                 scenario_name,
                                 num_chunks,
                                 data_path,
                                 normalized=False):
    """Create the multi-model statistics dataset for a scenario.

    Runs the function initialize_empty_mms_arrays, fill_empty_arrays,
    and create_xr_dataset to generate the multi-model statistics dataset
    for a scenario. Prints to the user what is being done.

    Args:
        variable_name: The string name of the model variable.
        scenario_name: The string name of the scenario.
        num_chunks: Integer number of chunks used when filling the arrays
            (see NOTE below about the initialization step).
        data_path: String path where the arrays will be located.
        normalized: False (default) if model data is not normalized.

    Returns:
        Arrays of dimensions (lats, lons, times) and multi-model statistic
        values (mean_vals, max_vals, min_vals, std_vals).  The assembled
        dataset is also exported to OUTPUT_PATH as a side effect.
    """
    print('Creating empty arrays')
    # NOTE(review): the initialization step hard-codes num_chunks=20 and
    # ignores the caller's num_chunks argument (which is only used by
    # fill_empty_arrays below) -- confirm this is intentional.
    [empty_dsets,
     dim_info,
     dims,
     file_names,
     datasets] = initialize_empty_mms_arrays(data_path,
                                             scenario_name=scenario_name,
                                             num_chunks=20,
                                             normalized=normalized)
    [lats, lons, times] = dims
    print('Calculating multimodel statistics')
    [mean_vals,
     min_vals,
     max_vals,
     std_vals] = fill_empty_arrays(empty_dsets,
                                   dim_info,
                                   file_names,
                                   datasets,
                                   variable_name,
                                   num_chunks)
    print('Exporting dataset')
    ds = create_xr_dataset(lats, lons, times, mean_vals, max_vals, min_vals, std_vals)
    # OUTPUT_PATH is a module-level constant defined elsewhere in this file.
    export_dataset(ds=ds,
                   output_path=OUTPUT_PATH,
                   variable_name=variable_name,
                   scenario_name=scenario_name,
                   normalized=normalized)
    return lats, lons, times, mean_vals, max_vals, min_vals, std_vals
def carla_rotation_to_numpy_rotation_matrix(carla_rotation):
    """
    Convert a carla rotation to a 3x3 numpy rotation matrix.

    Considers the conversion from left-handed system (unreal) to
    right-handed system (Cyber), and from degrees (carla) to radians
    (Cyber).

    :param carla_rotation: the carla rotation
    :type carla_rotation: carla.Rotation
    :return: a numpy.array with 3x3 elements
    :rtype: numpy.array
    """
    rpy = carla_rotation_to_RPY(carla_rotation)
    full_matrix = euler2mat(rpy[0], rpy[1], rpy[2])
    return full_matrix[:3, :3]
def _liquid_viscocity(_T, ranged=True):
    """Liquid viscosity in Pa * s.

    The range guard flags temperatures outside 59.15-130 (presumably
    Kelvin -- confirm against the correlation source) when *ranged* is
    true.
    """
    OutOfRangeTest(_T, 59.15, 130, ranged)
    # Correlation coefficients for ln(viscosity) as a function of _T.
    a, b, c, d, e = -2.0077E+01, 2.8515E+02, 1.7840E+00, -6.2382E-22, 10.0
    log_visc = a + b / _T + c * log(_T) + d * _T**e
    return exp(log_visc)
import subprocess
def _interactive(git, name):
    """
    Interactive assistant. This will supercede any command line arguments, meaning
    that it is pointless to add any other arguments when using the -i argument.

    Prompts the user for project metadata (via _input_wrap), pre-filling
    defaults discovered from the local git configuration.  Returns a
    (git_host, answers) tuple, where git_host is popped out of the
    collected answers dict.
    """
    prompt = (
        "\n========================================================================\n"
        "This is the interactive helper for *sire*. Details entered here will \n"
        "determine which files are included, and format them with the correct \n"
        "information. Leaving a field blank is OK, but can result in incompletely \n"
        "formatted files. Hit enter to begin, or type 'quit' to quit.\n"
        "========================================================================\n\n"
    )
    _input_wrap(prompt)
    output = dict()
    # attempt to get some variables from shell. not sure how this looks when absent
    # (check_output raises CalledProcessError if git config has no value set)
    usr = _obtain_git_username(git, name)
    email = "git config user.email".split()
    email = subprocess.check_output(email).decode("utf-8").strip()
    real_name = "git config user.name".split()
    real_name = subprocess.check_output(real_name).decode("utf-8").strip()
    short = "/".join(sorted(SHORT_PATHS))
    exes = f"Comma separated list of files to exclude\n(e.g. {short}): "
    # tuples are field name, prompt text, default
    prompts = [
        ("real_name", "Real name (for license, setup.py) ({default}): ", real_name),
        ("username", "Username ({default}): ", usr),
        ("email", "Email ({default}): ", email),
        ("git_username", "GitHub/GitLab/Bitbucket username ({default}): ", usr),
        ("description", "Short project description: ", None),
        # ("license", "Licence to use ({default}): ", "MIT"),
        ("mkdocs", "Use mkdocs/readthedocs for documentation (y/N): ", False),
        ("virtualenv", "Generate a virtualenv for this project (y/N): ", False),
        ("git", "Git host to use (github,gitlab,bitbucket/None): ", None),
        ("exclude", exes, set()),
    ]
    # Ask each question in order; _input_wrap presumably substitutes
    # {default} and returns the user's answer or the default.
    for field, prompt, default in prompts:
        output[field] = _input_wrap(prompt, default)
    return output.pop("git"), output
import re, fileinput
def readConfig(filename):
    """Parses a moosicd configuration file and returns the data within.

    The "filename" argument specifies the name of the file from which to
    read the configuration.  The file is a sequence of pairs: a regular
    expression line followed by a command line.  Blank (whitespace-only)
    lines and lines starting with a '#' character are ignored.

    Returns a list of 2-tuples which associate compiled regular
    expression objects with the commands (split into argv lists) that
    will be used to play files whose names match.
    """
    pairs = []
    pending_regex = None
    for line in fileinput.input(filename):
        # Skip blank lines and '#' comment lines.
        if re.search(r'^\s*$', line) or re.search('^#', line):
            continue
        # chomp off trailing newline
        if line.endswith('\n'):
            line = line[:-1]
        if pending_regex is None:
            # First line of a pair: a regular expression.  (Matching is
            # case-sensitive; there is currently no easy way for the
            # user to choose otherwise.)
            pending_regex = re.compile(line)
        else:
            # Second line of a pair: the command, split into arguments.
            pairs.append((pending_regex, line.split()))
            pending_regex = None
    return pairs
def negative_f1(y_true, y_pred):
    """Implements custom negative F1 loss score for use in multi-isotope classifiers.

    Args:
        y_true: a list of ground truth.
        y_pred: a list of predictions to compare against the ground truth.

    Returns:
        Returns the custom loss score: the negative batch mean of the
        per-sample "true positive" fraction, clipped away from zero for
        numerical stability.

    Raises:
        None
    """
    # The original signature was `def negative_f1(y_true, y_pred) -:`,
    # a SyntaxError (presumably a truncated `-> float`); the stray `-`
    # has been removed.
    diff = y_true - y_pred
    negs = K.clip(diff, -1.0, 0.0)
    # Negative differences mark positions predicted 1 where truth is 0.
    false_positive = -K.sum(negs, axis=-1)
    true_positive = 1.0 - false_positive
    lower_clip = 1e-20
    true_positive = K.clip(true_positive, lower_clip, 1.0)
    return -K.mean(true_positive)
import torch
def cal_gauss_log_lik(x, mu, log_var=0.0):
    """Gaussian log-likelihood of a batch under N(mu, exp(log_var)).

    :param x: batch of inputs (bn X fn)
    :param mu: predicted mean, same shape as x
    :param log_var: log-variance; a Python scalar (default 0.0, i.e. unit
        variance) or a tensor broadcastable to x's shape
    :return: (gaussian log likelihood, mean squared error) -- each is the
        per-sample feature-sum averaged over the batch
    """
    # torch.exp() rejects plain Python floats, so the scalar default used
    # to crash; as_tensor is a no-op for tensors and converts scalars.
    log_var = torch.as_tensor(log_var)
    MSE = torch.pow((mu - x), 2)
    # 1e-8 guards against division by a vanishing variance.
    gauss_log_lik = -0.5*(log_var + np.log(2*np.pi) + (MSE/(1e-8 + torch.exp(log_var))))
    MSE = torch.mean(torch.sum(MSE, axis=1))
    gauss_log_lik = torch.mean(torch.sum(gauss_log_lik, axis=1))
    return gauss_log_lik, MSE
def translate_fun_parseInt(x):
    """Rewrite JavaScript ``parseInt(...)`` calls so they call the
    ``__ee_extrafunc_parseInt`` helper instead.

    Args:
        x (str): JavaScript code to translate.

    Returns:
        (str, int): the translated code, plus 1 if at least one
        ``parseInt`` call was found and rewritten, else 0.
    """
    # functextin() extracts the argument text of every parseInt(...)
    # call; de-duplicate so each distinct call form is replaced once.
    arg_names = list(set(functextin(x, "parseInt")))
    # if does not match the condition, return the original string
    if not arg_names:
        return x, 0
    for args_text in arg_names:
        original_call = "parseInt({})".format(args_text)
        new_call = "__ee_extrafunc_parseInt({})".format(args_text)
        x = x.replace(original_call, new_call)
    return x, 1
def simple_intensity_based_segmentation(image, gaussian_sigma=1, thresh_method="Otsu", smallest_area_of_object=5,label_img_depth = "8bit"):
    """Perform intensity based thresholding and detect objects.

    Args:
        image : input image array (the raw image data, not a path)
        gaussian_sigma : sigma to use for the gaussian filter
        thresh_method : threshold method passed to gen_background_mask
        smallest_area_of_object : smallest area of objects in pixels
        label_img_depth : output depth, "8bit" or "16bit"

    Returns:
        A labelled image of connected components, normalized to the
        requested bit depth.

    Raises:
        Exception: if label_img_depth is not "8bit" or "16bit".
    """
    # apply a gaussian filter (skfil is presumably skimage.filters)
    image_smooth = skfil.gaussian(image, sigma=gaussian_sigma, preserve_range = True)
    # apply threshold
    bw = gen_background_mask(image_smooth, threshold_method = thresh_method)
    #remove small objects
    bw_size_filtered = remove_small_objects(bw,smallest_area_of_object )
    #Label connected components
    label_image = label(bw_size_filtered)
    # Rescale label values into the requested integer depth.
    # NOTE(review): cv2.normalize maps the label range onto
    # [0, max(label_image)], so label ids are remapped, not preserved --
    # confirm downstream consumers only need distinct ids.
    if (label_img_depth == "8bit"):
        label_image_cor = cv2.normalize(label_image, None, 0, np.max(label_image), cv2.NORM_MINMAX, cv2.CV_8U)
    elif (label_img_depth == "16bit"):
        label_image_cor = cv2.normalize(label_image, None, 0, np.max(label_image), cv2.NORM_MINMAX, cv2.CV_16U)
    else:
        raise Exception('Invalid input: should be among {8bit, 16bit}')
    return label_image_cor
import torch
def mish(x):
    """mish activation function

    Args:
        x (Tensor): input tensor.

    Returns:
        (Tensor): output tensor and have same shape with x.

    Examples:
        >>> mish(to_tensor([-3.0, -1.0, 0.0, 2.0]))
        tensor([-1.4228e-01, -2.6894e-01, 0.0000e+00, 1.7616e+00]

    References:
        Mish - "Mish: A Self Regularized Non-Monotonic Neural Activation Function"
        https://arxiv.org/abs/1908.08681v1
    """
    # Reference softplus through the torch namespace so the function does
    # not depend on a module-level `F` alias being in scope.
    return x * (torch.tanh(torch.nn.functional.softplus(x)))
def text_box_end_pos(pos, text_box, border=0):
    """
    Calculates end pos for a text box for cv2 images.
    :param pos: Position of text (same as for cv2 image)
    :param text_box: Size of text (same as for cv2 image)
    :param border: Outside padding of textbox
    :return box_end_pos: End xy coordinates for text box (end_point for cv2.rectangel())
    """
    x, y = pos
    width, height = text_box
    return (x + width + border, y + height + border)
def ticket_competence_add_final(request, structure_slug, ticket_id,
                                new_structure_slug, structure, can_manage, ticket,
                                office_employee=None):
    """
    Adds new ticket competence (second step)
    :type structure_slug: String
    :type ticket_id: String
    :type new_structure_slug: String
    :type structure: OrganizationalStructure (from @has_admin_privileges)
    :type can_manage: Dictionary (from @has_admin_privileges)
    :type ticket: Ticket (from @ticket_assigned_to_structure)
    :type office_employee: OrganizationalStructureOfficeEmployee (from @is_operator)
    :param structure_slug: structure slug
    :param ticket_id: ticket code
    :param new_structure_slug: selected structure slug
    :param structure: structure object (from @has_admin_privileges)
    :param can_manage: if user can manage or can read only (from @has_admin_privileges)
    :param ticket: ticket object (from @ticket_assigned_to_structure)
    :param office_employee: operator offices queryset (from @is_operator)
    :return: render
    """
    # All active structures (populates the structure selector in the template)
    strutture = OrganizationalStructure.objects.filter(is_active = True)
    # Offices the ticket is already assigned to (including inactive offices)
    ticket_offices = ticket.get_assigned_to_offices(office_active=False)
    operator_offices_list = []
    # Assignments in the current structure that have been taken in charge
    # and are still actively followed
    assignments = TicketAssignment.objects.filter(ticket=ticket,
                                                  office__organizational_structure=structure,
                                                  office__is_active=True,
                                                  follow=True,
                                                  taken_date__isnull=False)
    for assignment in assignments:
        # Only offices the requesting user can actually manage are offered
        if user_manage_office(user=request.user,
                              office=assignment.office):
            operator_offices_list.append(assignment.office)
    new_structure = get_object_or_404(OrganizationalStructure,
                                      slug=new_structure_slug,
                                      is_active=True)
    categorie = TicketCategory.objects.filter(organizational_structure=new_structure.pk,
                                              is_active=True)
    if request.method == 'POST':
        form = TicketCompetenceSchemeForm(data=request.POST)
        if form.is_valid():
            category_slug = form.cleaned_data['category_slug']
            follow = form.cleaned_data['follow']
            readonly = form.cleaned_data['readonly']
            selected_office_slug = form.cleaned_data['selected_office']
            # Refactor
            # follow_value = form.cleaned_data['follow']
            # readonly_value = form.cleaned_data['readonly']
            # follow = True if follow_value == 'on' else False
            # readonly = True if readonly_value == 'on' else False
            # Does the category passed via POST actually exist?
            categoria = get_object_or_404(TicketCategory,
                                          slug=category_slug,
                                          organizational_structure=new_structure,
                                          is_active=True)
            selected_office = None
            if selected_office_slug:
                selected_office = get_object_or_404(OrganizationalStructureOffice,
                                                    slug=selected_office_slug,
                                                    organizational_structure=structure,
                                                    is_active=True)
            # If no office is associated with the category, show an error:
            # the special Help-Desk office is already competent on the ticket
            if not categoria.organizational_office:
                messages.add_message(request, messages.ERROR,
                                     _("Il ticket è già di competenza"
                                       " dell'ufficio speciale <b>{}</b>,"
                                       " che ha la competenza della tipologia di richiesta "
                                       "<b>{}</b>".format(settings.DEFAULT_ORGANIZATIONAL_STRUCTURE_OFFICE,
                                                          categoria)))
                return redirect('uni_ticket:manage_ticket_url_detail',
                                structure_slug=structure_slug,
                                ticket_id=ticket_id)
            new_office = categoria.organizational_office
            # Guard against assigning a competence the ticket already has
            if new_office in ticket_offices:
                messages.add_message(request, messages.ERROR,
                                     _("Il ticket è già di competenza"
                                       " dell'ufficio <b>{}</b>, responsabile"
                                       " della tipologia di richiesta <b>{}</b>".format(new_office,
                                                                           categoria)))
                return redirect('uni_ticket:manage_ticket_url_detail',
                                structure_slug=structure_slug,
                                ticket_id=ticket_id)
            messages.add_message(request, messages.SUCCESS,
                                 _("Competenza <b>{}</b> aggiunta"
                                   " correttamente".format(new_office)))
            # If not follow anymore
            if not follow:
                abandoned_offices = ticket.block_competence(user=request.user,
                                                            structure=structure,
                                                            allow_readonly=False,
                                                            selected_office=selected_office)
                for off in abandoned_offices:
                    # if off.is_default:
                        # messages.add_message(request, messages.WARNING,
                                         # _("L'ufficio <b>{}</b> non può essere"
                                           # " rimosso dagli uffici competenti".format(off)))
                    # else:
                    ticket.update_log(user=request.user,
                                      note= _("Competenza abbandonata da"
                                              " Ufficio: {}".format(off)))
            # If follow but readonly
            elif readonly:
                abandoned_offices = ticket.block_competence(user=request.user,
                                                            structure=structure,
                                                            selected_office=selected_office)
                for off in abandoned_offices:
                    # The default office must keep write access
                    if off.is_default:
                        messages.add_message(request, messages.WARNING,
                                         _("L'ufficio <b>{}</b> non può essere"
                                           " posto in sola lettura".format(off)))
                    else:
                        ticket.update_log(user=request.user,
                                          note= _("Competenza trasferita da"
                                                  " Ufficio: {}."
                                                  " (L'ufficio ha mantenuto"
                                                  " accesso in sola lettura)".format(off)))
            # If follow and want to manage
            ticket.add_competence(office=new_office,
                                  user=request.user)
            ticket.update_log(user=request.user,
                              note= _("Nuova competenza: {} - {}"
                                      " - Categoria: {}".format(new_structure,
                                                                new_office,
                                                                categoria)))
            # log action
            logger.info('[{}] {} added new competence to'
                        ' ticket {}'
                        ' (follow: {}) (readonly: {})'.format(timezone.now(),
                                                              request.user,
                                                              ticket,
                                                              follow,
                                                              readonly))
            return redirect('uni_ticket:manage_ticket_url_detail',
                            structure_slug=structure_slug,
                            ticket_id=ticket_id)
        else:
            # Surface each form validation error as a flash message
            for k,v in get_labeled_errors(form).items():
                messages.add_message(request, messages.ERROR,
                                     "<b>{}</b>: {}".format(k, strip_tags(v)))
    user_type = get_user_type(request.user, structure)
    template = "{}/add_ticket_competence.html".format(user_type)
    title = _('Trasferisci competenza ticket')
    sub_title = '{} ({})'.format(ticket.subject, ticket_id)
    d = {'can_manage': can_manage,
         'categorie': categorie,
         'operator_offices': operator_offices_list,
         'structure': structure,
         'structure_slug': new_structure_slug,
         'strutture': strutture,
         'sub_title': sub_title,
         'ticket': ticket,
         'title': title,}
    return render(request, template, d) | b3e159494d8f7ecf7603596face065f02e44e00e | 28,459
def _find_computecpp_root(repository_ctx):
    """Locate the ComputeCpp SYCL compiler toolkit root.

    Reads the toolkit path from the environment, validates it is absolute,
    checks the toolkit version, and fails the repository rule otherwise.
    """
    env = repository_ctx.os.environ
    if _COMPUTECPP_TOOLKIT_PATH in env:
        toolkit_path = env[_COMPUTECPP_TOOLKIT_PATH].strip()
        if toolkit_path.startswith("/"):
            _check_computecpp_version(repository_ctx, toolkit_path)
            return toolkit_path
    fail("Cannot find SYCL compiler, please correct your path")
def distance(v):
    """
    Estimated distance to the body of the Mandelbulb.

    Iterates z -> pow3d(z, ORDER) + v until the squared magnitude exceeds
    BAILOUT; points that never escape within MAX_ITERS return 0.
    """
    z = v
    for step in range(MAX_ITERS):
        magnitude_sq = (z**2).sum()
        if magnitude_sq > BAILOUT:
            escape_time = step
            break
        z = pow3d(z, ORDER) + v
    else:
        # Never escaped: assumed inside the set
        return 0
    return np.log(np.log(magnitude_sq)) / MU_NORM - escape_time + MAX_ITERS - 2
def _depol_error_value_two_qubit(error_param,
gate_time=0,
qubit0_t1=inf,
qubit0_t2=inf,
qubit1_t1=inf,
qubit1_t2=inf):
"""Return 2-qubit depolarizing channel parameter for device model"""
# Check trivial case where there is no gate error
if error_param is None:
return None
if error_param == 0:
return 0
# Check t1 and t2 are valid
if qubit0_t1 <= 0 or qubit1_t1 <= 0:
raise NoiseError("Invalid T_1 relaxation time parameter: T_1 <= 0.")
if qubit0_t2 <= 0 or qubit1_t2 <= 0:
raise NoiseError("Invalid T_2 relaxation time parameter: T_2 <= 0.")
if qubit0_t2 - 2 * qubit0_t1 > 0 or qubit1_t2 - 2 * qubit1_t1 > 0:
raise NoiseError(
"Invalid T_2 relaxation time parameter: T_2 greater than 2 * T_1.")
if gate_time is None:
gate_time = 0
if gate_time == 0 or (qubit0_t1 == inf and
qubit0_t2 == inf and
qubit1_t1 == inf and
qubit1_t2 == inf):
if error_param is not None and error_param > 0:
return 4 * error_param / 3
else:
return 0
# Otherwise we calculate the depolarizing error probability to account
# for the difference between the relaxation error and gate error
if qubit0_t1 == inf:
q0_par1 = 1
else:
q0_par1 = exp(-gate_time / qubit0_t1)
if qubit0_t2 == inf:
q0_par2 = 1
else:
q0_par2 = exp(-gate_time / qubit0_t2)
if qubit1_t1 == inf:
q1_par1 = 1
else:
q1_par1 = exp(-gate_time / qubit1_t1)
if qubit1_t2 == inf:
q1_par2 = 1
else:
q1_par2 = exp(-gate_time / qubit1_t2)
denom = (
q0_par1 + q1_par1 + q0_par1 * q1_par1 + 4 * q0_par2 * q1_par2 +
2 * (q0_par2 + q1_par2) + 2 * (q1_par1 * q0_par2 + q0_par1 * q1_par2))
depol_param = 1 + 5 * (4 * error_param - 3) / denom
return depol_param | ab779de7d0fac3f828f9fffbb0c13e588c3fc54b | 28,462 |
def get_google_order_sheet():
    """ Fetch the 'orders' worksheet of the lab order spreadsheet. """
    worksheet_name = 'orders'
    return get_google_sheet(ANDERSEN_LAB_ORDER_SHEET, worksheet_name)
def collision_check(direction):
    """
    Try to move the player one tile in `direction`, pushing a box if present.

    :param direction: Str : example up
    :return: True/False when a box push is attempted (blocked or moved);
             implicit None on a plain move into open ground.

    NOTE(review): relies on module-level globals `mapGen.map` (tile graph keyed
    by position, values carrying a "biom" string and neighbour positions per
    direction) and `p` (player with `position`) — assumed, confirm in module.
    """
    # really scuffed needs hard rework worked on this in night and its bad
    # but it dose its job so i guess its ok for now
    # Is there a tile at all in that direction?
    if mapGen.map[p.position][direction] is not None:
        # Locked boxes (box already on a destination) are immovable
        if mapGen.map[mapGen.map[p.position][direction]]["biom"] != "locked_box":
            if mapGen.map[mapGen.map[p.position][direction]] is not None:
                # Target tile holds a pushable box: inspect the tile behind it
                if mapGen.map[mapGen.map[p.position][direction]]["biom"] == "box":
                    if mapGen.map[mapGen.map[p.position][direction]][direction] is not None:
                        # Box behind box: push blocked
                        if mapGen.map[mapGen.map[mapGen.map[p.position][direction]][direction]]["biom"] == "box":
                            return False
                    if mapGen.map[mapGen.map[p.position][direction]][direction] is not None:
                        # Locked box behind box: push blocked
                        if mapGen.map[mapGen.map[mapGen.map[p.position][direction]][direction]]["biom"] == "locked_box":
                            return False
                    if mapGen.map[mapGen.map[p.position][direction]][direction] is not None:
                        # Pushing the box onto a destination locks it in place
                        if mapGen.map[mapGen.map[mapGen.map[p.position][direction]][direction]]["biom"] == "destination":
                            mapGen.map[mapGen.map[mapGen.map[p.position][direction]][direction]]["biom"] = "locked_box"
                            mapGen.map[mapGen.map[p.position][direction]]["biom"] = "land"
                            p.position = mapGen.map[p.position][direction]
                            return True
                        else:
                            # Plain push: box advances one tile, player follows
                            mapGen.map[mapGen.map[mapGen.map[p.position][direction]][direction]]["biom"] = "box"
                            mapGen.map[mapGen.map[p.position][direction]]["biom"] = "land"
                            p.position = mapGen.map[p.position][direction]
                            return True
                else:
                    # No box: simply step onto the neighbouring tile
                    p.position = mapGen.map[p.position][direction]
        else:
            return False | 35b0bd0e6e2811b470bd513ea33c339ed7c7a96b | 28,464
def get_freq(freq):
    """
    Return frequency code of given frequency str.
    If input is not string, return input as it is.

    Example
    -------
    >>> get_freq('A')
    1000

    >>> get_freq('3A')
    1000
    """
    if not isinstance(freq, compat.string_types):
        return freq
    base, _ = get_freq_code(freq)
    return base
def OMRSE(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "2021-08-30", **kwargs
) -> Graph:
    """Retrieve and return the OMRSE graph from the kgobo repository.

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges
    cache_path = None
        Path to store graphs
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "2021-08-30"
        Version to retrieve
        The available versions are:
            - 2022-04-06
            - 2021-08-30
    """
    retriever = AutomaticallyRetrievedGraph(
        "OMRSE", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs,
    )
    # The retriever instance is callable and yields the loaded Graph
    return retriever()
def produce_new_shapedir(verts, n_betas=20):
    """Derive a template mesh and shape basis from a batch of vertices via PCA.

    Runs PCA (through SVD) on per-vertex offsets from the mean mesh to find
    shape parameters that best describe vertex-shape variation.

    :param verts: (N x V x 3) array of meshes
    :param n_betas: Number of betas to fit, B (capped by min(N, V, n_betas))
    :return v_template: (V x 3) array of new template vertices
    :return shapedir: (V x 3 x K) tensor mapping betas to vertex offsets
                      from the template verts
    """
    N, V, _ = verts.shape
    # PCA cannot yield more components than samples or features
    K = min(N, V, n_betas)
    if n_betas > K:
        print(f"Insufficient size for {n_betas} betas. Using K = {K}")
        n_betas = K
    # The mean mesh becomes the new template
    v_template = verts.mean(axis=0)
    offsets = (verts - v_template).reshape(N, 3 * V)
    fit = PCA(n_components=K).fit(offsets)
    # Scale each principal direction by the std dev it explains
    vecs = fit.components_ * fit.explained_variance_[:, None] ** 0.5
    return v_template, vecs.T.reshape(V, 3, K)
import logging
def get_calibration(
    df: pd.DataFrame,
    features:pd.DataFrame,
    outlier_std: float = 3,
    calib_n_neighbors: int = 100,
    calib_mz_range: int = 20,
    calib_rt_range: float = 0.5,
    calib_mob_range: float = 0.3,
    **kwargs) -> (np.ndarray, float):
    """Wrapper function to get calibrated values for the precursor mass.

    Args:
        df (pd.DataFrame): Input dataframe that contains identified peptides.
        features (pd.DataFrame): Features dataframe for which the masses are calibrated.
        outlier_std (float, optional): Range in standard deviations for outlier removal. Defaults to 3.
        calib_n_neighbors (int, optional): Number of neighbors used for regression. Defaults to 100.
        calib_mz_range (int, optional): Scaling factor for mz range. Defaults to 20.
        calib_rt_range (float, optional): Scaling factor for rt_range. Defaults to 0.5.
        calib_mob_range (float, optional): Scaling factor for mobility range. Defaults to 0.3.
        **kwargs: Arbitrary keyword arguments so that settings can be passed as a whole.

    Returns:
        corrected_mass (np.ndarray): The calibrated mass
        y_hat_std (float): The standard deviation of the precursor offset after calibration
        mad_offset (float): Median absolute deviation of the precursor offset
    """
    # ppm offset between measured and theoretical precursor mass
    target = 'prec_offset_ppm'
    cols = ['mz','rt']
    # Only include mobility when the data has an ion-mobility dimension
    if 'mobility' in df.columns:
        cols += ['mobility']
    scaling_dict = {}
    # mz range is relative (ppm-like), rt is absolute, mobility is relative
    scaling_dict['mz'] = ('relative', calib_mz_range/1e6)
    scaling_dict['rt'] = ('absolute', calib_rt_range)
    scaling_dict['mobility'] = ('relative', calib_mob_range)
    # Drop identifications whose offset deviates by more than outlier_std sigmas
    df_sub = remove_outliers(df, outlier_std)
    if len(df_sub) > calib_n_neighbors:
        # Predict the local ppm offset for each feature via k-nearest neighbors
        y_hat = kneighbors_calibration(df_sub, features, cols, target, scaling_dict, calib_n_neighbors)
        corrected_mass = (1-y_hat/1e6) * features['mass_matched']
        y_hat_std = y_hat.std()
        mad_offset = np.median(np.absolute(y_hat - np.median(y_hat)))
        logging.info(f'Precursor calibration std {y_hat_std:.2f}, {mad_offset:.2f}')
        return corrected_mass, y_hat_std, mad_offset
    else:
        # Too few confident identifications to fit a calibration model
        logging.info('Not enough data points present. Skipping recalibration.')
        mad_offset = np.median(np.absolute(df['prec_offset_ppm'].values - np.median(df['prec_offset_ppm'].values)))
        return features['mass_matched'], np.abs(df['prec_offset_ppm'].std()), mad_offset | af53baf47e1999f5ef421ddb1a12e2a41757f62e | 28,468
import argparse
from typing import List
def argunparse(options: dict, parser: argparse.ArgumentParser) -> List[str]:
    """
    Convert a dict of flags back into a list of args.
    """
    result: List[str] = []
    for spec in parser.arguments:
        names = spec["args"]
        short_name = next((n for n in names if n.startswith("-")), None)
        long_name = next((n for n in names if n.startswith("--")), None)
        # Prefer the long form when the argument defines one
        flag = long_name if long_name else short_name
        if "dest" in spec:
            value = options[spec["dest"]]
        else:
            # Fall back to matching a flag name against the options keys
            value = None
            for name in names:
                key = name.lstrip("-").replace("-", "_")
                if key in options:
                    value = options[key]
                    break
        if not value:
            continue
        # flags without values
        # TODO: doesn't handle flags that set a non-bool const value
        if isinstance(value, bool):
            result.append(flag)
            continue
        # flags with values: "--flag=value" only for long flags without nargs
        separator = "=" if long_name and not spec.get("nargs", None) else " "
        # for now assume separate flags per value e.g. "--foo bar --foo xoo"
        # also need to support single flag with multiple values "--foo bar xoo"
        values = value if isinstance(value, list) else [value]
        result.extend("{}{}{}".format(flag, separator, v) for v in values)
    return result
from typing import Mapping
from typing import Iterable
def _decode_bytestrings(o):
"""Decode all base64-encoded values (not keys) to bytestrings"""
if isinstance(o, Mapping):
return {key: _decode_bytestrings(value) for key, value in o.items()}
elif isinstance(o, Iterable) and not isinstance(o, (str, bytes)):
return list([_decode_bytestrings(value) for value in o])
elif isinstance(o, str) and o.startswith(BASE64_IDENTIFIER):
return b64decode(o[len(BASE64_IDENTIFIER):])
else:
return o | 6a4fd49b50df91ee9705eda2192d10cb8f64606b | 28,470 |
def extinction(lambda1in,R,unit = 'microns'):
    """
    Calculates A(lambda)/A_V. So, if we know E(B - V), we do
    A(lambda) = A(lambda)/A_V * E(B - V) * R.

    R is alternatively R_V, usually 3.1---this parameterizes the extinction law,
    which you should know if you are using this function.
    This is the CCM89 extinction law, which assumes microns.
    Set unit to a string containing 'ang' to pass wavelengths in Angstroms.
    """
    # Convert Angstroms to microns if requested
    if 'ang' in unit:
        lambda1 = lambda1in/1.e4
    else:
        lambda1 = lambda1in
    if (lambda1 > 100).all():
        print("Check units! This program assumes microns")
    if (lambda1 > 3.0).any():
        print ("Warning: extrapolating into the far IR (lambda > 3 microns)")
    if (lambda1 < 0.125).any():
        print('Warning: extreme UV is an extrapolation')
    if (lambda1 < 0.1).any():
        print('warning: extrapolating into the extreme UV (lambda < 1000 A)')
    # a, b are the CCM89 polynomial coefficients: A(lambda)/A_V = a + b/R
    a = sp.zeros(lambda1.size)
    b = sp.zeros(lambda1.size)
    # Infrared regime (lambda > 0.909 microns, i.e. x < 1.1 / micron)
    m = (lambda1 > 0.909)
    a[m] = 0.574*(1/lambda1[m])**(1.61)
    b[m] = -0.527*(1/lambda1[m])**(1.61)
    # Optical/NIR regime (1.1 <= x <= 3.3), polynomial in x - 1.82
    m = (lambda1 > 0.30303)*(lambda1 <= 0.909)
    x = 1/lambda1[m] - 1.82
    a[m] = 1 + 0.17699*x - 0.50447*x**2 - 0.02427*x**3 + 0.72085*x**4 + 0.01979*x**5 - 0.7753*x**6 + 0.32999*x**7
    b[m] = 1.41338*x + 2.28305*x**2 + 1.07233*x**3 - 5.38434*x**4 - 0.62251*x**5 + 5.3026*x**6 - 2.09002*x**7
    # UV regime (3.3 <= x <= 8) with the 2175 A bump terms
    m = (lambda1 > 0.125)*(lambda1 <= 0.30303)
    x = 1/lambda1[m]
    a[m] = 1.752 - 0.316*x - 0.104/( (x - 4.67)**2 + 0.341)
    b[m] = -3.090 + 1.825*x + 1.206/( (x - 4.62)**2 + 0.263)
    # Far-UV correction terms, applied on top for 5.9 <= x <= 8
    m = (lambda1 > 0.125)*(lambda1 <= 0.1695)
    x = 1/lambda1[m]
    a[m] += -0.04473*(x - 5.9)**2 - 0.009779*(x-5.9)**3
    b[m] += 0.21300*(x - 5.9)**2 + 0.120700*(x - 5.9)**3
    # Extreme-UV extrapolation (x > 8)
    m = (lambda1 < 0.125)
    x = 1/lambda1[m]
    a[m] = -1.073 - 0.628*(x - 8.) + 0.137*(x - 8.)**2 - 0.070*(x - 8.)**3
    b[m] = 13.670 + 4.257*(x - 8.) - 0.420*(x - 8.)**2 + 0.374*(x - 8.)**3
    return a + b/R | d6b7a728de0b861786f6e28d3000f77d90248703 | 28,471
def to_rtp(F0, phi, h):
    """Convert spherical coordinates to Cartesian (up-south-east).

    Args:
        F0: radial magnitude(s) — scalar or np.ndarray.
        phi: azimuthal angle(s) in degrees ("physics convention").
        h: cosine(s) of the polar angle theta.

    Returns:
        np.ndarray of (up, south, east) = (z, -y, x) components;
        one row per entry when F0 is an ndarray, a flat 3-vector otherwise.
    """
    # spherical coordinates in "physics convention"
    # (removed unused local `r = F0` from the original)
    phi = np.radians(phi)
    theta = np.arccos(h)
    x = F0 * np.sin(theta) * np.cos(phi)
    y = F0 * np.sin(theta) * np.sin(phi)
    z = F0 * np.cos(theta)
    if type(F0) is np.ndarray:
        return np.column_stack([z, -y, x])
    else:
        return np.array([z, -y, x])
def to_base(num, base, numerals=NUMERALS):
    """Convert <num> to <base> using the symbols in <numerals>.

    Args:
        num: integer (or int-convertible) value to convert.
        base: target base; must satisfy 0 < base < len(numerals).
        numerals: digit symbols, indexed by digit value.

    Returns:
        String representation of num in the given base; base 1 yields
        unary ('1' repeated num times), negatives are prefixed with '-'.

    Raises:
        ValueError: if base is outside [1, len(numerals)).
    """
    # Bug fix: the original called int(num)/int(base) but discarded the
    # results, so float inputs crashed later at numerals[num % base].
    num = int(num)
    base = int(base)
    if not (0 < base < len(numerals)):
        raise ValueError("<base> must be in the range [1, %i>" % len(numerals))
    if num == 0:
        return '0'
    if num < 0:
        sign = '-'
        num = -num
    else:
        sign = ''
    if base == 1:
        # Unary: positional expansion degenerates, so repeat the unit symbol
        return sign + ('1'*num)
    result = ''
    while num:
        result = numerals[num % base] + result
        num //= base
    return sign + result
def identify_algorithm_hyperparameters(model_initializer):  # FLAG: Play nice with Keras
    """Determine keyword-arguments accepted by `model_initializer`, along with their default values

    Parameters
    ----------
    model_initializer: functools.partial, or class, or class instance
        The algorithm class being used to initialize a model

    Returns
    -------
    hyperparameter_defaults: dict
        The dict of kwargs accepted by `model_initializer` and their default values"""
    # FLAG: Play nice with Keras
    try:
        params = signature(model_initializer).parameters
    except TypeError:
        # Some callables (e.g. builtins/instances) only expose a signature via their class
        params = signature(model_initializer.__class__).parameters
    return {
        name: param.default
        for name, param in params.items()
        if param.kind in (param.KEYWORD_ONLY, param.POSITIONAL_OR_KEYWORD)
    }
from typing import List
from typing import Tuple
def cnf_rep_to_text(cnf_rep: List[List[Tuple[str, bool]]]) -> str:
    """
    Converts a CNF representation to a text.

    Each clause becomes one line; positive literals appear as-is and
    negated literals get a '!' prefix.

    :param cnf_rep: The CNF representation to convert.
    :return: The text representation of the CNF.
    """
    clause_lines = []
    for clause in cnf_rep:
        literals = [name if positive else '!' + name for name, positive in clause]
        clause_lines.append(' '.join(literals))
    return '\n'.join(clause_lines)
def reshape_for_linear(images):
    """Reshape the images for the linear model

    Flattens each (rgb, height, width) image into a 1D vector, since our
    linear model requires a single feature axis per image.
    """
    batch_size = images.shape[0]
    flat_length = images.shape[1] * images.shape[2] * images.shape[3]
    return images.reshape(batch_size, flat_length)
def otp(data, password, encodeFlag=True):
    """ do one time pad encoding on a sequence of chars

    Each byte of `data` is shifted by the corresponding (cycled) password
    character, modulo 256. Decoding reverses the shift.
    """
    if not password:
        return data
    key_len = len(password)
    out = bytearray()
    for index, byte in enumerate(data):
        shift = ord(password[index % key_len])
        # Modulo arithmetic wraps into the valid byte range [0, 255]
        out.append((byte + shift) % 256 if encodeFlag else (byte - shift) % 256)
    return bytes(out)
from datetime import datetime


def export_actions(path='/tmp', http_response=False):
    """
    A script for exporting Enforcement Actions content
    to a CSV that can be opened easily in Excel.

    Run from within consumerfinance.gov with:
    `python cfgov/manage.py runscript export_enforcement_actions`

    By default, the script will dump the file to `/tmp/`,
    unless a path argument is supplied,
    or http_response is set to True (for downloads via the Wagtail admin).

    A command that passes in path would look like this:
    `python cfgov/manage.py runscript export_enforcement_actions
    --script-args [PATH]`
    """
    # Bug fix: with `from datetime import datetime`, the original's
    # `datetime.datetime.now()` raised AttributeError at runtime.
    timestamp = datetime.now().strftime("%Y-%m-%d")
    slug = 'enforcement-actions-{}.csv'.format(timestamp)
    if http_response:
        response = HttpResponse(content_type='text/csv; charset=utf-8')
        response['Content-Disposition'] = 'attachment;filename={}'.format(slug)
        write_questions_to_csv(response)
        return response
    # Collapse any doubled slash from a trailing-slash path argument
    file_path = '{}/{}'.format(path, slug).replace('//', '/')
    with open(file_path, 'w', encoding='windows-1252') as f:
        write_questions_to_csv(f)
def reduce_load_R():
    """
    Used for reconstructing a copy of the R interpreter from a pickle.

    EXAMPLES::

        sage: from sage.interfaces.r import reduce_load_R
        sage: reduce_load_R()
        R Interpreter
    """
    # Returns the module-level singleton R interface `r`
    return r | 858723306137a0e751f25766cbea5609867255f5 | 28,479
import urllib
def path_to_playlist_uri(relpath):
"""Convert path relative to playlists_dir to M3U URI."""
if isinstance(relpath, compat.text_type):
relpath = relpath.encode('utf-8')
return b'm3u:%s' % urllib.quote(relpath) | a69c441411b09ccce387ff76d93d17013b960de4 | 28,480 |
from json import load
import logging


def read_drive_properties(path_name):
    """
    Reads drive properties from json formatted file.
    Takes (str) path_name as argument.
    Returns (dict) with (bool) status, (str) msg, (dict) conf
    """
    try:
        with open(path_name) as json_file:
            conf = load(json_file)
        return {"status": True, "msg": f"Read from file: {path_name}", "conf": conf}
    except (IOError, ValueError, EOFError, TypeError) as error:
        logging.error(str(error))
        return {"status": False, "msg": str(error)}
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; Exception keeps the best-effort behavior
        # without hiding interpreter-level signals.
        logging.error("Could not read file: %s", path_name)
        return {"status": False, "msg": f"Could not read file: {path_name}"}
import math
def proj(
    point: np.ndarray, tol: float = 1e-9, bounds: tuple[float, float] = (0, 1)
) -> tuple[np.ndarray, float]:
    """Find projection on true ROC.

    Bisects over the x-interval `bounds` until the curve point whose
    x + roc(x) matches sum(point) is bracketed within `tol`.

    Args:
        point: A point in [0, 1]^2.
        tol: Tolerance.
        bounds: Bounds of projection to help with the calculation.

    Returns:
        Projection on the true ROC and the slope there.
    """
    lo, hi = bounds
    target = sum(point)
    # Check whether the projection is clamped at either boundary
    value_lo = gauss_roc(lo, tol)
    if lo + value_lo[0] >= target:
        return np.array([lo, value_lo[0]]), value_lo[1]
    value_hi = gauss_roc(hi, tol)
    if hi + value_hi[0] <= target:
        return np.array([hi, value_hi[0]]), value_hi[1]
    # Bisection on x + roc(x), which is monotone in x
    while not math.isclose(lo, hi, abs_tol=tol):
        mid = (lo + hi) / 2
        if mid + gauss_roc(mid, tol)[0] > target:
            hi = mid
        else:
            lo = mid
    if lo == bounds[0]:
        return np.array([lo, value_lo[0]]), value_lo[1]
    if hi == bounds[1]:
        return np.array([hi, value_hi[0]]), value_hi[1]
    value_mid = gauss_roc(lo, tol)
    return np.array([lo, value_mid[0]]), value_mid[1]
def _split_on_wildcard(string):
    """Split the string into two such that first part does not have any wildcard.

    Args:
        string (str): The string to be split.

    Returns:
        A 2-tuple where first part doesn't have any wildcard, and second part does
        have a wildcard. If wildcard is not found, the second part is empty.
        If string starts with a wildcard then first part is empty.

        For example:
            _split_on_wildcard('a/b/c/d*e/f/*.txt') => ('a/b/c/d', '*e/f/*.txt')
            _split_on_wildcard('*e/f/*.txt') => ('', '*e/f/*.txt')
            _split_on_wildcard('a/b/c/d') => ('a/b/c/d', '')
    """
    match = WILDCARD_REGEX.search(string)
    if not match:
        # No wildcard anywhere: the whole string is the literal prefix
        return string, ''
    split_at = match.start()
    return string[:split_at], string[split_at:]
def esgUSPTOPatentGrantsDF(symbol="", **kwargs):
    """Patent grants are indications that a company has successfully signaled that it values its IP, that its IP is unique in the eyes of the USPTO, and that its initial patent application was a reasonable one.
    Patent grants data is issued weekly on Tuesdays.
    Currently only the first three assignees listed on the patent are included. Future versions may contain more detail on the content of patent grants, including assignees beyond the first three listed on the grant.
    History available from 2002
    https://iexcloud.io/docs/api/#esg-uspto-patent-grants

    Args:
        symbol (str): symbol to use
    """
    # subkey "10" selects the USPTO patent-grants slice of this dataset,
    # overriding any subkey the caller may have passed
    params = dict(kwargs, subkey="10")
    return _baseDF(id="PREMIUM_EXTRACT_ALPHA_ESG", symbol=symbol, **params)
import time
import pickle
def get_nln_metrics(model,
                    train_images,
                    test_images,
                    test_labels,
                    model_type,
                    args):
    """
    Calculates the NLN metrics for either frNN or KNN

    Parameters
    ----------
    model (tuple): the model used
    train_images (np.array): images from training set
    test_images (np.array): testing images
    test_labels (np.array): labels of testing images
    model_type (str): the type of model (AE,VAE,...)
    args (Namespace): the argumenets from cmd_args

    Returns
    -------
    max_auc (float32): maximum auroc for the given KNN
    max_neighbours (float32): optimal number of neighbours for AUROC
    max_radius (float32): optimal radius size for frNN
    max_dists_auc (float32): optimal distance based auroc
    max_sum_auc (float32): optimal auroc from adding distance and nln
    max_mul_auc (float32): optimal auroc from multiplying dists and nln
    x_hat (np.array): Reconstructions of test data
    x_hat_train (np.array): Reconstructions of training data
    neighbours_idx (np.array): Indexes of neigbours in z_train
    neighbours_dist (np.array): Latent distances for each neighbour
    """
    # Latent encodings of test (query) and train sets, plus reconstructions
    z_query = infer(model[0].encoder, test_images, args, 'encoder')
    z = infer(model[0].encoder, train_images, args, 'encoder')
    x_hat_train = infer(model[0], train_images, args, 'AE')
    x_hat = infer(model[0], test_images, args, 'AE')
    # Running best scores and per-configuration score dict
    d,max_auc,max_neighbours,max_radius,index_counter = {},0,0,0,0
    dists_auc, sum_auc, mul_auc = [], [], []
    # Sweep neighbour counts (and radii for frNN) to find the best AUROC
    for n_bour in args.neighbors:
        if args.algorithm == 'knn':
            t = time()
            neighbours_dist, neighbours_idx, x_hat_train, neighbour_mask =  nln(z,
                                                                                z_query,
                                                                                x_hat_train,
                                                                                args.algorithm,
                                                                                n_bour,
                                                                                radius=None)
            error = get_nln_errors(model,
                                   model_type,
                                   z_query,
                                   z,
                                   test_images,
                                   x_hat_train,
                                   neighbours_idx,
                                   neighbour_mask,
                                   args)
            # Patched data must be stitched back into full-image scores
            if args.patches:
                if error.ndim ==4:
                    error, test_labels_ = reconstruct(error, args, test_labels)
                else:
                    error, test_labels_ = reconstruct_latent_patches(error, args, test_labels)
            recon_error = get_error('AE', test_images, x_hat, mean=False)
            if args.patches:
                recon_error, test_labels_ = reconstruct(recon_error, args, test_labels)
            else:
                recon_error, test_labels_ = recon_error, test_labels
            # Collapse per-pixel errors to one scalar score per image
            recon_error = process(np.nanmean(recon_error,axis=tuple(range(1,recon_error.ndim))), per_image=False)
            error = process(np.nanmean(error,axis=tuple(range(1,error.ndim))), per_image =False)
            dists = process(get_dists(neighbours_dist,args),per_image =False)
            # Combined scores: distance-only, additive and weighted-sum AUROCs
            if args.anomaly_type == 'MISO':
                alpha=0.6
                add = metrics.roc_auc_score(test_labels_==args.anomaly_class, error+dists+recon_error)
                mul = metrics.roc_auc_score(test_labels_==args.anomaly_class, alpha*error+(1-alpha)*dists )
                dists = metrics.roc_auc_score(test_labels_==args.anomaly_class, dists)
            else:
                alpha=0.6
                add = metrics.roc_auc_score(test_labels_!=args.anomaly_class, error+dists+recon_error)
                mul = metrics.roc_auc_score(test_labels_!=args.anomaly_class, alpha*error+(1-alpha)*dists)
                dists = metrics.roc_auc_score(test_labels_!=args.anomaly_class, dists)
            dists_auc.append(dists)
            sum_auc.append(add)
            mul_auc.append(mul)
            temp_args = [error,test_labels_,args.anomaly_class,args.neighbors,
                         [float('nan')],n_bour,float('nan'), max_auc,max_neighbours,
                         max_radius,index_counter,d,t,dists_auc[-1], sum_auc[-1], mul_auc[-1],args.anomaly_type]
            (max_auc,max_neighbours,
                    max_radius,index_counter,d) = get_max_score(temp_args)
        elif args.algorithm == 'frnn':
            #TODO: add MISO and SIMO distinctions
            for r in args.radius:
                t = time()
                neighbours_dist, neighbours_idx, x_hat_train, neighbour_mask = nln(z,
                                                                                   z_query,
                                                                                   x_hat_train,
                                                                                   args.algorithm,
                                                                                   n_bour,
                                                                                   radius=r)
                error = get_nln_errors(model,
                                       model_type,
                                       z_query,
                                       z,
                                       test_images,
                                       x_hat_train,
                                       neighbours_idx,
                                       neighbour_mask,
                                       args)
                if args.patches:
                    if error.ndim ==4:
                        error, test_labels_ = reconstruct(error, args, test_labels)
                    else:
                        error, test_labels_ = reconstruct_latent_patches(error, args, test_labels)
                else: test_labels_ = test_labels
                error = np.mean(error,axis=tuple(range(1,error.ndim)))
                temp_args = [error,test_labels_,args.anomaly_class,
                             args.neighbors, args.radius,n_bour,r, max_auc,
                             max_neighbours,max_radius,index_counter,d,t, args.anomaly_type]
                (max_auc,max_neighbours,
                        max_radius,index_counter,d) = get_max_score(temp_args)
    # Persist the per-configuration latent scores for later analysis
    with open('outputs/{}/{}/{}/latent_scores.pkl'.format(model_type,args.anomaly_class,args.model_name),'wb') as f:
        pickle.dump(d,f)
    return max_auc,max_neighbours,max_radius,np.max(dists_auc), np.max(sum_auc), np.max(mul_auc), x_hat, x_hat_train, neighbours_idx, neighbours_dist | 52157740a2507432b16ef007d541e1cc0b77b42e | 28,485
import os
import scipy
def load_ms_file(msfile, fieldid=None, datacolumn='RESIDUAL', method='physical', ddid=0, chunksize:int=10**7, bin_count_factor=1):
    """
    Load selected data from the measurement set (MS) file and convert to xarray
    DataArrays. Transform data for analysis.
    Parameters
    ----------
    msfile : string
        Location of the MS file.
    fieldid : int
        The unique identifier for the field that is to be analyzed (FIELD_ID
        in a Measurement Set). This information can be retrieved using the
        'listobs_daskms' class.
    datacolumn : string
        Name of the main-table data column to load (default 'RESIDUAL').
    method : string
        String representing the method for binning the UV plane. Choices are
        'physical' (default) to use the telescope resolution and field of
        view to set bin size and number, or statistical to compute the bins
        based on a statistical MSE estimate.
    ddid : int
        The unique identifier for the data description id. Default is zero.
        NOTE(review): this parameter appears unused -- it is shadowed by the
        per-dataset `ddid` inside the processing loop below. TODO confirm.
    chunksize : int
        Size of chunks to be used with Dask.
    bin_count_factor : float
        A factor to control binning if the automatic binning doesn't work
        right. A factor of 0.5 results in half the bins in u and v.
        default: 1.
    Returns
    -------
    ds_ind: xarray.Dataset
        The contents of the main table of the MS columns DATA and UVW flattened
        and scaled by wavelength.
    uvbins: xarray.Dataset
        A dataset containing flattened visibilities with a binned UV set for
        each observation point.
    """
    # Load Metadata from the Measurement Set
    msmd = listobs(msfile, 'DATA')
    # If no field was requested, proceed automatically only when the MS has
    # exactly one field; otherwise list the fields and make the caller choose.
    if fieldid==None:
        fields = msmd.get_fields(verbose=False)
        if(len(fields))==1:
            fieldid=0
        else:
            print("Error: Please choose a field from the list below.")
            list_fields(msmd)
            raise ValueError("Parameter, \'fieldid\', not set.")
    if not( (method=='statistical') or (method=='physical') ):
        raise ValueError('The \'method\' parameter should be either \'physical\' or \'statistical\'.')
    # Print uv-grid information to console
    title_string = "Compute UV bins"
    logger.info(f"{title_string:^80}")
    logger.info('_'*80)
    # First pass: load the main table grouped by FIELD_ID only, to get the
    # overall UV extent for the selected field.
    ms = get_data_column(msfile, datacolumn, group_cols=['FIELD_ID'])
    # Split the dataset and attributes
    try:
        ds_ms, ms_attrs = ms[0][fieldid], ms[1:]
    except:
        list_fields(msmd)
        raise ValueError(f"The fieldid value of {fieldid} is not a valid field in this dataset.")
    # Get spectral window table information
    spw_table_name = 'SPECTRAL_WINDOW'
    spw_col = 'CHAN_FREQ'
    spw = xds_from_table(f'{msfile}::{spw_table_name}', columns=['NUM_CHAN', spw_col], column_keywords=True, group_cols="__row__")
    ds_spw, spw_attrs = spw[0][0], spw[1]
    col_units = spw_attrs['CHAN_FREQ']['QuantumUnits'][0]
    logger.info(f"Selected column {spw_col} from {spw_table_name} with units {col_units}.")
    # Dataset dimensions used for logging and for reshaping below.
    nchan = int(ds_spw.NUM_CHAN.data.compute()[0])
    nrow = len(ds_ms.ROWID)
    ncorr = len(ds_ms.corr)
    # Base name of the MS for logging; handle a trailing path separator.
    relfile = os.path.basename(msfile)
    if not len(relfile):
        relfile = os.path.basename(msfile.rstrip('/'))
    logger.info(f'Processing dataset {relfile} with {nchan} channels and {nrow} rows.')
    # maxuv = compute_uv_from_ms(msfile, fieldid, ds_spw)
    # uvlimit = [0, maxuv],[0, maxuv]
    # print("UV limit is ", uvlimit)
    # Compute the min and max of the unscaled UV coordinates to calculate the grid boundaries
    bl_limits = [da.min(ds_ms.UVW[:,0]).compute(), da.max(ds_ms.UVW[:,0]).compute(), da.min(ds_ms.UVW[:,1]).compute(), da.max(ds_ms.UVW[:,1]).compute()]
    # Compute the min and max spectral window channel and convert to wavelength
    chan_wavelength_lim = np.array([[scipy.constants.c/np.max(spw_.CHAN_FREQ.data.compute()), scipy.constants.c/np.min(spw_.CHAN_FREQ.data.compute())] for spw_ in spw[0]])
    # Compute the scaled limits of the UV grid by dividing the UV boundaries by the channel boundaries
    uvlimit = [bl_limits[0]/np.min(chan_wavelength_lim), bl_limits[1]/np.min(chan_wavelength_lim)], [bl_limits[2]/np.min(chan_wavelength_lim), bl_limits[3]/np.min(chan_wavelength_lim)]
    logger.info(f"UV limit is {uvlimit[0][0]:.2f} - {uvlimit[0][1]:.2f}, {uvlimit[1][0]:.2f} - {uvlimit[1][1]:.2f}")
    if method=='statistical':
        # NOTE(review): `uval`, `vval` and `n` are not defined anywhere in this
        # scope, so this branch raises NameError as written. It looks like it
        # expects the scaled u/v coordinate arrays to have been computed first
        # (Scott's-rule-style bin width from their std) -- TODO confirm/fix.
        std_k = [float(uval.reduce(np.std)),
                 float(vval.reduce(np.std))]
        logger.info(f"The calculated STD of the UV distribution is {std_k[0]} by {std_k[1]} lambda.")
        binwidth = [x * (3.5/n**(1./4)) for x in std_k]
        logger.info(f"Using statistical estimation of bin widthds")
        logger.info(f"The calculated UV bin width is {binwidth[0]} {binwidth[1]} lambda.")
        bincount = [int((uvlimit[0][1] - uvlimit[0][0])/binwidth[0]),
                    int((uvlimit[1][1] - uvlimit[1][0])/binwidth[1])]
    if method=='physical':
        '''
        Calculate the field of view of the antenna, given the
        observing frequency and antenna diameter.
        '''
        antennas = msmd.get_antenna_list(verbose=False)
        diameter = np.average([a['diameter'] for a in antennas])
        # Largest wavelength gives the widest field of view (1.22 lambda/D).
        max_lambda = np.max(chan_wavelength_lim)
        fov = 1.22 * max_lambda/diameter
        # Bin width (in lambda) is the reciprocal of the FoV (in radians);
        # note it is truncated to an integer number of lambda here.
        binwidth = 1./fov
        binwidth = [int(binwidth), int(binwidth)]
        logger.info(f"Using physical estimation of bin widthds")
        logger.info(f"The calculated FoV is {np.rad2deg(fov):.2f} deg.")
        logger.info(f"The calculated UV bin width is {binwidth[0]} {binwidth[1]} lambda.")
        bincount = [int(bin_count_factor*(uvlimit[0][1] - uvlimit[0][0])/binwidth[0]),
                    int(bin_count_factor*(uvlimit[1][1] - uvlimit[1][0])/binwidth[1])]
    # Bin edges for the u and v axes.
    uvbins = [np.linspace( uvlimit[0][0], uvlimit[0][1], bincount[0] ),
              np.linspace( uvlimit[1][0], uvlimit[1][1], bincount[1] )]
    # Reload the Main table grouped by DATA_DESC_ID
    ms = get_data_column(msfile, datacolumn, group_cols=['FIELD_ID', 'DATA_DESC_ID'])
    #ms = xds_from_ms(msfile, columns=[datacolumn, 'UVW', 'FLAG'], group_cols=['FIELD_ID', 'DATA_DESC_ID'], table_keywords=True, column_keywords=True)
    ds_ms, ms_attrs = ms[0], ms[1:]
    dd = xds_from_table(f"{msfile}::DATA_DESCRIPTION")
    dd = dd[0].compute()
    ndd = len(dd.ROWID)
    nrows = 0
    # Use the DATA_DESCRIPTION table to process each subset of data (different ddids can have
    # a different number of channels). The subsets will be stacked after the channel scaling.
    ds_bindex = []
    logger.info(f"Creating a UV-grid with ({bincount[0]}, {bincount[1]}) bins with bin size {binwidth[0]:.1f} by {binwidth[1]:.1f} lambda.")
    logger.info(f"\nField, Data ID, SPW ID, Channels")
    for ds_ in ds_ms:
        fid = ds_.attrs['FIELD_ID']
        # NOTE(review): shadows the `ddid` function parameter (see docstring).
        ddid = ds_.attrs['DATA_DESC_ID']
        if fid != fieldid:
            logger.info(f"Skipping channel: {fid}.")
            continue
        # Wavelengths for this spectral window (first CHAN_FREQ row).
        spwid = int(dd.SPECTRAL_WINDOW_ID[ddid].data)
        chan_freq = spw[0][spwid].CHAN_FREQ.data[0]
        chan_wavelength = scipy.constants.c/chan_freq
        # chan_wavelength = chan_wavelength.squeeze()
        chan_wavelength = xr.DataArray(chan_wavelength, dims=['chan'])
        logger.info(f"{fieldid:<5} {ddid:<7} {spwid:<6} {len(chan_freq):<8}")
        # I think we can remove the W channel part of this to save some compute (ds_.UVW[:,2])
        uvw_chan = xr.concat([ds_.UVW[:,0] / chan_wavelength, ds_.UVW[:,1] / chan_wavelength, ds_.UVW[:,2] / chan_wavelength], 'uvw')
        uvw_chan = uvw_chan.transpose('row', 'chan', 'uvw')
        # Assign each (row, chan) sample to a u-bin and a v-bin.
        uval_dig = xr.apply_ufunc(da.digitize, uvw_chan[:,:,0], uvbins[0], dask='allowed', output_dtypes=[np.int32])
        vval_dig = xr.apply_ufunc(da.digitize, uvw_chan[:,:,1], uvbins[1], dask='allowed', output_dtypes=[np.int32])
        # ds_ind = xr.Dataset(data_vars = {'DATA': ds_[datacolumn], 'FLAG': ds_['FLAG'], 'UV': uvw_chan[:,:,:2]}, coords = {'U_bins': uval_dig.astype(np.int32), 'V_bins': vval_dig.astype(np.int32)})
        #
        # return ds_ind
        #
        # ds_ind = ds_ind.stack(newrow=['row', 'chan']).transpose('newrow', 'uvw', 'corr')
        # ds_ind = ds_ind.drop('ROWID')
        # ds_ind = ds_ind.chunk({'corr': 4, 'uvw': 2, 'newrow': chunksize})
        # ds_ind = ds_ind.unify_chunks()
        # Avoid calling xray.dataset.stack, as it leads to an intense multi-index shuffle
        # that does not seem to be dask-backed and runs on the scheduler.
        da_data = ds_[datacolumn].data.reshape(-1, ncorr)
        da_flag = ds_.FLAG.data.reshape(-1, ncorr)
        ds_ind = xr.Dataset(data_vars = {'DATA': (("newrow", "corr"), da_data), 'FLAG': (("newrow", "corr"), da_flag)}, coords = {'U_bins': (("newrow"), uval_dig.astype(np.int32).data.ravel()), 'V_bins': (("newrow"), vval_dig.astype(np.int32).data.ravel())})
        ds_ind = ds_ind.chunk({'corr': ncorr, 'newrow': chunksize})
        ds_ind = ds_ind.unify_chunks()
        nrows+=len(ds_ind.newrow)
        ds_bindex.append(ds_ind)
    logger.info(f"\nProcessed {ndd} unique data description IDs comprising {nrows} rows.")
    # Stack the per-ddid datasets along the flattened row dimension.
    ds_ind = xr.concat(ds_bindex, dim="newrow")
    ds_ind.attrs = {'Measurement Set': msfile, 'Field': fieldid}
    return ds_ind, uvbins
from typing import Optional
from typing import Dict
def get_web_optimized_params(
    src_dst,
    zoom_level_strategy: str = "auto",
    aligned_levels: Optional[int] = None,
    tms: morecantile.TileMatrixSet = morecantile.tms.get("WebMercatorQuad"),
) -> Dict:
    """Compute VRT parameters (crs, transform, width, height) for a WebOptimized COG.

    The raster is snapped to the tile grid of *tms* at the zoom level closest
    to its native resolution, optionally aligned to a coarser zoom level.
    """
    # Read bounds/transform in the TMS CRS, warping on the fly when needed.
    if src_dst.crs == tms.rasterio_crs:
        raster_bounds = src_dst.bounds
        coeffs = list(src_dst.transform)
    else:
        with WarpedVRT(src_dst, crs=tms.rasterio_crs) as warped:
            raster_bounds = warped.bounds
            coeffs = list(warped.transform)

    native_res = max(abs(coeffs[0]), abs(coeffs[4]))

    # Zoom level whose tile resolution best matches the raster resolution.
    max_zoom = tms.zoom_for_res(
        native_res, max_z=30, zoom_level_strategy=zoom_level_strategy,
    )

    # Zoom level used to align the raster origin with the tile grid.
    base_zoom = max_zoom - (aligned_levels or 0)

    # Origin: upper-left corner of the tile containing the raster's UL corner.
    ul_tile = tms._tile(raster_bounds[0], raster_bounds[3], base_zoom)
    west, _, _, north = tms.xy_bounds(ul_tile)

    # Output resolution matches the TMS resolution at max_zoom.
    out_res = tms._resolution(tms.matrix(max_zoom))
    out_transform = Affine(out_res, 0, west, 0, -out_res, north)

    # Extent: lower-right corner of the tile just past the raster's LR corner.
    lr_tile = tms._tile(raster_bounds[2], raster_bounds[1], base_zoom)
    east, _, _, south = tms.xy_bounds(
        morecantile.Tile(lr_tile.x + 1, lr_tile.y + 1, lr_tile.z)
    )

    out_width = max(1, round((east - west) / out_transform.a))
    out_height = max(1, round((south - north) / out_transform.e))

    return dict(
        crs=tms.rasterio_crs,
        transform=out_transform,
        width=out_width,
        height=out_height,
    )
from sage.misc.superseded import deprecation
def AlternatingSignMatrices_n(n):
    """
    Deprecated entry point kept so that old pickles of
    ``AlternatingSignMatrices_n`` can still be unpickled.
    EXAMPLES::
        sage: sage.combinat.alternating_sign_matrix.AlternatingSignMatrices_n(3)
        doctest:...: DeprecationWarning: this class is deprecated. Use sage.combinat.alternating_sign_matrix.AlternatingSignMatrices instead
        See http://trac.sagemath.org/14301 for details.
        Alternating sign matrices of size 3
    """
    # Warn once via the Sage deprecation machinery, then delegate.
    message = 'this class is deprecated. Use sage.combinat.alternating_sign_matrix.AlternatingSignMatrices instead'
    deprecation(14301, message)
    return AlternatingSignMatrices(n)
def _get_band(feature, name, size):
    """Extract one band from the raw feature dict, reshaped and normalized.

    Args:
        feature (obj): the feature as it was read from the files.
        name (str): the name of the band.
        size (int): the side length of the (square) band.

    Returns:
        tf.Tensor: the band reshaped to ``[size, size]`` and passed through
        ``_normalize_band``.
    """
    raw_band = feature[name]
    square_band = tf.reshape(raw_band, [size, size])
    return _normalize_band(square_band, name)
import functools
def rgetattr(obj, attr, default=sentinel):
    """Recursive ``getattr`` over a dotted attribute path.

    ``rgetattr(x, "a.b.c")`` is equivalent to ``x.a.b.c``. When *default* is
    given, it is used at every step of the chain, mirroring ``getattr``.
    (Adapted from https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects)
    """
    if default is sentinel:
        accessor = getattr
    else:
        def accessor(target, name):
            return getattr(target, name, default)
    current = obj
    for part in attr.split('.'):
        current = accessor(current, part)
    return current
def unpack_dims(data, vlabels):
    """
    Unpacks an interleaved 4th dimension in an imaging data array.
    Parameters
    ----------
    data : np array
        a numpy array of data. Should have 3 spatial dimensions followed by
        one nonspatial dimension of interleaved data
    vlabels : pandas DataFrame
        a dataframe indicating the label type for each slice in the 4th dimension.
        Each column specifies a label type, and each row gives the labeling combination for
        each index in the 4th dimension. The first row corresponds to index 0 in the 4th
        dimension, and so on (rows are matched positionally, so a non-default
        DataFrame index is fine). Additionally, each column should be gapless:
        there should be at least one entry for each integer between the min and
        max of a column, otherwise you will get blank dimensions in the
        unpacked data.
        The vlabels are returned as part of parrec_to_nifti. They can also be found as
        an ordered dict with parrecobject.header.get_volume_labels() (but you must
        convert to a DataFrame). A NiFTi using parrec_to_nifti may also have the labels
        saved as an Excel file.
        Note that changing the order of the columns of vlabels changes the
        order of the returned dimensions of the output.
    Returns
    -------
    new_data : np array
        The data with the 4th dimension unpacked into n additional dimensions, where
        n is the number of columns in vlabels. For example, if vlabels has three columns,
        then the 4th dimension of the original data will become the 4th, 5th and 6th dimensions
        in new_data. The 1st column in vlabels will be stored as the 4th dimension, the 2nd
        column in vlabels will be stored as the 5th dimension, and so on.
        The dtype of the input array is preserved.
    """
    # Zero-index each label column so labels can be used directly as indices.
    # (Column-wise subtraction creates a new frame; vlabels is not mutated.)
    adj_labels = vlabels - vlabels.min()
    # One extra output axis per label column, sized by the label range.
    extra_dim_sizes = [int(m) + 1 for m in adj_labels.max()]
    out_shape = list(data.shape[:3]) + extra_dim_sizes
    # Preserve the input dtype (the original implementation silently upcast
    # everything to float64 via np.zeros' default dtype).
    new_data = np.zeros(out_shape, dtype=data.dtype)
    # Iterate positionally so a non-default DataFrame index cannot desynchronize
    # the label row from the slice it describes (the original used iterrows(),
    # whose `i` is the index label, not the position).
    label_rows = adj_labels.to_numpy()
    for pos in range(label_rows.shape[0]):
        target_index = (...,) + tuple(int(v) for v in label_rows[pos])
        new_data[target_index] = data[:, :, :, pos]
    return new_data
def bash_this(s):
    """Return a shell fragment that runs *s* inside a fresh bash process.

    Running in a child bash avoids inheriting strange options set in the
    topmost shell (POSIX-compatibility mode, ``-e``, and the like).
    """
    return 'bash -c ' + shell_quote(s)
import re
def filter_markdown(md, **kwargs):
    """Tag embedded ``<div>`` elements with ``markdown="1"``.

    Python-Markdown only processes markdown inside HTML block elements that
    carry ``markdown="1"`` (and markdown.extensions.extra has a bug that
    garbles code fences inside such elements otherwise), so every ``<div>``
    that does not already declare a ``markdown=`` attribute gets one.
    """
    def tag_div(match):
        opening = match.group(0)
        if "markdown=" in opening:
            return opening
        return match.group(1) + ' markdown="1">'

    logger.info("... adding markdown class to embedded divs...")
    return re.sub(r"(<div[^>]*)>", tag_div, md)
def multiply_tensors(tensor1, tensor2):
    """Multiplies two tensors in a matrix-like multiplication based on the
    last dimension of the first tensor and first dimension of the second
    tensor.
    Inputs:
        tensor1: A tensor of shape [a, b, c, .., x]
        tensor2: A tensor of shape [x, d, e, f, ...]
    Outputs:
        A tensor of shape [a, b, c, ..., d, e, f, ...]
    """
    sh1 = tf.shape(tensor1)
    sh2 = tf.shape(tensor2)
    len_sh1 = len(tensor1.get_shape())
    len_sh2 = len(tensor2.get_shape())

    # Flatten tensor1's leading dims into one: [a*b*c*..., x].
    prod1 = tf.constant(1, dtype=tf.int32)
    sh1_list = []
    for z in range(len_sh1 - 1):
        dim = sh1[z]
        prod1 *= dim
        sh1_list.append(dim)

    # Flatten tensor2's trailing dims into one: [x, d*e*f*...].
    # BUG FIX: the dims must be collected in forward order [d, e, f, ...].
    # The original iterated sh2[len_sh2-1-z] (reversed), so the final reshape
    # labeled the trailing axes [..., f, e, d] while the matmul result is laid
    # out row-major as [..., d, e, f], scrambling the output whenever the
    # trailing dims differ.
    prod2 = tf.constant(1, dtype=tf.int32)
    sh2_list = []
    for z in range(1, len_sh2):
        dim = sh2[z]
        prod2 *= dim
        sh2_list.append(dim)

    reshape_1 = tf.reshape(tensor1, [prod1, sh1[len_sh1 - 1]])
    reshape_2 = tf.reshape(tensor2, [sh2[0], prod2])
    # [prod1, x] @ [x, prod2] -> [prod1, prod2], then restore both dim groups.
    result = tf.reshape(tf.matmul(reshape_1, reshape_2), sh1_list + sh2_list)
    assert len(result.get_shape()) == len_sh1 + len_sh2 - 2
    return result
def get_cumulative_collection():
    """Return the MongoDB collection holding cumulative equipment data."""
    mongo = MongoClient(connection_string)
    db = mongo.get_database(database)
    return db.get_collection('equipment_cumulative')
import re
def self_closing(xml_str, isSelfClosing):
    """Collapse empty element pairs into self-closing form.

    When *isSelfClosing* is the string ``"true"``, every ``<tag ...></tag>``
    pair with no content between the tags is rewritten as ``<tag .../>``;
    otherwise *xml_str* is returned unchanged.

    :param xml_str: the XML document as a string
    :param isSelfClosing: ``"true"`` to enable the rewrite (string flag,
        matching the original API)
    :return: the (possibly rewritten) XML string
    """
    if isSelfClosing == "true":
        # Match an opening tag (name + optional attributes) immediately
        # followed by its OWN closing tag, via the \1 backreference.
        # BUG FIX: the original pattern r"<(.*)>(</.*>)" was greedy and
        # unanchored, so it swallowed nested content (e.g. it turned
        # "<root><b></b></root>" into "<root><b/>") and also collapsed
        # mismatched tag pairs.
        # Group 2 may be unmatched; re.sub substitutes '' for it (Py >= 3.5).
        return re.sub(r"<([^\s<>/]+)(\s[^<>]*)?></\1>", r"<\1\2/>", xml_str)
    return xml_str
def setup_dense_net(
        num_predictors, neuron_counts=DEFAULT_NEURON_COUNTS,
        dropout_rates=DEFAULT_DROPOUT_RATES,
        inner_activ_function_name=DEFAULT_INNER_ACTIV_FUNCTION_NAME,
        inner_activ_function_alpha=DEFAULT_INNER_ACTIV_FUNCTION_ALPHA,
        output_activ_function_name=DEFAULT_OUTPUT_ACTIV_FUNCTION_NAME,
        output_activ_function_alpha=DEFAULT_OUTPUT_ACTIV_FUNCTION_ALPHA,
        l1_weight=DEFAULT_L1_WEIGHT, l2_weight=DEFAULT_L2_WEIGHT,
        use_batch_normalization=True):
    """Sets up (but does not train) dense neural network for binary classifn.
    This method sets up the architecture, loss function, and optimizer.
    D = number of dense layers
    :param num_predictors: Number of input (predictor) variables.
    :param neuron_counts: length-D numpy array with number of neurons for each
        dense layer. The last value in this array is the number of target
        variables (predictands).
    :param dropout_rates: length-D numpy array with dropout rate for each dense
        layer. To turn off dropout for a given layer, use NaN or a non-positive
        number.
    :param inner_activ_function_name: Name of activation function for all inner
        (non-output) layers.
    :param inner_activ_function_alpha: Alpha (slope parameter) for
        activation function for all inner layers. Applies only to ReLU and eLU.
    :param output_activ_function_name: Same as `inner_activ_function_name` but
        for output layer.
    :param output_activ_function_alpha: Same as `inner_activ_function_alpha` but
        for output layer.
    :param l1_weight: Weight for L_1 regularization.
    :param l2_weight: Weight for L_2 regularization.
    :param use_batch_normalization: Boolean flag. If True, will use batch
        normalization after each inner layer.
    :return: model_object: Untrained instance of `keras.models.Model`.
    """
    # TODO(thunderhoser): Allow for tasks other than binary classification.
    assert neuron_counts[-1] == 1

    input_layer_object = keras.layers.Input(shape=(num_predictors,))
    regularizer_object = _get_weight_regularizer(
        l1_weight=l1_weight, l2_weight=l2_weight
    )

    num_layers = len(neuron_counts)
    layer_object = input_layer_object

    for k in range(num_layers):
        is_output_layer = k == num_layers - 1

        layer_object = _get_dense_layer(
            num_output_units=neuron_counts[k],
            weight_regularizer=regularizer_object
        )(layer_object)

        # The output layer gets its own activation function; all inner layers
        # share the inner one.
        if is_output_layer:
            activ_name = output_activ_function_name
            activ_alpha = output_activ_function_alpha
        else:
            activ_name = inner_activ_function_name
            activ_alpha = inner_activ_function_alpha

        layer_object = _get_activation_layer(
            function_name=activ_name, slope_param=activ_alpha
        )(layer_object)

        # Dropout and batch normalization apply to inner layers only.
        # (NaN or non-positive dropout rates fail the > 0 test, disabling
        # dropout for that layer.)
        if not is_output_layer and dropout_rates[k] > 0:
            layer_object = _get_dropout_layer(
                dropout_fraction=dropout_rates[k]
            )(layer_object)

        if not is_output_layer and use_batch_normalization:
            layer_object = _get_batch_norm_layer()(layer_object)

    model_object = keras.models.Model(
        inputs=input_layer_object, outputs=layer_object
    )
    model_object.compile(
        loss=keras.losses.binary_crossentropy,
        optimizer=keras.optimizers.Adam(),
        metrics=METRIC_FUNCTION_LIST
    )
    model_object.summary()
    return model_object
def init_weights_he(nin, nout, nd, ny):
    """He initialization for ReLU networks.

    Draws an ``(nd, ny)`` weight matrix from a zero-mean normal distribution
    with variance Var(W) = 2/nin, following He et al.
    (``nout`` is accepted for interface compatibility but not used here.)
    """
    std = np.sqrt(2 / nin)
    return np.random.normal(loc=0, scale=std, size=(nd, ny))
def get_all_message_template():
    """Return (all message templates, "success"), or (None, error message)."""
    try:
        templates = MessageTemplates.objects.all()
    except Exception as exc:
        return None, str(exc)
    return templates, "success"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.