max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
src/dict/months.py | sungheeyun/PythonLectures | 0 | 12761051 | <gh_stars>0
import json
if __name__ == "__main__":
    # Demo of basic dict operations, using month-number -> month-name data
    # loaded from a JSON file in the working directory.
    with open("months.json") as fid:
        months_ = json.load(fid)
    # JSON object keys are always strings; rebuild the dict with int keys.
    months = dict()
    for key, value in months_.items():
        months[int(key)] = value
    # .keys() returns a view object; wrap it in list() for a plain list.
    print("The dictionary contains the following keys: ", months.keys())
    print("The dictionary contains the following keys: ", list(months.keys()))
    print(months[1])
    # pop() removes the entry and returns its value.
    print(months.pop(5))
    print(months)
    # Assigning to a missing key inserts a new entry.
    months[5] = "MAY"
    print(months)
    print(list(months.keys()))
    # Assigning to an existing key overwrites the value.
    months[1] = "Jan"
    print(months)
    print(sorted(months.keys()))
    print(months)
    # Iterating a dict yields its keys.
    for key in months:
        print(key, months[key])
    print("---- KEY/VALUE PAIR ----")
    # items() yields (key, value) pairs; sorted() orders them by key.
    for key, value in sorted(months.items()):
        print("months[", key, "] =", value)
| 3.265625 | 3 |
set_edge_weights.py | dsanders11/chromium-include-cleanup | 3 | 12761052 | #!/usr/bin/env python3
import argparse
import csv
import logging
import os
import sys
import typing
from typing import Dict, Iterator, Optional, Tuple
from common import IncludeChange
from include_analysis import ParseError, parse_raw_include_analysis_output
from utils import (
get_include_analysis_edges_centrality,
get_include_analysis_edge_prevalence,
get_include_analysis_edge_sizes,
load_config,
)
def set_edge_weights(
    changes_file: typing.TextIO, edge_weights: Dict[str, Dict[str, int]]
) -> Iterator[Tuple[IncludeChange, int, str, str, Optional[int]]]:
    """Set edge weights in the include changes output.

    Args:
        changes_file: CSV stream of include changes; each row starts with
            (change type value, line, filename, header), extra columns ignored.
        edge_weights: Mapping of filename -> header -> edge weight.

    Yields:
        The input rows as tuples, with the edge weight appended for removal
        changes whose (filename, header) edge is present in ``edge_weights``.
    """
    change_type_value: str

    for change_type_value, line, filename, header, *_ in csv.reader(changes_file):
        change_type = IncludeChange.from_value(change_type_value)
        change = (line, filename, header)

        if change_type is IncludeChange.REMOVE:
            # For now, only removes have edge weights.
            # Bug fix: these warnings previously logged the literal text
            # "(unknown)" instead of interpolating the filename, which made
            # them useless for diagnosing which edge was skipped.
            if filename not in edge_weights:
                logging.warning(f"Skipping filename not found in weights, file may be removed: {filename}")
            elif header not in edge_weights[filename]:
                logging.warning(f"Skipping edge not found in weights: {filename},{header}")
            else:
                change = change + (edge_weights[filename][header],)
        elif change_type is IncludeChange.ADD:
            # TODO - Some metric for how important they are to add, if there
            #        is one? Maybe something like the ratio of occurrences to
            #        direct includes, suggesting it's used a lot, but has lots
            #        of missing includes? That metric wouldn't really work well
            #        since leaf headers of commonly included headers would end
            #        up with a high ratio, despite not really being important to
            #        add anywhere. Maybe there's no metric here and instead an
            #        analysis is done at the end to rank headers by how many
            #        suggested includes there are for that file.
            pass

        # NOTE: the first element is the raw change type value string, not the
        # IncludeChange enum, matching what downstream CSV writing expects.
        full_change: Tuple[IncludeChange, int, str, str, Optional[int]] = (change_type_value, *change)
        yield full_change
def main():
    """Command-line entry point.

    Parses arguments, computes edge weights for the chosen metric, and streams
    the re-weighted include changes as CSV to stdout.

    Returns:
        Process exit code: 0 on success, 2 if the include analysis output
        could not be parsed.
    """
    parser = argparse.ArgumentParser(description="Set edge weights in include changes output")
    parser.add_argument(
        "changes_file",
        type=argparse.FileType("r"),
        help="CSV of include changes to set edge weights for.",
    )
    parser.add_argument(
        "include_analysis_output",
        type=argparse.FileType("r"),
        help="The include analysis output to use.",
    )
    parser.add_argument(
        "--metric",
        choices=["centrality", "input_size", "prevalence"],
        default="input_size",
        help="Metric to use for edge weights.",
    )
    parser.add_argument("--config", help="Name of config file to use.")
    parser.add_argument("--verbose", action="store_true", default=False, help="Enable verbose logging.")
    args = parser.parse_args()

    try:
        include_analysis = parse_raw_include_analysis_output(args.include_analysis_output.read())
    except ParseError as e:
        message = str(e)
        print("error: Could not parse include analysis output file")
        if message:
            print(message)
        return 2

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    config = None
    if args.config:
        config = load_config(args.config)

    csv_writer = csv.writer(sys.stdout)

    # Pick the edge weight computation matching the requested metric
    # (choices= above guarantees one of these branches is taken).
    if args.metric == "input_size":
        edge_weights = get_include_analysis_edge_sizes(include_analysis, config.includeDirs if config else None)
    elif args.metric == "centrality":
        edge_weights = get_include_analysis_edges_centrality(include_analysis, config.includeDirs if config else None)
    elif args.metric == "prevalence":
        edge_weights = get_include_analysis_edge_prevalence(include_analysis, config.includeDirs if config else None)

    try:
        for row in set_edge_weights(args.changes_file, edge_weights):
            csv_writer.writerow(row)
            # Flush per row so a downstream pipe consumer (e.g. `head`) sees
            # output promptly and a broken pipe is detected here.
            sys.stdout.flush()
    except BrokenPipeError:
        # Point stdout at devnull so the interpreter's shutdown flush does not
        # complain about the already-broken pipe, then exit with an error.
        devnull = os.open(os.devnull, os.O_WRONLY)
        os.dup2(devnull, sys.stdout.fileno())
        sys.exit(1)

    return 0
if __name__ == "__main__":
    # Exit with main()'s return code; swallow Ctrl-C without a traceback.
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        pass  # Don't show the user anything
| 2.453125 | 2 |
src/tools/nuscenes-devkit/utils/color_map.py | jie311/TraDeS | 1,284 | 12761053 | from typing import Dict, Tuple
def get_colormap() -> Dict[str, Tuple[int, int, int]]:
    """Return the mapping from nuScenes class names to their RGB colors."""
    return {  # RGB.
        "noise": (0, 0, 0),  # Black.
        "animal": (70, 130, 180),  # Steelblue
        "human.pedestrian.adult": (0, 0, 230),  # Blue
        "human.pedestrian.child": (135, 206, 235),  # Skyblue,
        "human.pedestrian.construction_worker": (100, 149, 237),  # Cornflowerblue
        "human.pedestrian.personal_mobility": (219, 112, 147),  # Palevioletred
        "human.pedestrian.police_officer": (0, 0, 128),  # Navy,
        "human.pedestrian.stroller": (240, 128, 128),  # Lightcoral
        "human.pedestrian.wheelchair": (138, 43, 226),  # Blueviolet
        "movable_object.barrier": (112, 128, 144),  # Slategrey
        "movable_object.debris": (210, 105, 30),  # Chocolate
        "movable_object.pushable_pullable": (105, 105, 105),  # Dimgrey
        "movable_object.trafficcone": (47, 79, 79),  # Darkslategrey
        "static_object.bicycle_rack": (188, 143, 143),  # Rosybrown
        "vehicle.bicycle": (220, 20, 60),  # Crimson
        "vehicle.bus.bendy": (255, 127, 80),  # Coral
        "vehicle.bus.rigid": (255, 69, 0),  # Orangered
        "vehicle.car": (255, 158, 0),  # Orange
        "vehicle.construction": (233, 150, 70),  # Darksalmon
        "vehicle.emergency.ambulance": (255, 83, 0),
        "vehicle.emergency.police": (255, 215, 0),  # Gold
        "vehicle.motorcycle": (255, 61, 99),  # Red
        "vehicle.trailer": (255, 140, 0),  # Darkorange
        "vehicle.truck": (255, 99, 71),  # Tomato
        "flat.driveable_surface": (0, 207, 191),  # nuTonomy green
        "flat.other": (175, 0, 75),
        "flat.sidewalk": (75, 0, 75),
        "flat.terrain": (112, 180, 60),
        "static.manmade": (222, 184, 135),  # Burlywood
        "static.other": (255, 228, 196),  # Bisque
        "static.vegetation": (0, 175, 0),  # Green
        "vehicle.ego": (255, 240, 245)
    }
| 3.484375 | 3 |
scripts/audit_service_accounts.py | ONSdigital/gcp-role-checker | 0 | 12761054 | import argparse
import json
import os
import subprocess
import sys
# Repository root: two directory levels up from this script.
top_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))

parser = argparse.ArgumentParser(description='''
Script to generate a list of service accounts along with their privilege levels
''')
parser.add_argument('org',
                    help='The organization resource ID I.e. organizations/999999999999')
parser.add_argument('--project_labels',
                    help='A set of labels to filter projects on \n' +
                    'I.e. env:dev,project:foo')
parser.add_argument('--data_dir', help='location of raw JSON data', default='data')
parser.add_argument('--limit', default=10, type=int,
                    help='the max number of accounts to return')
parser.add_argument('--member_type',
                    choices=['service_account','user_account','group'],
                    help='the type of member to filter results by')
# NOTE(review): the help text describes the opposite of what --skip_collect
# does — setting the flag SKIPS gathering data from the Google APIs.
parser.add_argument('--skip_collect', action='store_true',
                    help='gather data from Google APIs')
# The choices below must match function names defined in this module; main()
# resolves the chosen name via getattr on the module.
parser.add_argument('--sort_type', default='total_sum',
                    choices=['total_sum', 'top_sum'],
                    help='the sort function used to order the members')
def main():
    """Collect IAM data (unless skipped), rank members by privilege, print the top N."""
    args = parser.parse_args()

    # Refresh the raw JSON dump from the Google APIs unless told otherwise.
    if not args.skip_collect:
        gather_data(args.org, args.project_labels, args.data_dir)

    members_path = os.path.join(args.data_dir, 'members.json')
    with open(members_path) as members_file:
        members = json.load(members_file)

    candidates = filter_service_accounts(members, args.member_type)

    # Resolve the sort key function (total_sum / top_sum) by name on this module.
    sort_key = getattr(sys.modules[__name__], args.sort_type)
    ranked = sorted(candidates, key=sort_key, reverse=True)

    print_permissions(ranked[:args.limit])
def print_permissions(permissions):
    """Pretty-print each (email, data) pair with a per-resource role summary."""
    for email, data in permissions:
        print("\n{}:".format(email))
        for resource in data['resources']:
            summaries = []
            for role in resource['roles']:
                summaries.append(
                    "{} ({} permissions)".format(role['name'], role['permission_count'])
                )
            print(" {}: {}".format(resource['name'], ",".join(summaries)))
def gather_data(org, project_labels, data_dir):
    """Invoke the Go checker tool to dump IAM data for *org* into *data_dir*."""
    command = [
        "go",
        "run",
        f'{top_path}/cmd/checker/main.go',
        f'-org={org}',
    ]
    if project_labels:
        command.append(f'-project_labels={project_labels}')
    if data_dir:
        command.append(f'-data={data_dir}')
    # check=True: raise CalledProcessError if the collector exits non-zero.
    subprocess.run(command, check=True)
def filter_service_accounts(members, member_type):
    """Lazily yield (email, member) pairs whose email matches *member_type*.

    A *member_type* of None uses an empty prefix and therefore matches all
    members.
    """
    prefixes = {
        'service_account': 'serviceAccount:',
        'user_account': 'user:',
        'group': 'group:',
        None: ''
    }
    for member_email, member in members.items():
        if member_email.startswith(prefixes[member_type]):
            yield (member_email, member)
def total_sum(data):
    """Sort key: total permission count across all of a member's resources."""
    _, member = data
    total = 0
    for resource in member['resources']:
        for role in resource['roles']:
            total += role['permission_count']
    return total
def top_sum(data):
    """Sort key: permission count of the member's single most-privileged resource."""
    _, member = data
    per_resource = (
        sum(role['permission_count'] for role in resource['roles'])
        for resource in member['resources']
    )
    return max(per_resource)
if __name__ == '__main__':
    # Run the audit when executed as a script.
    main()
bayesnet/sampler/hmc.py | ctgk/bayes | 21 | 12761055 | import random
import numpy as np
from bayesnet.network import Network
def hmc(model, call_args, parameter=None, sample_size=100, step_size=1e-3, n_step=10):
    """
    Hamiltonian Monte Carlo sampling aka Hybrid Monte Carlo sampling

    Parameters
    ----------
    model : Network
        bayesian network
    call_args : tuple or dict
        observations of the model (positional or keyword arguments)
    parameter : dict
        dict of parameter to be sampled; must reference parameters that are
        defined in ``model.parameter``. Defaults to all model parameters.
    sample_size : int
        number of samples to be generated
    step_size : float
        update size of parameters
    n_step : int
        number of updation of parameters per proposal

    Returns
    -------
    sample : dict of list of np.ndarray
        samples from the model given observations
    """
    # Validate argument types up front so errors surface before sampling.
    if not isinstance(model, Network):
        raise TypeError("model must be Network object")
    if not isinstance(sample_size, int):
        raise TypeError(f"sample_size must be int, not {type(sample_size)}")
    if not isinstance(step_size, (int, float)):
        raise TypeError(f"step_size must be float, not {type(step_size)}")
    if not isinstance(n_step, int):
        raise TypeError(f"n_step must be int, not {type(n_step)}")

    def run_model():
        # Re-run the forward pass on the observations so gradients can be
        # recomputed for the current parameter values.
        model.clear()
        if isinstance(call_args, tuple):
            model(*call_args)
        elif isinstance(call_args, dict):
            model(**call_args)
        else:
            raise TypeError("call_args must be tuple or dict")

    sample = dict()     # accumulated samples per parameter name
    previous = dict()   # parameter values before the proposal (for rejection)
    velocity = dict()   # auxiliary momentum variables
    if parameter is not None:
        if not isinstance(parameter, dict):
            raise TypeError("parameter must be dict")
        for key, p in parameter.items():
            # Identity check: the supplied objects must be the model's own
            # parameters, otherwise updating them would have no effect.
            if p is not model.parameter[key]:
                raise ValueError("parameter must be defined in the model")
        variable = parameter
    else:
        variable = model.parameter
    for key in variable:
        sample[key] = []

    for _ in range(sample_size):
        # Gradient of the log-posterior at the current position.
        run_model()
        log_posterior = model.log_pdf()
        log_posterior.backward()
        kinetic_energy = 0
        for key, v in variable.items():
            previous[key] = v.value
            # Fresh Gaussian momentum for each proposal.
            velocity[key] = np.random.normal(size=v.shape)
            kinetic_energy += 0.5 * np.square(velocity[key]).sum()
            # Initial half-step momentum update, then a full position step.
            velocity[key] += 0.5 * v.grad * step_size
            v.value = v.value + step_size * velocity[key]
        hamiltonian = kinetic_energy - log_posterior.value

        # Leapfrog-style integration: alternate momentum and position updates.
        for _ in range(n_step):
            run_model()
            model.log_pdf().backward()
            for key, v in variable.items():
                velocity[key] += step_size * v.grad
                v.value += step_size * velocity[key]

        # Final half-step momentum update and new Hamiltonian.
        run_model()
        log_posterior_new = model.log_pdf()
        log_posterior_new.backward()
        kinetic_energy_new = 0
        for key, v in velocity.items():
            v += 0.5 * step_size * variable[key].grad
            kinetic_energy_new += 0.5 * np.square(v).sum()
        hamiltonian_new = kinetic_energy_new - log_posterior_new.value

        # Metropolis accept/reject on the change in Hamiltonian; on rejection
        # restore the previous position and record it as the sample.
        accept_proba = np.exp(hamiltonian - hamiltonian_new)
        if random.random() < accept_proba:
            for key, v in variable.items():
                sample[key].append(v.value)
        else:
            for key, v in variable.items():
                v.value = previous[key]
                sample[key].append(v.value)
    return sample
| 2.703125 | 3 |
qprotocal/utils/xbin.py | gorgiaxx/qq-protocal-library | 109 | 12761056 | #!/usr/bin/env python
import binascii
import hashlib
import random
class Xbin(object):
    """Small helper for generating random hex strings and MD5 digests."""

    # def __init__(self):

    def get_random_hex(self, length=1, is_bytes=0):
        """Return *length* random bytes rendered as a hex string.

        Args:
            length: Number of random bytes to generate.
            is_bytes: When truthy, return raw ``bytes`` instead of a hex string.
        """
        random_hex = ''
        for _ in range(0, length):
            # Bug fix: randrange(0, 255) excluded 0xff; the upper bound is
            # exclusive, so 256 is needed to cover all byte values.
            random_hex += "{:0>2x}".format(random.randrange(0, 256))
        if is_bytes:
            return bytes().fromhex(random_hex)
        else:
            return random_hex

    def get_md5_value(self, src, is_bytes=0):
        """Return the MD5 digest of *src* (bytes-like).

        Bug fix: the original definition was missing ``self``, so any
        instance call passed the instance itself as ``src`` and crashed in
        ``md5.update``.

        Args:
            src: Bytes-like data to hash.
            is_bytes: When truthy, return the digest as raw ``bytes``
                instead of a hex string.
        """
        md5 = hashlib.md5()
        md5.update(src)
        md5_digest = md5.hexdigest()
        if is_bytes:
            return bytes().fromhex(md5_digest)
        else:
            return md5_digest
| 3.171875 | 3 |
sacredbrowser/StateModels.py | michaelwand/SacredBrowser | 13 | 12761057 | # This file contains objects which represent QT models to encapsulate browser state
# (see BrowserState.py) which is NOT contained in any database (for that, see DbEntries.py and
# DbModel.py). Note that the models do not automatically react to
# any changes originating reloading the database entries, the controller must call the respective functions.
# Note that all these models are NOT editable, they only change by outside command.
from PyQt5 import QtCore, QtGui, QtWidgets
from . import BrowserState
class InvisibleFieldsModel(QtCore.QAbstractListModel):
    """Read-only list model exposing the invisible fields of a
    BrowserState.Fields object. The model never edits the data itself; it
    merely mirrors change notifications from the Fields object into the Qt
    begin*/end* model-change protocol."""

    def __init__(self,fields):
        super().__init__()
        self._fields = fields # an instance of BrowserState.Fields
        # Pair of signals: "to_be_changed" fires before the mutation (so we
        # can call the matching begin* method), "changed" fires after.
        self._fields.invisible_fields_to_be_changed.connect(self.slot_invisible_fields_to_be_changed)
        self._fields.invisible_fields_changed.connect(self.slot_invisible_fields_changed)

    def rowCount(self,idx):
        assert not idx.isValid() # we only have top level data
        return self._fields.invisible_fields_count()

    def data(self,index,role):
        row = index.row() # only relevant thing
        if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.ToolTipRole:
            return self._fields.get_invisible_fields()[row][1] # remove the type
        else:
            return None

    def slot_invisible_fields_to_be_changed(self,change_data):
        # for the interpretation of change_data see BrowserState.py
        # Each begin* call here must be matched by the corresponding end*
        # call in slot_invisible_fields_changed below.
        if change_data.tp == BrowserState.Fields.ChangeType.Reset:
            self.beginResetModel()
        elif change_data.tp == BrowserState.Fields.ChangeType.Content:
            pass
        elif change_data.tp == BrowserState.Fields.ChangeType.Insert:
            # info = (first row, row count); Qt wants inclusive last row.
            first = change_data.info[0]
            last = change_data.info[0] + change_data.info[1] - 1
            self.beginInsertRows(QtCore.QModelIndex(),first,last)
        elif change_data.tp == BrowserState.Fields.ChangeType.Remove:
            first = change_data.info[0]
            last = change_data.info[0] + change_data.info[1] - 1
            self.beginRemoveRows(QtCore.QModelIndex(),first,last)

    def slot_invisible_fields_changed(self,new_fields,change_data):
        # for the interpretation of change_data see BrowserState.py
        if change_data.tp == BrowserState.Fields.ChangeType.Reset:
            self.endResetModel()
        elif change_data.tp == BrowserState.Fields.ChangeType.Content:
            # info is the list of changed row indices; emit per-row updates.
            for row in change_data.info:
                idx = self.index(row,0,QtCore.QModelIndex())
                self.dataChanged.emit(idx,idx)
        elif change_data.tp == BrowserState.Fields.ChangeType.Insert:
            self.endInsertRows()
        elif change_data.tp == BrowserState.Fields.ChangeType.Remove:
            self.endRemoveRows()
class VisibleFieldsModel(QtCore.QAbstractListModel):
    """Read-only list model exposing the visible fields of a
    BrowserState.Fields object. Mirrors the Fields change notifications into
    the Qt begin*/end* model-change protocol; structurally parallel to
    InvisibleFieldsModel."""

    def __init__(self,fields):
        super().__init__()
        self._fields = fields # an instance of BrowserState.Fields
        # "to_be_changed" fires before the mutation, "changed" after.
        self._fields.visible_fields_to_be_changed.connect(self.slot_visible_fields_to_be_changed)
        self._fields.visible_fields_changed.connect(self.slot_visible_fields_changed)

    def rowCount(self,idx):
        assert not idx.isValid() # we only have top level data
        return self._fields.visible_fields_count()

    def data(self,index,role):
        row = index.row() # only relevant thing
        if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.ToolTipRole:
            return self._fields.get_visible_fields()[row][1]
        else:
            return None

    def slot_visible_fields_to_be_changed(self,change_data):
        # for the interpretation of change_data see BrowserState.py
        # Each begin* call here is matched by the end* call in
        # slot_visible_fields_changed below.
        if change_data.tp == BrowserState.Fields.ChangeType.Reset:
            self.beginResetModel()
        elif change_data.tp == BrowserState.Fields.ChangeType.Content:
            pass
        elif change_data.tp == BrowserState.Fields.ChangeType.Insert:
            # info = (first row, row count); Qt wants inclusive last row.
            first = change_data.info[0]
            last = change_data.info[0] + change_data.info[1] - 1
            self.beginInsertRows(QtCore.QModelIndex(),first,last)
        elif change_data.tp == BrowserState.Fields.ChangeType.Remove:
            first = change_data.info[0]
            last = change_data.info[0] + change_data.info[1] - 1
            self.beginRemoveRows(QtCore.QModelIndex(),first,last)

    def slot_visible_fields_changed(self,new_fields,change_data):
        # for the interpretation of change_data see BrowserState.py
        if change_data.tp == BrowserState.Fields.ChangeType.Reset:
            self.endResetModel()
        elif change_data.tp == BrowserState.Fields.ChangeType.Content:
            # info is the list of changed row indices; emit per-row updates.
            for row in change_data.info:
                idx = self.index(row,0,QtCore.QModelIndex())
                self.dataChanged.emit(idx,idx)
        elif change_data.tp == BrowserState.Fields.ChangeType.Insert:
            self.endInsertRows()
        elif change_data.tp == BrowserState.Fields.ChangeType.Remove:
            self.endRemoveRows()
| 2.125 | 2 |
jp.atcoder/joi2006yo/joi2006yo_a/11171031.py | kagemeka/atcoder-submissions | 1 | 12761058 | <gh_stars>1-10
import sys
# Read all of stdin as integers: the first value (n) is presumably the number
# of (a, b) pairs that follow — it is not otherwise used; the rest are the
# flattened pair values.
n, *ab = map(int, sys.stdin.read().split())
# Group the flat list into consecutive (a, b) pairs: zipping the same
# iterator with itself consumes two items per output tuple.
ab = zip(*[iter(ab)] * 2)


def main():
    # Running totals for the two players.
    sa = sb = 0
    for a, b in ab:
        if a > b:
            # a wins the round and takes both values.
            sa += a + b
        elif a < b:
            # b wins the round and takes both values.
            sb += a + b
        else:
            # Draw: each side keeps its own value.
            sa += a
            sb += b
    return sa, sb


if __name__ == '__main__':
    ans = main()
    print(*ans, sep=' ')
| 2.84375 | 3 |
creme/linear_model/glm.py | Leo-VK/creme | 0 | 12761059 | import collections
import math
import numbers
import numpy as np
from .. import base
from .. import optim
from .. import utils
__all__ = [
'LinearRegression',
'LogisticRegression'
]
class GLM:
    """Generalized Linear Model.

    Base class shared by ``LinearRegression`` and ``LogisticRegression``:
    holds the weight vector plus intercept and implements one step of
    gradient-based online learning.

    Parameters:
        optimizer (optim.Optimizer): The sequential optimizer used for updating the weights. Note
            that the intercept is handled separately.
        loss (optim.Loss): The loss function to optimize for.
        l2 (float): Amount of L2 regularization used to push weights towards 0.
        intercept (float): Initial intercept value.
        intercept_lr (optim.schedulers.Scheduler or float): Learning rate scheduler used for
            updating the intercept. If a `float` is passed, then an instance of
            `optim.schedulers.Constant` will be used. Setting this to 0 implies that the intercept
            will be not be updated.
        clip_gradient (float): Clips the absolute value of each gradient value.
        initializer (optim.initializers.Initializer): Weights initialization scheme.

    Attributes:
        weights (collections.defaultdict): The current weights.

    """

    def __init__(self, optimizer, loss, l2, intercept, intercept_lr, clip_gradient, initializer):
        self.optimizer = optimizer
        self.loss = loss
        self.l2 = l2
        self.intercept = intercept
        # A plain number becomes a constant learning-rate scheduler; a
        # scheduler instance is used as-is.
        self.intercept_lr = (
            optim.schedulers.Constant(intercept_lr)
            if isinstance(intercept_lr, numbers.Number) else
            intercept_lr
        )
        self.clip_gradient = clip_gradient
        # Missing features get a fresh weight from the initializer on access.
        self.weights = collections.defaultdict(initializer)
        self.initializer = initializer

    def _raw_dot(self, x):
        # Linear predictor: <w, x> + b.
        return utils.math.dot(self.weights, x) + self.intercept

    def _eval_gradient(self, x, y, sample_weight):
        """Returns the gradient for a given observation.

        This logic is put into a separate function for testing purposes.
        """

        loss_gradient = self.loss.gradient(y_true=y, y_pred=self._raw_dot(x))

        # Apply the sample weight
        loss_gradient *= sample_weight

        # Clip the gradient to avoid numerical instability
        loss_gradient = utils.math.clamp(
            loss_gradient,
            minimum=-self.clip_gradient,
            maximum=self.clip_gradient
        )

        # Per-feature gradient includes the L2 penalty term; the scalar loss
        # gradient is also returned for the intercept update.
        return (
            {
                i: (
                    xi * loss_gradient +
                    2. * self.l2 * self.weights.get(i, 0)
                )
                for i, xi in x.items()
            },
            loss_gradient
        )

    def fit_one(self, x, y, sample_weight=1.):
        """Update the model with a single observation and return ``self``."""

        # Some optimizers need to do something before a prediction is made
        self.weights = self.optimizer.update_before_pred(w=self.weights)

        # Calculate the gradient
        gradient, loss_gradient = self._eval_gradient(x=x, y=y, sample_weight=sample_weight)

        # Update the intercept
        self.intercept -= self.intercept_lr.get(self.optimizer.n_iterations) * loss_gradient

        # Update the weights
        self.weights = self.optimizer.update_after_pred(w=self.weights, g=gradient)

        return self
class LinearRegression(GLM, base.Regressor):
    """Linear regression.

    Parameters:
        optimizer (optim.Optimizer): The sequential optimizer used for updating the weights. Note
            that the intercept is handled separately. Defaults to
            ``optim.SGD(optim.schedulers.InverseScaling(.01, .25))``.
        loss (optim.RegressionLoss): The loss function to optimize for. Defaults to
            ``optim.losses.Squared``.
        l2 (float): Amount of L2 regularization used to push weights towards 0.
        intercept (float): Initial intercept value.
        intercept_lr (optim.schedulers.Scheduler or float): Learning rate scheduler used for
            updating the intercept. If a `float` is passed, then an instance of
            `optim.schedulers.Constant` will be used. Setting this to 0 implies that the intercept
            will be not be updated.
        clip_gradient (float): Clips the absolute value of each gradient value.
        initializer (optim.initializers.Initializer): Weights initialization scheme.

    Attributes:
        weights (collections.defaultdict): The current weights.

    Example:

        ::

            >>> from creme import datasets
            >>> from creme import linear_model
            >>> from creme import metrics
            >>> from creme import model_selection
            >>> from creme import preprocessing

            >>> X_y = datasets.TrumpApproval()

            >>> model = (
            ...     preprocessing.StandardScaler() |
            ...     linear_model.LinearRegression(intercept_lr=.1)
            ... )
            >>> metric = metrics.MAE()

            >>> model_selection.progressive_val_score(X_y, model, metric)
            MAE: 0.616405

            >>> model['LinearRegression'].intercept
            38.000439

    Note:
        Using a feature scaler such as `preprocessing.StandardScaler` upstream helps the optimizer
        to converge.

    """

    def __init__(self, optimizer=None, loss=None, l2=.0, intercept=0., intercept_lr=.01,
                 clip_gradient=1e12, initializer=None):
        super().__init__(
            optimizer=(
                optim.SGD(optim.schedulers.InverseScaling(.01, .25))
                if optimizer is None else
                optimizer
            ),
            loss=optim.losses.Squared() if loss is None else loss,
            intercept=intercept,
            intercept_lr=intercept_lr,
            l2=l2,
            clip_gradient=clip_gradient,
            initializer=initializer if initializer else optim.initializers.Zeros()
        )

    def predict_one(self, x):
        """Predict the target for one observation via the model's mean function."""
        return self.loss.mean_func(self._raw_dot(x))

    def debug_one(self, x, decimals=5, **print_params):
        """Print a per-feature breakdown of the prediction for *x*.

        Each feature's value, weight, and contribution (value * weight) are
        tabulated, sorted by contribution in decreasing order, followed by
        the resulting prediction.

        Example:

            ::

                >>> from creme import datasets
                >>> from creme import linear_model
                >>> from creme import metrics
                >>> from creme import model_selection
                >>> from creme import preprocessing

                >>> X_y = datasets.TrumpApproval()

                >>> model = (
                ...     preprocessing.StandardScaler() |
                ...     linear_model.LinearRegression(intercept_lr=.1)
                ... )

                >>> for x, y in X_y:
                ...     y_pred = model.predict_one(x)
                ...     model = model.fit_one(x, y)

                >>> model.debug_one(x)
                0. Input
                --------
                gallup: 43.84321 (float)
                ipsos: 40.57068 (float)
                morning_consult: 37.81875 (float)
                ordinal_date: 737389 (int)
                rasmussen: 40.10469 (float)
                you_gov: 41.63691 (float)
                <BLANKLINE>
                1. StandardScaler
                -----------------
                gallup: 1.18751 (float)
                ipsos: -0.04683 (float)
                morning_consult: -1.22583 (float)
                ordinal_date: 1.72946 (float)
                rasmussen: -0.23857 (float)
                you_gov: 0.44131 (float)
                <BLANKLINE>
                2. LinearRegression
                -------------------
                Name Value Weight Contribution
                Intercept 1.00000 38.00044 38.00044
                ordinal_date 1.72946 2.23125 3.85885
                gallup 1.18751 0.28647 0.34019
                you_gov 0.44131 -0.01270 -0.00560
                ipsos -0.04683 1.01815 -0.04768
                rasmussen -0.23857 0.45099 -0.10759
                morning_consult -1.22583 0.35181 -0.43126
                <BLANKLINE>
                Prediction: 41.60735

        """

        def fmt_float(x):
            # Fixed-point formatting with the requested number of decimals.
            return '{: ,.{prec}f}'.format(x, prec=decimals)

        names = list(map(str, x.keys())) + ['Intercept']
        values = list(map(fmt_float, list(x.values()) + [1]))
        weights = list(map(fmt_float, [self.weights.get(i, 0) for i in x] + [self.intercept]))
        contributions = [xi * self.weights.get(i, 0) for i, xi in x.items()] + [self.intercept]
        # Largest contributions first.
        order = reversed(np.argsort(contributions))
        contributions = list(map(fmt_float, contributions))

        table = utils.pretty.print_table(
            headers=['Name', 'Value', 'Weight', 'Contribution'],
            columns=[names, values, weights, contributions],
            order=order
        )

        print(table, **print_params)
class LogisticRegression(GLM, base.BinaryClassifier):
    """Logistic regression.

    Parameters:
        optimizer (optim.Optimizer): The sequential optimizer used for updating the weights. Note
            that the intercept is handled separately. Defaults to ``optim.SGD(.01)``.
        loss (optim.BinaryLoss): The loss function to optimize for. Defaults to
            ``optim.losses.Log``.
        l2 (float): Amount of L2 regularization used to push weights towards 0.
        intercept (float): Initial intercept value.
        intercept_lr (optim.schedulers.Scheduler or float): Learning rate scheduler used for
            updating the intercept. If a `float` is passed, then an instance of
            `optim.schedulers.Constant` will be used. Setting this to 0 implies that the intercept
            will be not be updated.
        clip_gradient (float): Clips the absolute value of each gradient value.
        initializer (optim.initializers.Initializer): Weights initialization scheme.

    Attributes:
        weights (collections.defaultdict): The current weights.

    Example:

        ::

            >>> from creme import datasets
            >>> from creme import linear_model
            >>> from creme import metrics
            >>> from creme import model_selection
            >>> from creme import optim
            >>> from creme import preprocessing

            >>> X_y = datasets.Phishing()

            >>> model = (
            ...     preprocessing.StandardScaler() |
            ...     linear_model.LogisticRegression(optimizer=optim.SGD(.1))
            ... )
            >>> metric = metrics.Accuracy()

            >>> model_selection.progressive_val_score(X_y, model, metric)
            Accuracy: 88.96%

    Note:
        Using a feature scaler such as `preprocessing.StandardScaler` upstream helps the optimizer
        to converge.

    """

    def __init__(self, optimizer=None, loss=None, l2=.0, intercept=0., intercept_lr=.01,
                 clip_gradient=1e12, initializer=None):
        super().__init__(
            optimizer=optim.SGD(.01) if optimizer is None else optimizer,
            loss=optim.losses.Log() if loss is None else loss,
            intercept=intercept,
            intercept_lr=intercept_lr,
            l2=l2,
            clip_gradient=clip_gradient,
            initializer=initializer if initializer else optim.initializers.Zeros()
        )

    def predict_proba_one(self, x):
        """Return the class probability distribution for one observation."""
        p = self.loss.mean_func(self._raw_dot(x))  # Convert logit to probability
        return {False: 1. - p, True: p}
| 3.296875 | 3 |
underdog_fastapi/underdog_fastapi/api/schemas.py | ericmbernier/ericbernier-blog-posts | 9 | 12761060 | from pydantic import BaseModel
from typing import List
from underdog_fastapi.underdog.team import Team
class PlayerBase(BaseModel):
    """Common player fields shared by the API schemas."""
    first_name: str
    last_name: str
    # Average draft position; defaults to None when not yet known.
    # NOTE(review): annotated `float` with a None default — presumably meant
    # Optional[float]; confirm against the pydantic version before tightening.
    adp: float = None
    projected_points: float = None
    team_name: str
    # Team value imported from underdog_fastapi.underdog.team.
    team_abbreviation: Team
class Player(PlayerBase):
    """Player as stored in / returned by the API, including its database id."""
    id: int
    # Bye week number; None when not set (same Optional caveat as adp above).
    bye_week: int = None

    class Config:
        # Allow constructing this model directly from ORM objects.
        orm_mode = True
class PlayerStack(BaseModel):
    """A group of players together with aggregate draft statistics."""
    players: List[PlayerBase]
    average_adp: float
    median_adp: float
    projected_points_per_week: float
| 2.75 | 3 |
Lib/site-packages/QtModularUiPack/Widgets/VideoExtensions/__init__.py | fochoao/cpython | 3 | 12761061 | <gh_stars>1-10
from .image_render_widget import ImageRenderWidget, ImageCircle, ImageEllipse, ImageLayer, ImageRectangle, ImageShape
from .video_frame_grabber import VideoFrameGrabber | 1.101563 | 1 |
ROAR/agent_module/ios_agent.py | XiangyuZhou-Berkeley/ROAR | 1 | 12761062 | from ROAR.agent_module.agent import Agent
from ROAR.utilities_module.data_structures_models import SensorsData
from ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl
from ROAR.configurations.configuration import Configuration as AgentConfig
import cv2
import numpy as np
import open3d as o3d
from ROAR.utilities_module.occupancy_map import OccupancyGridMap
from ROAR.perception_module.depth_to_pointcloud_detector import DepthToPointCloudDetector
from ROAR.perception_module.ground_plane_detector import GroundPlaneDetector
from ROAR.perception_module.lane_detector import LaneDetector
class iOSAgent(Agent):
    """Agent that masks the depth image to lane pixels, converts it to a point
    cloud, feeds the points into an occupancy grid map, and shows the cloud in
    a non-blocking Open3D window."""

    def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, **kwargs):
        super().__init__(vehicle, agent_settings, **kwargs)
        # initialize occupancy grid map content
        self.occu_map = OccupancyGridMap(agent=self)
        self.depth_to_pcd = DepthToPointCloudDetector(agent=self)
        self.ground_plane_detector = GroundPlaneDetector(agent=self)
        self.lane_detector = LaneDetector(agent=self)
        # initialize open3d related content
        self.vis = o3d.visualization.Visualizer()
        self.vis.create_window(width=500, height=500)
        self.pcd = o3d.geometry.PointCloud()
        self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
        # Tracks whether geometries were already added to the visualizer, so
        # later frames update them in place instead of re-adding.
        self.points_added = False

    def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
        super(iOSAgent, self).run_step(sensors_data, vehicle)
        # Only process once both cameras have produced a frame.
        if self.front_depth_camera.data is not None and self.front_rgb_camera.data is not None:
            depth_img = self.front_depth_camera.data.copy()
            lane_mask = self.lane_detector.run_in_series()
            # Zero out depth everywhere the lane detector did not fire, so the
            # point cloud only contains lane pixels.
            # NOTE(review): assumes lane_mask has the same HxW shape as
            # depth_img — confirm against LaneDetector's output.
            none_lane = np.where(lane_mask < 0.5)
            depth_img[none_lane] = 0
            pcd = self.depth_to_pcd.run_in_series(depth_image=depth_img)
            points: np.ndarray = np.asarray(pcd.points)
            self.occu_map.update(points)
            self.occu_map.visualize()
            self.non_blocking_pcd_visualization(pcd=pcd, should_center=True,
                                                should_show_axis=True, axis_size=1)
        # No driving decision is made here; an idle control is returned.
        return VehicleControl()

    def non_blocking_pcd_visualization(self, pcd: o3d.geometry.PointCloud,
                                       should_center=False,
                                       should_show_axis=False,
                                       axis_size: float = 0.1):
        """Render *pcd* in the persistent Open3D window without blocking.

        On the first call the geometries are added to the visualizer; on
        subsequent calls they are updated in place.
        """
        points = np.asarray(pcd.points)
        colors = np.asarray(pcd.colors)
        if should_center:
            # Shift the cloud so its centroid sits at the origin.
            points = points - np.mean(points, axis=0)

        if self.points_added is False:
            # First frame: create and register the geometries.
            self.pcd = o3d.geometry.PointCloud()
            self.pcd.points = o3d.utility.Vector3dVector(points)
            self.pcd.colors = o3d.utility.Vector3dVector(colors)
            if should_show_axis:
                self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=axis_size,
                                                                                          origin=np.mean(points,
                                                                                                         axis=0))
                self.vis.add_geometry(self.coordinate_frame)
            self.vis.add_geometry(self.pcd)
            self.points_added = True
        else:
            # Later frames: mutate the existing geometries and refresh.
            # print(np.shape(np.vstack((np.asarray(self.pcd.points), points))))
            self.pcd.points = o3d.utility.Vector3dVector(points)
            self.pcd.colors = o3d.utility.Vector3dVector(colors)
            if should_show_axis:
                self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=axis_size,
                                                                                          origin=np.mean(points,
                                                                                                         axis=0))
                self.vis.update_geometry(self.coordinate_frame)
            self.vis.update_geometry(self.pcd)
            self.vis.poll_events()
            self.vis.update_renderer()
| 2.1875 | 2 |
official/cv/lenet/modelarts/train_start.py | leelige/mindspore | 77 | 12761063 | <filename>official/cv/lenet/modelarts/train_start.py
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
######################## train lenet example ########################
train lenet and get network model files(.ckpt) :
python train.py --data_path /YourDataPath
"""
import os
import argparse
import glob
import sys
import time
import numpy as np
import moxing as mox
from src.model_utils.moxing_adapter import get_device_id, get_device_num, get_rank_id, get_job_id
from src.dataset import create_dataset
from src.lenet import LeNet5
import mindspore.nn as nn
from mindspore.context import ParallelMode
from mindspore.communication.management import init
from mindspore import context
from mindspore import export
from mindspore import Tensor
from mindspore.train import Model
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.nn.metrics import Accuracy
from mindspore.common import set_seed
# Path setup: make both the script directory and its parent importable.
root_dir = os.path.join(os.path.dirname(os.path.realpath(__file__))) # src root dir
cwd = os.getcwd()
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__))))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../'))
# Command-line arguments: ModelArts OBS urls, local cache paths, training
# hyper-parameters and model-export settings.
parser = argparse.ArgumentParser(description='mindspore lenet training')
parser.add_argument("--enable_modelarts", default='True', type=str, help="")
parser.add_argument("--data_url", type=str, default="", help="dataset path for obs")
parser.add_argument("--train_url", type=str, default="", help="train path for obs")
parser.add_argument('--data_path', type=str, default='/cache/data', help='Dataset url for local')
parser.add_argument("--output_path", type=str, default="/cache/train", help="dir of training output for local")
# parser.add_argument("--checkpoint_path", type=str, default="./checkpoint/", help="setting dir of checkpoint output")
parser.add_argument('--device_target', type=str, default='Ascend', choices=['Ascend', 'GPU', 'CPU'],
                    help='device where the code will be implemented. (Default: Ascend)')
# Training hyper-parameters.
parser.add_argument('--num_classes', type=int, default=10, help='number of classes')
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--epoch_size', type=int, default=1, help='epoch sizse')
parser.add_argument("--learning_rate", type=float, default=0.002, help="")
parser.add_argument("--sink_size", type=int, default=-1, help="")
parser.add_argument("--momentum", type=float, default=0.9, help="")
parser.add_argument("--save_checkpoint_steps", type=int, default=125, help="")
parser.add_argument('--lr', type=float, default=0.01, help='base learning rate')
parser.add_argument("--image_height", type=int, default=32, help="")
parser.add_argument("--image_width", type=int, default=32, help="")
parser.add_argument("--buffer_size", type=int, default=1000, help="")
parser.add_argument("--keep_checkpoint_max", type=int, default=10, help="")
# Export options. NOTE(review): the flag is named '--z' but holds the export
# file format - presumably it was meant to be '--file_format'; kept as-is.
parser.add_argument('--z', type=str, default='AIR', choices=['AIR', 'ONNX', 'MINDIR'],
                    help='Format of output model(Default: AIR)')
parser.add_argument('--file_name', type=str, default='lenet', help='output file name')
parser.add_argument("--ckpt_path", type=str, default="/cache/train", help="")
parser.add_argument("--ckpt_file", type=str, default="/cache/train/checkpoint_lenet-10_1875.ckpt", help="")
cfg = parser.parse_args()
set_seed(1)  # fixed global seed for reproducibility
_global_sync_count = 0  # counts sync_data() calls; used to build unique lock-file names
def frozen_to_air(net, args):
    """Load checkpoint weights into ``net`` and export it as a frozen model.

    Args:
        net: network to restore and export.
        args (dict): keys ``ckpt_file``, ``batch_size``, ``image_height``,
            ``image_width``, ``file_name`` and ``file_format``.
    """
    # Restore the trained parameters into the network.
    load_param_into_net(net, load_checkpoint(args.get("ckpt_file")))
    # Dummy NCHW input (single channel) used to trace the graph for export.
    dummy_shape = [args.get("batch_size"), 1, args.get("image_height"), args.get("image_width")]
    dummy_input = Tensor(np.zeros(dummy_shape, np.float32))
    export(net, dummy_input, file_name=args.get("file_name"), file_format=args.get("file_format"))
def sync_data(from_path, to_path):
    """
    Copy data between remote OBS and the local filesystem (either direction).

    Only one process per server (device id 0 of up to 8 devices) performs the
    actual copy and then drops a lock file; every other process spins until
    the lock file appears.
    """
    global _global_sync_count
    # Each call gets its own lock-file name so consecutive syncs don't collide.
    sync_lock = "/tmp/copy_sync.lock" + str(_global_sync_count)
    _global_sync_count += 1
    # Each server contains 8 devices as most.
    is_copy_worker = get_device_id() % min(get_device_num(), 8) == 0
    if is_copy_worker and not os.path.exists(sync_lock):
        print("from path: ", from_path)
        print("to path: ", to_path)
        mox.file.copy_parallel(from_path, to_path)
        print("===finish data synchronization===")
        try:
            os.mknod(sync_lock)
        except IOError:
            print("Failed to create directory")
        print("===save flag===")
    # Everyone (including the copier) waits for the flag file.
    while not os.path.exists(sync_lock):
        time.sleep(1)
    print("Finish sync data from {} to {}.".format(from_path, to_path))
def wrapped_func(config_name):
    """
    Download the dataset and the workspace from OBS to the local cache paths.

    Args:
        config_name: parsed argument namespace carrying ``enable_modelarts``,
            ``data_url``/``data_path`` and ``train_url``/``output_path``.
    """
    if config_name.enable_modelarts:
        if config_name.data_url:
            if not os.path.isdir(config_name.data_path):
                os.makedirs(config_name.data_path)
            sync_data(config_name.data_url, config_name.data_path)
            # Bug fix: this print previously read the global ``cfg`` instead of
            # the ``config_name`` argument that was passed in.
            print("Dataset downloaded: ", os.listdir(config_name.data_path))
        if config_name.train_url:
            if not os.path.isdir(config_name.output_path):
                os.makedirs(config_name.output_path)
            sync_data(config_name.train_url, config_name.output_path)
            print("Workspace downloaded: ", os.listdir(config_name.output_path))
def train_lenet_model():
    """
    Train LeNet-5 on MNIST (ModelArts entry point), freeze the latest
    checkpoint to AIR format, and upload the output directory back to OBS.

    Raises:
        ValueError: if the training dataset is empty.
        RuntimeError: if training produced no checkpoint file.
    """
    print(cfg)
    print('device id:', get_device_id())
    print('device num:', get_device_num())
    print('rank id:', get_rank_id())
    print('job id:', get_job_id())
    device_target = cfg.device_target
    context.set_context(mode=context.GRAPH_MODE, device_target=cfg.device_target)
    context.set_context(save_graphs=False)
    if device_target == "GPU":
        context.set_context(enable_graph_kernel=True)
        context.set_context(graph_kernel_flags="--enable_cluster_ops=MatMul")
    device_num = get_device_num()
    if device_num > 1:
        # Distributed training: data-parallel with averaged gradients.
        context.reset_auto_parallel_context()
        context.set_auto_parallel_context(device_num=device_num,
                                          parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True)
        if device_target == "Ascend":
            context.set_context(device_id=get_device_id())
            init()
        elif device_target == "GPU":
            init()
    else:
        context.set_context(device_id=get_device_id())
    # create dataset
    ds_train = create_dataset(os.path.join(cfg.data_path, "train"), cfg.batch_size)
    if ds_train.get_dataset_size() == 0:
        raise ValueError("Please check dataset size > 0 and batch_size <= dataset size")
    print("dataset size is : " + str(ds_train.get_dataset_size()))
    network = LeNet5(cfg.num_classes)
    net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
    net_opt = nn.Momentum(network.trainable_params(), cfg.lr, cfg.momentum)
    time_cb = TimeMonitor(data_size=ds_train.get_dataset_size())
    config_ck = CheckpointConfig(save_checkpoint_steps=cfg.save_checkpoint_steps,
                                 keep_checkpoint_max=cfg.keep_checkpoint_max)
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", directory=cfg.ckpt_path, config=config_ck)
    if cfg.device_target != "Ascend":
        if cfg.device_target == "GPU":
            context.set_context(enable_graph_kernel=True)
        model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
    else:
        # Mixed precision on Ascend.
        model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()}, amp_level="O2")
    print("============== Starting Training ==============")
    model.train(cfg.epoch_size, ds_train, callbacks=[time_cb, ckpoint_cb, LossMonitor()])
    print("============== Training finish ==============")
    ckpt_list = glob.glob(str(cfg.output_path) + "/*lenet*.ckpt")
    print(ckpt_list)
    if not ckpt_list:
        # Bug fix: the original only printed a warning here and then crashed
        # with an IndexError on ckpt_list[-1]; fail fast with a clear error.
        raise RuntimeError("ckpt file not generated")
    # Export the most recently written checkpoint.
    ckpt_list.sort(key=os.path.getmtime)
    ckpt_model = ckpt_list[-1]
    print(ckpt_model)

    frozen_to_air_args = {"ckpt_file": ckpt_model,
                          "batch_size": cfg.batch_size,
                          "image_height": cfg.image_height,
                          "image_width": cfg.image_width,
                          "file_name": "/cache/train/lenet",
                          "file_format": "AIR"}
    frozen_to_air(network, frozen_to_air_args)
    # Upload everything produced locally back to OBS.
    mox.file.copy_parallel(cfg.output_path, cfg.train_url)
| 1.921875 | 2 |
Week1.py | TanZng/Automating-Real-World-Tasks-with-Python | 0 | 12761064 | #!/usr/bin/env python3
from PIL import Image
import glob
import os
def crear_folder(path='/opt/icons/'):
    """Create the output directory if it does not already exist.

    Args:
        path (str): directory to create; defaults to the original hard-coded
            '/opt/icons/' so existing callers are unaffected.
    """
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() / os.makedirs() pair.
    os.makedirs(path, exist_ok=True)
def guardar(imagen, filename, save_dir='/opt/icons/'):
    """Save ``imagen`` as a JPEG under ``save_dir`` and log its format/size.

    Args:
        imagen: PIL image (anything with ``.save``/``.format``/``.size``).
        filename (str): target file name.
        save_dir (str): destination directory; defaults to the original
            hard-coded '/opt/icons/' for backward compatibility.
    """
    # os.path.join replaces the original fragile string concatenation.
    save_path = os.path.join(save_dir, filename)
    imagen.save(save_path, 'JPEG')
    print(imagen.format, imagen.size)
def rotate_resize(imagen):
    """Return ``imagen`` rotated 90 degrees clockwise and scaled to 128x128."""
    rotated = imagen.rotate(-90)
    return rotated.resize((128, 128))
def main():
    """Process every ``ic_*`` image in the current directory: rotate, resize
    to 128x128 and save the result as JPEG under /opt/icons/."""
    # Script on images/ dir
    crear_folder()
    for filename in glob.glob("ic_*"):
        # convert('RGB') drops any alpha channel so JPEG saving works.
        imagen = Image.open(filename).convert('RGB')
        new_image = rotate_resize(imagen)
        guardar(new_image, filename)
    print("Done!")

if __name__ == "__main__":
    main()
| 3.25 | 3 |
measure_mate/migrations/0024_assessment_status.py | niche-tester/measure-mate | 15 | 12761065 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-11 11:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds a ``status`` char field (choices TODO/DONE, default TODO) to the
    Assessment model."""
    dependencies = [
        ('measure_mate', '0023_auto_20160330_0145'),
    ]
    operations = [
        migrations.AddField(
            model_name='assessment',
            name='status',
            field=models.CharField(choices=[(b'TODO', b'To Do'), (b'DONE', b'Done')], default=b'TODO', max_length=128),
        ),
    ]
| 1.726563 | 2 |
src/ping1d_sonar/__init__.py | FletcherFT/ping1d_sonar | 0 | 12761066 | #!/usr/bin/env python3
"""Scan serial ports for ping devices
Symlinks to detected devices are created under /dev/serial/ping/
This script needs root permission to create the symlinks
"""
import subprocess
import numpy as np
import rospy
from brping import PingDevice, PingParser, PingMessage
from brping.definitions import *
import serial
import socket
from collections import deque
from sensor_msgs.msg import Range, MultiEchoLaserScan, LaserEcho
class PingEnumerator:
    """Scans serial ports for Blue Robotics Ping devices and (optionally)
    creates descriptive symlinks under /dev/serial/ping/.

    NOTE(review): the symlink/low-latency helpers raise NotImplementedError
    because they require root; the code after the raise is kept as dead
    reference code.
    """

    def legacy_detect_ping1d(self, ping):
        """
        Detects Ping1D devices without DEVICE_INFORMATION implemented

        Returns the encoded symlink path, or None if the device did not answer.
        """
        firmware_version = ping.request(PING1D_FIRMWARE_VERSION)
        if firmware_version is None:
            return None
        description = "/dev/serial/ping/Ping1D-id-{}-t-{}-m-{}-v-{}.{}".format (
            firmware_version.src_device_id,
            firmware_version.device_type,
            firmware_version.device_model,
            firmware_version.firmware_version_major,
            firmware_version.firmware_version_minor
        )
        return description

    def detect_device(self, dev):
        """
        Attempts to detect the Ping device attached to serial port 'dev'
        Returns the new path with encoded name if detected, or None if the
        device was not detected
        """
        print("Checking if " + dev + " is a Ping device...")
        try:
            ping = PingDevice()
            ping.connect_serial("/dev/serial/by-id/" + dev, 115200)
        except Exception as exception:
            print("An exception has occurred: ", exception)
            return None

        if not ping.initialize():
            return None

        device_info = ping.request(COMMON_DEVICE_INFORMATION)
        if not device_info:
            # Older Ping1D firmware: fall back to the firmware-version request.
            return self.legacy_detect_ping1d(ping)
        if device_info.device_type == 1:
            description = "/dev/serial/ping/Ping1D-id-{}-r-{}-v-{}.{}.{}"
        elif device_info.device_type == 2:
            description = "/dev/serial/ping/Ping360-id-{}-r-{}-v-{}.{}.{}"
            # Open device with 2M baud to setup Ping360
            print("Setting baud to 2M...")
            ser = serial.Serial("/dev/serial/by-id/" + dev, 2000000)
            ser.send_break()
            # "UUUUUUU" is the Ping360 auto-baud handshake sequence.
            ser.write("UUUUUUU".encode())
            ser.close()
            self.set_low_latency(dev)
        else:
            # Unknown device type: not a supported Ping device.
            return None

        return description.format (
            device_info.src_device_id,
            device_info.device_revision,
            device_info.firmware_version_major,
            device_info.firmware_version_minor,
            device_info.firmware_version_patch
        )

    def set_low_latency(self, dev):
        """
        Receives /dev/serial/by-id/...
        maps to it to ttyUSB and sets the latency_timer for the device

        Currently disabled: writing latency_timer needs root permissions.
        """
        raise NotImplementedError("This method currently not supported, requires root permissions.")
        # --- dead code below (kept for reference; unreachable after raise) ---
        target_device = subprocess.check_output(' '.join(["readlink", "-f", "/dev/serial/by-id/%s" % dev]), shell=True)
        device_name = target_device.decode().strip().split("/")[-1]
        latency_file = "/sys/bus/usb-serial/devices/{0}/latency_timer".format(device_name)
        with open(latency_file, 'w') as p:
            p.write("1")
            p.flush()

    def make_symlink(self, origin, target):
        """
        follows target to real device an links origin to it
        origin => target
        Returns True if sucessful

        Currently disabled: creating links under /dev needs root permissions.
        """
        raise NotImplementedError("This method currently not supported, requires root permissions.")
        # --- dead code below (kept for reference; unreachable after raise) ---
        try:
            # Follow link to actual device
            target_device = subprocess.check_output(' '.join(["readlink", "-f", "/dev/serial/by-id/%s" % origin]), shell=True)
            # Strip newline from output
            target_device = target_device.decode().split('\n')[0]
            # Create another link to it
            subprocess.check_output(' '.join(["mkdir", "-p", "/dev/serial/ping"]), shell=True)
            subprocess.check_output("ln -fs %s %s" % (
                target_device,
                target), shell=True)
            print(origin, " linked to ", target)
            return True
        except subprocess.CalledProcessError as exception:
            print(exception)
            return False

    def erase_old_symlinks(self):
        """
        Erases all symlinks at "/dev/serial/ping/"

        Currently disabled: removing /dev entries needs root permissions.
        """
        raise NotImplementedError("This method currently not supported, requires root permissions.")
        # --- dead code below (kept for reference; unreachable after raise) ---
        try:
            subprocess.check_output(["rm", "-rf", "/dev/serial/ping"])
        except subprocess.CalledProcessError as exception:
            print(exception)

    def list_serial_devices(self):
        """
        Lists serial devices at "/dev/serial/by-id/"

        Returns a list of device names (empty on failure).
        """
        # Look for connected serial devices
        try:
            output = subprocess.check_output("ls /dev/serial/by-id", shell=True)
            return output.decode().strip().split("\n")
        except subprocess.CalledProcessError as exception:
            print(exception)
            return []
class PingDriver:
    """ROS node that polls a serial Ping1D sonar and publishes Range and
    profile (MultiEchoLaserScan) messages."""

    def __init__(self):
        rospy.init_node("ping1d_driver_node")
        self.ping_sensors = []
        self.enumerator = PingEnumerator()
        hz = rospy.Rate(1.0)
        # Block (at 1 Hz) until at least one device shows up under /dev/serial/by-id.
        while not len(self.ping_sensors) and not rospy.is_shutdown():
            self.ping_sensors = [f"/dev/serial/by-id/{dev}" for dev in self.enumerator.list_serial_devices()]
            rospy.logerr_throttle(10.0, f"{rospy.get_name()} | Waiting for valid ping1d sensor to appear.")
            hz.sleep()
        ## Messages that have the current distance measurement in the payload
        self.distance_messages = [
            PING1D_DISTANCE,
            PING1D_DISTANCE_SIMPLE,
            PING1D_PROFILE
        ]
        ## Parser to verify client comms
        self.parser = PingParser()
        self.range_publisher = rospy.Publisher("range", Range, queue_size=10)
        self.profile_publisher = rospy.Publisher("profile", MultiEchoLaserScan, queue_size=10)
        # Main poll rate for run().
        self.hz = rospy.Rate(15.0)
        if not rospy.is_shutdown():
            rospy.loginfo("Setting up serial device.")
            self.device = PingDevice()
            # Use the first detected device only.
            self.device.connect_serial(self.ping_sensors[0], 115200)
            # Stop continuous output so we can poll on demand.
            data = PingMessage(PING1D_CONTINUOUS_STOP)
            data.pack_msg_data()
            self.device.write(data.msg_data)
            # Auto-ranging mode.
            data = PingMessage(PING1D_SET_MODE_AUTO)
            data.pack_msg_data()
            self.device.write(data.msg_data)
            # Scan window; units appear to be millimetres (parse() divides
            # by 1000 to get metres) - TODO confirm against the Ping protocol.
            data = PingMessage(PING1D_SET_RANGE)
            data.scan_start = 200
            data.scan_length = 30000
            data.pack_msg_data()
            self.device.write(data.msg_data)

    ## Digest incoming ping data
    def parse(self, data: PingMessage):
        """Convert a PingMessage into ROS messages.

        Returns:
            tuple: (Range or None, MultiEchoLaserScan or None); the profile
            message is only produced for PING1D_PROFILE payloads.
        """
        range_msg = None
        profile_msg = None
        if data.message_id in self.distance_messages:
            range_msg = Range()
            range_msg.header.frame_id = "altimeter"
            range_msg.header.stamp = rospy.Time.now()
            range_msg.radiation_type = range_msg.ULTRASOUND
            range_msg.field_of_view = 0.52
            # scan_start / scan_length converted to metres here.
            range_msg.max_range = (data.scan_start + data.scan_length) / 1000
            range_msg.min_range = data.scan_start / 1000.0
            # Out-of-window readings leave range at its default value.
            if range_msg.min_range <= data.distance / 1000 <= range_msg.max_range:
                range_msg.range = data.distance / 1000
            if data.message_id == PING1D_PROFILE:
                profile_msg = MultiEchoLaserScan()
                profile_msg.header = range_msg.header
                # One echo whose "ranges" are evenly spaced sample depths.
                profile_msg.ranges = [LaserEcho(np.linspace(data.scan_start / 1000, data.scan_start / 1000 + data.scan_length / 1000, data.profile_data_length).tolist())]
                profile_msg.range_min = data.scan_start / 1000.0
                profile_msg.range_max = (data.scan_start + data.scan_length) / 1000
                profile_msg.angle_increment = 0
                profile_msg.angle_max = 0
                profile_msg.angle_min = 0
                # Raw 8-bit echo intensities from the device payload.
                profile_msg.intensities = [LaserEcho(np.frombuffer(data.profile_data, dtype=np.uint8).tolist())]
        return range_msg, profile_msg

    def send_ping1d_request(self):
        """Request a single PING1D_DISTANCE reading from the device."""
        data = PingMessage()
        data.request_id = PING1D_DISTANCE
        data.src_device_id = 0
        data.pack_msg_data()
        self.device.write(data.msg_data)

    def run(self):
        """Poll loop: request a distance, publish whatever comes back, ~15 Hz."""
        # read ping device from serial
        try:
            while not rospy.is_shutdown():
                self.send_ping1d_request()
                device_data = self.device.read()
                if device_data is not None:
                    range_msg, profile_msg = self.parse(device_data)
                    if range_msg is not None:
                        self.range_publisher.publish(range_msg)
                    if profile_msg is not None:
                        self.profile_publisher.publish(profile_msg)
                self.hz.sleep()
        except rospy.ROSInterruptException:
            pass
        finally:
            # Always release the serial port on shutdown.
            self.device.iodev.close()
class PingClient(object):
    """A single UDP client: buffers raw bytes and queues decoded PingMessages."""

    def __init__(self):
        # Parser that validates/assembles incoming bytes into messages.
        self.parser = PingParser()
        # FIFO of fully parsed messages waiting to be forwarded.
        self.rx_msgs = deque([])

    def parse(self, data):
        """Feed raw bytes into the parser; queue each completed message."""
        for byte in bytearray(data):
            if self.parser.parse_byte(byte) == PingParser.NEW_MESSAGE:
                self.rx_msgs.append(self.parser.rx_msg)

    def dequeue(self):
        """Pop and return the oldest queued message, or None when empty."""
        return self.rx_msgs.popleft() if self.rx_msgs else None
class PingProxy(object):
    """UDP proxy between a serial Ping device and any number of UDP clients.

    Device output is fanned out to every known client; fully parsed client
    messages are forwarded to the device.
    """
    def __init__(self, device: str, port: int, topic: str):
        ## A serial object for ping device comms
        self.device = device
        ## UDP port number for server
        self.port = port
        ## Publisher to send ROS range information on
        self.range_msg = Range()
        self.range_publisher = rospy.Publisher(topic, Range, queue_size=10)
        ## Connected client dictionary, keyed by (host, port) address
        self.clients = {}
        ## Socket to serve on (non-blocking so run() can poll)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.setblocking(False)
        self.socket.bind(('0.0.0.0', self.port))

    ## Run proxy tasks
    def run(self):
        """One polling iteration: pump client->device and device->clients."""
        try:
            data, address = self.socket.recvfrom(4096)
            # new client
            if address not in self.clients:
                self.clients[address] = PingClient()
            # digest data coming in from client
            self.clients[address].parse(data)
        except (BlockingIOError, TimeoutError):
            # Bug fix: a non-blocking socket raises BlockingIOError (not
            # TimeoutError) when no datagram is pending; previously that fell
            # through to the generic handler and logged an error every poll.
            pass  # waiting for data
        except Exception as e:
            print("Error reading data", e)
        # read ping device from serial
        device_data = self.device.read(self.device.in_waiting)
        # send ping device data to all clients via UDP
        if device_data:  # don't write empty data
            for client in self.clients:
                self.socket.sendto(device_data, client)
        # send all client comms to ping device
        for client in self.clients:
            c = self.clients[client]
            msg = c.dequeue()
            while msg is not None:
                self.device.write(msg.msg_data)
                msg = c.dequeue()
| 2.90625 | 3 |
models/unetp/layers.py | qgking/DASC_COVID19 | 4 | 12761067 | <filename>models/unetp/layers.py
# -*- coding: utf-8 -*-
# @Time : 20/7/2 11:08
# @Author : qgking
# @Email : <EMAIL>
# @Software: PyCharm
# @Desc : layers.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.unetp.init_weights import init_weights
class unetConv2(nn.Module):
    """A stack of ``n`` (Conv2d [+ BatchNorm2d] + ReLU) blocks.

    The first conv maps ``in_size`` -> ``out_size``; the remaining ones map
    ``out_size`` -> ``out_size``.
    """

    def __init__(self, in_size, out_size, is_batchnorm, n=2, ks=3, stride=1, padding=1):
        super(unetConv2, self).__init__()
        self.n = n
        self.ks = ks
        self.stride = stride
        self.padding = padding
        channels = in_size
        for idx in range(1, n + 1):
            if is_batchnorm:
                block = nn.Sequential(nn.Conv2d(channels, out_size, ks, stride, padding),
                                      nn.BatchNorm2d(out_size),
                                      nn.ReLU(inplace=True), )
            else:
                block = nn.Sequential(nn.Conv2d(channels, out_size, ks, stride, padding),
                                      nn.ReLU(inplace=True), )
            # Register as attributes conv1..convN, like the original.
            setattr(self, 'conv%d' % idx, block)
            channels = out_size
        # initialise the blocks
        for m in self.children():
            init_weights(m, init_type='kaiming')

    def forward(self, inputs):
        out = inputs
        # Apply conv1..convN in sequence.
        for idx in range(1, self.n + 1):
            out = getattr(self, 'conv%d' % idx)(out)
        return out
class unetUp(nn.Module):
    """Decoder block: upsample ``inputs0``, concatenate skip connections and
    fuse them through a double conv."""
    def __init__(self, in_size, out_size, is_deconv, n_concat=2):
        super(unetUp, self).__init__()
        # self.conv = unetConv2(in_size + (n_concat - 2) * out_size, out_size, False)
        # NOTE(review): the conv input is hard-wired to out_size * 2, which only
        # matches the commented formula when n_concat == 2 and
        # in_size == 2 * out_size - confirm for other n_concat values.
        self.conv = unetConv2(out_size * 2, out_size, False)
        if is_deconv:
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=4, stride=2, padding=1)
        else:
            self.up = nn.UpsamplingBilinear2d(scale_factor=2)
        # initialise the blocks (skip nested unetConv2: it initialises itself)
        for m in self.children():
            if m.__class__.__name__.find('unetConv2') != -1: continue
            init_weights(m, init_type='kaiming')
    def forward(self, inputs0, *input):
        # print(self.n_concat)
        # print(input)
        # Upsample, then channel-concatenate every skip tensor before the conv.
        outputs0 = self.up(inputs0)
        for i in range(len(input)):
            outputs0 = torch.cat([outputs0, input[i]], 1)
        return self.conv(outputs0)
class unetUp_origin(nn.Module):
    """Upsampling block (UNet++ variant): upsample ``inputs0``, concatenate
    the skip tensors and fuse them with a double conv."""

    def __init__(self, in_size, out_size, is_deconv, n_concat=2):
        super(unetUp_origin, self).__init__()
        # Defect fix: the original duplicated this identical unetConv2
        # construction in both branches of the if/else; build it once.
        self.conv = unetConv2(in_size + (n_concat - 2) * out_size, out_size, False)
        if is_deconv:
            self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=4, stride=2, padding=1)
        else:
            self.up = nn.UpsamplingBilinear2d(scale_factor=2)
        # initialise the blocks (skip nested unetConv2: it initialises itself)
        for m in self.children():
            if m.__class__.__name__.find('unetConv2') != -1: continue
            init_weights(m, init_type='kaiming')

    def forward(self, inputs0, *input):
        """Upsample ``inputs0``, concatenate the skip tensors, run the conv."""
        outputs0 = self.up(inputs0)
        for i in range(len(input)):
            outputs0 = torch.cat([outputs0, input[i]], 1)
        return self.conv(outputs0)
| 2.453125 | 2 |
dice.py | mpi3d/py-sense-hat | 1 | 12761068 | <gh_stars>1-10
from sense_hat import SenseHat
import time
import random
# Sense HAT setup: dim display, rotated 180 degrees; IMU configured with
# (False, False, True) - presumably compass/gyro off, accelerometer on, per
# the sense_hat set_imu_config argument order - TODO confirm.
sense = SenseHat()
sense.clear()
sense.set_rotation(180)
sense.low_light = True
sense.set_imu_config(False, False, True)
# s: shake-detection threshold (degrees of orientation change).
s = 30
# b: lit (white) pixel, n: off (black) pixel for the 8x8 LED matrix.
b = [255,255,255]
n = [0,0,0]
# d1..d6: 8x8 pixel-art die faces (each a flat list of 64 RGB triples).
d1 = [n,n,n,n,n,n,n,n,
      n,n,n,n,n,n,n,n,
      n,n,n,n,n,n,n,n,
      n,n,n,b,b,n,n,n,
      n,n,n,b,b,n,n,n,
      n,n,n,n,n,n,n,n,
      n,n,n,n,n,n,n,n,
      n,n,n,n,n,n,n,n]
d2 = [b,b,n,n,n,n,n,n,
      b,b,n,n,n,n,n,n,
      n,n,n,n,n,n,n,n,
      n,n,n,n,n,n,n,n,
      n,n,n,n,n,n,n,n,
      n,n,n,n,n,n,n,n,
      n,n,n,n,n,n,b,b,
      n,n,n,n,n,n,b,b]
d3 = [b,b,n,n,n,n,n,n,
      b,b,n,n,n,n,n,n,
      n,n,n,n,n,n,n,n,
      n,n,n,b,b,n,n,n,
      n,n,n,b,b,n,n,n,
      n,n,n,n,n,n,n,n,
      n,n,n,n,n,n,b,b,
      n,n,n,n,n,n,b,b]
d4 = [b,b,n,n,n,n,b,b,
      b,b,n,n,n,n,b,b,
      n,n,n,n,n,n,n,n,
      n,n,n,n,n,n,n,n,
      n,n,n,n,n,n,n,n,
      n,n,n,n,n,n,n,n,
      b,b,n,n,n,n,b,b,
      b,b,n,n,n,n,b,b]
d5 = [b,b,n,n,n,n,b,b,
      b,b,n,n,n,n,b,b,
      n,n,n,n,n,n,n,n,
      n,n,n,b,b,n,n,n,
      n,n,n,b,b,n,n,n,
      n,n,n,n,n,n,n,n,
      b,b,n,n,n,n,b,b,
      b,b,n,n,n,n,b,b]
d6 = [b,b,n,n,n,n,b,b,
      b,b,n,n,n,n,b,b,
      n,n,n,n,n,n,n,n,
      b,b,n,n,n,n,b,b,
      b,b,n,n,n,n,b,b,
      n,n,n,n,n,n,n,n,
      b,b,n,n,n,n,b,b,
      b,b,n,n,n,n,b,b]
# Baseline orientation: integer part of pitch/roll/yaw, extracted by
# splitting the formatted float string on '.' (fragile if a value ever
# renders in scientific notation - NOTE(review)).
accel_only = sense.get_accelerometer()
a = ("{pitch}.{roll}.{yaw}".format(**accel_only))
p,p0,r,r0,y,y0 = a.split(".")
p1 = p
r1 = r
y1 = y
# Lookup table: die value (1-6) -> pixel-art face.
faces = [d1, d2, d3, d4, d5, d6]

def _read_angles():
    """Return the integer parts of pitch/roll/yaw as strings."""
    reading = sense.get_accelerometer()
    pitch, _, roll, _, yaw, _ = "{pitch}.{roll}.{yaw}".format(**reading).split(".")
    return pitch, roll, yaw

while True:
    p, r, y = _read_angles()
    # A shake is any axis moving more than s degrees from the baseline.
    shaken = (int(p) + s < int(p1) or int(p) - s > int(p1)
              or int(r) + s < int(r1) or int(r) - s > int(r1)
              or int(y) + s < int(y1) or int(y) - s > int(y1))
    if shaken:
        # Rolling animation: a burst of random faces that speeds up.
        spins = random.randint(10, 20)
        while spins > 0:
            w = random.randint(1, 6)
            sense.set_pixels(faces[w - 1])
            time.sleep(0.5 / spins)
            spins -= 1
        # Blink the final face five times.
        for _ in range(5):
            time.sleep(0.2)
            sense.clear()
            time.sleep(0.2)
            sense.set_pixels(faces[w - 1])
        # Re-baseline orientation so the next shake is measured from here.
        p1, r1, y1 = _read_angles()
    time.sleep(0.01)
| 1.78125 | 2 |
cloudservice/scheduler/check_instances_integrity.py | iPlantCollaborativeOpenSource/iPlant-Atmosphere | 1 | 12761069 | <reponame>iPlantCollaborativeOpenSource/iPlant-Atmosphere<filename>cloudservice/scheduler/check_instances_integrity.py
#
# The contents of this file are subject to the terms listed in the LICENSE file you received with this code.
#
# Project: Atmosphere, iPlant Collaborative
# Author: <NAME>
# Twitter: @seungjin
# GitHub: seungjin
#
from atmosphere.cloudservice.models import *
import json
import logging
from django.core.mail import send_mail
def send_email_to_admin(subject=None, message=None):
    """Email the configured cloud admin; the ``admin_email`` config value is
    used as both sender and sole recipient.

    Args:
        subject: optional subject line; a default is used when None.
        message: body text of the email.
    """
    admin_email = Configs.objects.get(key="admin_email").value
    if subject is None:  # idiom fix: compare with 'is None', not '== None'
        subject = "cloud admin email from atmosphere"
    send_mail(subject, message, admin_email, [admin_email], fail_silently=False)
def check_instance_integrity(all_instances_json=None):
    """
    for now current running vms and current pending vms check.
    for now, only few cases will be caught...
    1) when
        eucalyptus says an instance is not running (not in the euca_describe list) but
        instances table shows athe instnace is running
    2) when
        instance's current_running_vms_from_eucalyptus is running
        and
        instance's current_pending_vms_from_instances_table is pending
        for 180 seconds or more
    """
    # Latest snapshot of all instances: either provided explicitly or the most
    # recent 'get_all_instances_list' row from the resources-watch table.
    if all_instances_json == None:
        all_instances = json.loads(Resources_watches.objects.raw("SELECT id, resource_get_function_result FROM cloudservice_resources_watches WHERE resource_get_function_name = 'get_all_instances_list' order by updated_at DESC limit 1")[0].resource_get_function_result)
    else:
        all_instances = json.loads(all_instances_json)
    # Instance ids Eucalyptus currently reports as running or pending.
    running_instance = filter(lambda x: x['instance_state'] == "running", all_instances)
    running_instance_ids_list = map(lambda x: x['instance_id'], running_instance)
    pending_instance = filter(lambda x: x['instance_state'] == "pending", all_instances)
    pending_instance_ids_list = map(lambda x: x['instance_id'], pending_instance)
    running_pending_instance_ids_list = running_instance_ids_list + pending_instance_ids_list
    # Instance ids our own database believes are running or pending.
    current_running_vms_from_instances_table = [i.instance_id for i in Instances.objects.filter(current_state = "running")]
    current_pending_vms_from_instances_table = [i.instance_id for i in Instances.objects.filter(current_state = "pending")]
    current_running_pending_vms_from_instances_table = current_running_vms_from_instances_table + current_pending_vms_from_instances_table
    # CASE 1: Missing instances
    # DB-side ids that Eucalyptus no longer reports -> mark them "lost" and
    # notify the admin.
    #set(current_running_vms_from_instances_table)
    #set(running_instance_ids_list)
    if len(current_running_pending_vms_from_instances_table) > 0 :
        if not set(current_running_pending_vms_from_instances_table).issubset(set(running_pending_instance_ids_list)) :
            lost_instnace_list = ", ".join(list(set(current_running_pending_vms_from_instances_table) - set(running_pending_instance_ids_list)))
            for i in list(set(current_running_pending_vms_from_instances_table) - set(running_pending_instance_ids_list)):
                instances = Instances.objects.get(instance_id = i)
                instances.current_state = "lost"
                instances.save()
            message = "Atmosphere detects following instances are lost:\n"+lost_instnace_list
            send_email_to_admin(subject="Lost instance alert",message=message)
            # do i need to update database ??? - I don't know.. ummmm
    # CASE 2: Failed instances
    # NOTE(review): described in the docstring but never implemented here.
| 2 | 2 |
code/GWO/gwo.py | KGJsGit/my_Optimization-studio | 4 | 12761070 | <gh_stars>1-10
def GWO(lb, ub, dim, searchAgents_no, maxIters, objf=None):
    """Grey Wolf Optimizer: minimise ``objf`` over the box [lb, ub]^dim.

    Args:
        lb, ub: lower/upper bound applied to every dimension.
        dim: number of decision variables.
        searchAgents_no: population size (number of wolves).
        maxIters: number of iterations.
        objf: objective function taking a 1-D ndarray of length ``dim`` and
            returning a scalar. Defaults to the module-level ``F9`` so
            existing callers are unaffected (generalizes the original
            hard-coded objective).

    Returns:
        str: "GWO :<best fitness found>".
    """
    if objf is None:
        objf = F9  # original hard-coded objective

    # Hierarchy wolves: alpha (best), beta (2nd best), delta (3rd best).
    alpha_pos = np.zeros(dim)
    alpha_score = float("inf")
    beta_pos = np.zeros(dim)
    beta_score = float("inf")
    delta_pos = np.zeros(dim)
    delta_score = float("inf")

    # Random initial positions in [lb, ub].
    positions = np.zeros((searchAgents_no, dim))
    for i in range(dim):
        positions[:, i] = (np.random.uniform(lb, ub, searchAgents_no))

    # Main loop
    for l in range(0, maxIters):
        # Evaluate every wolf and update the alpha/beta/delta hierarchy.
        for i in range(0, searchAgents_no):
            # Clip positions that escaped the search bounds.
            for j in range(dim):
                positions[i, j] = np.clip(positions[i, j], lb, ub)
            fitness = objf(positions[i, :])
            if fitness < alpha_score:
                # New global best: demote alpha -> beta -> delta.
                delta_score = beta_score
                delta_pos = beta_pos.copy()
                beta_score = alpha_score
                beta_pos = alpha_pos.copy()
                alpha_score = fitness
                alpha_pos = positions[i, :].copy()
            if fitness > alpha_score and fitness < beta_score:
                delta_score = beta_score
                delta_pos = beta_pos.copy()
                beta_score = fitness
                beta_pos = positions[i, :].copy()
            if fitness > alpha_score and fitness > beta_score and fitness < delta_score:
                delta_score = fitness
                delta_pos = positions[i, :].copy()

        # 'a' decreases linearly from 2 to 0 (exploration -> exploitation).
        a = 2 - l * ((2) / maxIters)

        # Move every wolf toward the average of the alpha/beta/delta guidance.
        for i in range(0, searchAgents_no):
            for j in range(0, dim):
                r1 = random.random()  # r1 is a random number in [0,1]
                r2 = random.random()  # r2 is a random number in [0,1]
                A1 = 2 * a * r1 - a  # Equation (3.3)
                C1 = 2 * r2  # Equation (3.4)
                D_alpha = abs(C1 * alpha_pos[j] - positions[i, j])  # Equation (3.5)-part 1
                X1 = alpha_pos[j] - A1 * D_alpha  # Equation (3.6)-part 1
                r1 = random.random()
                r2 = random.random()
                A2 = 2 * a * r1 - a  # Equation (3.3)
                C2 = 2 * r2  # Equation (3.4)
                D_beta = abs(C2 * beta_pos[j] - positions[i, j])  # Equation (3.5)-part 2
                X2 = beta_pos[j] - A2 * D_beta  # Equation (3.6)-part 2
                r1 = random.random()
                r2 = random.random()
                A3 = 2 * a * r1 - a  # Equation (3.3)
                C3 = 2 * r2  # Equation (3.4)
                D_delta = abs(C3 * delta_pos[j] - positions[i, j])  # Equation (3.5)-part 3
                X3 = delta_pos[j] - A3 * D_delta  # Equation (3.5)-part 3
                positions[i, j] = (X1 + X2 + X3) / 3  # Equation (3.7)

        print(l, "번째 최적 해 :", alpha_score)
    return "GWO :" + str(alpha_score)
| 2.25 | 2 |
wagtailnhsukfrontend/settings/apps.py | mikemonteith-livi/wagtail-nhsuk-frontend | 21 | 12761071 | <filename>wagtailnhsukfrontend/settings/apps.py
from django.apps import AppConfig
class SettingsAppConfig(AppConfig):
    """Django app configuration for the wagtail-nhsuk-frontend settings app."""
    # Dotted path of the app package.
    name = 'wagtailnhsukfrontend.settings'
    # Explicit label avoids clashing with other apps named 'settings'.
    label = 'wagtailnhsukfrontendsettings'
    verbose_name = "Wagtail NHSUK Frontend Settings"
| 1.328125 | 1 |
utils/data_management.py | pradhami/Basic-ANN | 0 | 12761072 | import tensorflow as tf
import logging
def train_valid_test_generator():
    """Load MNIST via tf.keras, scale pixels to [0, 1], and carve a
    5000-sample validation split off the front of the training set.

    Returns:
        tuple: ((x_train, y_train), (x_valid, y_valid), (x_test, y_test))
    """
    (x_train_full, y_train_full), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    # First 5000 training images become the validation set.
    x_valid = x_train_full[:5000] / 255
    y_valid = y_train_full[:5000]
    x_train = x_train_full[5000:] / 255
    y_train = y_train_full[5000:]
    x_test = x_test / 255
    return ((x_train, y_train), (x_valid, y_valid), (x_test, y_test))
| 2.875 | 3 |
exastolog/StateTransitionSubGraphs.py | sysbio-curie/pyExaStoLog | 2 | 12761073 | # BSD 3-Clause License
# Copyright (c) 2020, Instit<NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import networkx as nx
import numpy as np
import scipy.sparse as sparse
class StateTransitionSubGraphs:
    def __init__(self, A_sparse, x0):
        """Decompose the state transition graph into SCC-based subgraphs.

        Args:
            A_sparse: sparse state-transition matrix of the full state space.
            x0: initial condition vector over the state space.
        """
        # All attributes are populated by self.fcn_scc_subgraphs() below
        # (defined elsewhere in this class).
        self.subnetws = None
        self.scc_submats = None
        self.nonempty_subgraphs = None
        self.sorted_vertices = None
        self.cyclic_sorted_subgraphs = None
        self.fcn_scc_subgraphs(A_sparse, x0)
def fcn_metagraph_scc(self, A_sparse_sub):
matr_size = A_sparse_sub.shape[0]
g_sub = nx.from_scipy_sparse_matrix(A_sparse_sub, create_using=nx.DiGraph())
g_sub.remove_edges_from(nx.selfloop_edges(g_sub))
# Here we reverse it only for debugging purpose
# The order shouldn't matter, but it's nice to have the same as matlab
scc_list = list(reversed(list(nx.strongly_connected_components(g_sub))))
# print("%d connected components" % len(scc_list))
num_verts_per_scc = []
scc_memb_per_vert = np.zeros((matr_size, 1))
for i, scc in enumerate(scc_list):
num_verts_per_scc.append(len(scc))
scc_memb_per_vert[list(scc),:] = i
# row, col = np.where((A_sparse_sub - np.diag(A_sparse_sub.diagonal())) > 0)
# Yet another trick to get the exact same results as matlab
# The difference is returning the list from parsing via columns or via rows, hopefully nothing critical
t_matr = (A_sparse_sub - sparse.diags(A_sparse_sub.diagonal())).transpose()
col, row, _ = sparse.find(t_matr > 0)
diff = scc_memb_per_vert[row] != scc_memb_per_vert[col]
row_sel = row[np.where(diff[:, 0])]
col_sel = col[np.where(diff[:, 0])]
A_metagraph = sparse.csr_matrix(
(np.array(A_sparse_sub[row_sel, col_sel]).flatten(),
(scc_memb_per_vert[row_sel][:, 0], scc_memb_per_vert[col_sel][:, 0])),
shape=(len(num_verts_per_scc), len(num_verts_per_scc))
)
metagraph = nx.from_scipy_sparse_matrix(A_metagraph, create_using=nx.DiGraph())
metagraph_ordering=np.array(list(nx.topological_sort(metagraph)))
terminal_scc_ind, _ = np.where(A_metagraph.sum(axis=1) == 0)
terminal_scc_pos = np.isin(metagraph_ordering, terminal_scc_ind)
nonterm_scc_num = len(num_verts_per_scc) - len(terminal_scc_ind)
scc_sup1 = [i for i, scc in enumerate(scc_list) if len(scc) > 1]
term_cycles_ind = set(scc_sup1).intersection(set(terminal_scc_ind))
where_terminal_scc_pos, = np.where(terminal_scc_pos)
if np.sum(np.logical_not(where_terminal_scc_pos>(nonterm_scc_num-1))) > 0:
nonterm_scc_inds = np.logical_not(np.isin(metagraph_ordering, terminal_scc_ind))
metagraph_ordering_terminal_bottom = np.concatenate([
metagraph_ordering[nonterm_scc_inds],
metagraph_ordering[terminal_scc_pos]
])
else:
metagraph_ordering_terminal_bottom = metagraph_ordering
if len(term_cycles_ind) > 0:
scc_cell_reordered = [scc_list[i] for i in metagraph_ordering_terminal_bottom]
# index of cells containing term cycles after reordering
term_cycles_ind, = np.where(np.isin(metagraph_ordering_terminal_bottom, np.array(list(term_cycles_ind))))
# we need a cell of the indices of certices withing whese
scc_cell_reordered_lengths = np.array([len(scc) for scc in scc_cell_reordered])
scc_cell_reordered_cumsum = np.cumsum(scc_cell_reordered_lengths)
cycle_first_verts = scc_cell_reordered_cumsum[term_cycles_ind] - scc_cell_reordered_lengths[term_cycles_ind];
cycle_last_verts = scc_cell_reordered_cumsum[term_cycles_ind] - 1
term_cycles_bounds = [np.concatenate([cycle_first_verts, cycle_last_verts])]
else:
term_cycles_ind = []
term_cycles_bounds = []
# reordered original vertices
vert_topol_sort = np.concatenate([list(scc_list[i]) for i in metagraph_ordering_terminal_bottom])
return vert_topol_sort, term_cycles_ind, A_metagraph, scc_list, term_cycles_bounds
def fcn_scc_subgraphs(self, A_sparse, x0):
# print("Indentifying SCCs")
B_sparse = sparse.csc_matrix(A_sparse)
B_sparse.setdiag(0)
nb_scc, labels = sparse.csgraph.connected_components(B_sparse, directed=True,connection='weak')
scc = [[] for _ in range(nb_scc)]
for i, label in enumerate(labels):
scc[label].append(i)
self.subnetws = scc
cell_subgraphs = []
self.scc_submats = []
self.nonempty_subgraphs = []
# print("Identifying SCCs in subgraphs")
for i, subnet in enumerate(self.subnetws):
cell_subgraphs.append(subnet)
# Slicing done it two steps : First the rows, which is the most efficient for csr sparse matrix
# then columns. I should probably dig deeper
t_sparse = A_sparse[subnet, :][:, subnet]
t_sparse.setdiag(0)
nb_scc, labels = sparse.csgraph.connected_components(t_sparse, directed=True,connection='strong')
scc = [[] for _ in range(nb_scc)]
for j, label in enumerate(labels):
scc[label].append(j)
self.scc_submats.append(scc)
if sum(x0[subnet]) > 0:
self.nonempty_subgraphs.append(i)
self.sorted_vertices = []
self.cyclic_sorted_subgraphs = []
counter = 0
for nonempty_subgraph in self.nonempty_subgraphs:
A_sparse_sub = A_sparse[self.subnetws[nonempty_subgraph], :][:, self.subnetws[nonempty_subgraph]]
if A_sparse_sub.shape[0] == len(self.scc_submats[nonempty_subgraph]):
t_g = nx.from_scipy_sparse_matrix(A_sparse_sub, create_using=nx.DiGraph())
t_g.remove_edges_from(nx.selfloop_edges(t_g))
self.sorted_vertices.append(list(nx.topological_sort(t_g)))
else:
# print("Cycles in STG")
# If entire graph is only one connected component, no need for re-ordering
if len(self.scc_submats[nonempty_subgraph]) == 1:
self.sorted_vertices.append(self.scc_submats[nonempty_subgraph])
else:
vert_topol_sort,term_cycles_ind,_,scc_cell,term_cycle_bounds=self.fcn_metagraph_scc(A_sparse_sub)
cycle_lengths = [len(scc) for scc in scc_cell]
a = np.zeros((max(cycle_lengths)))
for i in range(max(cycle_lengths)):
for j in cycle_lengths:
if j == i+1:
a[j-1] += 1
# print('Cycles of lenth: %s (%s times)' % (set(cycle_lengths), a[np.where(a>0)]) )
self.cyclic_sorted_subgraphs.append((vert_topol_sort, term_cycles_ind, term_cycle_bounds))
counter += 1
| 1.101563 | 1 |
bitey/cpu/instruction/bpl.py | jgerrish/bitey | 0 | 12761074 | from dataclasses import dataclass
from bitey.cpu.instruction.instruction import Instruction
@dataclass
class BPL(Instruction):
    """
    BPL: Branch on Result Plus

    Transfers control to the target address when the Negative flag is
    clear; otherwise execution falls through to the next instruction.
    """

    def instruction_execute(self, cpu, memory, value, address=None):
        """Branch to ``address`` when the N flag is not set."""
        if address is None:
            return
        if cpu.flags["N"].status is not True:
            cpu.registers["PC"].set(address)
| 3.15625 | 3 |
phyllo/protocol/stacks.py | ethanjli/phyllo-python | 0 | 12761075 | """Communication protocol stacks for easy abstractions of communication links."""
# Builtins
# Packages
from phyllo.protocol.application.stacks import make_preset_stack as make_preset_application
from phyllo.protocol.application.stacks import make_pubsub
from phyllo.protocol.communication import AutomaticStack, PRESET_STACK_TYPES
from phyllo.protocol.transport.stacks import make_preset_stack as make_preset_transport
from phyllo.protocol.transport.stacks import make_stack as make_transport
# Protocol stacks
def make_stack(
    transport_stack=make_transport, application_stack=make_pubsub,
    stack=AutomaticStack, name='Protocol'
):
    """Assemble a protocol stack from a transport and an application layer.

    Either layer may be ``None`` (omitted), an already-built stack, or a
    zero-argument factory callable that produces one. Raises ``ValueError``
    when both layers are omitted.
    """
    layers = []
    for layer in (transport_stack, application_stack):
        if layer is None:
            continue
        layers.append(layer() if callable(layer) else layer)
    if not layers:
        raise ValueError('Cannot make an empty protocol stack!')
    return stack(*layers, name=name)
# Preset stacks
def make_preset_stack(
    transport_medium='stream', transport_logical='minimal', application='pubsub',
    stack='automatic', name='Protocol'
):
    """Build a full protocol stack from preset names for each layer."""
    transport_layer = make_preset_transport(
        medium=transport_medium, logical=transport_logical, stack=stack
    )
    application_layer = make_preset_application(application=application, stack=stack)
    stack_type = PRESET_STACK_TYPES[stack]
    return make_stack(
        transport_stack=transport_layer, application_stack=application_layer,
        stack=stack_type, name=name
    )
| 2.671875 | 3 |
io_scene_niftools/modules/nif_import/animation/transform.py | ZtModArchive/blender_niftools_addon | 3 | 12761076 | <gh_stars>1-10
"""This script contains classes to help import NIF controllers as blender bone or object level transform(ation) animations."""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright © 2019, NIF File Format Library and Tools contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import bpy
import mathutils
from functools import singledispatch
from bisect import bisect_left
from pyffi.formats.nif import NifFormat
from io_scene_niftools.modules.nif_import.animation import Animation
from io_scene_niftools.modules.nif_import.object import block_registry
from io_scene_niftools.utils import math
from io_scene_niftools.utils.blocks import safe_decode
from io_scene_niftools.utils.logging import NifLog
def interpolate(x_out, x_in, y_in):
    """
    Sample the piecewise-linear curve through (x_in, y_in) at the x
    coordinates x_out. Points outside the input range are extrapolated
    with the slope of the nearest interval; a single input point yields
    constant extrapolation.
    """
    if len(x_in) > 1:
        slopes = [
            (y_in[k + 1] - y_in[k]) / (x_in[k + 1] - x_in[k])
            for k in range(len(x_in) - 1)
        ]
    else:
        # with one input point the slope is 0: constant extrapolation
        slopes = [0]
    result = []
    for x in x_out:
        k = bisect_left(x_in, x) - 1
        # clamp the interval index to the valid range
        k = min(max(k, 0), len(slopes) - 1)
        result.append(y_in[k] + slopes[k] * (x - x_in[k]))
    return result
class TransformAnimation(Animation):
    """Imports NIF keyframe/transform controllers as Blender actions,
    either as object-level animation or as pose-bone animation on an
    armature.
    """

    def __init__(self):
        super().__init__()
        # dispatch import_kf_root on the concrete type of the KF root block
        self.import_kf_root = singledispatch(self.import_kf_root)
        self.import_kf_root.register(NifFormat.NiControllerSequence, self.import_controller_sequence)
        self.import_kf_root.register(NifFormat.NiSequenceStreamHelper, self.import_sequence_stream_helper)
        self.import_kf_root.register(NifFormat.NiSequenceData, self.import_sequence_data)

    def get_bind_data(self, b_armature):
        """Get the required bind data of an armature. Used by standalone KF import and export. """
        self.bind_data = {}
        if b_armature:
            for b_bone in b_armature.data.bones:
                n_bind_scale, n_bind_rot, n_bind_trans = math.decompose_srt(math.get_object_bind(b_bone))
                self.bind_data[b_bone.name] = (n_bind_rot.inverted(), n_bind_trans)

    def get_target(self, b_armature_obj, n_name):
        """Gets a target for an anim controller"""
        b_name = block_registry.get_bone_name_for_blender(n_name)
        # if we have an armature, get the pose bone
        if b_armature_obj:
            if b_name in b_armature_obj.pose.bones:
                return b_armature_obj.pose.bones[b_name]
        # try to find the object for animation
        else:
            if b_name in bpy.data.objects:
                return bpy.data.objects[b_name]

    def import_kf_root(self, kf_root, b_armature_obj):
        """Base method to warn user that this root type is not supported"""
        NifLog.warn(f"Unknown KF root block found : {safe_decode(kf_root.name)}")
        NifLog.warn(f"This type isn't currently supported: {type(kf_root)}")

    def import_generic_kf_root(self, kf_root):
        """Log the root block type and return its decoded name (used as the action name)."""
        NifLog.debug(f'Importing {type(kf_root)}...')
        return safe_decode(kf_root.name)

    def import_sequence_data(self, kf_root, b_armature_obj):
        """Import a NiSequenceData root: one keyframe controller per evaluator."""
        b_action_name = self.import_generic_kf_root(kf_root)
        actions = set()
        for evaluator in kf_root.evaluators:
            b_target = self.get_target(b_armature_obj, evaluator.node_name)
            actions.add(self.import_keyframe_controller(evaluator, b_armature_obj, b_target, b_action_name))
        for b_action in actions:
            if b_action:
                self.import_text_keys(kf_root, b_action)
                if kf_root.cycle_type:
                    extend = self.get_extend_from_cycle_type(kf_root.cycle_type)
                    self.set_extrapolation(extend, b_action.fcurves)

    def import_sequence_stream_helper(self, kf_root, b_armature_obj):
        """Import a NiSequenceStreamHelper root: walks the parallel chains of
        extra data (node names / text keys) and keyframe controllers."""
        b_action_name = self.import_generic_kf_root(kf_root)
        actions = set()
        # import parallel trees of extra datas and keyframe controllers
        extra = kf_root.extra_data
        controller = kf_root.controller
        textkeys = None
        while extra and controller:
            # textkeys in the stack do not specify node names, import as markers
            while isinstance(extra, NifFormat.NiTextKeyExtraData):
                textkeys = extra
                extra = extra.next_extra_data
            # grabe the node name from string data
            if isinstance(extra, NifFormat.NiStringExtraData):
                b_target = self.get_target(b_armature_obj, extra.string_data)
                actions.add(self.import_keyframe_controller(controller, b_armature_obj, b_target, b_action_name))
            # grab next pair of extra and controller
            extra = extra.next_extra_data
            controller = controller.next_controller
        for b_action in actions:
            if b_action:
                self.import_text_key_extra_data(textkeys, b_action)

    def import_controller_sequence(self, kf_root, b_armature_obj):
        """Import a NiControllerSequence root (ZT2 / Fallout / Loki variants)."""
        b_action_name = self.import_generic_kf_root(kf_root)
        actions = set()
        for controlledblock in kf_root.controlled_blocks:
            # get bone name
            # todo [pyffi] fixed get_node_name() is up, make release and clean up here
            # ZT2 - old way is not supported by pyffi's get_node_name()
            n_name = controlledblock.target_name
            # fallout (node_name) & Loki (StringPalette)
            if not n_name:
                n_name = controlledblock.get_node_name()
            b_target = self.get_target(b_armature_obj, n_name)
            # todo - temporarily disabled! should become a custom property on both object and pose bone, ideally
            # import bone priority
            # b_target.niftools.priority = controlledblock.priority
            # fallout, Loki
            kfc = controlledblock.interpolator
            if not kfc:
                # ZT2
                kfc = controlledblock.controller
            if kfc:
                actions.add(self.import_keyframe_controller(kfc, b_armature_obj, b_target, b_action_name))
        for b_action in actions:
            if b_action:
                self.import_text_keys(kf_root, b_action)
                # fallout: set global extrapolation mode here (older versions have extrapolation per controller)
                if kf_root.cycle_type:
                    extend = self.get_extend_from_cycle_type(kf_root.cycle_type)
                    self.set_extrapolation(extend, b_action.fcurves)

    def import_keyframe_controller(self, n_kfc, b_armature, b_target, b_action_name):
        """
        Imports a keyframe controller as fcurves in an action, which is created if necessary.
        n_kfc: some nif struct that has keyframe data, somewhere
        b_armature: either None or Object (blender armature)
        b_target: either Object or PoseBone
        b_action_name: name of the action that should be used; the actual imported name may differ due to suffixes
        """
        # the target may not exist in the scene, in which case it is None here
        if not b_target:
            return
        NifLog.debug(f'Importing keyframe controller for {b_target.name}')
        translations = []
        scales = []
        rotations = []
        eulers = []
        n_kfd = None
        # transform controllers (dartgun.nif)
        if isinstance(n_kfc, NifFormat.NiTransformController):
            if n_kfc.interpolator:
                n_kfd = n_kfc.interpolator.data
        # B-spline curve import
        elif isinstance(n_kfc, NifFormat.NiBSplineInterpolator):
            # used by WLP2 (tiger.kf), but only for non-LocRotScale data
            # eg. bone stretching - see controlledblock.get_variable_1()
            # do not support this for now, no good representation in Blender
            if isinstance(n_kfc, NifFormat.NiBSplineCompFloatInterpolator):
                # pyffi lacks support for this, but the following gets float keys
                # keys = list(kfc._getCompKeys(kfc.offset, 1, kfc.bias, kfc.multiplier))
                return
            times = list(n_kfc.get_times())
            # just do these temp steps to avoid generating empty fcurves down the line
            trans_temp = [mathutils.Vector(tup) for tup in n_kfc.get_translations()]
            if trans_temp:
                translations = zip(times, trans_temp)
            rot_temp = [mathutils.Quaternion(tup) for tup in n_kfc.get_rotations()]
            if rot_temp:
                rotations = zip(times, rot_temp)
            scale_temp = list(n_kfc.get_scales())
            if scale_temp:
                scales = zip(times, scale_temp)
            # Bsplines are Bezier curves
            interp_rot = interp_loc = interp_scale = "BEZIER"
        elif isinstance(n_kfc, NifFormat.NiMultiTargetTransformController):
            # not sure what this is used for
            return
        else:
            # ZT2 & Fallout
            n_kfd = n_kfc.data
        if isinstance(n_kfd, NifFormat.NiKeyframeData):
            interp_rot = self.get_b_interp_from_n_interp(n_kfd.rotation_type)
            interp_loc = self.get_b_interp_from_n_interp(n_kfd.translations.interpolation)
            interp_scale = self.get_b_interp_from_n_interp(n_kfd.scales.interpolation)
            if n_kfd.rotation_type == 4:
                b_target.rotation_mode = "XYZ"
                # uses xyz rotation
                if n_kfd.xyz_rotations[0].keys:
                    # euler keys need not be sampled at the same time in KFs
                    # but we need complete key sets to do the space conversion
                    # so perform linear interpolation to import all keys properly
                    # get all the keys' times
                    times_x = [key.time for key in n_kfd.xyz_rotations[0].keys]
                    times_y = [key.time for key in n_kfd.xyz_rotations[1].keys]
                    times_z = [key.time for key in n_kfd.xyz_rotations[2].keys]
                    # the unique time stamps we have to sample all curves at
                    times_all = sorted(set(times_x + times_y + times_z))
                    # the actual resampling
                    x_r = interpolate(times_all, times_x, [key.value for key in n_kfd.xyz_rotations[0].keys])
                    y_r = interpolate(times_all, times_y, [key.value for key in n_kfd.xyz_rotations[1].keys])
                    z_r = interpolate(times_all, times_z, [key.value for key in n_kfd.xyz_rotations[2].keys])
                    eulers = zip(times_all, zip(x_r, y_r, z_r))
            else:
                b_target.rotation_mode = "QUATERNION"
                rotations = [(key.time, key.value) for key in n_kfd.quaternion_keys]
            if n_kfd.scales.keys:
                scales = [(key.time, key.value) for key in n_kfd.scales.keys]
            if n_kfd.translations.keys:
                translations = [(key.time, key.value) for key in n_kfd.translations.keys]
        # ZT2 - get extrapolation for every kfc
        if isinstance(n_kfc, NifFormat.NiKeyframeController):
            flags = n_kfc.flags
        # fallout, Loki - we set extrapolation according to the root NiControllerSequence.cycle_type
        else:
            flags = None
        # create or get the action
        if b_armature and isinstance(b_target, bpy.types.PoseBone):
            # action on armature, one per armature
            b_action = self.create_action(b_armature, b_action_name)
            if b_target.name in self.bind_data:
                n_bind_rot_inv, n_bind_trans = self.bind_data[b_target.name]
            bone_name = b_target.name
        else:
            # one action per object
            b_action = self.create_action(b_target, f"{b_action_name}_{b_target.name}")
            bone_name = None
        if eulers:
            NifLog.debug('Rotation keys..(euler)')
            fcurves = self.create_fcurves(b_action, "rotation_euler", range(3), flags, bone_name)
            for t, val in eulers:
                key = mathutils.Euler(val)
                if bone_name:
                    key = math.import_keymat(n_bind_rot_inv, key.to_matrix().to_4x4()).to_euler()
                self.add_key(fcurves, t, key, interp_rot)
        elif rotations:
            NifLog.debug('Rotation keys...(quaternions)')
            fcurves = self.create_fcurves(b_action, "rotation_quaternion", range(4), flags, bone_name)
            for t, val in rotations:
                key = mathutils.Quaternion([val.w, val.x, val.y, val.z])
                if bone_name:
                    key = math.import_keymat(n_bind_rot_inv, key.to_matrix().to_4x4()).to_quaternion()
                self.add_key(fcurves, t, key, interp_rot)
        if translations:
            NifLog.debug('Translation keys...')
            fcurves = self.create_fcurves(b_action, "location", range(3), flags, bone_name)
            for t, val in translations:
                key = mathutils.Vector([val.x, val.y, val.z])
                if bone_name:
                    key = math.import_keymat(n_bind_rot_inv, mathutils.Matrix.Translation(key - n_bind_trans)).to_translation()
                self.add_key(fcurves, t, key, interp_loc)
        if scales:
            NifLog.debug('Scale keys...')
            fcurves = self.create_fcurves(b_action, "scale", range(3), flags, bone_name)
            for t, val in scales:
                key = (val, val, val)
                self.add_key(fcurves, t, key, interp_scale)
        return b_action

    def import_transforms(self, n_block, b_obj, bone_name=None):
        """Loads an animation attached to a nif block."""
        # find keyframe controller
        n_kfc = math.find_controller(n_block, (NifFormat.NiKeyframeController, NifFormat.NiTransformController))
        if n_kfc:
            # skeletal animation
            if bone_name:
                p_bone = b_obj.pose.bones[bone_name]
                self.import_keyframe_controller(n_kfc, b_obj, p_bone, f"{b_obj.name}_Anim")
            # object-level animation
            else:
                self.import_keyframe_controller(n_kfc, None, b_obj, f"{b_obj.name}_Anim")

    def import_controller_manager(self, n_block, b_obj, b_armature):
        """Import all controller sequences attached to a node's NiControllerManager."""
        ctrlm = n_block.controller
        if ctrlm and isinstance(ctrlm, NifFormat.NiControllerManager):
            NifLog.debug(f'Importing NiControllerManager')
            if b_armature:
                self.get_bind_data(b_armature)
            for ctrl in ctrlm.controller_sequences:
                self.import_kf_root(ctrl, b_armature)
| 1.445313 | 1 |
tpDcc/libs/curves/core/lib.py | tpDcc/tpDcc-libs-curves | 0 | 12761077 | <filename>tpDcc/libs/curves/core/lib.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains library definition for tpDcc-libs-curveslib
"""
from __future__ import print_function, division, absolute_import
import os
import logging.config
from tpDcc import dcc
from tpDcc.core import library, command
from tpDcc.libs.python import path as path_utils
from tpDcc.libs.curves.core import consts
logger = logging.getLogger(consts.LIB_ID)
class CurvesLib(library.DccLibrary, object):
    """tpDcc library plugin that manages curves in a DCC agnostic way."""

    ID = consts.LIB_ID

    def __init__(self, *args, **kwargs):
        super(CurvesLib, self).__init__(*args, **kwargs)

    @classmethod
    def config_dict(cls):
        """Return the library configuration, overlaying curve specific
        values on top of the base DccLibrary configuration."""
        config = library.DccLibrary.config_dict()
        config.update({
            'name': 'Curves Library',
            'id': CurvesLib.ID,
            'supported_dccs': {'maya': ['2017', '2018', '2019', '2020']},
            'tooltip': 'Library to manage curves in a DCC agnostic way',
            'root': cls.ROOT if hasattr(cls, 'ROOT') else '',
            'file': cls.PATH if hasattr(cls, 'PATH') else '',
        })
        return config

    @classmethod
    def load(cls):
        """Register the DCC specific commands directory with the command runner.

        The command runner uses this registration to locate the commands for
        the current DCC when executing library commands.
        """
        dcc_name = dcc.client().get_name()
        dccs_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        commands_path = path_utils.clean_path(
            os.path.join(dccs_root, 'dccs', dcc_name, 'commands'))
        if os.path.isdir(commands_path):
            command.CommandRunner().manager().register_path(commands_path, 'tpDcc')
| 2.21875 | 2 |
2-2dual.py | zhangjun135/Li_hang-Statistical-learning-method_code | 0 | 12761078 | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 27 22:04:58 2020
@author: zhangjun
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 27 20:06:37 2020
@author: zhangjun
"""
import numpy as np
class perceptron:
    """Dual-form perceptron (Li Hang, 'Statistical Learning Methods', ch. 2).

    After training, ``alpha`` holds the per-sample update counts, ``w`` the
    recovered weight vector and ``b`` the bias (a one-element array).
    """

    def __init__(self):
        self.alpha = None
        self.b = None
        self.w = None

    def train(self, x, y, learning_rate=1):
        """Fit on samples ``x`` with labels ``y`` in {+1, -1} until every
        sample is classified correctly (data must be linearly separable)."""
        n_samples = x.shape[0]
        self.alpha = np.zeros(n_samples)
        self.b = np.zeros(1)
        # Gram matrix: the dual form only ever needs inner products x_i . x_j
        gram = np.dot(x, x.T)
        converged = False
        while not converged:
            converged = True
            for i in range(n_samples):
                margin = y[i] * (np.sum(np.dot(self.alpha * y, gram[:, i])) + self.b)
                if margin <= 0:
                    # misclassified: bump this sample's multiplier and the bias
                    self.alpha[i] = self.alpha[i] + learning_rate
                    self.b = self.b + learning_rate * y[i]
                    converged = False
                    break
            # recover the primal weights and report progress each pass
            self.w = np.dot(self.alpha.T * y, x)
            print (self.alpha,self.w,self.b)

    def prediction(self, x_pred):
        """Return the predicted label (+1 or -1) for each row of ``x_pred``."""
        labels = np.zeros(x_pred.shape[0])
        for i, sample in enumerate(x_pred):
            score = np.sum(self.w * sample) + self.b
            labels[i] = 1 if score > 0 else -1
        return labels
if __name__ == '__main__':
    # Reproduce the classic dual-form perceptron example (Li Hang, ex. 2.2).
    x = np.array([[3, 3], [4, 3], [1, 1]])
    y = np.array([1, 1, -1])
    model = perceptron()
    model.train(x, y, learning_rate=1)
    y_pred = model.prediction(x)
    print ('w,b=', model.w, model.b)
| 3.171875 | 3 |
Web_Crawling/python-crawler/chapter_9/whoosh_indexer.py | devming0322/2021-K-Digital-Training | 11 | 12761079 | <reponame>devming0322/2021-K-Digital-Training
import os
import sys
import time
import w3lib.html
from dashboard_crawler import get_dashboard_posts
from whoosh_lib import get_or_create_index
if __name__ == '__main__':
    # Load the index handle (creating the index if it does not exist yet)
    ix = get_or_create_index()
    # Create a writer object for updating the index
    writer = ix.writer()
    # Fetch the posts from the dashboard
    dashboard_posts = get_dashboard_posts()
    # Index the data
    for post in dashboard_posts['posts']:
        writer.update_document(
            post_url=post['post_url'],
            # Strip HTML tags from the body text before indexing
            body=w3lib.html.remove_tags(post['body']),
        )
    # Commit the changes to the index
    writer.commit()
| 2.1875 | 2 |
dcbase/tests/unit/models/testUserProfile.py | tctimmeh/dc-django-base | 0 | 12761080 | <reponame>tctimmeh/dc-django-base<gh_stars>0
from dcbase.apps import TIMEZONE_SESSION_KEY
from django.utils.timezone import get_current_timezone
from dcbase.models import UserProfile
from dcbase.tests.unit import UnitTestCase
from django.utils import translation
from django.utils.translation import LANGUAGE_SESSION_KEY
class TestUserProfile(UnitTestCase):
    """Unit tests for the automatic UserProfile creation and the session
    side effects (language / timezone) applied when a user logs in.
    """

    def test_profileIsAddedWhenUserIsCreated(self):
        """A UserProfile is attached automatically when a user is created."""
        user = self.createUser()
        self.assertIsInstance(user.profile, UserProfile)

    def test_profileIsCreatedWithCurrentLanguage(self):
        """A new profile records the language active at creation time."""
        language = 'fr'
        translation.activate(language)
        user = self.createUser()
        self.assertEqual(language, user.profile.language)

    def test_loggingInSetsLanguageFromProfile(self):
        """Logging in copies the profile language into the session."""
        initialLanguage = 'en'
        translation.activate(initialLanguage)
        user = self.createUser()
        user.profile.language = 'fr'
        user.profile.save()
        self.logOut()
        # still the initial language while logged out
        self.assertEqual(initialLanguage, translation.get_language())
        self.logInAs(user)
        self.assertEqual('fr', self.client.session[LANGUAGE_SESSION_KEY])

    def test_loggingInSetsTimezoneFromProfile(self):
        """Logging in copies the profile timezone into the session."""
        expected = 'America/Edmonton'
        user = self.createUser()
        user.profile.timezone = expected
        user.profile.save()
        # sanity check: test environment must not already be in this timezone
        self.assertNotEqual(expected, get_current_timezone())
        self.logInAs(user)
        self.assertEqual(expected, self.client.session[TIMEZONE_SESSION_KEY])
| 2.125 | 2 |
modules/db_connectors/mssql.py | Infosecurity-LLC/unicon_v2 | 1 | 12761081 | import pymssql
import logging
import sys
logger = logging.getLogger(__name__)
class DbaseException(Exception):
    """Raised when the database layer cannot connect or a query fails."""
class SelectorMSSQL:
    """Thin wrapper around a pymssql connection for running raw queries.

    Supports use as a context manager so the underlying connection is
    closed when the ``with`` block exits.
    """

    def __init__(self, device, db_setting):
        """Open a connection to the MSSQL server described by ``db_setting``.

        :param device: device identifier used to prefix log messages
        :param db_setting: dict with ``server``, ``port``, ``user``,
            ``password`` and ``database`` keys
        """
        self.cursor = None
        self.device = device
        try:
            self.connection = pymssql.connect(server=db_setting['server'],
                                              port=db_setting['port'],
                                              user=db_setting['user'],
                                              password=db_setting['password'],
                                              database=db_setting['database'])
        except pymssql.OperationalError as err:
            # NOTE(review): exiting the whole process from a library class is
            # drastic, but preserved because callers rely on this behaviour.
            logger.error(f"[{device}] Не удалось подключиться к БД")
            sys.exit(1)
            # raise DbaseException(f'[{self.device}] Dbase server connection failed {err}')

    def __enter__(self):
        # Fix: __exit__ existed without __enter__, so using the class in a
        # ``with`` statement raised AttributeError instead of working.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.connection.close()

    def raw_query(self, query):
        """Execute ``query`` and return the live cursor holding its results.

        :raises DbaseException: when the SQL statement is invalid.
        """
        self.cursor = self.connection.cursor()
        try:
            self.cursor.execute(query)
        except pymssql.ProgrammingError as err:
            # chain the original driver error so the root cause is preserved
            raise DbaseException(
                f'[{self.device}] SQL ProgrammingError at dbase.select function. Error in sql select: {query}') from err
        return self.cursor
| 2.65625 | 3 |
benchmark_graph_util.py | SugarBooty/chia_benchmark_graph | 2 | 12761082 | <reponame>SugarBooty/chia_benchmark_graph<gh_stars>1-10
"""
This is a quick program makde to graph data from benchmarking Chia parameters.
It accepts data in the form of CSV files as follows:
THREADS,BUCKETS,DURATION
"""
from matplotlib.colors import Normalize
import matplotlib.pyplot as plt
from matplotlib import cm
def readFromFile(filePath: str) -> list:
    """Read the file at ``filePath`` and return its lines, newline-stripped."""
    with open(filePath) as source:
        lines = source.read().splitlines()
    return lines
# converts the list formatted "T,B,D" into lists of T, B, and D
# returns a tuple of the lists
def extrapolateData(data: list) -> tuple:
    """Split "THREADS,BUCKETS,DURATION" CSV rows into three parallel lists.

    Returns a tuple ``(threads, buckets, durations)`` where threads and
    buckets are ints and durations are floats.
    """
    fields = [entry.split(',') for entry in data]
    threads = [int(t) for t, _, _ in fields]
    buckets = [int(b) for _, b, _ in fields]
    durations = [float(d) for _, _, d in fields]
    return (threads, buckets, durations)
def dedupeList(input: list) -> list:
    """Return ``input`` with duplicates removed, preserving first-seen order."""
    seen = set()
    unique = []
    for item in input:
        if item not in seen:
            seen.add(item)
            unique.append(item)
    return unique
def makeGraph(data, data2=None):
    """Render one or two 3D trisurf plots of (threads, buckets, duration) data.

    :param data: tuple of three parallel lists (x, y, z) as produced by
        extrapolateData().
    :param data2: optional second data set drawn on the same axes; pass
        None (or omit) to plot only ``data``. (Previously this defaulted
        to a mutable ``[]`` — a classic Python pitfall.)
    """
    x1, y1, z1 = data
    has_overlay = bool(data2)
    # Configure matplotlib for 3d graphing
    plt.figure()
    ax = plt.axes(projection='3d')
    # cmap picks the colour scheme; norm ties colours to the z values so the
    # surface height is easier to read; edgecolor outlines the triangles;
    # alpha < 1 lets overlapping surfaces show through each other.
    ax.plot_trisurf(x1, y1, z1, cmap=cm.bwr, norm=Normalize(), edgecolor="black", alpha=0.8)
    if has_overlay:
        x2, y2, z2 = data2
        ax.plot_trisurf(x2, y2, z2, cmap=cm.bwr, norm=Normalize(), edgecolor="black")
    # tick every distinct thread/bucket value so the grid matches the inputs
    ax.set_xticks(dedupeList(x1))
    ax.set_yticks(dedupeList(y1))
    plt.show()
# Load both benchmark runs and plot them on the same axes.
no_raid_data = extrapolateData(readFromFile("benchmarkNoRaidCut"))
raid_data = extrapolateData(readFromFile("benchmarkRaidCut"))
# The second argument can be omitted to show only one surface.
makeGraph(no_raid_data, raid_data)
| 2.65625 | 3 |
app/data/record_on_appeal/record_on_appeal.py | 18F/aocourt-api | 0 | 12761083 | <reponame>18F/aocourt-api<filename>app/data/record_on_appeal/record_on_appeal.py
import datetime
from sqlalchemy import Boolean, Column, Integer, String, Table, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql.sqltypes import DateTime
from ..database import mapper_registry
from app.entities import RecordOnAppeal, RecordOnAppealDocketEntry
# Table backing the RecordOnAppeal entity (see run_mappers below).
roa_table = Table(
    'records_on_appeal',
    mapper_registry.metadata,
    Column('id', Integer, primary_key=True, index=True),
    Column('original_case_id', Integer, ForeignKey('cases.id'), nullable=False),
    Column('title', String, nullable=False),
    Column('date_filed', DateTime),
    Column('sealed', Boolean, default=False),
    Column('type', String),
    Column('court', String),
    Column('receiving_court', String),
    Column('status', String, nullable=True),
    Column('reviewed', Boolean, default=False),
    Column('remanded', Boolean, default=False),
    Column('created_at', DateTime, default=datetime.datetime.utcnow),
    Column(
        'updated_on',
        DateTime,
        default=datetime.datetime.utcnow,
        onupdate=datetime.datetime.utcnow
    )
)
# Table backing the RecordOnAppealDocketEntry entity.
roa_docket_entry_table = Table(
    "roa_docket_entry",
    mapper_registry.metadata,
    Column('id', Integer, nullable=False, primary_key=True),
    Column('case_id', Integer, ForeignKey('records_on_appeal.id'), nullable=False),
    Column('sequence_no', Integer, nullable=False),
    Column('court', String, nullable=False),
    # NOTE(review): 'recieving_court' is misspelled (cf. 'receiving_court'
    # above); renaming it would change the DB schema, so it is left as-is.
    Column('recieving_court', String, nullable=True),
    Column('text', String, nullable=False),
    Column('date_filed', DateTime),
    Column('entry_type', String, nullable=False),
    Column('sealed', Boolean, default=False),
    Column('include_with_appeal', Boolean, default=True),
    Column('created_at', DateTime, default=datetime.datetime.utcnow),
    Column(
        'updated_on',
        DateTime,
        default=datetime.datetime.utcnow,
        onupdate=datetime.datetime.utcnow
    )
)
def run_mappers():
    """Bind the domain entities to their tables via SQLAlchemy imperative mapping."""
    mapper_registry.map_imperatively(RecordOnAppealDocketEntry, roa_docket_entry_table)
    # docket entries are exposed on RecordOnAppeal, ordered by sequence number
    docket_entries_rel = relationship(
        RecordOnAppealDocketEntry,
        order_by="asc(RecordOnAppealDocketEntry.sequence_no)"
    )
    mapper_registry.map_imperatively(
        RecordOnAppeal,
        roa_table,
        properties={'docket_entries': docket_entries_rel}
    )
| 2.15625 | 2 |
galileo.py | pdsteele/DES-Python | 6 | 12761084 | <filename>galileo.py
# -------------------------------------------------------------------------
# * A Monte Carlo simulation of Galileo's three dice experiment.
# *
# * Name : galileo.c
# * Author : <NAME> & <NAME>
# * Language : ANSI C
# * Latest Revision : 9-11-98
# # Translated by : <NAME>
# # Language : Python 3.3
# # Latest Revision : 3/26/14
# * -------------------------------------------------------------------------
# */
from rng import random, putSeed
N = 10000 # number of replications */
def Equilikely(a, b):
    """Generate an Equilikely(a, b) random variate; use a < b.

    Consumes exactly one draw from rng.random() and maps it uniformly
    onto the integers a..b inclusive.
    """
    span = b - a + 1
    return a + int(span * random())
# Histogram of three-dice sums (only indices 3..18 are ever hit) and the
# matching relative-frequency probability estimates.
count = [0] * 19
p = [0.0] * 19

putSeed(0)

# Run N replications of Galileo's three-dice experiment.
for _ in range(N):
    roll_sum = Equilikely(1, 6) + Equilikely(1, 6) + Equilikely(1, 6)
    count[roll_sum] += 1

# Convert raw counts into probability estimates.
for roll_sum in range(3, 19):
    p[roll_sum] = float(count[roll_sum]) / N

print("\nbased on {0:d} replications the estimated probabilities are:\n".format(N))
for roll_sum in range(3, 19):
    print("p[{0:2d}] = {1:5.3f}".format(roll_sum, p[roll_sum]))
# C output:
# Enter a positive integer seed (9 digits or less) >> 123456789
# based on 10000 replications the estimated probabilities are:
# p[ 3] = 0.004
# p[ 4] = 0.014
# p[ 5] = 0.030
# p[ 6] = 0.043
# p[ 7] = 0.066
# p[ 8] = 0.102
# p[ 9] = 0.119
# p[10] = 0.120
# p[11] = 0.125
# p[12] = 0.116
# p[13] = 0.095
# p[14] = 0.068
# p[15] = 0.047
# p[16] = 0.029
# p[17] = 0.016
# p[18] = 0.005 | 2.90625 | 3 |
algorithms/delete-node-in-a-linked-list.py | jiangyx3915/leetcode | 0 | 12761085 | """
description: delete-node-in-a-linked-list(删除链表中的节点)
author: jiangyx3915
date: 2018/10/13
请编写一个函数,使其可以删除某个链表中给定的(非末尾)节点,你将只被给定要求被删除的节点。
现有一个链表 -- head = [4,5,1,9],它可以表示为:
4 -> 5 -> 1 -> 9
说明:
链表至少包含两个节点。
链表中所有节点的值都是唯一的。
给定的节点为非末尾节点并且一定是链表中的一个有效节点。
不要从你的函数中返回任何结果。
结题思路
由于没有办法得到node的前节点,我们只能通过将下一个节点的值复制到当前节点node,然后移除node的下一个节点来达到目
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def deleteNode(self, node):
        """
        Delete *node* (a non-tail node) from its singly-linked list.

        Since the predecessor is unreachable, copy the successor's value
        into *node* and unlink the successor instead.

        :type node: ListNode
        :rtype: void Do not return anything, modify node in-place instead.
        """
        successor = node.next
        node.val = successor.val
        node.next = successor.next
| 3.984375 | 4 |
app/tests/routers/test_dependencies.py | NewShadesDAO/api | 1 | 12761086 | <reponame>NewShadesDAO/api<gh_stars>1-10
import pytest
from bson import ObjectId
from fastapi import FastAPI
from httpx import AsyncClient
from app.dependencies import common_parameters
from app.models.channel import Channel
class TestDependenciesRouter:
    """Tests for the shared pagination/sorting query-parameter dependency
    (``common_parameters``) as exercised through the messages endpoint."""

    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        "query_params",
        [
            {"before": str(ObjectId()), "after": str(ObjectId())},
            {"before": str(ObjectId()), "around": str(ObjectId())},
            {"after": str(ObjectId()), "around": str(ObjectId())},
        ],
    )
    async def test_common_params_before_or_after_or_around_only(
        self, app: FastAPI, authorized_client: AsyncClient, server_channel: Channel, query_params
    ):
        """Supplying more than one of 'before'/'after'/'around' is a 400."""
        response = await authorized_client.get(f"channels/{str(server_channel.pk)}/messages", params=query_params)
        assert response.status_code == 400

    @pytest.mark.asyncio
    @pytest.mark.parametrize("limit", [0, -20, 500, "hey"])
    async def test_common_params_limit_within_limits_and_int(
        self,
        app: FastAPI,
        authorized_client: AsyncClient,
        server_channel: Channel,
        limit: int,
    ):
        """'limit' must be an integer inside the allowed range; otherwise 422."""
        response = await authorized_client.get(f"channels/{str(server_channel.pk)}/messages?limit={limit}")
        assert response.status_code == 422

    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        "sort, sort_by_field, sort_by_direction",
        [
            ("-created_at", "created_at", -1),
            ("created_at", "created_at", 1),
            ("-author", "author", -1),
            ("author", "author", 1),
        ],
    )
    async def test_common_params_sort_as_one_string(
        self,
        app: FastAPI,
        authorized_client: AsyncClient,
        server_channel: Channel,
        sort,
        sort_by_field,
        sort_by_direction,
    ):
        """A single 'sort' string ('-field' = descending) splits into field + direction."""
        params = await common_parameters(sort=sort)
        assert params["sort_by_field"] == sort_by_field
        assert params["sort_by_direction"] == sort_by_direction
utils/get_data_hbag/pb/cut_in_pb2.py | NovemberChopin/GuideLine | 0 | 12761087 | <filename>utils/get_data_hbag/pb/cut_in_pb2.py
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cut_in.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='cut_in.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0c\x63ut_in.proto\"8\n\x06\x43utInS\x12\x11\n\ttimestamp\x18\x01 \x01(\x01\x12\x1b\n\tcut_in_id\x18\x02 \x03(\x0b\x32\x08.CutInID\" \n\x08Points2D\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\"-\n\x07\x43utInID\x12\n\n\x02id\x18\x01 \x01(\r\x12\x16\n\x06\x63ut_in\x18\x02 \x03(\x0b\x32\x06.CutIn\"\x98\x01\n\x05\x43utIn\x12\x12\n\ncross_time\x18\x02 \x01(\x01\x12\x1e\n\x0b\x63ross_point\x18\x03 \x01(\x0b\x32\t.Points2D\x12\x19\n\x04type\x18\x04 \x01(\x0e\x32\x0b.CutIn.Type\"@\n\x04Type\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x14\n\x10left_lane_change\x10\x01\x12\x15\n\x11right_lane_change\x10\x02\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CUTIN_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='CutIn.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='left_lane_change', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='right_lane_change', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=244,
serialized_end=308,
)
_sym_db.RegisterEnumDescriptor(_CUTIN_TYPE)
_CUTINS = _descriptor.Descriptor(
name='CutInS',
full_name='CutInS',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='CutInS.timestamp', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cut_in_id', full_name='CutInS.cut_in_id', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=16,
serialized_end=72,
)
_POINTS2D = _descriptor.Descriptor(
name='Points2D',
full_name='Points2D',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x', full_name='Points2D.x', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='y', full_name='Points2D.y', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=74,
serialized_end=106,
)
_CUTINID = _descriptor.Descriptor(
name='CutInID',
full_name='CutInID',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='CutInID.id', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cut_in', full_name='CutInID.cut_in', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=108,
serialized_end=153,
)
_CUTIN = _descriptor.Descriptor(
name='CutIn',
full_name='CutIn',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='cross_time', full_name='CutIn.cross_time', index=0,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cross_point', full_name='CutIn.cross_point', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='CutIn.type', index=2,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_CUTIN_TYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=156,
serialized_end=308,
)
_CUTINS.fields_by_name['cut_in_id'].message_type = _CUTINID
_CUTINID.fields_by_name['cut_in'].message_type = _CUTIN
_CUTIN.fields_by_name['cross_point'].message_type = _POINTS2D
_CUTIN.fields_by_name['type'].enum_type = _CUTIN_TYPE
_CUTIN_TYPE.containing_type = _CUTIN
DESCRIPTOR.message_types_by_name['CutInS'] = _CUTINS
DESCRIPTOR.message_types_by_name['Points2D'] = _POINTS2D
DESCRIPTOR.message_types_by_name['CutInID'] = _CUTINID
DESCRIPTOR.message_types_by_name['CutIn'] = _CUTIN
CutInS = _reflection.GeneratedProtocolMessageType('CutInS', (_message.Message,), dict(
DESCRIPTOR = _CUTINS,
__module__ = 'cut_in_pb2'
# @@protoc_insertion_point(class_scope:CutInS)
))
_sym_db.RegisterMessage(CutInS)
Points2D = _reflection.GeneratedProtocolMessageType('Points2D', (_message.Message,), dict(
DESCRIPTOR = _POINTS2D,
__module__ = 'cut_in_pb2'
# @@protoc_insertion_point(class_scope:Points2D)
))
_sym_db.RegisterMessage(Points2D)
CutInID = _reflection.GeneratedProtocolMessageType('CutInID', (_message.Message,), dict(
DESCRIPTOR = _CUTINID,
__module__ = 'cut_in_pb2'
# @@protoc_insertion_point(class_scope:CutInID)
))
_sym_db.RegisterMessage(CutInID)
CutIn = _reflection.GeneratedProtocolMessageType('CutIn', (_message.Message,), dict(
DESCRIPTOR = _CUTIN,
__module__ = 'cut_in_pb2'
# @@protoc_insertion_point(class_scope:CutIn)
))
_sym_db.RegisterMessage(CutIn)
# @@protoc_insertion_point(module_scope)
| 1.6875 | 2 |
scripts/superimport.py | qiuhuachuan/pyprobml | 0 | 12761088 | <reponame>qiuhuachuan/pyprobml<filename>scripts/superimport.py
# If you add `import superimport` to the top of your script
# then running it should automatically trigger installation of all required packages
# Author: <NAME> (<EMAIL>)
# Code is based on
# https://stackoverflow.com/questions/44210656/how-to-check-if-a-module-is-installed-in-python-and-if-not-install-it-within-t
# https://stackoverflow.com/questions/52311738/get-name-from-python-file-which-called-the-import
# https://gist.github.com/gene1wood/9472a9d0dffce1a56d6e796afc6539b8
# https://stackoverflow.com/questions/8718885/import-module-from-string-variable
import sys
import subprocess
import pkg_resources
import requests
import pipreqs
import inspect
import re
import logging
import os
def get_packages_from_txt(file, dim="="):
    """Parse a package-list text file into a dict.

    When *dim* is truthy, each non-empty line is treated as
    ``name<dim>value`` and the result maps name -> value.  When *dim* is
    falsy (e.g. ``""``), lines are bare names and the result maps
    name -> True.

    :param file: path of the text file to read
    :param dim: delimiter between package name and value ("" for bare names)
    :return: dict of package name -> value (or True)
    :raises ValueError: if a line does not contain the delimiter
    """
    # Fix: close the file deterministically instead of leaking the handle
    # opened by a bare open(file).read().
    with open(file) as fh:
        lines = [line for line in fh.read().split("\n") if line]
    if dim:
        # maxsplit=1 keeps values intact even when they themselves contain
        # the delimiter (the old split(dim)[1] silently truncated them).
        pairs = (line.split(dim, 1) for line in lines)
        return {name: value for name, value in pairs}
    return {line: True for line in lines}
def install_if_missing(packages_names, verbose=False):
    """Install via pip any of *packages_names* not already installed.

    :param packages_names: set of distribution names to ensure
    :param verbose: when False, pip's stdout is suppressed
    """
    already_present = {dist.key for dist in pkg_resources.working_set}
    to_install = packages_names - already_present
    if not to_install:
        return
    cmd = [sys.executable, "-m", "pip", "install", *to_install]
    # Quiet mode sends pip's stdout to /dev/null; verbose keeps the default.
    out = None if verbose else subprocess.DEVNULL
    subprocess.check_call(cmd, stdout=out)
def get_match_list(the_string, the_regex_pattern, guard="#"):
    """Return matches of *the_regex_pattern* in *the_string*.

    A match is discarded when the character immediately preceding it is
    *guard* (used to skip commented-out import statements).

    :param the_string: text to scan; None/empty returns None
    :param the_regex_pattern: regex handed to re.finditer
    :param guard: character that disables a match when found directly before it
    :return: list of re.Match objects (possibly empty), or None for empty input
    """
    if not the_string:
        return None
    compiled = re.compile(the_regex_pattern)
    matches_list = list(compiled.finditer(the_string))
    # BUG FIX: for a match at position 0 the old code indexed
    # the_string[-1] — Python's negative indexing made it inspect the
    # *last* character, so matches at the start of the string could be
    # wrongly guarded.  A match at position 0 can never be commented out.
    return [
        m
        for m in matches_list
        if m.start() == 0 or the_string[m.start() - 1] != guard
    ]
def preprocess_imports(name):
    """Reduce a raw import target to its bare top-level module name.

    Examples: ``"os.path"`` -> ``"os"``; ``"numpy as np"`` -> ``"numpy"``;
    surrounding whitespace is removed.

    :param name: text extracted from an ``import``/``from`` statement
    :return: the top-level module name
    """
    # Keep only the top-level package of a dotted path.
    name = name.split(".", 1)[0]
    # Generalized: the old code removed a single trailing space; strip()
    # also handles multiple spaces/tabs left by sloppy formatting.
    name = name.strip()
    # Drop an "as <alias>" suffix, if any.
    name = name.split(" as ", 1)[0]
    return name
def get_imports(
    file_string=None, patterns=(r"^import (.+)$", r"^from ((?!\.+).*?) import (?:.*)$")
):
    """Extract the set of top-level module names imported by *file_string*.

    Handles both ``import x`` and ``from x import y`` forms; relative
    imports (``from . import x``) are excluded by the second pattern.

    :param file_string: full source text of a Python file
    :param patterns: regexes matching import statements, applied per line
        (default is now a tuple — the old mutable-list default was a
        classic Python pitfall, though it was never mutated here)
    :return: set of bare top-level module names
    """
    found = set()
    # Split once, not once per pattern as before (behavior-identical).
    lines = file_string.split("\n")
    for pattern in patterns:
        for line in lines:
            for m in get_match_list(line, pattern) or []:
                statement = m.group()
                if statement.startswith("from"):
                    # Take the text between "from " and "import".
                    target = statement[5:statement.find("import")]
                else:
                    target = statement.replace("import ", "")
                found.add(preprocess_imports(target))
    return found
def check_if_package_on_pypi(packages_name):
    """Query PyPI's JSON API for *packages_name*.

    :param packages_name: distribution name to look up
    :return: ``(True, canonical_name, metadata)`` when the package exists,
        otherwise ``(False, None, None)``
    """
    response = requests.get(f"https://pypi.python.org/pypi/{packages_name}/json")
    if response.status_code != 200:
        return False, None, None
    meta = response.json()
    return True, meta["info"]["name"], meta
def import_module(module_name, verbose=False):
    """Import *module_name*, binding it into this module's globals.

    NOTE: the binding lands in *superimport*'s namespace, not the calling
    script's — the call serves here as an importability probe before the
    auto-install fallback kicks in.

    :param module_name: top-level module name to import
    :param verbose: when True, write a notice to stderr before re-raising
    :raises ImportError: if the module cannot be imported
    """
    try:
        # because we want to import using a variable, do it this way
        module_obj = __import__(module_name)
        # create a global entry containing our module
        globals()[module_name] = module_obj
    except ImportError as e:
        if verbose:
            # NOTE(review): the message text below contains typos
            # ("Trying try", "automatcially") but is runtime output, so it
            # is deliberately left byte-identical here.
            sys.stderr.write(
                f"ERROR: superimport : missing python module: {module_name} \nTrying try to install automatcially\n"
            )
        raise e
# Build the import-name -> pip-distribution lookup tables from pipreqs'
# bundled data files plus our own supplemental mapping.
mapper = pipreqs.__path__[0] + "/mapping"
mapping = get_packages_from_txt(mapper, ":")
stdlib_path = pipreqs.__path__[0] + "/stdlib"
# NOTE(review): 'stdlib' is loaded but never consulted below.
stdlib = get_packages_from_txt(stdlib_path, "")
dir_name = os.path.dirname(__file__)
mapping2 = get_packages_from_txt(f"{dir_name}/superimport/mapping2", ":")
mapping = {**mapping, **mapping2} # adding two dictionaries
gnippam = {v: k for k, v in mapping.items()} # reversing the mapping
# Import-time hook: when this module is *imported* (not run as a script),
# walk up the call stack to the importing file, scan its source for import
# statements, and try to install whatever fails to import.
if __name__ != "__main__":
    for frame in inspect.stack()[1:]:
        # Skip synthetic frames such as "<stdin>"/"<string>".
        if frame.filename[0] != "<":
            fc = open(frame.filename).read()
            # Drop our own import line so we don't try to pip-install ourselves.
            fc = fc.replace("import superimport\n", "")
            matches = get_imports(fc)
            for package in matches:
                try:
                    import_module(package, True)
                except Exception as e:
                    if package in mapping:
                        try:
                            install_if_missing({mapping[package]}, True)
                        # NOTE(review): bare 'except:' also swallows
                        # KeyboardInterrupt/SystemExit; kept as-is here.
                        except:
                            print("Could not install automatically from map, trying reverse map")
                            install_if_missing({gnippam[package]}, True)
                    else:
                        logging.warning("Package was not found in the reverse index, trying pypi.")
                        status, name, meta = check_if_package_on_pypi(package)
                        if status:
                            logging.info(
                                f"Package{name} was found on PyPi\nNow installing {name}"
                            )
                            install_if_missing({package}, True)
                        else:
                            logging.warning(
                                f"Failed to install {package} automatically"
                            )
            # Only the nearest real caller file is processed.
            break
| 2.296875 | 2 |
netket/utils/struct/utils.py | gpescia/MyNetKet | 352 | 12761089 | <gh_stars>100-1000
import sys
import builtins
from dataclasses import MISSING
## STUFF FROM python/lib/dataclasses.py
def _set_new_attribute(cls, name, value):
# Never overwrites an existing attribute. Returns True if the
# attribute already exists.
if name in cls.__dict__:
return True
setattr(cls, name, value)
return False
def _create_fn(
    name, args, body, *, globals=None, locals=None, return_type=MISSING, doc=None
):
    """Dynamically build a function named *name* via exec().

    *args* is an iterable of parameter strings, *body* an iterable of
    statement lines.  The function source is wrapped in an outer
    ``__create_fn__`` whose parameters are the keys of *locals*, so those
    values become the new function's closure.

    (Adapted from CPython's dataclasses implementation.)
    """
    # Note that we mutate locals when exec() is called. Caller
    # beware! The only callers are internal to this module, so no
    # worries about external callers.
    if locals is None:
        locals = {}
    if "BUILTINS" not in locals:
        locals["BUILTINS"] = builtins
    return_annotation = ""
    if return_type is not MISSING:
        # Expose the annotation object under a well-known closure name.
        locals["_return_type"] = return_type
        return_annotation = "->_return_type"
    args = ",".join(args)
    body = "\n".join(f"  {b}" for b in body)
    # Compute the text of the entire function.
    txt = f" def {name}({args}){return_annotation}:\n{body}"
    local_vars = ", ".join(locals.keys())
    txt = f"def __create_fn__({local_vars}):\n{txt}\n return {name}"
    ns = {}
    exec(txt, globals, ns) # noqa: W0122
    # Call the wrapper to bind the closure values and obtain the function.
    fn = ns["__create_fn__"](**locals)
    if doc is not None:
        fn.__doc__ = doc
    return fn
def get_class_globals(clz):
    """Return the global namespace of the module that defines *clz*.

    Falls back to an empty dict when the defining module is not present
    in ``sys.modules`` (e.g. for dynamically created classes).
    """
    modules = sys.modules
    module_name = clz.__module__
    if module_name not in modules:
        return {}
    return modules[module_name].__dict__
| 2.421875 | 2 |
aiml/__init__.py | edjdavid/aiml | 0 | 12761090 | from .models import MLModels
| 1.078125 | 1 |
examples/test_ffbuilder.py | ltalirz/aiida-lsmo | 0 | 12761091 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test for ff_builder"""
from __future__ import absolute_import
from __future__ import print_function
from aiida.orm import Dict
from aiida.plugins import CalculationFactory
from aiida.engine import run_get_node
# Calculation objects
# Resolve the ff_builder calcfunction from the aiida-lsmo plugin registry.
FFBuilder = CalculationFactory("lsmo.ff_builder") # pylint: disable=invalid-name
# Force-field options for a UFF framework with TraPPE CO2/N2 guest models.
ff_parameters = Dict( # pylint: disable=invalid-name
    dict={
        'ff_framework': 'UFF',
        'ff_molecules': {
            'CO2': 'TraPPE',
            'N2': 'TraPPE',
        },
        'shifted': False,
        'tail_corrections': True,
        'mixing_rule': 'Lorentz-Berthelot',
        'separate_interactions': True
    })
# Run the calcfunction and report the produced output nodes.
results, node = run_get_node(FFBuilder, ff_parameters) # pylint: disable=invalid-name
print("Terminated ff_builder calcfunction, pk:", node.pk)
for key, val in results.items():
    #filepath = os.path.join(val._repository._get_base_folder().abspath, val.filename)
    print("Output:", val.pk, key)
| 1.960938 | 2 |
app.py | helionagamachi/S3Share | 0 | 12761092 | #!/usr/bin/env python3
from aws_cdk import core
from aws_cdk.aws_s3 import Bucket
from s3_share.s3_share_stack import S3ShareStack
# Create the CDK application root construct.
app = core.App()
# Register the S3 sharing stack under the logical id "s3-share".
S3ShareStack(app, "s3-share")
# Emit the CloudFormation template(s) for all registered stacks.
app.synth()
| 1.421875 | 1 |
sdk/python/pulumi_azure_native/servicebus/v20170401/rule.py | sebtelko/pulumi-azure-native | 0 | 12761093 | <reponame>sebtelko/pulumi-azure-native<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RuleArgs', 'Rule']
@pulumi.input_type
class RuleArgs:
def __init__(__self__, *,
namespace_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
subscription_name: pulumi.Input[str],
topic_name: pulumi.Input[str],
action: Optional[pulumi.Input['ActionArgs']] = None,
correlation_filter: Optional[pulumi.Input['CorrelationFilterArgs']] = None,
filter_type: Optional[pulumi.Input['FilterType']] = None,
rule_name: Optional[pulumi.Input[str]] = None,
sql_filter: Optional[pulumi.Input['SqlFilterArgs']] = None):
"""
The set of arguments for constructing a Rule resource.
:param pulumi.Input[str] namespace_name: The namespace name
:param pulumi.Input[str] resource_group_name: Name of the Resource group within the Azure subscription.
:param pulumi.Input[str] subscription_name: The subscription name.
:param pulumi.Input[str] topic_name: The topic name.
:param pulumi.Input['ActionArgs'] action: Represents the filter actions which are allowed for the transformation of a message that have been matched by a filter expression.
:param pulumi.Input['CorrelationFilterArgs'] correlation_filter: Properties of correlationFilter
:param pulumi.Input['FilterType'] filter_type: Filter type that is evaluated against a BrokeredMessage.
:param pulumi.Input[str] rule_name: The rule name.
:param pulumi.Input['SqlFilterArgs'] sql_filter: Properties of sqlFilter
"""
pulumi.set(__self__, "namespace_name", namespace_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "subscription_name", subscription_name)
pulumi.set(__self__, "topic_name", topic_name)
if action is not None:
pulumi.set(__self__, "action", action)
if correlation_filter is not None:
pulumi.set(__self__, "correlation_filter", correlation_filter)
if filter_type is not None:
pulumi.set(__self__, "filter_type", filter_type)
if rule_name is not None:
pulumi.set(__self__, "rule_name", rule_name)
if sql_filter is not None:
pulumi.set(__self__, "sql_filter", sql_filter)
@property
@pulumi.getter(name="namespaceName")
def namespace_name(self) -> pulumi.Input[str]:
"""
The namespace name
"""
return pulumi.get(self, "namespace_name")
@namespace_name.setter
def namespace_name(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Name of the Resource group within the Azure subscription.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="subscriptionName")
def subscription_name(self) -> pulumi.Input[str]:
"""
The subscription name.
"""
return pulumi.get(self, "subscription_name")
@subscription_name.setter
def subscription_name(self, value: pulumi.Input[str]):
pulumi.set(self, "subscription_name", value)
@property
@pulumi.getter(name="topicName")
def topic_name(self) -> pulumi.Input[str]:
"""
The topic name.
"""
return pulumi.get(self, "topic_name")
@topic_name.setter
def topic_name(self, value: pulumi.Input[str]):
pulumi.set(self, "topic_name", value)
@property
@pulumi.getter
def action(self) -> Optional[pulumi.Input['ActionArgs']]:
"""
Represents the filter actions which are allowed for the transformation of a message that have been matched by a filter expression.
"""
return pulumi.get(self, "action")
@action.setter
def action(self, value: Optional[pulumi.Input['ActionArgs']]):
pulumi.set(self, "action", value)
@property
@pulumi.getter(name="correlationFilter")
def correlation_filter(self) -> Optional[pulumi.Input['CorrelationFilterArgs']]:
"""
Properties of correlationFilter
"""
return pulumi.get(self, "correlation_filter")
@correlation_filter.setter
def correlation_filter(self, value: Optional[pulumi.Input['CorrelationFilterArgs']]):
pulumi.set(self, "correlation_filter", value)
@property
@pulumi.getter(name="filterType")
def filter_type(self) -> Optional[pulumi.Input['FilterType']]:
"""
Filter type that is evaluated against a BrokeredMessage.
"""
return pulumi.get(self, "filter_type")
@filter_type.setter
def filter_type(self, value: Optional[pulumi.Input['FilterType']]):
pulumi.set(self, "filter_type", value)
@property
@pulumi.getter(name="ruleName")
def rule_name(self) -> Optional[pulumi.Input[str]]:
"""
The rule name.
"""
return pulumi.get(self, "rule_name")
@rule_name.setter
def rule_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "rule_name", value)
@property
@pulumi.getter(name="sqlFilter")
def sql_filter(self) -> Optional[pulumi.Input['SqlFilterArgs']]:
"""
Properties of sqlFilter
"""
return pulumi.get(self, "sql_filter")
@sql_filter.setter
def sql_filter(self, value: Optional[pulumi.Input['SqlFilterArgs']]):
pulumi.set(self, "sql_filter", value)
class Rule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[pulumi.InputType['ActionArgs']]] = None,
correlation_filter: Optional[pulumi.Input[pulumi.InputType['CorrelationFilterArgs']]] = None,
filter_type: Optional[pulumi.Input['FilterType']] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
sql_filter: Optional[pulumi.Input[pulumi.InputType['SqlFilterArgs']]] = None,
subscription_name: Optional[pulumi.Input[str]] = None,
topic_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Description of Rule Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ActionArgs']] action: Represents the filter actions which are allowed for the transformation of a message that have been matched by a filter expression.
:param pulumi.Input[pulumi.InputType['CorrelationFilterArgs']] correlation_filter: Properties of correlationFilter
:param pulumi.Input['FilterType'] filter_type: Filter type that is evaluated against a BrokeredMessage.
:param pulumi.Input[str] namespace_name: The namespace name
:param pulumi.Input[str] resource_group_name: Name of the Resource group within the Azure subscription.
:param pulumi.Input[str] rule_name: The rule name.
:param pulumi.Input[pulumi.InputType['SqlFilterArgs']] sql_filter: Properties of sqlFilter
:param pulumi.Input[str] subscription_name: The subscription name.
:param pulumi.Input[str] topic_name: The topic name.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: RuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Description of Rule Resource.
:param str resource_name: The name of the resource.
:param RuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(RuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[pulumi.InputType['ActionArgs']]] = None,
correlation_filter: Optional[pulumi.Input[pulumi.InputType['CorrelationFilterArgs']]] = None,
filter_type: Optional[pulumi.Input['FilterType']] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
rule_name: Optional[pulumi.Input[str]] = None,
sql_filter: Optional[pulumi.Input[pulumi.InputType['SqlFilterArgs']]] = None,
subscription_name: Optional[pulumi.Input[str]] = None,
topic_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RuleArgs.__new__(RuleArgs)
__props__.__dict__["action"] = action
__props__.__dict__["correlation_filter"] = correlation_filter
__props__.__dict__["filter_type"] = filter_type
if namespace_name is None and not opts.urn:
raise TypeError("Missing required property 'namespace_name'")
__props__.__dict__["namespace_name"] = namespace_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["rule_name"] = rule_name
__props__.__dict__["sql_filter"] = sql_filter
if subscription_name is None and not opts.urn:
raise TypeError("Missing required property 'subscription_name'")
__props__.__dict__["subscription_name"] = subscription_name
if topic_name is None and not opts.urn:
raise TypeError("Missing required property 'topic_name'")
__props__.__dict__["topic_name"] = topic_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:servicebus/v20170401:Rule"), pulumi.Alias(type_="azure-native:servicebus:Rule"), pulumi.Alias(type_="azure-nextgen:servicebus:Rule"), pulumi.Alias(type_="azure-native:servicebus/v20180101preview:Rule"), pulumi.Alias(type_="azure-nextgen:servicebus/v20180101preview:Rule"), pulumi.Alias(type_="azure-native:servicebus/v20210101preview:Rule"), pulumi.Alias(type_="azure-nextgen:servicebus/v20210101preview:Rule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Rule, __self__).__init__(
'azure-native:servicebus/v20170401:Rule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Rule':
"""
Get an existing Rule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = RuleArgs.__new__(RuleArgs)
__props__.__dict__["action"] = None
__props__.__dict__["correlation_filter"] = None
__props__.__dict__["filter_type"] = None
__props__.__dict__["name"] = None
__props__.__dict__["sql_filter"] = None
__props__.__dict__["type"] = None
return Rule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def action(self) -> pulumi.Output[Optional['outputs.ActionResponse']]:
"""
Represents the filter actions which are allowed for the transformation of a message that have been matched by a filter expression.
"""
return pulumi.get(self, "action")
@property
@pulumi.getter(name="correlationFilter")
def correlation_filter(self) -> pulumi.Output[Optional['outputs.CorrelationFilterResponse']]:
"""
Properties of correlationFilter
"""
return pulumi.get(self, "correlation_filter")
@property
@pulumi.getter(name="filterType")
def filter_type(self) -> pulumi.Output[Optional[str]]:
"""
Filter type that is evaluated against a BrokeredMessage.
"""
return pulumi.get(self, "filter_type")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="sqlFilter")
def sql_filter(self) -> pulumi.Output[Optional['outputs.SqlFilterResponse']]:
"""
Properties of sqlFilter
"""
return pulumi.get(self, "sql_filter")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
| 1.625 | 2 |
mofa/assistants/migrations/0008_delete_dummyassistant.py | BoxInABoxICT/BoxPlugin | 0 | 12761094 | # Generated by Django 2.2.6 on 2020-01-28 12:30
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the obsolete DummyAssistant model."""

    dependencies = [
        ('assistants', '0007_dummyassistant'),
    ]

    operations = [
        migrations.DeleteModel(
            name='DummyAssistant',
        ),
    ]
| 1.390625 | 1 |
auctions/migrations/0003_auto_20200711_1614.py | nmk0462/commerce | 14 | 12761095 | # Generated by Django 3.0.8 on 2020-07-11 10:44
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames Bids->Bid and Listings->Listing
    (plural -> singular model-name convention)."""

    dependencies = [
        ('auctions', '0002_bids_listings'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Bids',
            new_name='Bid',
        ),
        migrations.RenameModel(
            old_name='Listings',
            new_name='Listing',
        ),
    ]
| 1.773438 | 2 |
mcenter_cli/parallelm/mcenter_cli/delete_mlapp.py | lisapm/mlpiper | 7 | 12761096 | <filename>mcenter_cli/parallelm/mcenter_cli/delete_mlapp.py
import logging
import pprint
from parallelm.mlapp_directory.mlapp_defs import MLAppProfileKeywords, MLAppPatternKeywords
class MLAppDeleteHelper:
    """Deletes an MCenter MLApp (profile, pattern and their pipelines) by name."""

    def __init__(self, mclient, mlapp_name, dry_run=True):
        """
        Deleting an mlapp given name.
        Note, If the mlapp is currently running deletion will fail.
        :param mclient: MCenterClient object to use to communicate with MCenter
        :param mlapp_name: mlapp_name to delete
        :param dry_run: do not delete just perform a dry run and show what is going to be deleted
        """
        self._logger = logging.getLogger(self.__class__.__name__)
        self._mlapp_name = mlapp_name
        self._mclient = mclient
        self._dry_run_mode = dry_run

        self._pattern_name = None
        self._pattern_id = None
        # Fix: initialize here so the attribute always exists, even before
        # _detect_all_ids() runs (previously it was only assigned there,
        # so touching it earlier raised AttributeError).
        self._profile_id = None
        self._pipeline_patterns_ids = []
        self._pipeline_profiles_ids = []

    def _detect_all_ids(self):
        """Locate the MLApp by name and collect every id needed for deletion.

        :raises Exception: if no profile with the configured name exists
        """
        for profile_info in self._mclient.list_ion_profiles():
            self._logger.info("Profile part: [{}]".format(self._mlapp_name))
            self._logger.info(pprint.pformat(profile_info))
            profile_name = profile_info[MLAppProfileKeywords.NAME]
            if profile_name == self._mlapp_name:
                self._pattern_name = profile_info[MLAppProfileKeywords.PATTERN_NAME]
                self._pattern_id = profile_info[MLAppProfileKeywords.PATTERN_ID]
                self._profile_id = profile_info[MLAppProfileKeywords.ID]
                self._logger.info("Found mlapp {} {}".format(profile_name, self._profile_id))

                # Each node contributes one pipeline pattern and one
                # pipeline profile to delete.
                for node_info in profile_info[MLAppProfileKeywords.NODES]:
                    pipeline_pattern_id = node_info[MLAppProfileKeywords.NODE_PIPELINE_PATTERN_ID]
                    self._pipeline_patterns_ids.append(pipeline_pattern_id)

                    pipeline_ee_tuple = node_info[MLAppProfileKeywords.NODE_PIPELINE_EE_TUPLE]
                    pipeline_profile_id = pipeline_ee_tuple[MLAppProfileKeywords.PIPELINE_EE_TUPLE_PIPELINE_PROFILE_ID]
                    self._pipeline_profiles_ids.append(pipeline_profile_id)
                return
        raise Exception("Could not find MLApp {}".format(self._mlapp_name))

    def _delete_mlapp(self):
        """Delete all collected objects; in dry-run mode only log the plan."""
        # The "Deleting ..." lines below are logged in dry-run mode too —
        # they describe what *would* be deleted.
        self._logger.info("Deleting profile: {}".format(self._profile_id))
        self._logger.info("Deleting pattern: {}".format(self._pattern_id))
        self._logger.info("Deleting pipeline profiles: {}".format(self._pipeline_profiles_ids))
        self._logger.info("Deleting pipeline patterns: {}".format(self._pipeline_patterns_ids))

        if not self._dry_run_mode:
            self._mclient.delete_ion_profile(self._profile_id)
            self._mclient.delete_ion_pattern(self._pattern_id)
            for pipeline_profile_id in self._pipeline_profiles_ids:
                self._mclient.delete_pipeline_profile(pipeline_profile_id)
            for pipeline_pattern_id in self._pipeline_patterns_ids:
                self._mclient.delete_pipeline_pattern(pipeline_pattern_id)

    def delete(self):
        """
        Perform actual delete.
        :return:
        """
        self._detect_all_ids()
        self._delete_mlapp()
| 2.328125 | 2 |
attacks/methods/biased_boundary_attack.py | ttbrunner/biased_boundary_attack_avc | 15 | 12761097 | <reponame>ttbrunner/biased_boundary_attack_avc
from concurrent import futures
from concurrent.futures import ThreadPoolExecutor
import foolbox
import numpy as np
import timeit
from models.utils.batch_tensorflow_model import BatchTensorflowModel
from models.utils.ensemble_tf_model import EnsembleTFModel
from utils.sampling.normal import sample_hypersphere
class BiasedBoundaryAttack:
"""
Like BoundaryAttack, but uses biased sampling from prior beliefs (lucky guesses).
Apart from Perlin Noise and projected gradients, this implementation contains more work that is not in the paper:
- We try addidional patterns (single-pixel modification, jitter patterns) to escape local minima whenever the attack gets stuck
- We dynamically tune hyperparameters according to the observed success of previous samples
- At each step, multiple gradients are calculated to counter stochastic defenses
- Optimized for speed: only use gradients if we can't progress without them.
"""
def __init__(self, blackbox_model, sample_gen, substitute_model=None):
"""
Creates a reusable instance.
:param blackbox_model: The model to attack.
:param sample_gen: Random sample generator.
:param substitute_model: A surrogate model for gradients - either a TensorFlowModel, BatchTensorFlowModel or EnsembleTFModel.
"""
self.blackbox_model = blackbox_model
self.sample_gen = sample_gen
self._jitter_mask = self.precalc_jitter_mask()
# A substitute model that provides batched gradients.
self.batch_sub_model = None
if substitute_model is not None:
if isinstance(substitute_model, foolbox.models.TensorFlowModel):
self.batch_sub_model = BatchTensorflowModel(substitute_model._images, substitute_model._batch_logits, session=substitute_model.session)
else:
assert isinstance(substitute_model, EnsembleTFModel) or isinstance(substitute_model, BatchTensorflowModel)
self.batch_sub_model = substitute_model
# We use ThreadPools to calculate candidates and surrogate gradients while we're waiting for the model's next prediction.
self.pg_thread_pool = ThreadPoolExecutor(max_workers=1)
self.candidate_thread_pool = ThreadPoolExecutor(max_workers=1)
def __enter__(self):
self.pg_thread_pool.__enter__()
self.candidate_thread_pool.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
# Will block until the futures are calculated. Thankfully they're not very complicated.
self.pg_thread_pool.__exit__(exc_type, exc_value, traceback)
self.candidate_thread_pool.__exit__(exc_type, exc_value, traceback)
print("BiasedBoundaryAttack: all threads stopped.")
def run_attack(self, X_orig, label, is_targeted, X_start, n_calls_left_fn, n_max_per_batch=50, n_seconds=None,
source_step=1e-2, spherical_step=1e-2, give_up_calls_left=0, give_up_dist=9999):
"""
Runs the Biased Boundary Attack against a single image.
The attack terminates when n_calls_left_fn() returns 0, n_seconds have elapsed, or a "give up" condition is reached.
Give-up functionality:
- When few calls are remaining, but the distance is still high. Could use the additional time for other images.
- Could theoretically be used to game the final score: spend more time on imgs that will reduce the median, and give up on others
- Largely unused (didn't get to finish this)
:param X_orig: The original (clean) image to perturb.
:param label: The target label (if targeted), or the original label (if untargeted).
:param is_targeted: True if targeted.
:param X_start: The starting point (must be of target class).
:param n_calls_left_fn: A function that returns the currently remaining number of queries against the model.
:param n_max_per_batch: How many samples are drawn per "batch". Samples are processed serially (the challenge doesn't allow
batching), but for each "batch", the attack dynamically adjusts hyperparams based on the success of
previous samples. This "batch" size is the max number of samples after which hyperparams are reset, and
a new "batch" is started. See generate_candidate().
:param n_seconds: Maximum seconds allowed for the attack to complete.
:param source_step: source step hyperparameter (see Boundary Attack)
:param spherical_step: orthogonal step hyperparameter (see Boundary Attack)
:param give_up_calls_left: give-up condition: if less than this number of calls is left
:param give_up_dist: give-up condition: if the current L2 distance is higher than this
:return: The best adversarial example so far.
"""
assert len(X_orig.shape) == 3
assert len(X_start.shape) == 3
assert X_orig.dtype == np.float32
time_start = timeit.default_timer()
pg_future = None
try:
# WARN: Inside this function, image space is normed to [0,1]!
X_orig = np.float32(X_orig) / 255.
X_start = np.float32(X_start) / 255.
label_current, dist_best = self._eval_sample(X_start, X_orig)
if (label_current == label) != is_targeted:
print("WARN: Starting point is not a valid adversarial example! Continuing for now.")
X_adv_best = np.copy(X_start)
# Abort if we're running out of queries
while n_calls_left_fn() > 3:
# Determine how many samples to draw at the current position.
n_candidates = min(n_max_per_batch, n_calls_left_fn())
# Calculate the projected adversarial gradient at the current position.
# Putting this into a ThreadPoolExecutor. While this is processing, we can already draw ~2 samples without waiting for the
# gradient. If the first 2 samples were unsuccessful, then the later ones can be biased with the gradient.
# Also cancel any pending requests from previous steps.
if pg_future is not None:
pg_future.cancel()
pg_future = self.pg_thread_pool.submit(self.get_projected_gradients, **{
"x_current": X_adv_best,
"x_orig": X_orig,
"label": label,
"is_targeted": is_targeted})
# Also do candidate generation with a ThreadPoolExecutor. We need to squeeze out every bit of runtime.
# Queue the first candidate.
candidate_future = self.candidate_thread_pool.submit(self.generate_candidate, **{
"i": 0,
"n": n_candidates,
"x_orig": X_orig,
"x_current": X_adv_best,
"source_step": source_step,
"spherical_step": spherical_step,
"pg_future": pg_future})
for i in range(n_candidates):
# Get candidate and queue the next one.
candidate = candidate_future.result()
if i < n_candidates - 1:
candidate_future = self.candidate_thread_pool.submit(self.generate_candidate, **{
"i": i+1,
"n": n_candidates,
"x_orig": X_orig,
"x_current": X_adv_best,
"source_step": source_step,
"spherical_step": spherical_step,
"pg_future": pg_future})
time_elapsed = timeit.default_timer() - time_start
if n_seconds is not None and time_elapsed >= n_seconds:
print("WARN: Running out of time! Aborting attack!")
return X_adv_best * 255.
if dist_best > give_up_dist and n_calls_left_fn() < give_up_calls_left:
print("Distance is way too high, aborting attack to save time.")
return X_adv_best * 255.
# Test if successful. NOTE: dist is rounded here!
candidate_label, rounded_dist = self._eval_sample(candidate, X_orig)
unrounded_dist = np.linalg.norm(candidate - X_orig)
if (candidate_label == label) == is_targeted:
if unrounded_dist < dist_best:
print("@ {:.3f}: After {} samples, found something @ {:.3f} (rounded {:.3f})! (reduced by {:.1%})".format(
dist_best, i, unrounded_dist, rounded_dist, 1.-rounded_dist/dist_best))
# Terminate this batch (don't try the other candidates) and advance.
X_adv_best = candidate
dist_best = unrounded_dist
break
return X_adv_best * 255.
finally:
# Be safe and wait for the gradient future. We want to be sure that no BG worker is blocking the GPU before returning.
if pg_future is not None:
futures.wait([pg_future])
def generate_candidate(self, i, n, x_orig, x_current, source_step, spherical_step, pg_future):
# This runs in a loop (while i<n) per "batch".
# Whenever a candidate is successful, a new batch is started. Therefore, i is the number of previously unsuccessful samples.
# Trying to use this in our favor, we tune our hyperparameters based on i:
# - As i gets higher, progressively reduce step size for the next candidate
# - When i gets high, try to blend jitter patterns and single pixels
# Try this only once: blend a jitter pattern that brings us closer to the source,
# but should be invisible to the defender (if they use denoising).
if i == int(0.7 * n):
candidate = x_current
fade_eps = 0.005
while np.sum(np.abs(np.round(candidate*255.) - np.round(x_current*255.))) < 0.0001:
#print("jitter at i={} with fade_eps={}".format(i, fade_eps))
candidate = self.generate_jitter_sample(x_orig, x_current, fade_eps=fade_eps)
fade_eps += 0.005
return candidate
# Last resort: change single pixels to rip us out of the local minimum.
i_pixel_start = int(0.9 * n)
if i >= i_pixel_start:
l0_pixel_index = i - i_pixel_start
#print("pixel at {}".format(l0_pixel_index))
candidate = self.generate_l0_sample(x_orig, x_current, n_px_to_change=1, px_index=l0_pixel_index)
return candidate
# Default: use the BBA. Scale both spherical and source step with i.
scale = (1. - i/n) + 0.3
c_source_step = source_step * scale
c_spherical_step = spherical_step * scale
# Get the adversarial projected gradient from the (other) BG worker.
# Create the first 2 candidates without it, so we can already start querying the model. The BG worker can finish the gradients
# while we're waiting for those first 2 results.
pg_factor = 0.5
pgs = None
if i >= 2:
# if pg_future.running():
# print("Waiting for gradients...")
pgs = pg_future.result()
pgs = pgs if i % 2 == 0 else None # Only use gradient bias on every 2nd iteration.
candidate, spherical_candidate = self.generate_boundary_sample(
X_orig=x_orig, X_adv_current=x_current, source_step=c_source_step, spherical_step=c_spherical_step,
sampling_fn=self.sample_gen.get_perlin, pgs_current=pgs, pg_factor=pg_factor)
return candidate
def generate_l0_sample(self, X_orig, X_aex, n_px_to_change=1, px_index=0):
# Modified copypasta from refinement_tricks.refine_jitter().
# Change the n-th important pixel.
# Sort indices of the pixels, descending by difference to original.
# TODO: try color-triples?
i_highest_diffs = np.argsort(np.abs(X_aex - X_orig), axis=None)[::-1]
X_candidate = X_aex.copy()
# Try and replace n pixels at once.
i_pxs = i_highest_diffs[px_index: px_index + n_px_to_change]
for i_px in i_pxs:
i_px = np.unravel_index(i_px, X_orig.shape)
X_candidate[i_px] = X_orig[i_px]
return X_candidate
def precalc_jitter_mask(self):
# Prepare a jitter mask with XOR (alternating). TODO: we could really improve this pattern. S&P noise, anyone?
jitter_width = 5
jitter_mask = np.empty((64, 64, 3), dtype=np.bool)
for i in range(64):
for j in range(64):
jitter_mask[i, j, :] = (i % jitter_width == 0) ^ (j % jitter_width == 0)
return jitter_mask
def generate_jitter_sample(self, X_orig, X_aex, fade_eps=0.01):
# Modified copypasta from refinement_tricks.refine_pixels().
jitter_mask = self._jitter_mask
jitter_diff = np.zeros(X_orig.shape, dtype=np.float32)
jitter_diff[jitter_mask] = (X_aex - X_orig)[jitter_mask]
X_candidate = X_aex - fade_eps * jitter_diff
return X_candidate
def generate_boundary_sample(self, X_orig, X_adv_current, source_step, spherical_step, sampling_fn, pgs_current=None, pg_factor=0.3):
# Partially adapted from FoolBox BoundaryAttack.
unnormalized_source_direction = np.float64(X_orig) - np.float64(X_adv_current)
source_norm = np.linalg.norm(unnormalized_source_direction)
source_direction = unnormalized_source_direction / source_norm
# Get perturbation from provided distribution
sampling_dir = sampling_fn()
# ===========================================================
# calculate candidate on sphere
# ===========================================================
dot = np.vdot(sampling_dir, source_direction)
sampling_dir -= dot * source_direction # Project orthogonal to source direction
sampling_dir /= np.linalg.norm(sampling_dir)
# If available: Bias the spherical dirs in direction of the adversarial gradient, which is projected onto the sphere
if pgs_current is not None:
# We have a bunch of gradients that we can try. Randomly select one.
# NOTE: we found this to perform better than simply averaging the gradients.
pg_current = pgs_current[np.random.randint(0, len(pgs_current))]
sampling_dir = (1. - pg_factor) * sampling_dir + pg_factor * pg_current
sampling_dir /= np.linalg.norm(sampling_dir)
sampling_dir *= spherical_step * source_norm # Norm to length stepsize*(dist from src)
D = 1 / np.sqrt(spherical_step ** 2 + 1)
direction = sampling_dir - unnormalized_source_direction
spherical_candidate = X_orig + D * direction
np.clip(spherical_candidate, 0., 1., out=spherical_candidate)
# ===========================================================
# step towards source
# ===========================================================
new_source_direction = X_orig - spherical_candidate
new_source_direction_norm = np.linalg.norm(new_source_direction)
# length if spherical_candidate would be exactly on the sphere
length = source_step * source_norm
# length including correction for deviation from sphere
deviation = new_source_direction_norm - source_norm
length += deviation
# make sure the step size is positive
length = max(0, length)
# normalize the length
length = length / new_source_direction_norm
candidate = spherical_candidate + length * new_source_direction
np.clip(candidate, 0., 1., out=candidate)
return np.float32(candidate), np.float32(spherical_candidate)
def get_projected_gradients(self, x_current, x_orig, label, is_targeted):
# Idea is: we have a direction (spherical candidate) in which we want to sample.
# We know that the gradient of a substitute model, projected onto the sphere, usually points to an adversarial region.
# Even if we are already adversarial, it should point "deeper" into that region.
# If we sample in that direction, we should move toward the center of the adversarial cone.
# Here, we simply project the gradient onto the same hyperplane as the spherical samples.
#
# Instead of a single projected gradient, this method returns an entire batch of them:
# - Surrogate gradients are unreliable, so we sample them in a region around the current position.
# - This gives us a similar benefit as observed "PGD with random restarts".
source_direction = x_orig - x_current
source_norm = np.linalg.norm(source_direction)
source_direction = source_direction / source_norm
# Take a tiny step towards the source before calculating the gradient. This marginally improves our results.
step_inside = 0.002 * source_norm
x_inside = x_current + step_inside * source_direction
# Perturb the current position before calc'ing gradients
n_samples = 8
radius_max = 0.01 * source_norm
x_perturb = sample_hypersphere(n_samples=n_samples, sample_shape=x_orig.shape, radius=1, sample_gen=self.sample_gen)
x_perturb *= np.random.uniform(0., radius_max)
x_inside_batch = x_inside + x_perturb
gradients = (self.batch_sub_model.gradient(x_inside_batch * 255., [label] * n_samples) / 255.)
if is_targeted:
gradients = -gradients
# Project the gradients.
for i in range(n_samples):
dot = np.vdot(gradients[i], source_direction)
projected_gradient = gradients[i] - dot * source_direction # Project orthogonal to source direction
projected_gradient /= np.linalg.norm(projected_gradient) # Norm to length 1
gradients[i] = projected_gradient
return gradients
def _eval_sample(self, x, x_orig_normed=None):
# Round, then get label and distance.
x_rounded = np.round(np.clip(x * 255., 0, 255))
preds = self.blackbox_model.predictions(np.uint8(x_rounded))
label = np.argmax(preds)
if x_orig_normed is None:
return label
else:
dist = np.linalg.norm(x_rounded/255. - x_orig_normed)
return label, dist
| 2.234375 | 2 |
Assignments_SMAI/BayesianClassifier.py | sum-coderepo/HadoopApp | 2 | 12761098 | import numpy as np
import pylab as pl
from sklearn import mixture
np.random.seed(0)
#C1 = np.array([[3, -2.7], [1.5, 2.7]])
#C2 = np.array([[1, 2.0], [-1.5, 1.7]])
#
#X_train = np.r_[
# np.random.multivariate_normal((-7, -7), C1, size=7),
# np.random.multivariate_normal((7, 7), C2, size=7),
#]
X_train = np.r_[
np.array([[0,0],[0,1],[2,0],[3,2],[3,3],[2,2],[2,0]]),
np.array([[7,7],[8,6],[9,7],[8,10],[7,10],[8,9],[7,11]]),
]
print(X_train)
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf.weights_ = [2,1]
clf.fit(X_train)
#define g1(x, y) and g2(x, y)
def g1(x, y):
print("x = {},y = {} for g1".format(x,y))
return clf.predict_proba(np.column_stack((x, y)))[:, 0]
def g2(x, y):
print("x = {},y = {} for g2".format(x,y))
return clf.predict_proba(np.column_stack((x, y)))[:, 1]
X, Y = np.mgrid[-15:13:500j, -15:13:500j]
x = X.ravel()
y = Y.ravel()
p = (g1(x, y) - g2(x, y)).reshape(X.shape)
pl.scatter(X_train[:, 0], X_train[:, 1])
pl.contour(X, Y, p, levels=[0])
pl.show() | 2.578125 | 3 |
tests/test_properties.py | denisvasilik/binalyzer-core | 0 | 12761099 | """
test_properties
~~~~~~~~~~~~~~~
This module implements tests for the properties module.
"""
import pytest
from binalyzer_core import (
ValueProperty,
ReferenceProperty,
StretchSizeProperty,
Template,
)
def test_reference_property_is_read_only():
property = ReferenceProperty(Template(), 'invalid_name')
with pytest.raises(RuntimeError):
property.value = 0
def test_value_property():
value_property0 = ValueProperty()
value_property1 = ValueProperty(42)
assert value_property0.value == 0
assert value_property1.value == 42
def test_sizing_stretch_without_predecessors():
template_a = Template(name='a')
template_c = Template(name='c', parent=template_a)
template_d = Template(name='d', parent=template_a)
template_a.size = 10
template_c.size_property = StretchSizeProperty(template_c)
template_d.size = 4
assert template_c.size == 6
def test_sizing_stretch_without_successors():
template_a = Template(name='a')
template_b = Template(name='b', parent=template_a)
template_c = Template(name='c', parent=template_a)
template_a.size = 10
template_b.size = 1
template_c.size_property = StretchSizeProperty(template_c)
assert template_c.size == 9
def test_sizing_stretch_with_siblings():
template_a = Template(name='a')
template_b = Template(name='b', parent=template_a)
template_c = Template(name='c', parent=template_a)
template_d = Template(name='d', parent=template_a)
template_a.size = 10
template_b.size = 1
template_c.size_property = StretchSizeProperty(template_c)
template_d.size = 4
assert template_c.size == 5
def test_sizing_stretch_without_siblings():
template_a = Template(name='a')
template_c = Template(name='c', parent=template_a)
template_a.size = 10
template_c.size_property = StretchSizeProperty(template_c)
assert template_c.size == 10
| 2.40625 | 2 |
Books/GodOfPython/P13_Exception/direct/num3.py | Tim232/Python-Things | 2 | 12761100 | import sys, time
num = 1
try:
while num<=10:
print(num)
num += 1
time.sleep(1)
except KeyboardInterrupt:
print('exit')
sys.exit(0)
else:
print('complete')
finally:
print('Goodbye Python') | 3.5 | 4 |
normi/config.py | Yohannfra/normi | 0 | 12761101 | #!/usr/bin/env python3
import os
import sys
import toml
from .utils import Utils
DEFAULT_FILE_CONTENT = [
"indent_style = 'spaces'",
"indent_size = 4",
"max_len_line = 80",
"max_size_function = 20",
"max_function_in_file = 5",
"epitech_header = true",
"return_values_in_parenthese = true",
'forbidden_functions = []',
"forbidden_comments_in_functions = true",
"space_after_keyword = true",
"space_after_coma = true",
"requiere_void_when_no_args = true",
"max_parameters_to_functions = 4",
"max_variable_per_function = -1",
"brackets_style = 'end_of_line'",
"additionnal_types = []",
"excluded_files = []"
]
class Config:
def __init__(self):
self.settings = {}
self.config_file = ".normi.toml"
def parse_config(self):
content = Utils.get_file_content(self.config_file)
if content == None:
self.settings = toml.loads("\n".join(DEFAULT_FILE_CONTENT))
print("Using default configuration")
else:
self.settings = toml.loads(content)
@classmethod
def init_config(self):
config_file = ".normi.toml"
if os.path.exists(config_file):
sys.exit(f"{config_file} already exists, can't init the file")
try:
f = open(config_file, 'w')
except:
sys.exit(f"Could not create file {config_file}")
for line in DEFAULT_FILE_CONTENT:
f.write(line + '\n')
f.close()
print(f"Initialized {config_file} with success")
def get(self, param):
return self.settings[param]
| 2.609375 | 3 |
google/appengine/ext/analytics/main.py | vladushakov987/appengine_python3 | 0 | 12761102 | <gh_stars>0
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Main script for appstats analytics."""
from __future__ import division
from past.utils import old_div
from builtins import object
import email.Utils
try:
import json
except ImportError:
import simplejson as json
import logging
import mimetypes
import os
import re
import time
from google.appengine.ext import webapp
from google.appengine.ext.analytics import process
from google.appengine.ext.analytics import stats
from google.appengine.ext.appstats import loader
from google.appengine.ext.appstats import recording
from google.appengine.ext.appstats import ui
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
class Cache(object):
  """Cache appstats records for better tool performance.

  Loading full Appstats records from file is time consuming mainly
  because of the overheads in converting binary protobuf strings to
  python protobuf objects. Caching the records can help ensure this is
  done only the first time the main page is loaded, and the overheads
  are avoided as the user navigates the tool. Note that caching is
  intended for the offline analysis case (e.g. analyzing locally
  downloaded files). In online production, local caches might not be
  effective as requests go to multiple app servers. Also, there might be
  issues around memcache getting updated periodically. The file name and
  its last modification time are stored to identify whether the cache is
  still valid.
  """

  def __init__(self):
    """Constructor: start with an empty, invalid cache."""
    self.hascontents = False  # True once Insert() has populated the cache.
    self.filename = None      # Source file the cached records came from.
    self.mtime = None         # Modification time of that file when cached.
    self.recordlist = []      # Cached records, most recent first.

  def Reset(self):
    """Reset and delete cache contents."""
    self.hascontents = False
    self.filename = None
    self.mtime = None
    self.recordlist = []

  def IsCached(self, source, mtime):
    """Check whether data from a file is cached.

    Args:
      source: name of file being read.
      mtime: last modification time of file being read.

    Returns:
      A boolean: True if the cache holds records for this exact file
      version, False otherwise.
    """
    if not self.hascontents:
      return False
    # The entry is valid only if it came from the same file and the file
    # has not been modified since it was cached.
    return self.filename == source and self.mtime == mtime

  def Insert(self, source, mtime, recordlist):
    """Insert records in cache, replacing any previous contents.

    Args:
      source: name of file whose data is being cached.
      mtime: last modification time of file being cached.
      recordlist: list of StatsProto instances retrieved from
        file in reverse chronological order (i.e. most recent first).
    """
    self.hascontents = True
    self.filename = source
    self.mtime = mtime
    self.recordlist = recordlist
class Filter(object):
  """Returns a subset of records that meet filtering criteria.

  While navigating the tool, developers may wish to focus on a certain
  subset of records that meet desired filters. This class is used to
  specify the desired filtering criteria. Currently, the supported filters
  are (i) by time of recording; and (ii) request latency.
  """

  # Class-level defaults; __init__ overrides them on the instance only
  # when the corresponding filter value is supplied (truthy).
  url = None
  starttime = None
  endtime = None
  latency_lower = None
  latency_upper = None

  def __init__(self, url=None, starttime=None, endtime=None,
               latency_lower=None, latency_upper=None):
    """Set filtering criteria.

    Args:
      url: consider only requests corresponding to this URL.
      starttime: consider only records recorded with timestamp (in seconds)
        higher than this value. Timestamps are measured from start of
        recording of entire data source.
      endtime: consider only records recorded with timestamp (in seconds)
        lower than this value.
      latency_lower: consider only requests with latency (in milliseconds)
        greater than this value.
      latency_upper: consider only requests with latency lower than this
        value.
    """
    self.url = url
    # Bounds typically arrive as request-argument strings; empty/None
    # values (filter not supplied) keep the class-level None defaults.
    if starttime:
      self.starttime = int(starttime)
    if endtime:
      self.endtime = int(endtime)
    if latency_lower:
      self.latency_lower = int(latency_lower)
    if latency_upper:
      self.latency_upper = int(latency_upper)
    # Bug fix: the two adjacent string literals used to concatenate with
    # no separator, logging 'end: %slatency_lower: %s'. Added the space.
    logging.info('Filtering requests: url: %s start: %s end: %s '
                 'latency_lower: %s, latency_upper: %s',
                 url, starttime, endtime, latency_lower, latency_upper)

  def Match(self, url, timestamp, latency):
    """Check if record meets filtering criteria.

    Args:
      url: path of that http request (after normalization).
      timestamp: timestamp of record.
      latency: latency of request that record pertains to.

    Returns:
      Boolean which is True if the record matches all configured
      filtering criteria and False otherwise. Unset criteria match
      everything; bounds are inclusive.
    """
    if self.url and url != self.url:
      return False
    if self.starttime and timestamp < self.starttime:
      return False
    if self.endtime and timestamp > self.endtime:
      return False
    if self.latency_lower and latency < self.latency_lower:
      return False
    if self.latency_upper and latency > self.latency_upper:
      return False
    return True
# Module-level singleton: records parsed from one source are cached here
# so repeated page loads avoid re-parsing (see Cache docstring).
cache = Cache()
# Alias for the builtin open(); presumably indirected so tests can
# monkey-patch file access -- TODO confirm against test code.
mockable_open = open
class StatsPage(webapp.RequestHandler):
  """Webapp request handler that renders the Appstats analysis pages."""

  # Directory containing this module; used below to locate the HTML
  # templates under templates/.
  dirname = os.path.join(os.path.dirname(__file__))
def FilterRecords(self, recordlist, recording_starttime):
"""Returns subset of records that meet filtering crtieria.
While navigating the tool, developers may wish to focus on a certain
subset of records that meet desired filters. Currently, the supported
filters are (i) by time of recording; and (ii) request latency. Filter
information is parsed from request arguments.
Args:
recordlist: List of raw appstats records over which filtering condition
must be applied.
recording_starttime: Timestamp when recording of data starts expressed
in seconds. This is the timestamp of the earliest recorded Appstats
data.
Returns:
Subset of records that meet the filtering criteria
"""
url = self.request.get('url')
latency_lower = self.request.get('latency_lower')
latency_upper = self.request.get('latency_upper')
starttime = self.request.get('starttime')
endtime = self.request.get('endtime')
filter_condition = Filter(url=url,
starttime=starttime,
endtime=endtime,
latency_lower=latency_lower,
latency_upper=latency_upper)
filtered_records = []
for record in recordlist:
path_key = recording.config.extract_key(record)
reltime = int(record.start_timestamp_milliseconds() * 0.001 -
recording_starttime)
latency = record.duration_milliseconds()
ismatch = filter_condition.Match(path_key, reltime, latency)
if ismatch:
filtered_records.append(record)
logging.info('Original number of records: %d', len(recordlist))
logging.info('After filtering: number of records: %d',
len(filtered_records))
return filter_condition, filtered_records
def RenderMain(self, urlstatsdict, source, recording_starttime):
"""Rendering main page of analysis page.
Args:
urlstatsdict: A dictionary with keys being URL paths, and values
being URLStat objects.
source: Source of Appstats data. Either filename if being read from
a file or MEMCACHE if being read from memcache.
recording_starttime: Timestamp when recording of data starts expressed
in seconds. This is the timestamp of the earliest recorded Appstats
data.
"""
resptime_byfreq, intervals = process.URLFreqRespTime(urlstatsdict)
data = {
'resptime_byfreq': resptime_byfreq,
'intervals': intervals,
'source': source,
'recordingstart': time.asctime(time.gmtime(recording_starttime)),
}
path = os.path.join(self.dirname, 'templates/main.html')
self.response.out.write(template.render(path, data))
def RenderDrill(self, url, urlstatsdict, recording_starttime, source,
filter_condition):
"""Rendering analysis page that drills into URL.
Args:
url: URL that is being drilled into.
urlstatsdict: A dictionary with keys being URL paths, and values
being URLStat objects.
recording_starttime: Timestamp when recording of data starts expressed
in seconds. This is the timestamp of the earliest recorded Appstats
data.
source: Source of Appstats data. Either filename if being read from
a file or MEMCACHE if being read from memcache.
filter_condition: Filter object that specifies filtering criteria on
which requests must be shown.
"""
if url in urlstatsdict:
urlstats = urlstatsdict[url]
drill = process.DrillURL(urlstats)
data = {
'url': url,
'drill': drill,
'first_timestamp': recording_starttime,
'recordingstart': time.asctime(time.gmtime(recording_starttime)),
'source': source,
'filter_json': json.dumps(filter_condition.__dict__),
'filter': filter_condition.__dict__,
}
path = os.path.join(self.dirname, 'templates/drill.html')
self.response.out.write(template.render(path, data))
def RenderDetail(self, url, urlstatsdict, records_bytimestamp, detail):
"""Renders detailed Appstats view of single request.
Args:
url: URL that is being drilled into.
urlstatsdict: A dictionary with keys being URL paths, and values
being URLStat objects.
records_bytimestamp: A dictionary. Each key is the timestamp of an
Appstats record (expressed in seconds). Each value is the
corresponding Appstats record (RequestStatProto protobuf).
detail: An index that can help identify which record is being
desired.
"""
if url in urlstatsdict:
urlstats = urlstatsdict[url]
revindex = -detail - 1
ts = urlstats.urlrequestlist[revindex].timestamp
record = records_bytimestamp[ts]
ui.render_record(self.response, record)
def RenderError(self, errormessage, source):
"""Render error message page.
Args:
errormessage: Error message to be rendered.
source: Source of Appstats data. Either filename if being read from
a file or MEMCACHE if being read from memcache.
"""
data = {
'errormessage': errormessage,
'source': source,
}
path = os.path.join(self.dirname, 'templates/error.html')
self.response.out.write(template.render(path, data))
def RenderPklList(self, pklfiles):
    """Render a directory listing of all downloaded .pkl files.

    Args:
      pklfiles: List of pkl-file tuples found in the application root.
    """
    template_path = os.path.join(self.dirname, 'templates/showPklFiles.html')
    self.response.out.write(template.render(template_path, {'pklfiles': pklfiles}))
def ReadableTime(self, seconds):
    """Convert an elapsed duration in seconds into a user-friendly string.

    The duration is expressed in the largest convenient unit (seconds,
    minutes, hours or days) with singular/plural handled, so directory
    listings read naturally, e.g. "1 minute ago" or "3 hours ago".

    This replaces the old ``past.utils.old_div`` Python-2 shim with plain
    division: the quotient is only ever displayed through ``%d`` (which
    truncates), so floor vs. true division produce identical output.

    Args:
      seconds: Seconds elapsed since an Appstats data file was downloaded.

    Returns:
      Readable version of the elapsed time, e.g. "5 minutes ago".
    """
    # (unit size in seconds, singular unit name), largest unit first.
    for unit_seconds, unit_name in ((86400, 'day'), (3600, 'hour'),
                                    (60, 'minute')):
        if seconds >= unit_seconds:
            count = seconds / unit_seconds
            break
    else:
        count, unit_name = seconds, 'second'
    if int(count) == 1:
        return '%d %s ago' % (count, unit_name)
    return '%d %ss ago' % (count, unit_name)
def ListPklFiles(self):
    """Create a list of available .pkl files in the application root.

    Scans the application root directory (the one containing app.yaml)
    for pickled Appstats snapshots.

    Returns:
      A list of (filename, elapsed_seconds, elapsed_text) tuples, one per
      .pkl file, where elapsed_text is a user-friendly rendering of the
      seconds elapsed since last modification. The list is sorted by
      elapsed seconds, i.e. most recently downloaded files first.
    """
    rootdir = self.GetRoot()
    currtime = time.time()
    pklfiles = []
    for filename in os.listdir(rootdir):
        # str.endswith replaces the old re.search('\.pkl$', ...) call,
        # whose non-raw '\.' escape is deprecated syntax in Python 3.
        if filename.endswith('.pkl'):
            path = os.path.join(rootdir, filename)
            elapsed_secs = currtime - os.path.getmtime(path)
            pklfiles.append((filename, elapsed_secs,
                             self.ReadableTime(elapsed_secs)))
    # Smallest elapsed time first == most recently modified first.
    pklfiles.sort(key=lambda entry: entry[1])
    return pklfiles
def GetRoot(self):
    """Determine the root directory of the application.

    Walks upward from the handler's directory until a directory
    containing app.yaml is found.

    Returns:
      Root directory of the application (the one with app.yaml), or
      None if no ancestor directory contains app.yaml.
    """
    rootdir = self.dirname
    # Climb toward '/' until app.yaml shows up.
    while rootdir != '/' and not os.path.exists(
            os.path.join(rootdir, 'app.yaml')):
        rootdir = os.path.dirname(rootdir)
    # Either we stopped on a directory holding app.yaml, or we reached
    # '/' and must check it explicitly.
    if rootdir != '/' or os.path.exists('/app.yaml'):
        logging.info('Application Root directory: %s', rootdir)
        return rootdir
    logging.error('No parent directory has app.yaml!')
    return None
def ReadData(self, source):
    """Load Appstats records from the requested source.

    Args:
      source: 'MEMCACHE' to read live data from memcache, otherwise the
        name of a .pkl file relative to the application root.

    Returns:
      A (errormessage, recordlist) pair. errormessage is None on success
      and a user-facing string on failure; recordlist holds Appstats
      records (RequestStatProto protobufs) in reverse chronological
      order, or None when an error occurred.
    """
    if source == 'MEMCACHE':
        return None, loader.FromMemcache()
    rootdir = self.GetRoot()
    if rootdir is None:
        return 'No parent directory has app.yaml!', None
    source_root = os.path.join(rootdir, source)
    try:
        outfile = mockable_open(source_root, 'rb')
    except IOError:
        logging.error('Cannot open %s', source)
        return 'Unable to open file!', None
    mtime = os.path.getmtime(source_root)
    # Unpickling is expensive; reuse cached records as long as the file
    # has not been modified since they were read.
    if cache.IsCached(source, mtime):
        logging.info('Using cached records from %s', source)
        return None, cache.recordlist
    logging.info('Reading fresh records from %s', source)
    recordlist = loader.UnpickleFromFile(outfile)
    cache.Insert(source, mtime, recordlist)
    return None, recordlist
def InitURLStats(self, recordlist):
    """Build per-URL statistics structures from raw Appstats records.

    Args:
      recordlist: Appstats records (RequestStatProto protobufs) in
        reverse chronological order (most recent first).

    Returns:
      A (records_bytimestamp, urlstatsdict) pair: the first maps record
      timestamps (in seconds) to records, the second maps URL paths to
      URLStats objects aggregating all requests for that path.
    """
    records_bytimestamp = {}
    urlstatsdict = {}
    for record in recordlist:
        # Record timestamps are in milliseconds; keys are in seconds.
        timestamp = record.start_timestamp_milliseconds() * 0.001
        records_bytimestamp[timestamp] = record
        path_key = recording.config.extract_key(record)
        if path_key not in urlstatsdict:
            urlstatsdict[path_key] = stats.URLStats(path_key)
        urlstatsdict[path_key].AddRequest(record)
    return records_bytimestamp, urlstatsdict
def get(self):
    """Handler for statistics/diagnostics page."""
    logging.info(self.request.path)
    # Canonicalize the URL first: the rendered pages use relative links,
    # so the path must end with '/'; the query string is preserved
    # across the redirect.
    if not self.request.path.endswith('/'):
        querystring = self.request.query_string
        if not querystring:
            self.redirect(self.request.path + '/')
        else:
            self.redirect(self.request.path + '/?' + self.request.query_string)
        return
    # Without a 'source' argument there is nothing to analyze yet: show
    # the directory listing of downloaded .pkl snapshots instead.
    if not 'source' in self.request.arguments():
        pklfiles = self.ListPklFiles()
        self.RenderPklList(pklfiles)
    else:
        source = self.request.get('source')
        logging.info('Before ReadData')
        errormessage, recordlist = self.ReadData(source)
        logging.info('After ReadData')
        if errormessage:
            self.RenderError(errormessage, source)
            return
        if not recordlist:
            self.RenderError('No records in this Appstats snapshot.', source)
            return
        # recordlist is most-recent-first, so the *last* record marks the
        # start of the recording window (milliseconds -> seconds).
        recording_starttime = recordlist[-1].start_timestamp_milliseconds()
        recording_starttime *= 0.001
        # FilterRecords is defined outside this view; presumably it applies
        # the user's filter form to the raw records -- confirm at its def.
        filter_condition, filtered_records = self.FilterRecords(
            recordlist, recording_starttime)
        records_bytimestamp, urlstatsdict = self.InitURLStats(filtered_records)
        url = self.request.get('url')
        detail = self.request.get('detail')
        # Dispatch on which query arguments are present: neither ->
        # overview page; url only -> per-URL drill-down; detail too ->
        # single-request detail view.
        if not url and not detail:
            self.RenderMain(urlstatsdict, source, recording_starttime)
        elif not detail:
            self.RenderDrill(url, urlstatsdict, recording_starttime,
                             source, filter_condition)
        else:
            detail = int(detail)
            self.RenderDetail(url, urlstatsdict, records_bytimestamp, detail)
class LocalStaticHandler(webapp.RequestHandler):
    """Request handler to serve static files.

    Only files directly in the static subdirectory are rendered this
    way (no subdirectories).
    """

    def get(self):
        """Serve the static file named by the last path component."""
        here = os.path.dirname(__file__)
        # Keep only the basename, so path tricks cannot escape static/.
        fn = self.request.path
        i = fn.rfind('/')
        fn = fn[i+1:]
        fn = os.path.join(here, 'static', fn)
        ctype, _ = mimetypes.guess_type(fn)
        assert ctype and '/' in ctype, repr(ctype)
        expiry = 3600  # Client cache lifetime in seconds.
        expiration = email.Utils.formatdate(time.time() + expiry, usegmt=True)
        fp = mockable_open(fn, 'rb')
        try:
            self.response.out.write(fp.read())
        finally:
            fp.close()
        self.response.headers['Content-type'] = ctype
        # BUG FIX: the original emitted the literal text 'max-age=expiry';
        # interpolate the actual number of seconds so clients cache at all.
        self.response.headers['Cache-Control'] = 'public, max-age=%d' % expiry
        self.response.headers['Expires'] = expiration
# Routing table; under webapp's full-path matching the '/stats/*' pattern
# matches only '/stats' plus optional trailing slashes, so the later, more
# specific '/stats/file' and '/stats/static/.*' routes remain reachable.
URLMAP = [
    ('/stats/local/.*', LocalStaticHandler),
    ('/stats/*', StatsPage),
    ('/stats/file', ui.FileHandler),
    ('/stats/static/.*', ui.StaticHandler),
]

app = webapp.WSGIApplication(URLMAP, debug=True)


def main():
    """CGI entry point: run the WSGI application directly."""
    util.run_bare_wsgi_app(app)


if __name__ == '__main__':
    main()
| 1.78125 | 2 |
end2you/data_provider/get_provider.py | tfyd/myEnd2you | 0 | 12761103 | <reponame>tfyd/myEnd2you
import torch
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
from functools import partial
from .audio_provider import AudioProvider
from .visual_provider import VisualProvider
from .multifile_audiovisual_provider import MultiFile_AVProvider
from .singlefile_audiovisual_provider import SingleFile_AVProvider
def get_provider(modality):
    """Factory method returning the provider class for a modality.

    Args:
        modality (str): One of 'audio', 'visual' or 'audiovisual'.

    Returns:
        The data-provider class for the requested modality (raises
        KeyError for anything else).
    """
    providers = {
        'audio': AudioProvider,
        'visual': VisualProvider,
        'audiovisual': MultiFile_AVProvider,
    }
    return providers[modality]
def pad_collate(batch):
    """Collate a batch of variable-length samples by zero-padding.

    Args:
        batch (list): Tuples of (data, labels, file name), where ``data``
            is either a single per-sample array or a list of per-modality
            arrays.

    Returns:
        modality_tensors: Padded batch tensor, or a list of them (one per
            modality) when each sample carries several modalities.
        labels (torch.Tensor): Padded label tensor.
        num_seqs_per_sample (list): Unpadded label-sequence length of each
            sample.
        data_file (tuple): File names, passed through unchanged.
    """
    data, labels, data_file = zip(*batch)
    multi_modal = isinstance(data[0], list)
    if not multi_modal:
        # Normalize to the multi-modality layout: one modality per sample.
        data = [[sample] for sample in data]
    n_modalities = len(data[0])
    modality_tensors = [
        pad_sequence([torch.Tensor(sample[m]) for sample in data],
                     batch_first=True)
        for m in range(n_modalities)
    ]
    num_seqs_per_sample = [len(seq) for seq in labels]
    padded_labels = pad_sequence([torch.Tensor(seq) for seq in labels],
                                 batch_first=True)
    if not multi_modal:
        modality_tensors = modality_tensors[0]
    return modality_tensors, padded_labels, num_seqs_per_sample, data_file
def get_dataloader(params, **kwargs):
    """Build the DataLoader for the modality described in ``params``.

    Args:
        params (Params): Options needed to load data:
            ``modality`` (str): Modality to provide data from.
            ``dataset_path`` (str): Path to the `hdf5` data files.
            ``seq_length`` (int): Number of consecutive frames to load.
            ``batch_size`` (int): Batch size.
            ``cuda`` (bool): Pin host memory for CUDA transfers.
            ``num_workers`` (int): Number of worker processes.
            ``is_training`` (bool): Shuffle only when training.

    Returns:
        A torch DataLoader over the requested provider, collated with
        :func:`pad_collate`.
    """
    provider_cls = get_provider(params.modality)
    dataset = provider_cls(params.dataset_path, seq_length=params.seq_length)
    return DataLoader(dataset,
                      batch_size=params.batch_size,
                      shuffle=params.is_training,
                      num_workers=params.num_workers,
                      pin_memory=params.cuda,
                      collate_fn=pad_collate)
| 2.4375 | 2 |
unitology/conf.py | bashu/django-unitology | 9 | 12761104 | <filename>unitology/conf.py
# -*- coding: utf-8 -*-
from django.conf import settings # pylint: disable=W0611
from appconf import AppConf
from .variables import METRIC
class UnitologySettings(AppConf):
    """django-appconf settings container for the unitology app.

    Via appconf's prefixing convention this exposes the Django setting
    ``UNITOLOGY_DATABASE_UNITS``, defaulting to the metric system for
    values persisted in the database.
    """

    DATABASE_UNITS = METRIC  # Unit system used for stored values.

    class Meta:
        prefix = 'unitology'
        # Settings are resolved through this module's `settings` object.
        holder = 'unitology.conf.settings'
| 1.273438 | 1 |
scripts/mu_sims_exon.py | stephenrong/mutation-paper | 0 | 12761105 | <filename>scripts/mu_sims_exon.py
#!/usr/bin/env python
from __future__ import division
import sys
import gzip
from mu_sims_module import *
from numpy.random import randint
# # # real sequences
# NOTE(review): this script uses Python 2 print statements; it will not run
# under Python 3 as written.
if __name__ == '__main__':
    # Slice of the exon list to simulate: [iter_start, iter_end).
    iter_start = int(sys.argv[1])
    iter_end = int(sys.argv[2])
    # Offset keeps this run's seed distinct from neighboring slices.
    np.random.seed(iter_start+2)
    # import mut_matrix (7-mer context mutation-rate table)
    mut_file = "../data/mu-matrix-7mers.txt"
    mut_matrix = get_mut_matrix(mut_file)
    # import coding sequences (one per gene, FASTA inside gzip)
    with gzip.open("../data/hg19-unipAliSwissprot-cds_genes.txt.gz", "rt") as handle:
        seq_exon = [record.seq for record in SeqIO.parse(handle, "fasta")]
    # output files share a prefix encoding the slice being simulated
    run_file = "../results/simulations/exon_"+str(iter_start)+"_"+str(iter_end)
    with open(run_file+"_track_seq.txt", "w+") as track_seq, open(run_file+"_track_mut.txt", "w+") as track_mut, open(run_file+"_track_verbose.txt", "w+") as track_verbose:
        # add header
        prefix = "init_cond"+"\t"+"constr_cond"+"\t"+"run_number"
        track_seq.write(prefix+"\t"+"seq_length"+"\t"+"mutation"+"\t"+"mut_scaled"+"\t"+"mutation_tot"+"\t"+"mut_scaled_tot"+"\t"+"sequence"+"\n")
        track_mut.write(prefix+"\t"+"seq_length"+"\t"+"mutation"+"\t"+"mut_scaled"+"\t"+"mutation_tot"+"\t"+"mut_scaled_tot"+"\t"+"mut_mean"+"\t"+"esr_mean"+"\n")
        # NOTE(review): the verbose header appears to be missing a tab
        # between seq_length and mutation -- confirm against the parser of
        # this file before changing it.
        track_verbose.write(prefix+"\t"+"seq_length"+"mutation"+"\t"+"mut_scaled"+"\t"+"mutation_tot"+"\t"+"mut_scaled_tot"+"\t"+"codonBefore"+"\t" "codonAfter"+"\t"+"wtMotif"+"\t"+"mtMotif"+"\t"+"mutRate"+"\t"+"aminoBefore"+"\t"+"aminoAfter"+"\t"+"aminoCheck"+"\n")
        # Neutral condition: Grantham threshold 300 -- presumably large
        # enough to accept any substitution (TODO confirm in
        # muSimsConstraintGranthamSimulation).
        for i in range(iter_start, iter_end):
            init_cond = "exon"
            constr_cond = "neutral"
            run_number = str(i)
            print init_cond+"_"+constr_cond+"_"+run_number
            seq_sequence = seq_exon[i]
            mut_step = 50
            mut_final = 10*len(seq_sequence)
            muSimsConstraintGranthamSimulation(mut_matrix, track_seq, track_mut, track_verbose, init_cond, constr_cond, run_number, seq_sequence, mut_final, mut_step, 300, False)
        # Identity condition: threshold 0, i.e. only synonymous changes.
        for i in range(iter_start, iter_end):
            init_cond = "exon"
            constr_cond = "identity"
            run_number = str(i)
            print init_cond+"_"+constr_cond+"_"+run_number
            seq_sequence = seq_exon[i]
            mut_step = 50
            mut_final = 10*len(seq_sequence)
            muSimsConstraintGranthamSimulation(mut_matrix, track_seq, track_mut, track_verbose, init_cond, constr_cond, run_number, seq_sequence, mut_final, mut_step, 0, False)
        # for i in range(iter_start, iter_end):
        #   init_cond = "exon"
        #   constr_cond = "grantham"
        #   run_number = str(i)
        #   print init_cond+"_"+constr_cond+"_"+run_number
        #   seq_sequence = seq_exon[i]
        #   mut_step = 50
        #   mut_final = 10*len(seq_sequence)
        #   muSimsConstraintGranthamSimulation(mut_matrix, track_seq, track_mut, track_verbose, init_cond, constr_cond, run_number, seq_sequence, mut_final, mut_step, 30, False)
    # Post-processing runs after the 'with' block so the track files are
    # flushed and closed before being re-read.
    # track kmer freqs
    for kmer_size in range(6, 7):
        out_seq_kmers(run_file+"_track_seq.txt", run_file+"_track_kmers"+str(kmer_size)+".txt", kmer_size, False)
    # track mut, esr, and rosenberg scores
    out_seq_mut(run_file+"_track_seq.txt", run_file+"_track_mut.txt")
| 2.296875 | 2 |
jux/jux/create_df_minmax.py | Jadit19/Inter-IIT-Tech-Meet-2022 | 0 | 12761106 | import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from .helper import exp_fit_func, inverse_exp_func, exp_func
def exp_curve_fit_(x_range, ln_y_range):
    """Fit ln(y) = ln(a) + b*x and return the exponential parameters (a, b)."""
    (ln_a, b), _ = curve_fit(exp_fit_func, x_range, ln_y_range)
    return np.exp(ln_a), b
def get_interm_zip_features_(ynew, _s4, _p4, _e1):
    """Keep only candidate flares whose times are strictly ordered.

    A candidate i is kept when start < peak < end. The inputs are
    parallel index lists; the peak intensity is looked up in ``ynew``.

    Returns:
        Four parallel lists: start times, peak times, end times and the
        intensity at each peak.
    """
    start_times, peak_times, end_times, peak_intensities = [], [], [], []
    for start, peak, end in zip(_s4, _p4, _e1):
        if start < peak < end:
            start_times.append(start)
            peak_times.append(peak)
            end_times.append(end)
            peak_intensities.append(ynew[peak])
    return start_times, peak_times, end_times, peak_intensities
def get_interm_zip_(h1, h2, h3, h4):
    """Bundle parallel flare-feature lists into a labelled DataFrame."""
    frame = pd.DataFrame(zip(h1, h2, h3, h4))
    frame.columns = ["start_time", "peak_time", "end_time", "peak_intensity"]
    return frame
def get_final_zip_features(xnew, ynew, _zip):
    """Fit an exponential decay to each candidate flare and derive features.

    For each (start, peak, end) candidate the peak-to-end segment of
    ``ynew`` is fitted in log space; rising fits (b < 0) are discarded.
    The fit is extrapolated to estimate when the flare decays back to
    its start intensity, a goodness-of-fit figure is computed, and the
    flare is assigned a letter class from the log10 of its peak.

    Args:
        xnew: Time axis, indexable by the indices stored in ``_zip``.
        ynew: Intensity series aligned with ``xnew``.
        _zip: DataFrame with start_time/peak_time/end_time columns holding
            indices into ``xnew``/``ynew``, plus peak_intensity.

    Returns:
        Nine parallel lists: start/peak/end indices, estimated end time,
        start intensity, peak intensity, background counts, fit error and
        class string.
    """
    st = _zip["start_time"]
    pt = _zip["peak_time"]
    et = _zip["end_time"]
    pi = _zip["peak_intensity"]
    y_min = np.min(ynew)
    final_st = []
    final_pt = []
    final_et = []
    est_et = []
    final_si = []
    final_pi = []
    final_err = []
    final_bc = []
    _class = []
    for i in range(len(st)):
        # Decay segment, re-based so t = 0 at the peak; fitted in log space.
        x_range = [int(xnew[j] - xnew[pt[i]]) for j in range(pt[i], et[i])]
        ln_y_range = [np.log(ynew[j]) for j in range(pt[i], et[i])]
        try:
            popc, pcov = curve_fit(exp_fit_func, x_range, ln_y_range)
            ln_a, b = popc
            a = np.exp(ln_a)
            # the 7th filter, can't allow increasing exponential so-called-flares!
            # _calc_et is estimated end time from the analytical function fitted
            if b < 0:
                continue
            _calc_et = inverse_exp_func(ynew[st[i]], a, b)
            final_st.append(st[i])
            final_pt.append(pt[i])
            final_et.append(et[i])
            final_pi.append(pi[i])
            final_si.append(ynew[st[i]])
            est_et.append(_calc_et + pt[i])
            # Background estimate: mean of start and end intensities.
            final_bc.append((ynew[st[i]] + ynew[et[i]]) / 2)
            y_dash = []
            y_diff = []
            y_proj = []
            x_proj = []
            # Fitted values and residuals over the observed decay segment.
            for _i, j in enumerate(x_range):
                __y = exp_func(xnew[j], a, b)
                y_dash.append(__y)
                y_diff.append(abs(np.exp(ln_y_range[_i]) - __y))
            # Projection of the fitted curve beyond the observed end time.
            for j in range(et[i] - pt[i], _calc_et):
                if (j + pt[i]) < len(xnew):
                    x_proj.append(xnew[j + pt[i]])
                    y_proj.append(exp_func(xnew[j], a, b))
            # error is sum(difference between fitted and actual) / ((peak intensity - minimum intensity) * duration from peak to actual end)
            # NOTE(review): the numerator sums y_dash (fitted values), not
            # y_diff (the residuals the comment above describes) -- confirm
            # which was intended.
            final_err.append((np.sum(y_dash)) / ((pi[i] - y_min) * (len(x_range))))
            # GOES-style letter class from log10(peak / 25); _val keeps one
            # decimal place of the fractional magnitude.
            val = np.log10(pi[i] / 25)
            _str = ""
            _val = str(int(val * 100) / 10)[-3:]
            if int(val) < 1:
                _str = "A" + _val
            elif int(val) == 1:
                _str = "B" + _val
            elif int(val) == 2:
                _str = "C" + _val
            elif int(val) == 3:
                _str = "M" + _val
            elif int(val) > 3:
                _str = "X" + _val
            _class.append(_str)
        except:
            # NOTE(review): bare except silently drops candidates on *any*
            # failure (not just fit failures); consider narrowing.
            print("Error in curve fitting")
    return (
        final_st,
        final_pt,
        final_et,
        est_et,
        final_si,
        final_pi,
        final_bc,
        final_err,
        _class,
    )
def get_final_zip(g1, g2, g3, g4, g5, g6, g7, g8, g9):
    """Assemble the per-flare feature lists into a labelled DataFrame."""
    column_names = [
        "start_time",
        "peak_time",
        "end_time",
        "est_end_time",
        "start_intensity",
        "peak_intensity",
        "background_counts",
        "error",
        "class",
    ]
    frame = pd.DataFrame(zip(g1, g2, g3, g4, g5, g6, g7, g8, g9))
    frame.columns = column_names
    return frame
| 2.578125 | 3 |
tfdet/model/detector/fcn.py | Burf/tfdetection | 0 | 12761107 | <filename>tfdet/model/detector/fcn.py
import tensorflow as tf
from ..neck import FeatureUpsample
def conv(filters, kernel_size, strides = 1, padding = "same", use_bias = True, kernel_initializer = "he_normal", **kwargs):
    """Thin factory for a Conv2D layer with he_normal initialization defaults."""
    layer = tf.keras.layers.Conv2D(filters, kernel_size,
                                   strides=strides,
                                   padding=padding,
                                   use_bias=use_bias,
                                   kernel_initializer=kernel_initializer,
                                   **kwargs)
    return layer
def fcn(feature, n_class = 35, n_feature = 512, n_depth = 2, method = "bilinear", logits_activation = tf.keras.activations.sigmoid, convolution = conv, normalize = tf.keras.layers.BatchNormalization, activation = tf.keras.activations.relu):
    """Fully Convolutional Network segmentation head.

    Upsamples (and concatenates) the input feature maps, refines them with
    ``n_depth`` conv(-norm)-act stages, fuses the refined maps back with
    the upsampled input, and projects to ``n_class`` logit channels.

    NOTE: layer names ("feature_conv1", "post_conv", "logits", ...) are
    part of the checkpoint contract -- do not rename them.
    NOTE(review): indentation was reconstructed; only the Concatenate call
    is assumed to be guarded by ``0 < n_depth`` -- confirm against the
    upstream tfdet sources.
    """
    #https://arxiv.org/pdf/1411.4038.pdf
    if not isinstance(feature, list):
        feature = [feature]
    out = feature = FeatureUpsample(concat = True, method = method, name = "feature_upsample")(feature)
    for index in range(n_depth):
        # Bias is redundant when followed by a normalization layer.
        out = convolution(n_feature, 3, padding = "same", use_bias = normalize is None, name = "feature_conv{0}".format(index + 1))(out)
        if normalize is not None:
            out = normalize(name = "feature_norm{0}".format(index + 1))(out)
        out = tf.keras.layers.Activation(activation, name = "feature_act{0}".format(index + 1))(out)
    if 0 < n_depth:
        # Skip connection: fuse refined maps with the upsampled input.
        out = tf.keras.layers.Concatenate(axis = -1, name = "post_concat")([out, feature])
    out = convolution(n_feature, 3, padding = "same", use_bias = normalize is None, name = "post_conv")(out)
    if normalize is not None:
        out = normalize(name = "post_norm")(out)
    out = tf.keras.layers.Activation(activation, name = "post_act")(out)
    out = convolution(n_class, 1, use_bias = True, activation = logits_activation, name = "logits")(out)
    return out
s3_arbitrator.py | dariusstrasel/Google_Analytics_Embedded_Dashboard | 1 | 12761108 | <filename>s3_arbitrator.py
"""s3.arbitrator.py
Gets Google Service Account credentials from an S3 file object.
"""
import boto3
import botocore
import os
import json
# S3 location of the Google service-account key object.
BUCKET_NAME = 'google-dashboard-service-key'
KEY = 'key.json'
def hasAWSEnviornmentalVariables():
    """Return True when both AWS credential variables are set (non-empty).

    The name (including its historical misspelling) is kept as-is because
    callers depend on it.
    """
    access_key = os.environ.get('AWS_ACCESS_KEY_ID')
    secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
    return bool(access_key and secret_key)
def openS3Connection():
    """Return an S3 resource object; raise if AWS credentials are missing."""
    if not hasAWSEnviornmentalVariables():
        raise Exception("Could not find access tokens in OS ENV variables.")
    return boto3.resource('s3')
def getJSONKeyDictionary():
    """Fetch S3 object "key.json" and return its body as a Python dict."""
    s3 = openS3Connection()
    # Read the object body directly; nothing is written to the local
    # filesystem.
    raw_bytes = s3.Object(BUCKET_NAME, KEY).get()['Body'].read()
    return json.loads(raw_bytes.decode('utf-8'))
| 3 | 3 |
src/ionotomo/tomography/sparse_covariance.py | Joshuaalbert/IonoTomo | 7 | 12761109 | import numpy as np
from scipy.spatial import cKDTree
from scipy.spatial.distance import pdist, squareform
from scipy.sparse import coo_matrix
import pylab as plt
def squared_exponential(x2, D=3):
    """Squared-exponential kernel evaluated at squared distance ``x2``.

    ``D`` is accepted for interface compatibility but is unused.
    """
    return np.exp(-0.5 * x2)
def matern52(x2):
    """Matern-5/2 kernel evaluated at squared distance ``x2``.

    k(r) = (1 + sqrt(5) r + (5/3) r^2) * exp(-sqrt(5) r), with r = sqrt(x2).

    BUG FIX: the original aliased ``res = x2`` and then applied in-place
    operators (``*=``, ``+=``), silently mutating the caller's array when
    ``x2`` was an ndarray. This version never modifies its input.

    Args:
        x2: Squared distance(s); scalar or ndarray.

    Returns:
        Kernel value(s) with the same shape as ``x2``.
    """
    r = np.sqrt(x2)
    poly = 1.0 + np.sqrt(5.0) * r + (5.0 / 3.0) * x2
    return poly * np.exp(-np.sqrt(5.0) * r)
def sparse_covariance(cfun, points, sigma, corr, tol=0.1, upper_tri=True):
    """Build a sparse covariance matrix by truncating the kernel support.

    The kernel ``cfun`` (of squared, correlation-scaled distance) is
    evaluated only for point pairs closer than an isotropic support
    radius, found by scanning outward until the kernel drops below
    ``tol`` of its value at zero. ``tol == 0`` disables truncation
    (infinite radius).

    Args:
        cfun: Kernel of squared scaled distance, e.g. matern52.
        points: (N, D) coordinates.
        sigma: Overall standard deviation (matrix is scaled by sigma**2).
        corr: Correlation length, scalar or per-dimension array of size D.
        tol: Relative kernel value below which entries are dropped.
        upper_tri: If True, store only the diagonal plus the upper
            triangle of pairs; otherwise the full symmetric pattern.

    Returns:
        scipy.sparse matrix of shape (N, N).
    """
    N, D = points.shape
    if not isinstance(corr, np.ndarray):
        corr = np.ones(D) * corr
    # Get support radius in the correlation-scaled space.
    if tol == 0.:
        isot = np.inf
    else:
        isot = 0.
        for dim in range(D):
            # Unit vector along this dimension.
            direction = (np.arange(D) == dim).astype(float)
            t = 0
            c0 = cfun(0)
            c = c0
            # Step outward until the kernel falls below tol of its peak.
            while c / c0 > tol:
                t += 0.1
                c = cfun(np.sum((t * direction / corr) ** 2))
            # NOTE(review): t is found on already corr-scaled distances but
            # is rescaled by corr[dim] again here -- confirm the intended
            # units of the support radius.
            isot = max(isot, t / corr[dim])
    # print("isotropic support: {}".format(isot))
    kd = cKDTree(points / corr)
    if upper_tri:
        # Unique pairs within the support radius, plus the diagonal.
        pairs = kd.query_pairs(isot, p=2, output_type='ndarray')
        pairs = np.concatenate([np.array([np.arange(N)] * 2).T, pairs])
        x1 = points[pairs[:, 0], :]
        x2 = points[pairs[:, 1], :]
        # Squared correlation-scaled distance per pair.
        dx = x1 - x2
        dx /= corr
        dx *= dx
        dx = np.sum(dx, axis=1)
        cval = cfun(dx)
        csparse = coo_matrix((cval, (pairs[:, 0], pairs[:, 1])), shape=(N, N))
    else:
        # Full symmetric pattern straight from the KD-tree.
        X = kd.sparse_distance_matrix(kd, isot, output_type='coo_matrix')
        cval = cfun(X.data ** 2)
        csparse = coo_matrix((cval, (X.col, X.row)), shape=(N, N))
    return (sigma ** 2) * csparse
def dense_covariance(cfun, points, sigma, corr):
    """Dense covariance matrix: sigma**2 * cfun(scaled squared distances).

    Args:
        cfun: Kernel of squared scaled distance.
        points: (N, D) coordinates.
        sigma: Overall standard deviation.
        corr: Correlation length, scalar or per-dimension array of size D.

    Returns:
        (N, N) ndarray.
    """
    N, D = points.shape
    if not isinstance(corr, np.ndarray):
        corr = np.ones(D) * corr
    scaled = points / corr
    sq_dists = squareform(pdist(scaled, metric='sqeuclidean'))
    return (sigma ** 2) * cfun(sq_dists)
def test_sparse_covariance():
    """Check the sparse builder against the dense reference (tol=0 is exact)."""
    corr = np.array([0.2, 0.5, 0.1])
    X, Y, Z = np.meshgrid(np.linspace(0, 1, 50),
                          np.linspace(0, 1, 10),
                          np.linspace(0, 1, 10), indexing='ij')
    points = np.array([X.flatten(), Y.flatten(), Z.flatten()]).T
    cdense = dense_covariance(matern52, points, 1., corr)
    # With tol=0 and the full pattern, the sparse result must match exactly.
    csparse = sparse_covariance(matern52, points, 1., corr, tol=0,
                                upper_tri=False)
    assert np.all(np.isclose(csparse.toarray(), cdense))
    # Report the storage saving of the truncated upper-triangular form.
    for tol in (0.1, 0.01):
        truncated = sparse_covariance(matern52, points, 1., corr, tol=tol,
                                      upper_tri=True)
        saving = 1 - truncated.nonzero()[0].size / cdense.size
        print("upper triangle tol={} -> saving: {}%".format(tol, saving))
def test_sparse_covariance_performance():
    """Report the sparsity saving on a large, ionosphere-sized grid."""
    corr = np.array([5., 5., 1.])
    X, Y, Z = np.meshgrid(np.linspace(-80, 80, 150),
                          np.linspace(-80, 80, 150),
                          np.linspace(0, 1000, 20), indexing='ij')
    points = np.array([X.flatten(), Y.flatten(), Z.flatten()]).T
    csparse = sparse_covariance(matern52, points, 1., corr, tol=0.1,
                                upper_tri=True)
    saving = 1 - csparse.nonzero()[0].size / points.size ** 2
    print("upper triangle tol=0.1 -> saving: {}%".format(saving))
if __name__ == '__main__':
    # Only the (large) performance benchmark runs as a script.
    test_sparse_covariance_performance()
| 2.1875 | 2 |
h2o-py/tests/testdir_algos/psvm/pyunit_svm_svmguide3.py | ahmedengu/h2o-3 | 6,098 | 12761110 | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.psvm import H2OSupportVectorMachineEstimator
def svm_svmguide3():
    """Train PSVM on the svmguide3 data and sanity-check its accuracy."""
    train_frame = h2o.import_file(pyunit_utils.locate("smalldata/svm_test/svmguide3scale.svm"))
    test_frame = h2o.import_file(pyunit_utils.locate("smalldata/svm_test/svmguide3scale_test.svm"))
    # parameters taken from libsvm guide
    model = H2OSupportVectorMachineEstimator(hyper_param=128, gamma=0.125,
                                             disable_training_metrics=False)
    model.train(y="C1", training_frame=train_frame, validation_frame=test_frame)
    accuracy = model.model_performance(valid=True).accuracy()[0][1]
    # The libsvm guide reports ~87%; this only shows we are not far off.
    assert accuracy >= 0.80
if __name__ == "__main__":
    # Standalone invocation sets up an H2O cluster first; otherwise the
    # surrounding test harness has already done so.
    pyunit_utils.standalone_test(svm_svmguide3)
else:
    svm_svmguide3()
| 2.140625 | 2 |
mppca.py | michelbl/MPPCA | 25 | 12761111 | # Translation in python of the Matlab implementation of <NAME> and
# <NAME>, of the algorithm described in
# "Mixtures of Probabilistic Principal Component Analysers",
# <NAME> and <NAME>, Neural Computation 11(2),
# pp 443–482, MIT Press, 1999
import numpy as np
def initialization_kmeans(X, p, q, variance_level=None):
    """Initialize MPPCA parameters with a k-means clustering of X.

    X : (N, d) dataset
    p : number of clusters
    q : dimension of the latent space
    variance_level : optional scale for the random W / sigma2 init

    Returns (pi, mu, W, sigma2, clusters):
    pi : proportions of clusters
    mu : centers of the clusters in the observation space
    W : latent to observation matrices
    sigma2 : noise
    clusters : cluster index assigned to each sample
    """
    N, d = X.shape
    # initialization: draw p *distinct* random samples as initial centers.
    init_centers = np.random.randint(0, N, p)
    while (len(np.unique(init_centers)) != p):
        init_centers = np.random.randint(0, N, p)
    mu = X[init_centers, :]
    distance_square = np.zeros((N, p))
    clusters = np.zeros(N, dtype=np.int32)
    # Lloyd iterations until the distortion stops changing.
    # NOTE(review): convergence relies on exact float equality of D; fine
    # for k-means (distortion is eventually stationary) but worth knowing.
    D_old = -2
    D = -1
    while(D_old != D):
        D_old = D
        # assign clusters
        for c in range(p):
            distance_square[:, c] = np.power(X - mu[c, :], 2).sum(1)
        clusters = np.argmin(distance_square, axis=1)
        # compute distortion
        distmin = distance_square[range(N), clusters]
        D = distmin.sum()
        # compute new centers
        # NOTE(review): an empty cluster would make mean(0) return NaN --
        # not guarded against here.
        for c in range(p):
            mu[c, :] = X[clusters == c, :].mean(0)
    # parameter initialization: random W, cluster proportions, and noise
    # either from variance_level or from the within-cluster distortion.
    pi = np.zeros(p)
    W = np.zeros((p, d, q))
    sigma2 = np.zeros(p)
    for c in range(p):
        if variance_level:
            W[c, :, :] = variance_level * np.random.randn(d, q)
        else:
            W[c, :, :] = np.random.randn(d, q)
        pi[c] = (clusters == c).sum() / N
        if variance_level:
            sigma2[c] = np.abs((variance_level/10) * np.random.randn())
        else:
            sigma2[c] = (distmin[clusters == c]).mean() / d
    return pi, mu, W, sigma2, clusters
def mppca_gem(X, pi, mu, W, sigma2, niter):
    """Run ``niter`` generalized EM iterations for a mixture of PPCA models.

    Implements the update equations of Tipping & Bishop (1999); see the
    module header. ``mu``, ``W`` and ``sigma2`` are updated *in place*;
    ``pi`` is rebound each iteration.

    Args:
        X: (N, d) data matrix.
        pi: (p,) mixture proportions.
        mu: (p, d) component means.
        W: (p, d, q) latent-to-observation matrices.
        sigma2: (p,) per-component noise variances.
        niter: Number of EM iterations.

    Returns:
        (pi, mu, W, sigma2, R, L, sigma2hist): the final parameters, the
        (N, p) responsibilities, the per-iteration log-likelihood L, and
        the per-iteration history of sigma2.
    """
    N, d = X.shape
    p = len(sigma2)
    _, q = W[0].shape
    sigma2hist = np.zeros((p, niter))
    M = np.zeros((p, q, q))
    Minv = np.zeros((p, q, q))
    Cinv = np.zeros((p, d, d))
    logR = np.zeros((N, p))
    R = np.zeros((N, p))
    M[:] = 0.
    Minv[:] = 0.
    Cinv[:] = 0.
    L = np.zeros(niter)
    for i in range(niter):
        print('.', end='')
        # --- E step: per-component precision matrices and log responsibilities.
        for c in range(p):
            sigma2hist[c, i] = sigma2[c]
            # M = sigma2 I + W^T W (q x q)
            M[c, :, :] = sigma2[c]*np.eye(q) + np.dot(W[c, :, :].T, W[c, :, :])
            Minv[c, :, :] = np.linalg.inv(M[c, :, :])
            # Cinv: inverse data covariance via the Woodbury-style identity.
            Cinv[c, :, :] = (np.eye(d)
                - np.dot(np.dot(W[c, :, :], Minv[c, :, :]), W[c, :, :].T)
                ) / sigma2[c]
            # R_ni: unnormalized log responsibility of component c.
            deviation_from_center = X - mu[c, :]
            logR[:, c] = ( np.log(pi[c])
                + 0.5*np.log(
                    np.linalg.det(
                        np.eye(d) - np.dot(np.dot(W[c, :, :], Minv[c, :, :]), W[c, :, :].T)
                    )
                )
                - 0.5*d*np.log(sigma2[c])
                - 0.5*(deviation_from_center * np.dot(deviation_from_center, Cinv[c, :, :].T)).sum(1)
                )
        # Log-likelihood via log-sum-exp over components.
        # NOTE(review): 3.141593 is a truncated pi; np.pi would be exact.
        myMax = logR.max(axis=1).reshape((N, 1))
        L[i] = (
            (myMax.ravel() + np.log(np.exp(logR - myMax).sum(axis=1))).sum(axis=0)
            - N*d*np.log(2*3.141593)/2.
        )
        # Normalize responsibilities per sample in the log domain.
        logR = logR - myMax - np.reshape(np.log(np.exp(logR - myMax).sum(axis=1)), (N, 1))
        # Updated mixture proportions, also via log-sum-exp (per column).
        myMax = logR.max(axis=0)
        logpi = myMax + np.log(np.exp(logR - myMax).sum(axis=0)) - np.log(N)
        logpi = logpi.T
        pi = np.exp(logpi)
        R = np.exp(logR)
        # --- M step: update means, W and sigma2 component by component.
        for c in range(p):
            mu[c, :] = (R[:, c].reshape((N, 1)) * X).sum(axis=0) / R[:, c].sum()
            deviation_from_center = X - mu[c, :].reshape((1, d))
            # SW = S_c W_c, with S_c the responsibility-weighted covariance.
            SW = ( (1/(pi[c]*N))
                * np.dot((R[:, c].reshape((N, 1)) * deviation_from_center).T,
                    np.dot(deviation_from_center, W[c, :, :]))
                )
            Wnew = np.dot(SW, np.linalg.inv(sigma2[c]*np.eye(q) + np.dot(np.dot(Minv[c, :, :], W[c, :, :].T), SW)))
            sigma2[c] = (1/d) * (
                (R[:, c].reshape(N, 1) * np.power(deviation_from_center, 2)).sum()
                /
                (N*pi[c])
                -
                np.trace(np.dot(np.dot(SW, Minv[c, :, :]), Wnew.T))
            )
            W[c, :, :] = Wnew
    return pi, mu, W, sigma2, R, L, sigma2hist
def mppca_predict(X, pi, mu, W, sigma2):
    """Posterior responsibilities R[n, c] of each PPCA component per sample.

    Uses the same log-domain density evaluation as the training loop
    (Tipping & Bishop, 1999), normalized per sample with the log-sum-exp
    trick.

    Args:
        X: (N, d) data matrix.
        pi: (p,) mixture proportions.
        mu: (p, d) component means.
        W: (p, d, q) latent-to-observation matrices.
        sigma2: (p,) per-component noise variances.

    Returns:
        (N, p) array of responsibilities; each row sums to one.
    """
    N, d = X.shape
    p = len(sigma2)
    _, q = W[0].shape
    log_resp = np.zeros((N, p))
    identity_d = np.eye(d)
    identity_q = np.eye(q)
    for c in range(p):
        w_c = W[c, :, :]
        # m_inv = (sigma2 I + W^T W)^-1  (q x q)
        m_inv = np.linalg.inv(sigma2[c] * identity_q + np.dot(w_c.T, w_c))
        # shrink = I - W m_inv W^T; Cinv = shrink / sigma2 (Woodbury form).
        shrink = identity_d - np.dot(np.dot(w_c, m_inv), w_c.T)
        c_inv = shrink / sigma2[c]
        centered = X - mu[c, :]
        quad = (centered * np.dot(centered, c_inv.T)).sum(1)
        log_resp[:, c] = (np.log(pi[c])
                          + 0.5 * np.log(np.linalg.det(shrink))
                          - 0.5 * d * np.log(sigma2[c])
                          - 0.5 * quad)
    # Normalize each row in the log domain before exponentiating.
    row_max = log_resp.max(axis=1).reshape((N, 1))
    log_resp = log_resp - row_max - np.reshape(
        np.log(np.exp(log_resp - row_max).sum(axis=1)), (N, 1))
    return np.exp(log_resp)
| 2.984375 | 3 |
day1/day1.py | Kavuti/advent-of-code-2015 | 0 | 12761112 | def get_input():
with open("input.txt", "r") as file:
return file.read()
def quiz1(data):
    """Part 1: print the final floor ('(' goes up one, ')' goes down one)."""
    floor = data.count("(") - data.count(")")
    print(floor)
def quiz2(data):
    """Part 2: print the 1-based position of the first character that takes
    Santa to the basement (floor -1), then stop."""
    floor = 0
    for position, char in enumerate(data, start=1):
        if char == "(":
            floor += 1
        elif char == ")":
            floor -= 1
        if floor == -1:
            print(position)
            break
if __name__ == "__main__":
    # Read the puzzle input once and answer both parts.
    data = get_input()
    quiz1(data)
    quiz2(data)
distribubot/__init__.py | orionspeakgaming/orionsdistributor | 4 | 12761113 | <gh_stars>1-10
""" distribubot."""
from .version import version as __version__
# Public submodules re-exported by the distribubot package.
__all__ = [
    'utils',
    'distribubot'
]
examples/python_file.py | luxcium/vsc-pop-n-lock-theme | 78 | 12761114 | <reponame>luxcium/vsc-pop-n-lock-theme
#!/usr/bin/env python
"""Test file for Python syntax highlighting in editors / IDEs.
Meant to cover a wide range of different types of statements and expressions.
Not necessarily sensical or comprehensive (assume that if one exception is
highlighted that all are, for instance).
Extraneous trailing whitespace can't be tested because of svn pre-commit hook
checks for such things.
"""
# Comment
# OPTIONAL: XXX catch your attention
# TODO(me): next big thing
# FIXME: this does not work
# Statements
from __future__ import with_statement # Import
from sys import path as thing
print(thing)
assert True # keyword
def foo(): # function definition
    # Fixture only: exercises 'def'/'return' highlighting.
    return []
class Bar(object): # Class definition
    # Fixture only: minimal context manager so 'with Bar()' below highlights.
    def __enter__(self):
        pass
    def __exit__(self, *args):
        pass
foo() # UNCOLOURED: function call
while False: # 'while'
continue
for x in foo(): # 'for'
break
with Bar() as stuff:
pass
if False:
pass # 'if'
elif False:
pass
else:
pass
# Constants
'single-quote', u'unicode' # Strings of all kinds; prefixes not highlighted
"double-quote"
"""triple double-quote"""
'''triple single-quote'''
r'raw'
ur'unicode raw'
'escape\n'
'\04' # octal
'\xFF' # hex
'\u1111' # unicode character
1 # Integral
1L
1.0 # Float
.1
1+2j # Complex
# Expressions
1 and 2 or 3 # Boolean operators
2 < 3 # UNCOLOURED: comparison operators
spam = 42 # UNCOLOURED: assignment
2 + 3 # UNCOLOURED: number operators
[] # UNCOLOURED: list
{} # UNCOLOURED: dict
(1,) # UNCOLOURED: tuple
all # Built-in functions
GeneratorExit # Exceptions
| 2.453125 | 2 |
onap_tests/unit/components/sdnc.py | Orange-OpenSource/xtesting-onap-tests | 0 | 12761115 | #!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import unittest
class SdncTestingBase(unittest.TestCase):
    """Placeholder base class for SDNC component tests; no cases yet."""
    pass
# {"input": {"sdnc-request-header":
# {"svc-notification-url":
# "http:\\/\\/onap.org:8080\\/adapters\\/rest\\/SDNCNotify",
# "svc-request-id": "test", "svc-action": "reserve"},
# "request-information":
# {"request-action": "PreloadVNFRequest", "order-version": "1",
# "notification-url": "onap.org", "order-number": "1", "request-id": "test"},
# "vnf-topology-information": {"vnf-assignments": {"vnf-vms": [],
# "availability-zones": [], "vnf-networks": []},
# "vnf-parameters":
# [{"vnf-parameter-name": "netconf_user_1",
# "vnf-parameter-value": "netconfuser1"},
# {"vnf-parameter-name": "netconf_password_1",
# "vnf-parameter-value": "ncuser1Pass"},
# {"vnf-parameter-name": "netconf_ssh_public_key_1",
# "vnf-parameter-value": "vmrf_key_pair"}],
# "vnf-topology-identifier":
# {"service-type": "a674f0ce-3f7e-4f75-96f7-39830e9a1b61",
# "generic-vnf-type": "vMRFaaS3/vMRF3 0",
# "vnf-name": "be1e0d5e-4c89-4467-b2ef-c1c3f8a5b136",
# "generic-vnf-name": "vMRFaaS3-service-instance-0DP8AF",
# "vnf-type": "vmrf30..Vmrf3..base_swms..module-0"}}}}
#
# SDNC url: /restconf/operations/VNF-API:preload-vnf-topology-operation
#
# {"output":{"svc-request-id":"test",
# "response-code":"200","ack-final-indicator":"Y"}}
| 1.851563 | 2 |
pyqmri/models/__init__.py | agahkarakuzu/PyQMRI | 18 | 12761116 | """Package containig various model files for fitting.
This package contains the various MRI models currently implemented for the
toolbox. In additon, the "GeneralModel" can be run with a simple text file
to devine new model. An exemplary textfile for simple models can be
generated by running the "genDefaultModelfile" function of the "GeneralModel".
"""
# __all__ = ["BiExpDecay",
# "DiffdirLL",
# "GeneralModel",
# "ImageReco",
# "IRLL",
# "template",
# "VFA"]
from .BiExpDecay import Model as BiExpDecay
from .DiffdirLL import Model as DiffdirLL
from .GeneralModel import Model as GeneralModel
from .ImageReco import Model as ImageReco
from .IRLL import Model as IRLL
from .VFA import Model as VFA
| 1.601563 | 2 |
setup.py | mahlberg/postgresql-metrics | 0 | 12761117 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages
from setuptools import setup


def _read_requirements(path='requirements.txt'):
    """Return the requirement strings listed in *path*.

    Replaces the old ``from pip.req import parse_requirements`` usage: that
    API is a pip internal, required a ``session`` argument from pip 6 on,
    and was removed entirely in pip 20, so importing it breaks installs.
    Blank lines and ``#`` comments are ignored.
    """
    with open(path) as f:
        return [line.strip() for line in f
                if line.strip() and not line.strip().startswith('#')]


# requirements is a list of requirement strings,
# e.g. ['psycopg2==2.6.1', 'logbook==0.10.1']
requirements = _read_requirements()

setup(name='postgresql-metrics',
      version='0.2.3',
      author=u'<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/spotify/postgresql-metrics',
      description='Simple service to provide metrics for your PostgreSQL database',
      packages=find_packages(),
      install_requires=requirements,
      entry_points={
          'console_scripts': [
              'postgresql-metrics=postgresql_metrics.metrics_logic:main',
          ]}
      )
| 1.65625 | 2 |
Final Exam/code/data2csv.py | billgoo/Rutgers-CS543-Massive-Data-Storage-and-Retrieval | 0 | 12761118 | <reponame>billgoo/Rutgers-CS543-Massive-Data-Storage-and-Retrieval
# -*-coding:utf-8 -*-
import csv  # NOTE(review): unused here, kept in case external tooling expects it
import pandas as pd

"""Convert the MovieLens ratings/users/movies ``.dat`` files to CSV files."""

# Human-readable labels for the numeric age/occupation codes in users.dat.
age_dict = {"1": "Under 18", "18": "18-24", "25": "25-34", "35": "35-44", "45": "45-49", "50": "50-55", "56": "56+"}
occupation_dict = {"0": "'other' or not specified", "1": "academic/educator", "2": "artist",
                   "3": "clerical/admin", "4": "college/grad student", "5": "customer service",
                   "6": "doctor/health care", "7": "executive/managerial", "8": "farmer", "9": "homemaker",
                   "10": "K-12 student", "11": "lawyer", "12": "programmer", "13": "retired",
                   "14": "sales/marketing", "15": "scientist", "16": "self-employed", "17": "technician/engineer",
                   "18": "tradesman/craftsman", "19": "unemployed", "20": "writer"}


def _read_dat(path, transform=None):
    """Read a '::'-delimited MovieLens .dat file into a list of str rows.

    *transform*, if given, is applied to each row and its result kept.
    Factors out the three copy-pasted read loops of the original script.
    """
    rows = []
    with open(path, 'rb') as filein:
        for line in filein:
            row = line.decode('utf-8').strip('\n').split('::')
            rows.append(transform(row) if transform else row)
    return rows


def _decode_user(row):
    # Replace the numeric age/occupation codes with their text labels.
    row[2], row[3] = age_dict[row[2]], occupation_dict[row[3]]
    return row


# transfer ratings
df_ratings = pd.DataFrame(_read_dat('ratings.dat'), columns=['UserID', 'MovieID', 'Rating', 'Timestamp'])
df_ratings.to_csv('ratings.csv', columns=['UserID', 'MovieID', 'Rating', 'Timestamp'], index=False, sep=',')
# transfer users
df_users = pd.DataFrame(_read_dat('users.dat', _decode_user), columns=['UserID', 'Gender', 'Age', 'Occupation', 'Zip-code'])
df_users.to_csv('users.csv', columns=['UserID', 'Gender', 'Age', 'Occupation', 'Zip-code'], index=False, sep=',')
# transfer movies
df_movies = pd.DataFrame(_read_dat('movies.dat'), columns=['MovieID', 'Title', 'Genres'])
df_movies.to_csv('movies.csv', columns=['MovieID', 'Title', 'Genres'], index=False, sep=',')
| 2.53125 | 3 |
data/data_storages.py | P0lyFish/noise2-series | 4 | 12761119 | <reponame>P0lyFish/noise2-series<gh_stars>1-10
import os.path as osp
import lmdb
import numpy as np
import pickle
class BaseDataStorage:
    """Abstract interface for image-data backends.

    Subclasses are expected to provide item access and a length.
    """

    def get(self, index):
        # Original version omitted ``self`` and so could never be called as a
        # method; fixed, and made explicitly abstract instead of silently
        # returning None.
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError
class LmdbDataStorage(BaseDataStorage):
    """Image backend reading HxWxC frames from an LMDB database.

    Expects a ``meta_info.pkl`` next to the database holding ``resolution``
    (e.g. ``'H_W_C'``), ``keys`` and ``dtype``.
    """
    def __init__(self, data_path):
        self.env = lmdb.open(
            data_path,
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False
        )
        with open(osp.join(osp.dirname(data_path),
                           'meta_info.pkl'), 'rb') as f:
            meta_info = pickle.load(f)
        # [H, W, C] parsed from the 'H_W_C' resolution string
        self.img_size = list(map(int, meta_info['resolution'].split('_')))
        self.keys = meta_info['keys']
        # NOTE(review): self.dtype is stored but never used below -- the
        # actual dtype is inferred from the buffer length; confirm intent.
        self.dtype = meta_info['dtype']
    def __getitem__(self, key):
        # Integer index -> zero-padded 8-digit LMDB key.
        key = '{:08d}'.format(key)
        with self.env.begin(write=False) as txn:
            buf = txn.get(key.encode('ascii'))
        num_pixels = self.img_size[0] * self.img_size[1] * self.img_size[2]
        # Infer dtype from buffer size: 8 bytes/px -> float64, 1 byte/px -> uint8.
        if len(buf) == num_pixels * 8:
            img_flat = np.frombuffer(buf, dtype=np.float64)
        elif len(buf) == num_pixels * 1:
            img_flat = np.frombuffer(buf, dtype=np.uint8)
        else:
            print(len(buf))
            raise ValueError("Invalid data size")
        H, W, C = self.img_size
        img = img_flat.reshape(H, W, C)
        return img
    def __len__(self):
        return len(self.keys)
class NumpyDataStorage(BaseDataStorage):
    """Image backend serving frames from a single ``.npy`` file."""

    def __init__(self, data_path):
        # allow_pickle is needed when the array was saved as an object array.
        self.data = np.load(data_path, allow_pickle=True)

    def __getitem__(self, key):
        return self.data[key]

    def __len__(self):
        # len() of an ndarray is its first dimension, i.e. the frame count.
        return len(self.data)
| 2.171875 | 2 |
app/services/user_services.py | rarenicks/donna-backend | 0 | 12761120 | from flask import jsonify
from ast import literal_eval
from app import db
from app.models.office import User, Location
def validate_and_add_user(form):
    # Create a user from a registration form; a location is required on this
    # path (skip_location=False), unlike the bulk-upload path.
    status, new_user = validate_and_save_user(form, skip_location=False)
    if status:
        return jsonify(success=True, item=new_user.to_dict())
    else:
        return jsonify(success=False, message='Missing required fields!'), 400
def fetch_all_users(is_plain_dict=False, args=None):
    """Return all users as dicts, optionally filtered by campus.

    Args:
        is_plain_dict: use ``to_plain_dict()`` instead of ``to_dict()``.
        args: optional request-args mapping; honours a ``campusId`` key.
    """
    users = None
    if args:
        campus_id = args.get('campusId', None)
        if campus_id:
            locations = Location.query.filter_by(campus_id=campus_id).all()
            users = User.query.filter(User.location_id.in_([l.id for l in locations])).all()
    if users is None:
        # No args, or args without a campusId: fall back to all users. The
        # original raised UnboundLocalError when args was truthy but had no
        # campusId, because `users` was only assigned in the else branch.
        users = User.query.all()
    return [user.to_plain_dict() if is_plain_dict else user.to_dict() for user in users]
def fetch_user_with(id=None):
    # Read-only lookup; delegates to the shared find/delete helper.
    return find_or_delete_user_with(id=id)
def find_or_delete_user_with(id=None, should_delete=False):
    # Fetch (and optionally delete) a user by primary key. Returns the user
    # payload with 200, or a 404 payload when no row matches. Note the
    # response still contains the user's data after a successful delete.
    user = User.query.filter_by(id=id).first()
    if user:
        if should_delete:
            db.session.delete(user)
            db.session.commit()
        return jsonify(item=user.to_dict(), success=True), 200
    else:
        return jsonify(message='Requested Record Not Available!', success=False), 404
def delete_user_with(id=None):
    # Same lookup as fetch_user_with, but removes the row when found.
    return find_or_delete_user_with(id=id, should_delete=True)
def validate_input_and_authenticate(form):
    # Authenticate a username/password pair from the login form.
    # SECURITY NOTE(review): the password is compared verbatim in the query,
    # which implies passwords are stored in plaintext -- they should be
    # hashed (e.g. werkzeug.security) and verified, not filtered on.
    uname = form.get('username', None)
    passwd = form.get('password', None)
    if uname and passwd:
        user = User.query.filter_by(username=uname, password=passwd).first()
        if user:
            return jsonify(success=True, item=user.to_dict())
        else:
            return jsonify(success=False, message='Authentication Failed!'), 403
    else:
        return jsonify(success=False, message='Missing required fields!'), 401
def validate_and_upload_users(ustr, reset):
    # ustr: raw bytes of a Python-literal list of user dicts. Single quotes
    # are swapped for double quotes before literal_eval -- NOTE(review): this
    # corrupts any value that itself contains a quote; JSON input would be safer.
    users = literal_eval(ustr.decode().replace("'", '"'))
    if reset :
        # Wipe all existing users before the bulk load.
        User.query.delete()
        db.session.commit()
    count = 0
    status = False
    for user in users:
        # skip_location=True: bulk-loaded users may have no location yet.
        status, u = validate_and_save_user(user, True)
        count += 1 if status else 0
    # print(new_user)
    # NOTE(review): `status` reflects only the *last* user processed, not
    # whether every upload succeeded -- confirm callers expect that.
    return status, count
def validate_and_save_user(form, skip_location):
    """Validate required fields and persist a new User.

    Returns (True, user) on success, (False, None) when a required field is
    missing or (unless skip_location) no locationId was supplied.
    """
    first_name = form.get("firstName", None)
    last_name = form.get("lastName", None)
    username = form.get("username", None)
    password = form.get("password", None)
    location_id = form.get("locationId", None) if "locationId" in form else None
    if first_name and last_name and username and password:
        if (not skip_location) and (not location_id):
            return False, None
        # NOTE(review): password is stored as received -- presumably the User
        # model hashes it; confirm, otherwise this stores plaintext.
        new_user = User(first_name=first_name, last_name=last_name, username=username, password=password, location_id=location_id)
        new_user.save()
        return True, new_user
    return False, None
| 2.578125 | 3 |
examples/textmap_simpletest_builtin_screen.py | MakerThornhill/CircuitPython_textMap | 2 | 12761121 | <reponame>MakerThornhill/CircuitPython_textMap
# Sample code using the textMap library and the "textBox" wrapper class
# Creates four textBox instances
# Inserts each textBox into a tileGrid group
# Writes text into the box one character at a time
# Moves the position of the textBox around the display
# Clears each textBox after the full string is written (even if the text is outside of the box)
import textmap
from textmap import textBox
import board
import displayio
import time
import terminalio
import fontio
import sys
import busio
DISPLAY_WIDTH=320
DISPLAY_HEIGHT=240
display = board.DISPLAY
display.show(None)
print ('Display is started')
# load all the fonts
print('loading fonts...')
import terminalio
fontList = []
fontHeight = []
##### the BuiltinFont terminalio.FONT has a different return strategy for get_glyphs and
# is currently not handled by these functions.
#fontList.append(terminalio.FONT)
#fontHeight = [10] # somehow the terminalio.FONT needs to be adjusted to 10
# Load some proportional fonts
fontFiles = [
'fonts/Helvetica-Bold-16.bdf',
'fonts/BitstreamVeraSans-Roman-24.bdf', # Header2
'fonts/BitstreamVeraSans-Roman-16.bdf', # mainText
]
from adafruit_bitmap_font import bitmap_font
for i, fontFile in enumerate(fontFiles):
thisFont = bitmap_font.load_font(fontFile)
fontList.append(thisFont)
fontHeight.append( thisFont.get_glyph(ord("M")).height )
preloadTheGlyphs= True # set this to True if you want to preload the font glyphs into memory
# preloading the glyphs will help speed up the rendering of text but will use more RAM
if preloadTheGlyphs:
# identify the glyphs to load into memory -> increases rendering speed
glyphs = b'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-,.:?! '
print('loading glyphs...')
for font in fontList:
font.load_glyphs(glyphs)
print('Glyphs are loaded.')
print('Fonts completed loading.')
# create group
import gc
gc.collect()
print( 'Memory free: {}'.format(gc.mem_free()) )
textBoxes=[] # list of textBox instances
textBoxes.append( textBox('', fontList[0], DISPLAY_WIDTH, DISPLAY_HEIGHT, backgroundColor=0x000000, textColor=0x443344) )
print( 'Memory free: {}'.format(gc.mem_free()) )
textBoxes.append( textBox('', fontList[0], 150, 60, backgroundColor=0x000000, textColor=0xFFFFFF) )
print( 'Memory free: {}'.format(gc.mem_free()) )
textBoxes.append( textBox('', fontList[1], 160, 100, backgroundColor=0xFF00FF, textColor=0xFFFFFF) )
print( 'Memory free: {}'.format(gc.mem_free()) )
textBoxes.append( textBox('', fontList[2], 180, 80, backgroundColor=0x00FFFF, textColor=0x444444) )
print( 'Memory free: {}'.format(gc.mem_free()) )
gc.collect()
myGroup = displayio.Group( max_size=len(textBoxes) ) # Create a group for displaying
tileGridList=[] # list of tileGrids
#startPositions
x=[0, 10, 160, 50]
y=[0, 20, 80, 150]
xVelocity=[0, 1, -1, 2]
yVelocity=[0, -1, 2, 1]
gc.collect()
stringList=[]
stringList.append('Full Screen Size: This is a stationary box, not a stationery box. Full Screen Size: This is a stationary box, not a stationery box. Full Screen Size: This is a stationary box, not a stationery box. Full Screen Size: This is a stationary box, not a stationery box. Full Screen Size: This is a stationary box, not a stationery box. Full Screen Size: This is a stationary box, not a stationery box. Full Screen Size: This is a stationary box, not a stationery box. Full Screen Size: This is a stationary box, not a stationery box. Full Screen Size: This is a stationary box, not a stationery box. Full Screen Size: This is a stationary box, not a stationery box.')
stringList.append('Helvetica Bold 16 font - with Black background color')
stringList.append('Vera Sans 24 font - this is a longer line that is wrapping around')
stringList.append('Vera Sans 16 font - how much text will this hold but it will not print text that goes outside the box but it will cut it off at the bottom if it is too large.')
for i, box in enumerate(textBoxes):
tileGridList.append (displayio.TileGrid(box.bitmap, pixel_shader=box.palette, x=x[i], y=y[i]) )
myGroup.append(tileGridList[i])
display.show(myGroup)
charCount=0
while True:
# Add characters one at a time.
for i, box in enumerate(textBoxes):
charToPrint=charCount % len(stringList[i])
if charToPrint == 0:
box.clearBitmap()
box.addText(stringList[i][charToPrint]) # add a character
gc.collect()
charCount += 1
# Move each box
for i, thisTileGrid in enumerate(tileGridList):
targetX=thisTileGrid.x + xVelocity[i]
targetY=thisTileGrid.y + yVelocity[i]
if ( (targetX + textBoxes[i].bitmap.width) >= DISPLAY_WIDTH ) or (targetX < 0):
xVelocity[i] = -1* xVelocity[i]
if ( (targetY + textBoxes[i].bitmap.height) >= DISPLAY_HEIGHT ) or (targetY < 0):
yVelocity[i] = -1* yVelocity[i]
thisTileGrid.x=thisTileGrid.x + xVelocity[i]
thisTileGrid.y=thisTileGrid.y + yVelocity[i]
gc.collect()
# Print the memory availability every 10 movements.
if charCount % 10 == 0:
print( 'Memory free: {}'.format(gc.mem_free()) ) | 2.9375 | 3 |
cdsso/contrib/register/signals.py | ASU-CodeDevils/sso.codedevils.org | 0 | 12761122 | from django.conf import settings
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from .models import KnownMember, StudentRegistration
from .tasks import notify_sds_registration, register_on_slack
from .utils import email_user_complete_registration
@receiver(pre_save, sender=StudentRegistration)
def check_known_members(instance: StudentRegistration, **kwargs):
    """
    Checks the current known members for the current registering user. If the user is a known
    member, their information is updated after registration.
    Args:
        instance (StudentRegistration): The student registration instance.
    """
    known_member = KnownMember.objects.filter(email__exact=instance.user.email)
    if known_member:
        known_member = known_member.first()
        # Carry the member's existing registration state onto the new record.
        instance.slack_registered = known_member.slack_registered
        instance.sds_registered = known_member.sds_registered
        # NOTE(review): instance.user is mutated but not saved here --
        # presumably persisted later in the save cycle; confirm.
        instance.user.name = known_member.name
        known_member.delete() # delete the known member to save space in the database
@receiver(post_save, sender=StudentRegistration)
def notify_complete_registration(instance: StudentRegistration, **kwargs):
    """
    Notifies the user when their registration has been updated.
    Args:
        instance (StudentRegistration): The student registration instance.
    """
    if settings.RUN_REGISTRATION_POST_SAVE_SIGNAL:
        save_again = False  # needed for if we notify admin/the user
        # add users to slack automatically with Flameboi util
        if (
            settings.FLAMEBOI["REGISTER_SLACK_USERS_WITH_FLAMEBOI"]
            and not instance.slack_registered
            and not instance.slack_add_attempt
        ):
            register_on_slack.delay(emails=[instance.user.email])
            instance.slack_add_attempt = True
            save_again = True
        # notify managers of new users to be added to SunDevilSync
        if (
            settings.NOTIFY_MANAGERS_SDS_REGISTRATION
            and not instance.sds_registered
            and not instance.sds_notified
        ):
            notify_sds_registration.delay(instance.user.email)
            instance.sds_notified = True
            save_again = True
        # notify a user if their registration has been completed
        if (
            settings.SEND_COMPLETED_REGISTRATION_NOTIFICATION
            and instance.completed_registration
            and not instance.completed_registration_notification
            and not instance._restart_registration
        ):
            # TODO send the user an email saying their registration has been completed
            email_user_complete_registration(email=instance.user.email)
            instance.completed_registration_notification = True
            save_again = True
        if save_again:
            # NOTE(review): saving inside a post_save handler re-fires this
            # signal; the *_attempt/*_notified flags are what stop recursion.
            instance.save()
| 2 | 2 |
cfgov/prepaid_agreements/urls.py | thephillipsequation/cfgov-refresh | 1 | 12761123 | <filename>cfgov/prepaid_agreements/urls.py
from django.conf.urls import url
from prepaid_agreements.views import detail, index
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^detail/(?P<product_id>\d+)/$', detail, name='detail'),
]
| 1.726563 | 2 |
include/os.py | HSTEHSTEHSTE/Landscape_Generator | 0 | 12761124 | import os
import numpy as np
folder = ""
file_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.chdir(file_path)
map_files = {
"main": "main.csv",
"landmass": "landmass.csv"
}
save_file = "map_saves"
def save(maps):
    """Persist each map in *maps* (name -> 2-D array) as CSV under ``save_file``."""
    for map_name in maps:
        # os.path.join is platform-safe, unlike manual '/' concatenation.
        np.savetxt(os.path.join(save_file, map_files[map_name]),
                   maps[map_name], delimiter=',')
def load(maps):
    """Load every known map (all keys of ``map_files``) into *maps* and return it."""
    for map_name in map_files:
        # os.path.join is platform-safe, unlike manual '/' concatenation.
        maps[map_name] = np.loadtxt(os.path.join(save_file, map_files[map_name]),
                                    delimiter=',')
    return maps
tests/docs/models.py | OlgaBorisova/django-pgfields | 1 | 12761125 | from __future__ import absolute_import, unicode_literals
from django_pg import models
class Hobbit(models.Model):
    # favorite_foods is stored as a single Postgres array column.
    name = models.CharField(max_length=50)
    favorite_foods = models.ArrayField(models.CharField(max_length=100))
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
class Elf(models.Model):
    # UUID primary key generated automatically on insert (auto_add=True).
    id = models.UUIDField(auto_add=True, primary_key=True)
    name = models.CharField(max_length=50)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
class AuthorField(models.CompositeField):
    # Composite Postgres type bundling an author's identity into one value.
    name = models.CharField(max_length=75)
    sex = models.CharField(max_length=6, choices=(
        ('male', 'Male'),
        ('female', 'Female'),
    ))
    birthdate = models.DateField()
class Book(models.Model):
    # `author` is stored inline as a composite value, not a foreign key.
    title = models.CharField(max_length=50)
    author = AuthorField()
    date_published = models.DateField()
| 2.0625 | 2 |
libweasyl/libweasyl/alembic/versions/088e13f2ae70_add_journal_content_column.py | greysteil/wzl-test | 1 | 12761126 | """Add journal content column
Revision ID: 088e13f2ae70
Revises: <KEY>
Create Date: 2017-08-21 04:34:29.541975
"""
# revision identifiers, used by Alembic.
revision = '088e13f2ae70'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Journals gain a free-text body; nullable so existing rows stay valid.
    op.add_column('journal', sa.Column('content', sa.Text(), nullable=True))
def downgrade():
    # Dropping the column would destroy journal bodies, so refuse to reverse.
    raise Exception('Reversing this migration could delete new journal content')
| 1.507813 | 2 |
scripts/postcontent_to_group.py | saikrishnarallabandi/Vision.falcon | 0 | 12761127 | #!usr/bin/env python
# Program to mine data from your own facebook account
import json
import facebook
import os
import sys
import random
token = os.environ.get('FACEBOOK_TOKEN')
group_id = str(os.environ.get('THOPGANG_GROUP_ID'))
timestamp = str(sys.argv[1])
polarity_file = '../data/polarity.may2020.pos'
print("The group is ", group_id)
def get_content(polarity_file):
    """Pick a random line from *polarity_file* and wrap it in a reminder message.

    Each line is whitespace-split; the final token (the polarity score) is
    dropped, and only lines with more than 7 tokens are eligible.

    Raises:
        IndexError: if no line in the file qualifies.
    """
    lines = []
    # Use a context manager; the original leaked the file handle.
    with open(polarity_file) as f:
        for raw in f:
            tokens = raw.split('\n')[0].split()
            content = ' '.join(tokens[:-1])
            if len(tokens) > 7:
                lines.append(content)
    selected = random.choice(lines)
    # Bug fix: the original put "+ '\n'" *inside* the string literal, so the
    # posted message contained the stray text instead of a line break.
    return "Do you remember this from last month?\n" + selected
def main():
    # Post a randomly chosen line from the polarity file to the configured
    # Facebook group via the Graph API.
    graph = facebook.GraphAPI(token)
    # profile = graph.get_object(
    #     'me', fields='first_name,location,link,email,groups')
    group = graph.get_object(id=group_id)
    # NOTE(review): local name `id` shadows the builtin.
    id = group['id']
    #pic = get_pic()
    #pic = pics_path + '/' + pic
    #graph.put_photo(album_path=id + '/photos', image=open(pic, 'rb'), message='Look at this! Posting at ' + timestamp + ' EST')
    content = get_content(polarity_file)
    graph.put_object(id, 'feed', message=content)
    print(group)
| 2.96875 | 3 |
serial_reader.py | elfofmaxwell/BENG187_length_measurements | 0 | 12761128 | <filename>serial_reader.py<gh_stars>0
import follow_log
# Tail serial port COM6 at 9600 baud and append everything read to ser_log.txt.
ser_log_path = 'D:\\se_ds\\ref\\ser_log.txt'
port = 'COM6'
baudrate = 9600
follow_log.output_serial(port, baudrate, 1, ser_log_path)
| 2.25 | 2 |
tests/test_resolver.py | rouge8/pex | 1 | 12761129 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import pytest
from twitter.common.contextutil import temporary_dir
from pex.common import safe_copy
from pex.fetcher import Fetcher
from pex.package import EggPackage, SourcePackage
from pex.resolvable import ResolvableRequirement
from pex.resolver import Unsatisfiable, _ResolvableSet, resolve
from pex.resolver_options import ResolverOptionsBuilder
from pex.testing import make_sdist
def test_empty_resolve():
empty_resolve = resolve([])
assert empty_resolve == []
with temporary_dir() as td:
empty_resolve = resolve([], cache=td)
assert empty_resolve == []
def test_simple_local_resolve():
project_sdist = make_sdist(name='project')
with temporary_dir() as td:
safe_copy(project_sdist, os.path.join(td, os.path.basename(project_sdist)))
fetchers = [Fetcher([td])]
dists = resolve(['project'], fetchers=fetchers)
assert len(dists) == 1
def test_diamond_local_resolve_cached():
# This exercises the issue described here: https://github.com/pantsbuild/pex/issues/120
project1_sdist = make_sdist(name='project1', install_reqs=['project2<1.0.0'])
project2_sdist = make_sdist(name='project2')
with temporary_dir() as dd:
for sdist in (project1_sdist, project2_sdist):
safe_copy(sdist, os.path.join(dd, os.path.basename(sdist)))
fetchers = [Fetcher([dd])]
with temporary_dir() as cd:
dists = resolve(['project1', 'project2'], fetchers=fetchers, cache=cd, cache_ttl=1000)
assert len(dists) == 2
def test_resolvable_set():
builder = ResolverOptionsBuilder()
rs = _ResolvableSet()
rq = ResolvableRequirement.from_string('foo[ext]', builder)
source_pkg = SourcePackage.from_href('foo-2.3.4.tar.gz')
binary_pkg = EggPackage.from_href('Foo-2.3.4-py3.4.egg')
rs.merge(rq, [source_pkg, binary_pkg])
assert rs.get(source_pkg.name) == set([source_pkg, binary_pkg])
assert rs.get(binary_pkg.name) == set([source_pkg, binary_pkg])
assert rs.packages() == [(rq, set([source_pkg, binary_pkg]), None)]
# test methods
assert rs.extras('foo') == set(['ext'])
assert rs.extras('Foo') == set(['ext'])
# test filtering
rs.merge(rq, [source_pkg])
assert rs.get('foo') == set([source_pkg])
assert rs.get('Foo') == set([source_pkg])
with pytest.raises(Unsatisfiable):
rs.merge(rq, [binary_pkg])
def test_resolvable_set_built():
builder = ResolverOptionsBuilder()
rs = _ResolvableSet()
rq = ResolvableRequirement.from_string('foo', builder)
source_pkg = SourcePackage.from_href('foo-2.3.4.tar.gz')
binary_pkg = EggPackage.from_href('foo-2.3.4-py3.4.egg')
rs.merge(rq, [source_pkg])
assert rs.get('foo') == set([source_pkg])
assert rs.packages() == [(rq, set([source_pkg]), None)]
with pytest.raises(Unsatisfiable):
rs.merge(rq, [binary_pkg])
updated_rs = rs.replace_built({source_pkg: binary_pkg})
updated_rs.merge(rq, [binary_pkg])
assert updated_rs.get('foo') == set([binary_pkg])
assert updated_rs.packages() == [(rq, set([binary_pkg]), None)]
| 2.109375 | 2 |
2021/day_02.py | lbreede/adventofcode | 2 | 12761130 | <filename>2021/day_02.py
# --- Day 2: Dive! ---
import aoc_helper
def drive(lst, use_aim=0):
    """Run the Day 2 submarine commands and return horizontal * depth.

    Args:
        lst: sequence of (direction, amount) pairs, direction in
            {"forward", "up", "down"}.
        use_aim: falsy -> part-1 rules (up/down change depth directly);
            truthy -> part-2 rules (up/down change aim, forward applies it).
    """
    horizontal = 0
    depth = 0
    aim = 0
    for direction, amount in lst:
        if direction == "forward":
            horizontal += amount
            if use_aim:
                depth += aim * amount
        elif direction == "up":
            if use_aim:
                aim -= amount
            else:
                depth -= amount
        elif direction == "down":
            if use_aim:
                aim += amount
            else:
                depth += amount
    return horizontal * depth
# Parse "direction amount" lines from the puzzle input into [str, int] pairs,
# then solve both parts with the same driver.
raw_cmd_list = aoc_helper.load_input("day02_input.txt")
cmd_list = []
for L in raw_cmd_list:
    direction, amount = L.split(" ")
    amount = int(amount)
    cmd_list.append([direction, amount])
part1 = drive(cmd_list)
part2 = drive(cmd_list, 1)
print(f"Part 1: {part1}")
print(f"Part 2: {part2}")
| 3.640625 | 4 |
bpcs/text_to_image.py | BburnN123/bpcs | 20 | 12761131 | import re
import string
from math import sqrt
import numpy as np
from PIL import Image
from .test_utils import show_html_diff
def digits_in_base_as_tuple(x, base):
    """Return the digits of the non-negative int *x* in *base*, most
    significant first.

    e.g. digits_in_base_as_tuple(20, 2) == (1, 0, 1, 0, 0)
    Returns () for x == 0, matching the original behaviour.

    Bug fix: the original used ``cur /= base``, which is *float* division on
    Python 3 and yields wrong digits / fails to terminate; divmod keeps the
    arithmetic integral on both Python 2 and 3.
    """
    cur = x
    digs = []
    while cur:
        cur, rem = divmod(cur, base)
        digs.append(rem)
    return tuple(reversed(digs))
def get_word_color_map_fcn(all_words):
    """
    Given a collection of words, return a function mapping each word to an
    RGB colour tuple, with word colours maximally spaced over the 256**3
    colour cube. A space ' ' is always added so padding gets a colour too.
    """
    words = set(all_words)
    words.add(' ')  # add space for padding
    ncolors = 256 ** 3
    # // keeps this an int on Python 3; the original '/' relied on Python 2
    # integer division and produces a float (breaking the digit math) on 3.
    ncolors_per_word = ncolors // len(words)
    word_order = sorted(words)
    def get_word_color(word):
        ind = word_order.index(word)
        assert ind >= 0
        colors = digits_in_base_as_tuple(ind * ncolors_per_word, 256)
        # Left-pad to exactly three channels (R, G, B).
        while len(colors) < 3:
            colors = (0,) + colors
        assert len(colors) == 3
        return colors
    return get_word_color
def list_to_uint8_array(colors, dims):
    """Pack *colors* (flat list or list of RGB tuples) into a uint8 array of
    shape *dims*, repeating the data cyclically if it is too short."""
    arr = np.array(colors)
    arr_shaped = np.resize(arr, dims)
    if arr.size != arr_shaped.size:
        diff = arr_shaped.size - arr.size
        # print() call works on both Python 2 and 3; the original used a
        # Python-2-only print statement.
        print("WARNING: txt will be replicated by {0} chars when printed to image".format(diff))
    arr_shaped = np.uint8(arr_shaped)
    return arr_shaped
def adjust_words_and_get_dims(words, verbose=False):
    """Pad *words* (mutated in place) with ' ' entries until the count is a
    perfect square, and return (words, [side, side, 3]) as image dimensions."""
    area = len(words)
    one_side = sqrt(area)
    # Round up to the next integer side length unless already a perfect square.
    desired_side = (int(one_side) + 1) if one_side > int(one_side) else int(one_side)
    diff = desired_side ** 2 - area
    words += [' '] * diff
    assert len(words) == desired_side ** 2, desired_side ** 2 - len(words)
    if verbose:
        # print() call works on Python 2 and 3; original was a py2-only
        # print statement.
        print('Adding %s words to end of txt' % (diff,))
    return words, [desired_side, desired_side, 3]
def str_to_words(txt, keep_spaces=False):
    """Split *txt* into words.

    With keep_spaces, each word is followed by the run of spaces that came
    after it in *txt* (kept verbatim), with a single trailing ' ' appended.
    """
    if not keep_spaces:
        return txt.split()
    words = txt.split()
    # Runs of consecutive spaces, in order of appearance, plus a final ' '.
    space_runs = [run for run in re.split('[^ ]', txt) if run]
    space_runs.append(' ')
    interleaved = []
    for word, run in zip(words, space_runs):
        interleaved.append(word)
        interleaved.append(run)
    return interleaved
def txt_to_uint8_array_by_word(txt):
    # One RGB colour per word (space runs kept as their own 'words'),
    # arranged into a square uint8 image array.
    words = str_to_words(txt, True)
    words, dims = adjust_words_and_get_dims(words)
    get_color = get_word_color_map_fcn(words)
    colors = [get_color(word) for word in words]
    return list_to_uint8_array(colors, dims)
def adjust_txt_and_get_dims(txt, verbose=False):
    """Pad *txt* with spaces to length 3 * side**2 and return
    (padded_txt, [side, side, 3]) so three chars map to one RGB pixel."""
    added = 0
    # pad with spaces to make the length divisible by 3
    rem = len(txt) % 3
    add = 3 - rem if rem else 0
    txt += ' ' * add
    added += add
    # pad with spaces to fill a square image; // keeps the arithmetic
    # integral (the original '/' relied on Python 2 integer division and
    # breaks with a TypeError on Python 3)
    area = len(txt) // 3
    one_side = sqrt(area)
    desired_side = (int(one_side) + 1) if one_side > int(one_side) else int(one_side)
    diff = 3 * (desired_side ** 2 - area)
    txt += ' ' * diff
    added += diff
    assert len(txt) == 3 * (desired_side ** 2), 3 * (desired_side ** 2) - len(txt)
    if verbose:
        # print() call works on Python 2 and 3; original was a py2-only
        # print statement.
        print('Adding %s spaces to end of txt' % (added,))
    return txt, [desired_side, desired_side, 3]
def txt_to_uint8_array_by_char(txt):
    # One uint8 channel per character: every 3 chars become one RGB pixel.
    txt, dims = adjust_txt_and_get_dims(txt, True)
    colors = [ord(x) for x in txt]
    return list_to_uint8_array(colors, dims)
def image_to_txt(imfile, txtfile):
    """
    converts each character to a number
    assuming the character is ascii
    and arranges all resulting colors into an array => image
    note: colors are inserted depth first, meaning
        e.g. if the first word is 'the'
        then the first pixel will be (ord('t'), ord('h'), ord('e'))
        'the' => (116, 104, 101) == #6A6865
    """
    # Inverse of txt_to_image for the by-char encoding: each colour channel
    # value becomes one character. NOTE(review): assumes values stay in the
    # ASCII range -- confirm for non-ASCII inputs.
    png = Image.open(imfile).convert('RGB')
    arr = np.array(png)
    dims = arr.size
    arr_flat = np.resize(arr, dims)
    chars = [chr(x) for x in arr_flat]
    with open(txtfile, 'w') as f:
        f.write(''.join(chars))
def txt_to_image(txtfile, imfile, by_char=True):
    # Render txtfile as an image: one colour channel per character (by_char)
    # or one colour per word (by_char=False).
    txt = open(txtfile).read()
    if by_char:
        arr = txt_to_uint8_array_by_char(txt)
    else:
        arr = txt_to_uint8_array_by_word(txt)
    im = Image.fromarray(arr)
    im.save(imfile)
def test_adjust_txt_and_get_dims():
    # Spot-check the padded square side length for a range of input lengths.
    vals = [5, 10, 11, 19, 24, 25, 31, 32, 269393]
    sides = [2, 2, 2, 3, 3, 3, 4, 4, 300]
    for val, side in zip(vals, sides):
        assert adjust_txt_and_get_dims(' '*val)[1] == [side, side, 3], val
def test_invertibility(txtfile):
    """
    roughly, assert txtfile == image_to_txt(txt_to_image(txtfile))
    ignoring whitespace before and after txt
    """
    # NOTE(review): only the default by-char encoding round-trips exactly.
    pngfile = txtfile.replace('.txt', '.png')
    txt_to_image(txtfile, pngfile)
    new_txtfile = txtfile.replace('.', '_new.')
    image_to_txt(pngfile, new_txtfile)
    txt1 = open(txtfile).read().strip()
    txt2 = open(new_txtfile).read().strip()
    assert txt1 == txt2, show_html_diff((txt1, 'OG'), (txt2, 'NEW'))
def test_all():
    # Run the dimension checks plus a round-trip test on the docs/ fixture.
    txtfile = 'docs/tmp.txt'
    test_adjust_txt_and_get_dims()
    test_invertibility(txtfile)
test_all()
by_char = False
base_dir = '/Users/mobeets/bpcs-steg/docs/'
infiles = ['karenina', 'warandpeace']
infiles = ['tmp', 'tmp1', 'tmp2']
infiles = [base_dir + infile + '.txt' for infile in infiles]
outfiles = [base_dir + outfile + '.txt' for outfile in outfiles]
for infile,outfile in zip(infiles, outfiles):
txt_to_image(infile, outfile, by_char)
# infile = '/Users/mobeets/Desktop/tmp2.png'
# outfile = '/Users/mobeets/Desktop/tmp2.txt'
# image_to_txt(infile, outfile, by_char)
| 3.765625 | 4 |
libs/response_extra.py | fangMint/django_web | 0 | 12761132 | from django.http import JsonResponse
def response_template(msg, result, code, data):
    """Wrap the standard API payload fields in a JsonResponse."""
    payload = {'result': result, 'code': code, 'msg': msg, 'data': data}
    return JsonResponse(payload)
def response_success(msg="", result=1, code=0, data=None):
    # result=1 marks success in the shared response convention.
    return response_template(msg, result, code, data)
def response_failure(msg="", result=0, code=0, data=None):
    # result=0 marks failure in the shared response convention.
    return response_template(msg, result, code, data)
def msg_template(task, msg, result, code, data):
    """Build the plain-dict message payload shared by msg_success/msg_failure."""
    payload = {
        "task": task,
        'result': result,
        'code': code,
        'msg': msg,
        'data': data,
    }
    return payload
def msg_success(task="", msg="", result=1, code=0, data=None):
    # result=1 flags success in the plain-dict message convention.
    return msg_template(task, msg, result, code, data)
def msg_failure(task="", msg="", result=0, code=0, data=None):
    # result=0 flags failure in the plain-dict message convention.
    return msg_template(task, msg, result, code, data)
def user_does_not_exists(code=0):
    # Failure payload for "no matching user"; the message text is Chinese
    # by design and must stay as-is for API consumers.
    return response_failure(msg="没有对应用户", code=code)
def view_exception(code=0):  # failure payload returned when a view raises
    return response_failure(msg="网络不好", code=code)
libHC/hcLab.py | cknd/synchrony | 5 | 12761133 | <reponame>cknd/synchrony<filename>libHC/hcLab.py
"""
experiment setup, running & evaluation
CK 2014
"""
import copy
import numpy as np
import networkx as nx
import hcWrapSim as wrp
import hcNetworks as net
import hcPlotting as plo
from hcUtil import printprogress
from matplotlib import pyplot as plt
from numpy.random import RandomState
# provide a fixed set of random seeds
rng = RandomState(1)
standard_seeds = rng.randint(0,100000,200)
class measure:
    """Base class: runs some analysis on raw simulation data and accumulates
    the results across repeated applications. NOTE: this module is Python 2
    (print statements below). Subclasses implement compute()."""
    def __init__(self, roi=None, wait=0, name=None, window=None, windowlength=1000, increment=1):
        """
        A measure runs some analysis on raw simulation data & remembers the result.
        It accumulates results when applied repeatedly.
        Args:
            roi: a bool or 0/1 array of the size of the network grid, determines
                which cells to measure.
            wait: ignore the first x steps of the recording
            name: name of this measurement
            window: Which, if any, windowed analysis to perform
                - None: measure on the whole recording
                    (starting after the first 'wait' steps)
                - "moving": measure repeatedly in a stepwise-moving section of
                    the recording (starting at 0)...
                - "tiling": ...in subsequent, non-overlapping sections of
                    the recording (starting at 0)...
                - "growing": ...in a growing section of the recording
                    (starting at the self.wait'th step)
            windowlength: Length of the window, if a windowed analysis is used.
            increment: If a growing window is used, grow it by ~ steps at a time.
        """
        self.roi = roi.astype(bool) if (roi is not None) else None
        self.name = name
        self.wait = wait
        self.window = window
        self.wlength = windowlength
        self.grw_startsize = 100  # initial window size for "growing" mode
        self.increment = increment
        self.reset()
    def reset(self):
        """ delete all accumulated measurements"""
        self.results = []
    def apply(self, voltages, spikes, verbose=False):
        """
        analyze the given voltage traces or spiketrains & store the result.
        Args:
            recording: MxNxT numerical array, where M,N match the network's grid dimensions
        """
        ## breakpoint to verify match between measured region & recording.
        # f=300
        # import pdb; pdb.set_trace()
        # plt.clf();plt.imshow(recording[:,:,f],interpolation='nearest',vmax=2)
        # plt.imshow(self.roi,alpha=0.3,interpolation='nearest');f=f+10;plt.show()
        # NOTE(review): measure_spikebased is defined elsewhere in this
        # module; such subclasses analyse the boolean spike raster instead
        # of the voltage traces.
        if isinstance(self, measure_spikebased):
            assert spikes.dtype == 'bool'
            recording = spikes
        else:
            recording = voltages
        if self.roi is not None:
            assert self.roi.shape == (recording.shape[0], recording.shape[1])
        # trigger slightly different computations depending on windowing mode:
        if self.window is None:
            result = self.compute(recording[:, :, self.wait:])
        elif self.window == "moving":
            result = self.compute_movingwindow(recording)
        elif self.window == "tiling":
            result = self.compute_tilingwindow(recording)
        elif self.window == "growing":
            result = self.compute_growingwindow(recording)
        else:
            raise Exception("Didnt understand window argument: " + self.window)
        if verbose:
            print 'result ', self.name, result
        self.results.append(result)
    def compute_movingwindow(self, recording):
        """ run the measure repeatedly in a moving window.
        Args:
            recording: MxNxT numerical array, where M,N match the network's grid dimensions
        """
        wl = self.wlength
        # One result per possible window start position.
        res = np.zeros(recording.shape[2]-wl)
        for step in range(len(res)):
            res[step] = self.compute(recording[:, :, step:step+wl])
            printprogress("calc. moving window, step ", step, len(res))
        return res
    def compute_tilingwindow(self, recording):
        """ run the measure repeatedly in subsequent, nonoverlapping windows
        Args:
            recording: MxNxT numerical array, where M,N match the network's grid dimensions
        """
        wl = self.wlength
        nrtiles = int(recording.shape[2]/wl) # floor via int()
        res = np.zeros(nrtiles)
        for tile in range(nrtiles):
            res[tile] = self.compute(recording[:, :, tile*wl:tile*wl+wl])
            printprogress("calc. tiling window, no. ", tile, nrtiles)
        return res
    def compute_growingwindow(self, recording):
        """ run the measure repeatedly in a growing window
        Args:
            recording: MxNxT numerical array, where M,N match the network's grid dimensions
        """
        tr = self.wait
        ssz = self.grw_startsize
        usablesteps = recording.shape[2]-tr-ssz
        # One result per window growth step of size self.increment.
        res = np.zeros(int(np.ceil(usablesteps/float(self.increment))))
        for i, step in enumerate(range(0, usablesteps, self.increment)):
            res[i] = self.compute(recording[:, :, tr:tr+ssz+step])
            printprogress("calc. growing window, step ", i, len(res))
        return res
    def compute(self, recording):
        """ Measure something on the given (piece of) recording. Override this.
        Args:
            recording: MxNxT' numerical array, where M,N match the network's grid dimensions
        Returns:
            Some kind of measurement.
        """
        raise NotImplementedError()
        return None  # unreachable; kept from the original
class rsync(measure):
    """
    Strogatz' zero-lag synchrony measure Rsync: the variance of the
    population mean, normalized by the population's mean variance.
    """
    # plotting code checks for a vrange attribute and, when present,
    # uses it to set fixed axis limits
    vrange = (0, 1)

    def compute(self, recording):
        """Return the Rsync value of all cells inside the region of interest."""
        rows, cols = self.roi.nonzero()
        traces = recording[rows, cols, :]
        population_mean = traces.mean(axis=0)   # spatial mean at each time step
        per_cell_variance = traces.var(axis=1)  # temporal variance of each cell
        return population_mean.var() / per_cell_variance.mean()
class measure_spikebased(measure):
    """Base class for spike time based sync measures.

    Spike trains are convolved with a causal exponential-decay kernel so
    continuous-valued comparisons between them become possible. (The docstring
    was previously placed after the first class attribute, where it was a
    no-op string expression rather than the class docstring.)
    """

    # flag read by measure.apply to decide whether this measure receives the
    # boolean spike raster instead of the voltage traces
    spike_based = True

    def __init__(self, roi, tau=1, delta_t=0.005, **kwargs):
        """
        Args:
            roi: boolean MxN array marking the region of interest.
            tau: decay time constant of the smoothing kernel.
            delta_t: duration of one recording time step.
            kwargs: forwarded to measure.__init__.
        """
        kernel = self.def_kernel(tau, delta_t)
        self.conv = lambda singlecell: np.convolve(singlecell, kernel, mode='valid')
        measure.__init__(self, roi, **kwargs)

    def def_kernel(self, tau, delta_t):
        """Build a causal exponential-decay kernel, truncated below 1e-3."""
        ts = np.arange(0, tau*10, delta_t)
        decay = np.exp(-ts/tau)
        thr = 1e-3
        # cut the tail once the kernel has decayed below the threshold
        decay = decay[0:np.nonzero(decay < thr)[0][0]]
        # prepend zeros so the kernel is causal (no response before a spike)
        kernel = np.concatenate((np.zeros_like(decay), decay))
        return kernel

    def get_convolved(self, recording, inspect=False):
        """Convolve every ROI cell's spike train with the smoothing kernel.

        Args:
            recording: MxNxT boolean spike raster.
            inspect: if True, also plot the smoothed traces for inspection.
        Returns:
            2-d array (cells x time) of smoothed spike trains.
        """
        assert recording.dtype == 'bool'
        roii = self.roi.nonzero()  # get the indices of the region to be measured
        selected_cells = recording[roii[0], roii[1], :]
        convd = np.apply_along_axis(self.conv, axis=1, arr=selected_cells)
        if inspect:
            plt.figure()
            plt.subplot(211)
            plt.imshow(convd)
            plt.subplot(212)
            plt.plot(np.mean(convd, axis=0))
        return convd
class mean_spikecount(measure_spikebased):
    """ average number of spikes in the measured population during the measured period """
    def compute(self, recording):
        """Return the total spike count divided by the number of ROI cells.

        Args:
            recording: MxNxT boolean spike raster.
        """
        roii = self.roi.nonzero()  # indices of the cells inside the region of interest
        selected_cells = recording[roii[0], roii[1], :]
        # NOTE(review): the sum of a boolean array is an integer, so under
        # Python 2 this division truncates -- confirm whether a float average
        # was intended.
        return np.sum(selected_cells)/len(roii[0])
class spikedetect_additive(measure_spikebased):
    """Summed number of spikes in the measured population at each time step."""

    def compute(self, recording):
        """Return a 1-d array: the population spike count per time step.

        Args:
            recording: MxNxT boolean spike raster.
        """
        # (removed an unused local `thr = 1` that was never referenced)
        roii = self.roi.nonzero()  # get the indices of the region to be measured
        selected_cells = recording[roii[0], roii[1], :]
        return np.sum(selected_cells, axis=0)
class spikedetect_add_jitter(measure_spikebased):
    """A noisy spike recorder: population spike counts with per-cell temporal jitter."""

    def __init__(self, jitter=0, delta_t=0.005, downsample=10, **kwargs):
        """
        Args:
            jitter: maximum temporal jitter, in the same time unit as delta_t.
            delta_t: duration of one simulation time step.
            downsample: recording downsampling factor; together with delta_t it
                converts the jitter into recorded time steps.
            kwargs: forwarded to measure_spikebased.__init__.
        """
        self.jitter_steps = jitter/(delta_t*downsample)
        measure_spikebased.__init__(self, **kwargs)

    def compute(self, recording):
        """Return population spike counts per time step, after jittering.

        Each cell's spike train is circularly shifted by a random offset drawn
        from +-jitter_steps/2 before the counts are summed.

        Args:
            recording: MxNxT boolean spike raster.
        """
        # (removed an unused local `thr = 1` and a dead debugger comment)
        roii = self.roi.nonzero()  # get the indices of the region to be measured
        selected_cells = recording[roii[0], roii[1], :]
        if self.jitter_steps > 0:
            offsets = (rng.rand(selected_cells.shape[0])-0.5) * self.jitter_steps
            for i, o in enumerate(offsets):
                selected_cells[i, :] = np.roll(selected_cells[i, :], int(o))
        return np.sum(selected_cells, axis=0)
class spikey_rsync(measure_spikebased):
    """Rsync synchrony measure computed on kernel-smoothed spike trains."""
    vrange = (0, 0.7)

    def compute(self, recording):
        """Return the Rsync value of the smoothed spike trains."""
        smoothed = self.get_convolved(recording, inspect=False)
        population_mean = smoothed.mean(axis=0)
        per_cell_variance = smoothed.var(axis=1)
        return population_mean.var() / per_cell_variance.mean()
class vanrossum(measure_spikebased):
    """Mean pairwise van Rossum distance between smoothed spike trains."""

    def compute(self, recording):
        """Return the mean van Rossum distance over all ordered cell pairs.

        Note: the mean runs over all ordered pairs, including each cell paired
        with itself (distance zero), matching the established behaviour.
        """
        smoothed = self.get_convolved(recording)
        distances = [np.sqrt(np.mean((row_a - row_b) ** 2))
                     for row_a in smoothed
                     for row_b in smoothed]
        return np.mean(distances)
class experiment:
"""
Stores the specification of an experiment, gathers simulation results & applies measures.
An experiment consists of a simulation setup (network, input pattern and other simulation parameters),
a list of random seeds and a list of measures. The object then gives access to raw simulation results
and results of measurements.
"""
def __init__(self, network, seeds, measures=[], inputc=None, inputc_inh=None, name="Acme experiment", **simulation_kwargs):
"""
Args:
network: a networkx graph meeting the following assumptions: it has the graph attribute
network.graph["grid_dimensions"] holding the tuple (M,N), nodes are labeled as (i,j)
tuples to mark spatial coordinates in an MxN grid and each edge is labeled with
the float attribute "strength".
seeds: list of random seeds - determines the number of repetitions
measures: list of measures to apply
inputc: MxN array, scales the rate of excitatory input pulses arriving at each cell
inputc_inh: MxN array, scales the rate of inhibitory input pulses arriving at each cell
simulation_kwargs: see `hcWrapSim.run_sim`
"""
self.name = name
self.network = network
self.inputc = inputc
self.inputc_inh = inputc_inh
self.verbose = simulation_kwargs.get('verbose', False)
simulation_kwargs['delta_t'] = simulation_kwargs.get('delta_t', 0.005)
simulation_kwargs['T'] = simulation_kwargs.get('T', 1000)
simulation_kwargs['downsample'] = simulation_kwargs.get('downsample', 10)
self.simulation_kwargs = simulation_kwargs
assert len(set(seeds)) == len(seeds)
self.seeds = seeds
self.measures = dict([(ms.name, copy.deepcopy(ms)) if ms.name
else (ms.__class__.__name__, copy.deepcopy(ms)) for ms in measures])
def run(self):
"""Run all simulation trials & apply all measurements"""
le = len(self.seeds)
[meas.reset() for meas in self.measures.values()]
for i, s in enumerate(self.seeds):
printprogress('running "' + self.name + '", repetition ', i, le)
voltage, spikes = self.getraw(i)
for meas in self.measures.values():
meas.apply(voltage, spikes, verbose=self.verbose)
def getresults(self, which):
"""
Return measurements from one specific measure.
Args:
which: name of the measure from which to return results
Returns:
List of measurement results, one for each random seed.
Result type depends on the particular measure object.
"""
if not self.measures[which].results:
self.run()
return self.measures[which].results
def getraw(self, trialnr=0):
"""
Return raw simulation data (voltage traces) of one trial.
Args:
trialnr: From which trial to return data
(which position in the list of seeds)
Returns:
MxNxT array of voltage traces
"""
seed = self.seeds[trialnr]
volt, _, spikes = wrp.run(self.network, seed, inputc_inh=self.inputc_inh, inputc=self.inputc, **self.simulation_kwargs)
return volt, np.array(spikes, dtype='bool')
def viewtrial(self, trialnr=0, animate=False, start=0, skip=10, grid_as="image",
shuffle=False, shuffle_seed=1, spikes_only=False, cmap=plt.cm.bone):
"""
Quick & dirty visualization of the simulation data from one trial.
Args:
trialnr: From which trial to show data
animate: If False, display a M*NxT trace image. If True, show a movie.
start: When to start the animation. Applies if animate is true.
skip: How many time steps to skip for each animation frame (higher = faster)
Applies if animate is true.
grid_as: How to display the animation. Applies only if animate is true.
"image" - draw each node as a pixel using the node
labels as positions
"graph" - draw as a grid graph using the node labels
as positions
a networkx layout function - draw a graph with node
positions computed by that function
shuffle: If true, shuffle the order of cells in the trace image.
Applies only if animate is false.
shuffle_seed: int random seed for shuffling
cmap: a matplotlib colormap object,
specifies how the voltage traces will be coloured
"""
idx = 1 if spikes_only else 0
if animate:
if grid_as == "image":
plo.viewanim(self.getraw(trialnr)[idx], start, skip, title=self.name, cmap=cmap)
else:
print "showing graph animation, using the first measure's r-o-i"
plo.animate_graph(self.getraw(trialnr)[idx], self.network, self.measures.values()[0].roi,
self.inputc, start, skip, grid_as, title=self.name, cmap=cmap)
elif spikes_only:
plo.view_spikes(self.getraw(trialnr)[1], title=self.name, shuffle=shuffle, shuffle_seed=shuffle_seed)
else:
s_per_step = self.simulation_kwargs['delta_t'] * self.simulation_kwargs['downsample'] / 1000
plo.view_voltages(self.getraw(trialnr)[0], title=self.name, shuffle=shuffle, shuffle_seed=shuffle_seed, s_per_step=s_per_step)
def plotsetup(self,measure=None):
""" plot the network, input region and region of interest of the selected measure"""
plo.eplotsetup(self,measure)
def saveanimtr(self, trialnr=0, start=0, skip=4, stop=None, grid_as="image", dpi=120, cmap=plt.cm.bone, filename=None, ms_per_step=None):
""" save an animation of a recording """
if grid_as == "image":
plo.saveanim(self.getraw(trialnr)[0], start, skip, title=self.name, dpi=dpi, cmap=cmap, filename=filename, ms_per_step=ms_per_step)
else:
print "saving graph animation, using the first measure's r-o-i"
plo.saveanim_graph(self.getraw(trialnr)[0], self.network, self.measures.values()[0].roi,
self.inputc, start, skip, stop=stop, grid_as=grid_as, title=self.name, dpi=dpi, cmap=cmap, filename=filename, ms_per_step=ms_per_step)
def statistics(self):
"""Return some topological information about the experiment"""
stat = {}
stat["net diameter"] = nx.diameter(self.network)
stat["net radius"] = nx.radius(self.network)
stat["net asp"] = nx.average_shortest_path_length(self.network)
stat["input asp"] = net.inputASL(self.network, self.inputc)
for m in self.measures.values():
distr = net.distances_to_roi(self.network, self.inputc,m.roi)
stat["stim to roi distances, mean",m.name] = np.mean(distr)
stat["stim to roi distances, var",m.name] = np.var(distr)
centrs = nx.closeness_centrality(self.network)
stat["roi centralities",m.name] = [centrs[tuple(node)]
for node in np.transpose(m.roi.nonzero())]
return stat
| 2.484375 | 2 |
baselines/bert_context_vectorizer.py | vvyadrincev/taxonomy-enrichment | 11 | 12761134 | <gh_stars>10-100
import argparse
import os
from gensim.models import KeyedVectors
from tqdm import tqdm
import numpy as np
import json
from bert_model import BertPretrained
class BertContextVectorizer:
    """Accumulate contextual BERT embeddings per word and write the mean
    vectors out in word2vec text format."""

    def __init__(self, model_path):
        """
        Args:
            model_path: directory of the pretrained BERT model.
        """
        self.bert = BertPretrained(model_path)

    # -------------------------------------------------------------
    # update vectors
    # -------------------------------------------------------------
    def update_vectors(self, current_vectors, text_path, output_path, batch_size):
        """Fold context vectors from one text file into the running sums.

        Args:
            current_vectors: dict mapping word -> [vector_sum, context_count];
                only words already present in the dict are updated.
            text_path: file with one JSON-encoded (tokens, positions) pair per line.
            output_path: destination for the averaged vectors (word2vec text format).
            batch_size: number of sentences vectorized per BERT forward pass.
        """
        counter = 0
        batch = []
        position_batch = []
        with open(text_path, 'r', encoding='utf-8') as f:
            for line in tqdm(f):
                counter += 1
                tokens, positions = json.loads(line)
                # skip sentences beyond BERT's 512-wordpiece limit (510 + specials)
                if len(self.bert.tokenize(tokens)[0]) > 510:
                    continue
                batch.append(tokens)
                position_batch.append(positions)
                if counter % batch_size == 0:
                    self._accumulate(current_vectors, batch, position_batch)
                    batch = []
                    position_batch = []
        # flush the final, possibly partial batch
        self._accumulate(current_vectors, batch, position_batch)
        self.save_as_w2v_mean(current_vectors, output_path)

    def _accumulate(self, current_vectors, batch, position_batch):
        """Vectorize one batch and add the word vectors into the running sums.

        NaN vectors and words missing from current_vectors are skipped.
        (Extracted to remove the duplicated flush logic in update_vectors.)
        """
        for word, vector in self.get_vectors(batch, position_batch):
            if not any(np.isnan(vector)):
                if word in current_vectors:
                    current_vectors[word][0] += vector
                    current_vectors[word][1] += 1

    # -------------------------------------------------------------
    # get vectors
    # -------------------------------------------------------------
    def get_vectors(self, sentences, indices):
        """Return (word, context_vector) pairs for every marked span.

        Args:
            sentences: list of token lists.
            indices: per sentence, a list of (word, borders) pairs, where
                borders is a list of (start, end) token spans.
        """
        word_vectors = []
        batch = self.bert.vectorize_sentences(sentences)
        for sent_vectors, tokens, sent_indices in zip(batch, sentences, indices):
            # one contextual vector per input token is expected
            assert sent_vectors.shape[0] == len(tokens)
            word_vectors.extend([(synset, self.get_avg_vector(sent_vectors, borders))
                                 for synset, borders in sent_indices])
        return word_vectors

    @staticmethod
    def get_avg_vector(vectors, borders):
        """Average token vectors within each span, then average over the spans."""
        return np.mean([np.mean(vectors[start:end], 0) for start, end in borders], 0)

    # -------------------------------------------------------------
    # save vectors
    # -------------------------------------------------------------
    @staticmethod
    def save_as_w2v_mean(dictionary, output_path):
        """Write mean vectors in word2vec text format (count/dim header first).

        Args:
            dictionary: word -> [vector_sum, context_count]; words are written
                upper-cased, vectors divided by their context count.
            output_path: destination text file.
        """
        with open(output_path, 'w', encoding='utf-8') as w:
            w.write(f"{len(dictionary)} {list(dictionary.values())[0][0].shape[-1]}\n")
            for word, (vector, count) in dictionary.items():
                mean_vector = vector / count if count != 0 else vector
                vector_line = " ".join(map(str, mean_vector))
                w.write(f"{word.upper()} {vector_line}\n")
def get_vectors(filepath):
    """Load word2vec-format vectors into a {word: [vector, count]} dict.

    The count starts at 1 so each preloaded vector participates in the
    running-mean bookkeeping used by BertContextVectorizer.
    """
    model = KeyedVectors.load_word2vec_format(filepath, binary=False)
    return {term: [model[term], 1] for term in model.vocab}
def parse_args():
    """Build and evaluate the command-line interface for the vectorizer."""
    cli = argparse.ArgumentParser(prog='BERT context vectorizer')
    cli.add_argument('--bert_path', type=str, dest="bert_path", help='bert model dir')
    cli.add_argument('--vectors_path', type=str, dest="vectors_path", help='vectors_path')
    cli.add_argument('--output_path', type=str, dest="output_path", help='output_path')
    cli.add_argument('--texts_dir', type=str, dest="texts_dir", help='texts_dir')
    cli.add_argument('--batch_size', type=int, dest='batch_size', help='batch size', default=40)
    return cli.parse_args()
if __name__ == '__main__':
    # Example invocation:
    # --bert_path "models/rubert_cased_torch"
    # --texts_dir "models/parsed_news_for_bert_words_only"
    # --vectors_path "models/vectors/nouns_public_single_bert3.txt"
    # --output_path "models/vectors/nouns_public_context_bert.txt"
    args = parse_args()
    bcv = BertContextVectorizer(args.bert_path)
    vectors = get_vectors(args.vectors_path)
    for filename in os.listdir(args.texts_dir):
        # report which file is being processed (the message was missing it)
        print(f"Processing {filename}")
        bcv.update_vectors(vectors, os.path.join(args.texts_dir, filename), args.output_path, args.batch_size)
| 2.609375 | 3 |
gdsfactory/components/dicing_lane.py | simbilod/gdsfactory | 0 | 12761135 | <reponame>simbilod/gdsfactory
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.components.rectangle import rectangle
from gdsfactory.components.triangle import triangle
from gdsfactory.types import ComponentSpec, Float2, LayerSpec
triangle_metal = gf.partial(triangle, layer="M3", xtop=2)
@gf.cell
def dicing_lane(
    size: Float2 = (50, 300),
    marker: ComponentSpec = triangle_metal,
    layer_dicing: LayerSpec = "DICING",
) -> Component:
    """Dicing lane with triangular markers on both sides.

    Args:
        size: (tuple) Width and height of rectangle.
        marker: function to generate the dicing lane markers.
        layer_dicing: Specific layer to put polygon geometry on.
    """
    component = Component()
    lane = component << rectangle(size=size, layer=layer_dicing)
    marker_component = gf.get_component(marker)

    # bottom-right marker, flush against the lane's right edge
    bottom_right = component << marker_component
    bottom_right.xmin = lane.xmax

    # bottom-left marker, mirrored to the opposite side
    bottom_left = component << marker_component
    bottom_left.mirror()
    bottom_left.xmax = lane.xmin

    # top-right marker, mirrored and rotated to face downwards
    top_right = component << marker_component
    top_right.mirror()
    top_right.rotate(180)
    top_right.xmin = lane.xmax
    top_right.ymax = lane.ymax

    # top-left marker
    top_left = component << marker_component
    top_left.rotate(180)
    top_left.xmax = lane.xmin
    top_left.ymax = lane.ymax
    return component
if __name__ == "__main__":
c = dicing_lane()
c.show()
| 2.71875 | 3 |
config.py | mantzoun/smantzbot | 0 | 12761136 | user = "DB_USER"
password = "<PASSWORD>"
token = "API_TOKEN"
valid_chats = []
| 1.09375 | 1 |
dss_vae/metrics/translation_evaluator.py | baoy-nlp/DSS-VAE | 37 | 12761137 | # MIT License
# Copyright (c) 2018 the NJUNLP groups.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Author baoyu.nlp
# Time 2019-01-28 18:02
from __future__ import division
import os
from .base_evaluator import BaseEvaluator
from .bleu_scorer import BleuScoreMetric
from .evaluation import prediction as evaluate
class TranslationEvaluator(BaseEvaluator):
    """Evaluator that decodes a dataset with the model and scores the
    predictions with multi-BLEU against one or more reference files."""

    def __init__(self, model, eval_set, eval_lists, sort_key, eval_tgt, out_dir="./out", batch_size=20,
                 write_down=False, use_bpe=False, **kwargs):
        """
        Args:
            model: the translation model to evaluate.
            eval_set: dataset of evaluation examples.
            eval_lists: paths of the reference (gold) files for BLEU scoring.
            sort_key: key used to sort examples for batching.
            eval_tgt: target-side field the evaluation compares against.
            out_dir: directory for prediction outputs.
            batch_size: decoding batch size.
            write_down: whether to write predictions under out_dir.
            use_bpe: whether the data is BPE-segmented.
        """
        super().__init__(model, eval_set, out_dir, batch_size)
        self.eval_dirs = eval_lists
        self.write_down = write_down
        self.sort_key = sort_key
        self.eval_tgt = eval_tgt
        self.score_item = "BLEU"
        self.use_bpe = use_bpe

    def __call__(self, eval_desc="mt"):
        """
        Args:
            eval_desc: sub-directory name used when writing predictions.

        Returns: dict with the multi-BLEU score, evaluation time and speed.
        """
        training = self.model.training
        self.model.eval()
        eval_results = evaluate(
            examples=self.eval_set,
            model=self.model,
            sort_key=self.sort_key,
            batch_size=self.batch_size,
            # NOTE(review): `self.write_down is not None` is always True for a
            # boolean flag, so predictions are always written -- confirm
            # whether `if self.write_down` was intended.
            out_dir=os.path.join(self.out_dir, eval_desc) if self.write_down is not None else None)
        bleu = BleuScoreMetric.evaluate_file(
            pred_file=eval_results['pred_file'],
            gold_files=self.eval_dirs,
        )
        # restore the model's previous train/eval flag
        self.model.training = training
        return {
            'BLEU': bleu,
            'EVAL TIME': eval_results['use_time'],
            "EVAL SPEED": len(self.eval_set) / eval_results['use_time']
        }
| 1.4375 | 1 |
third_party/cabal2bazel/bzl/cabal_paths.bzl | matthewbauer/hazel | 0 | 12761138 | <reponame>matthewbauer/hazel
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for auto-generating the Paths_* module needed in third-party.
Some third-party Haskell packages use a Paths_{package_name}.hs file
which is auto-generated by Cabal. That file lets them
- Get the package's current version number
- Find useful data file
This file exports the "cabal_paths" rule for auto-generating that Paths module.
For usage information, see the below documentation for that rule.
"""
load("@bazel_skylib//:lib.bzl", "paths")
load("@io_tweag_rules_haskell//haskell:haskell.bzl", "haskell_library")
load("//tools:mangling.bzl", "hazel_library")
def _impl_path_module_gen(ctx):
    """Rule implementation: render the Paths_* Haskell module from the template.

    Substitutes the module name, the runfiles-relative data directory and the
    version tuple into paths-template.hs, declaring the rendered file as the
    rule's only output.
    """
    paths_file = ctx.new_file(ctx.label.name)
    # Directory holding the package's data files, relative to the workspace root.
    base_dir = paths.join(
        ctx.label.package,
        ctx.attr.data_dir if ctx.attr.data_dir else ""
    )
    ctx.template_action(
        template=ctx.file._template,
        output=paths_file,
        substitutions={
            "%{module}": ctx.attr.module,
            "%{base_dir}": paths.join(
                # TODO: this probably won't work for packages not in external
                # repositories. See:
                # https://github.com/bazelbuild/bazel/wiki/Updating-the-runfiles-tree-structure
                "..", paths.relativize(ctx.label.workspace_root, "external"), base_dir),
            "%{version}": str(ctx.attr.version),
        },
    )
    return struct(files=depset([paths_file]))
# Private rule that generates the Paths_<package>.hs source file.
# Attributes mirror the Cabal fields: data_dir, module name and version;
# _template points at the Haskell template shipped with hazel.
_path_module_gen = rule(
    implementation=_impl_path_module_gen,
    attrs={
        "data_dir": attr.string(),
        "module": attr.string(),
        "version": attr.int_list(mandatory=True, non_empty=True),
        "_template": attr.label(allow_files=True, single_file=True,
                                default=Label(
                                    "@ai_formation_hazel//:paths-template.hs")),
    },
)
def cabal_paths(name=None, package=None, data_dir='', data=[], version=[], **kwargs):
    """Generate a Cabal Paths_* module and compile it into a haskell_library.

    The generated module exposes Paths_<package>, which Cabal-style packages
    use to look up their version number and locate installed data files.
    Other Haskell rules depend on the resulting library target, e.g.:

        haskell_binary(
            name = "hlint",
            srcs = [..],
            deps = [":paths", ...],
        )
        cabal_paths(
            name = "paths",
            package = "hlint",
            version = [1, 18, 5],
            data_dir = "datafiles",                 # Cabal "data-dir"
            data = ["datafiles/some/path"],         # Cabal "data-files"
        )

    Args:
      name: The name of the resulting library target.
      package: The name (string) of the Cabal package that's being built.
      data_dir: Subdirectory (relative to this package) containing the data
        files; empty means the top level of the package.
      data: The data files that this package depends on to run.
      version: The version number of this package (list of ints).
      **kwargs: forwarded to haskell_library.
    """
    module_name = "Paths_" + package
    paths_source = module_name + ".hs"
    _path_module_gen(
        name=paths_source,
        module=module_name,
        data_dir=data_dir,
        version=version,
    )
    haskell_library(
        name=name,
        srcs=[paths_source],
        data=data,
        deps=[
            hazel_library("base"),
            hazel_library("filepath"),
        ],
        # TODO: run directory resolution.
        **kwargs)
| 1.703125 | 2 |
engine/batch_operation.py | sakshitantak/NLPActionExtraction | 0 | 12761139 | """file to deal with batch operations"""
import sys
import json
import xlwt
import extract_info
from fit_sheet_wrapper import FitSheetWrapper
from xlwt import Workbook
import random
# from desk import *
import requests
import pandas as pd
import numpy as np
import nltk
import json
import csv
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import naive_bayes
from sklearn.metrics import roc_auc_score
import pandas
""" Gets the arguments from the command line input """
# Sentences to process, taken straight from the command line.
SENTENCES = sys.argv[1:]

# Workbook collecting one generated test case per input sentence.
wb = Workbook()
ws = FitSheetWrapper(wb.add_sheet('Sheet 1'))

# Bold style used for the label column.
style = xlwt.XFStyle()
font = xlwt.Font()
font.bold = True
style.font = font

row = 1  # current worksheet row
for request in SENTENCES:
    result = extract_info.main(request)
    print(result)

    ws.write(row, 0, "Test Case Number", style=style)
    ws.write(row, 1, result['case_id'])
    row += 1

    ws.write(row, 0, "Test Case Type", style=style)
    # NOTE(review): the case type is chosen at random -- confirm whether this
    # is intended placeholder behaviour.
    ws.write(row, 1, "Unique" if random.randint(0, 1) == 1 else "General")
    row += 1

    ws.write(row, 0, "Test Case Description", style=style)
    ws.write(row, 1, result['action'])
    row += 1

    if result["inputs"]:
        # one merged label cell spanning all expected-input rows
        ws.write_merge(
            row, row + len(result["inputs"]) - 1, 0, 0, "Expected Inputs", style=style)
        for inp in result["inputs"]:
            ws.write(row, 1, inp[0] + " = " + inp[1])
            row += 1
    else:
        ws.write(row, 0, "Expected Inputs", style=style)
        ws.write(row, 1, "-")
        row += 1

    # fixed typo in the sheet label ("Resuls" -> "Results")
    ws.write(row, 0, "Expected Results", style=style)
    ws.write(row, 1, result['expectation'])
    row += 4  # blank rows between test cases

wb.save('genTestCases.xls')
# Signal success to the calling process as a JSON payload on stdout.
print(json.dumps({"code": True}))
sys.stdout.flush()
| 2.53125 | 3 |
operators/nsm-operator-registry/python/pulumi_pulumi_kubernetes_crds_operators_nsm_operator_registry/nsm/v1alpha1/outputs.py | pulumi/pulumi-kubernetes-crds | 0 | 12761140 | <filename>operators/nsm-operator-registry/python/pulumi_pulumi_kubernetes_crds_operators_nsm_operator_registry/nsm/v1alpha1/outputs.py
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'NSMSpec',
'NSMStatus',
]
@pulumi.output_type
class NSMSpec(dict):
    """
    NSMSpec defines the desired state of NSM
    """
    def __init__(__self__, *,
                 forwarding_plane_image: str,
                 forwarding_plane_name: str,
                 insecure: bool,
                 version: str):
        """
        NSMSpec defines the desired state of NSM
        :param str forwarding_plane_image: Container image used for the forwarding plane
        :param str forwarding_plane_name: Forwarding plane configs
        :param bool insecure: nsmgr configs true or false
        :param str version: Version field for reference on Openshift UI
        """
        pulumi.set(__self__, "forwarding_plane_image", forwarding_plane_image)
        pulumi.set(__self__, "forwarding_plane_name", forwarding_plane_name)
        pulumi.set(__self__, "insecure", insecure)
        pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter(name="forwardingPlaneImage")
    def forwarding_plane_image(self) -> str:
        """
        Container image used for the forwarding plane
        """
        return pulumi.get(self, "forwarding_plane_image")

    @property
    @pulumi.getter(name="forwardingPlaneName")
    def forwarding_plane_name(self) -> str:
        """
        Forwarding plane configs
        """
        return pulumi.get(self, "forwarding_plane_name")

    @property
    @pulumi.getter
    def insecure(self) -> bool:
        """
        nsmgr configs true or false
        """
        return pulumi.get(self, "insecure")

    @property
    @pulumi.getter
    def version(self) -> str:
        """
        Version field for reference on Openshift UI
        """
        return pulumi.get(self, "version")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class NSMStatus(dict):
    """
    NSMStatus defines the observed state of NSM
    """
    def __init__(__self__, *,
                 phase: str):
        """
        NSMStatus defines the observed state of NSM
        :param str phase: Operator phases during deployment
        """
        pulumi.set(__self__, "phase", phase)

    @property
    @pulumi.getter
    def phase(self) -> str:
        """
        Operator phases during deployment
        """
        return pulumi.get(self, "phase")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| 1.96875 | 2 |
tests/test_layout.py | up2cat/flask_extras | 19 | 12761141 | """Tests for 'layout' filters."""
from flask_extras.filters import layout
class TestBs3Col:
    """All tests for the bs3_cols helper."""

    def test_returns_right_width(self):
        """Valid column counts map to the matching bootstrap width."""
        expected = {1: 12, 2: 6, 3: 4, 4: 3, 5: 2, 6: 2}
        for count, width in expected.items():
            assert layout.bs3_cols(count) == width

    def test_returns_right_width_bad_data(self):
        """Invalid inputs fall back to the full 12-column width."""
        for bad_value in (None, 'foo', {}):
            assert layout.bs3_cols(bad_value) == 12
| 2.90625 | 3 |
src/lib/kombi/Task/Shotgun/SGPublishTask.py | paulondc/chilopoda | 2 | 12761142 | import os
import getpass
import tempfile
from ..Task import Task
from ...TaskWrapper import TaskWrapper
from ...Crawler import Crawler
from ...Crawler.Fs.FsCrawler import FsCrawler
from ...Crawler.Fs.Image import ImageCrawler
class SGPublishTask(Task):
    """
    Generic Shotgun publish task for data created by the CreateVersion task.
    """
    # Shotgun connection defaults, read from the environment so credentials
    # never need to be hard-coded (empty string when unset).
    __shotgunUrl = os.environ.get('KOMBI_SHOTGUN_URL', '')
    __shotgunScriptName = os.environ.get('KOMBI_SHOTGUN_SCRIPTNAME', '')
    __shotgunApiKey = os.environ.get('KOMBI_SHOTGUN_APIKEY', '')
def __init__(self, *args, **kwargs):
"""
Create a RenderPublish object.
"""
super(SGPublishTask, self).__init__(*args, **kwargs)
# setting default options
self.setOption("url", self.__shotgunUrl)
self.setOption("scriptName", self.__shotgunScriptName)
self.setOption("apiKey", self.__shotgunApiKey)
self.__publishData = {}
    def _perform(self):
        """
        Perform the task.

        Creates a PublishedFile entity in Shotgun for the source crawler,
        links it to the matching project/shot/asset, uploads a thumbnail and
        creates a review version (daily).

        Returns:
            Empty list: this task yields no result crawlers.
        """
        import shotgun_api3

        # creating a singleton session object
        sg = shotgun_api3.Shotgun(
            self.option('url'),
            script_name=self.option('scriptName'),
            api_key=self.option('apiKey')
        )

        # Source crawler is a json crawler that points to published data
        sourceCrawler = self.crawlers()[0]
        # prefer the task target when one is defined, otherwise publish the
        # crawler's own file path
        filePath = self.target(sourceCrawler) if self.target(sourceCrawler) else sourceCrawler.var('filePath')
        self.__publishData["path"] = {"local_path": filePath}
        self.__publishData["description"] = self.templateOption('comment', crawler=sourceCrawler)
        self.__publishData["version_number"] = sourceCrawler.var('version')

        # carry over the shotgun task entity when the crawler provides one
        if "_sgTask" in sourceCrawler.varNames():
            self.__publishData["task"] = sourceCrawler.var("_sgTask")

        publishName = self.templateOption('publishName', crawler=sourceCrawler)
        self.__publishData["name"] = publishName
        self.__publishData["code"] = publishName

        self.__linkData(sg)
        self.__sgFileType(sg)
        self.__sgUser(sg)

        sgPublishFile = sg.create("PublishedFile", self.__publishData)
        self.__makeThumbnail(sgPublishFile, sg)
        self.__makeDaily(sgPublishFile, sg)

        # this task does not return any crawlers as result
        return []
    def __linkData(self, sg):
        """
        Find the data that needs to be linked to the publish in Shotgun.

        Resolves the project by job name; when the crawler carries a shot or
        asset name, resolves that entity and links the publish to it,
        otherwise the publish is linked directly to the project.

        Raises:
            Exception: when the shot/asset cannot be resolved to exactly one
                entity in the project.
        """
        sourceCrawler = self.crawlers()[0]
        project = sg.find_one('Project', [['name', 'is', sourceCrawler.var('job')]])
        self.__publishData['project'] = project
        if "shot" in sourceCrawler.varNames() or "assetName" in sourceCrawler.varNames():
            # shots take precedence over assets when both variables exist
            varName = "shot" if "shot" in sourceCrawler.varNames() else "assetName"
            varType = "Shot" if "shot" in sourceCrawler.varNames() else "Asset"
            filters = [
                ['code', 'is', sourceCrawler.var(varName)],
                ['project', 'is', project]
            ]
            entityData = sg.find(varType, filters)
            # require exactly one match; anything else is ambiguous
            if len(entityData) != 1:
                raise Exception(
                    "[SGPublish] Cannot find unique {} {} in project {}. Skip Publish.".format(
                        varName,
                        sourceCrawler.var(varName),
                        sourceCrawler.var('job')
                    )
                )
            self.__publishData['entity'] = entityData[0]
        else:
            self.__publishData['entity'] = project
def __sgFileType(self, sg):
"""
Find the shotgun file type for the publish. Create it in Shotgun if it does not already exist.
"""
publishedFileType = self.option('publishedFileType')
sgFileType = sg.find_one('PublishedFileType', filters=[["code", "is", publishedFileType]])
if not sgFileType:
# create a published file type on the fly
sgFileType = sg.create("PublishedFileType", {"code": publishedFileType})
self.__publishData["published_file_type"] = sgFileType
def __sgUser(self, sg):
"""
Find the shotgun user information for the publish.
"""
fields = ["id", "type", "email", "login", "name", "image"]
user = os.environ.get("KOMBI_USER", getpass.getuser()),
self.__publishData["created_by"] = sg.find_one("HumanUser", filters=[["login", "is", user]], fields=fields)
    def __makeThumbnail(self, sgPublishFile, sg):
        """
        Create a temporary thumbnail using images found in data to load as publish thumbnail in shotgun.

        When a 'thumbnailFile' option is set, that image is uploaded directly;
        otherwise the publish data is globbed for images and a thumbnail is
        rendered into a temporary jpg. If no images exist, nothing is uploaded.
        """
        createThumbnail = False
        sourceCrawler = self.crawlers()[0]
        if "thumbnailFile" in self.optionNames():
            thumbnailFilePath = self.templateOption('thumbnailFile', crawler=sourceCrawler)
        else:
            # Look for an image sequence to create a thumbnail. If multiple sequences found, using the first one.
            createThumbnail = True
            imageCrawlers = sourceCrawler.globFromParent(filterTypes=[ImageCrawler])
            if not imageCrawlers:
                # No images anywhere in the publish, nothing to use as a thumbnail
                return
            groups = Crawler.group(filter(lambda x: x.isSequence(), imageCrawlers))
            if groups:
                # use the middle frame of the first sequence as a representative image
                targetCrawler = groups[0][int(len(groups[0]) / 2)]
            else:
                targetCrawler = imageCrawlers[0]
            tempFile = tempfile.NamedTemporaryFile(
                delete=False,
                suffix=".jpg",
                mode='w'
            )
            tempFile.close()
            thumbnailFilePath = tempFile.name

            # Remove file so the thumbnail task doesn't ask to overwrite it
            os.unlink(thumbnailFilePath)
            thumbnailTask = Task.create('imageThumbnail')
            thumbnailTask.add(targetCrawler, thumbnailFilePath)

            # Using python taskWrapper because the imageThumbnail task uses OIIO
            TaskWrapper.create('python').run(thumbnailTask)
        if os.path.exists(thumbnailFilePath):
            sg.upload_thumbnail("PublishedFile", sgPublishFile["id"], thumbnailFilePath)
        if createThumbnail:
            # removing the temporary file
            os.unlink(thumbnailFilePath)
def __makeDaily(self, sgPublishFile, sg):
    """
    Create a version in Shotgun for this path and linked to this publish.

    Locates a movie (explicit 'movieFile' option, or a glob for a mov
    crawler), derives a display name and -- when available -- the frame
    range from the movie crawler or a neighbouring image sequence, then
    creates a Version entity and uploads the movie to it.

    Returns the created Version dict, or None when no movie is found.
    Raises Exception when the configured movie path does not exist.
    """
    sourceCrawler = self.crawlers()[0]
    if 'movieFile' not in self.optionNames():
        # No movie provided, glob for a mov
        movCrawlers = sourceCrawler.globFromParent(filterTypes=["mov"])
        if not movCrawlers:
            return
        movieFilePath = movCrawlers[0].var("filePath")
    else:
        movieFilePath = self.templateOption('movieFile', crawler=sourceCrawler)
    if not movieFilePath or not os.path.exists(movieFilePath):
        raise Exception("Movie provided for daily creation does not exist: {}".format(movieFilePath))
    # create a name for the version based on the file name
    # grab the file name, strip off extension
    name = os.path.splitext(os.path.basename(movieFilePath))[0]
    # do some replacements
    name = name.replace("_", " ")
    # and capitalize
    name = name.capitalize()
    firstFrame = None
    lastFrame = None
    imageSeqPath = None
    movCrawler = FsCrawler.createFromPath(movieFilePath)
    # BUG FIX: this previously tested `firstFrame in movCrawler.varNames()`,
    # but firstFrame is None at this point, so the movie's frame range was
    # never read. The intent is to test for the *variable name*.
    if 'firstFrame' in movCrawler.varNames():
        firstFrame = movCrawler.var('firstFrame')
        lastFrame = movCrawler.var('lastFrame')
    imageCrawlers = sourceCrawler.globFromParent(filterTypes=[ImageCrawler])
    groups = Crawler.group(filter(lambda x: x.isSequence(), imageCrawlers))
    if groups:
        seqGroup = groups[0]
        # Build a printf-style path for the sequence (name.%0Nd.ext).
        imageSeqPath = os.path.join(
            os.path.dirname(seqGroup[0].var("filePath")),
            '{0}.%0{1}d.{2}'.format(
                seqGroup[0].var('name'),
                seqGroup[0].var('padding'),
                seqGroup[0].var('ext')
            )
        )
        # Fall back to the image sequence's range when the movie had none.
        if firstFrame is None:
            firstFrame = seqGroup[0].var('frame')
            lastFrame = seqGroup[-1].var('frame')
    # Create the version in Shotgun
    data = {
        "code": name,
        "sg_status_list": "rev",
        "entity": self.__publishData['entity'],
        "created_by": self.__publishData['created_by'],
        "user": self.__publishData['created_by'],
        "description": self.__publishData['description'],
        "project": self.__publishData['project']
    }
    if firstFrame is not None and lastFrame is not None:
        data["sg_first_frame"] = firstFrame
        data["sg_last_frame"] = lastFrame
        data["frame_count"] = (lastFrame - firstFrame + 1)
        data["frame_range"] = "%s-%s" % (firstFrame, lastFrame)
    if imageSeqPath:
        data["sg_path_to_frames"] = imageSeqPath
    data["published_files"] = [sgPublishFile]
    data["sg_path_to_movie"] = movieFilePath
    sgVersion = sg.create("Version", data)
    # upload files
    sg.upload("Version", sgVersion["id"], movieFilePath, "sg_uploaded_movie")
    return sgVersion
# registering task
# Makes this publisher available through Task.create('sgPublish').
Task.register(
    'sgPublish',
    SGPublishTask
)
| 2.34375 | 2 |
tests/verification/test__unsteady_navier_stokes_boussinesq.py | alexanderzimmerman/sapphire | 10 | 12761143 | """Verify accuracy of the unsteady Navier-Stokes-Boussinesq solver."""
import firedrake as fe
import sapphire.mms
import tests.verification.test__navier_stokes_boussinesq
from sapphire.simulations.unsteady_navier_stokes_boussinesq import Simulation
import tests.validation.helpers
diff = fe.diff
def strong_residual(sim, solution):
    """Unsteady strong-form residual.

    Delegates to the steady Navier-Stokes-Boussinesq residual and adds the
    time-derivative terms for velocity and temperature.
    """
    steady = tests.verification.test__navier_stokes_boussinesq.strong_residual(
        sim=sim, solution=solution)
    r_p, r_u, r_T = steady
    _, u, T = solution
    time = sim.time
    return r_p, r_u + diff(u, time), r_T + diff(T, time)
sin, pi = fe.sin, fe.pi
def space_verification_solution(sim):
    """Manufactured (steady) solution for spatial-convergence verification.

    Returns (p, u, T) as symbolic expressions on the simulation mesh; the
    pressure is shifted to have zero mean.
    """
    x, y = fe.SpatialCoordinate(sim.mesh)
    ihat, jhat = sim.unit_vectors
    u = sin(2*pi*x)*sin(pi*y)*ihat + sin(pi*x)*sin(2*pi*y)*jhat
    T = sin(2*pi*x)*sin(pi*y)
    p = -0.5*sin(pi*x)*sin(pi*y)
    # Normalize the pressure so its integral over the domain vanishes.
    p = p - fe.assemble(p*fe.dx)
    return p, u, T
def time_verification_solution(sim):
    """Manufactured (unsteady) solution for temporal-convergence verification.

    Velocity and temperature grow as exp(t); the pressure is steady and
    shifted to have zero mean. Returns (p, u, T).
    """
    exp = fe.exp
    x, y = fe.SpatialCoordinate(sim.mesh)
    t = sim.time
    ihat, jhat = sim.unit_vectors
    u = exp(t)*(sin(2*pi*x)*sin(pi*y)*ihat + sin(pi*x)*sin(2*pi*y)*jhat)
    T = exp(t)*sin(2*pi*x)*sin(pi*y)
    p = -0.5*sin(pi*x)*sin(pi*y)
    # Normalize the pressure so its integral over the domain vanishes.
    p = p - fe.assemble(p*fe.dx)
    return p, u, T
class UnitSquareUniformMeshSimulation(Simulation):
    """Simulation on a uniform unit-square mesh specified by a cell size."""

    def __init__(self, *args, meshcell_size, **kwargs):
        # Convert the target cell size into a cell count per edge.
        cell_count = int(round(1/meshcell_size))
        kwargs["mesh"] = fe.UnitSquareMesh(cell_count, cell_count)
        super().__init__(*args, **kwargs)
def dirichlet_boundary_conditions(sim, manufactured_solution):
    """Apply velocity and temperature Dirichlet BC's on every boundary.

    Do not apply Dirichlet BC's on the pressure.
    """
    _, u, T = manufactured_solution
    bcs = [
        fe.DirichletBC(sim.solution_subspaces["u"], u, "on_boundary"),
        fe.DirichletBC(sim.solution_subspaces["T"], T, "on_boundary"),
    ]
    return bcs
# Shared keyword arguments for the verification simulations below.
# NOTE(review): this dict is mutated in place by the test functions, so
# the tests are order-sensitive -- confirm whether that is intentional.
sim_kwargs = {
    "reynolds_number": 20,
    "rayleigh_number": 1.e3,
    "prandtl_number": 0.71,
    "quadrature_degree": 4}
def test__verify_second_order_spatial_convergence_via_mms():
    """Verify 2nd-order spatial accuracy via MMS on refined meshes.

    Uses Taylor-Hood (P1 pressure / P2 velocity) with P2 temperature and a
    single implicit-Euler step to the end time, so only the spatial error
    is observed. Mutates the module-level ``sim_kwargs`` in place.
    """
    sim_kwargs["taylor_hood_pressure_degree"] = 1
    sim_kwargs["temperature_degree"] = 2
    sim_kwargs["timestep_size"] = 1
    sim_kwargs["time_stencil_size"] = 2
    sapphire.mms.verify_order_of_accuracy(
        discretization_parameter_name = "meshcell_size",
        discretization_parameter_values = [1/n for n in (8, 16, 32)],
        Simulation = UnitSquareUniformMeshSimulation,
        sim_kwargs = sim_kwargs,
        strong_residual = strong_residual,
        manufactured_solution = space_verification_solution,
        dirichlet_boundary_conditions = dirichlet_boundary_conditions,
        norms = ("L2", "H1", "H1"),
        expected_orders = (2, 2, 2),
        decimal_places = 1,
        endtime = 1)
def test__verify_first_order_temporal_convergence_via_mms():
    """Verify 1st-order temporal accuracy via MMS on refined timesteps.

    Uses a fine fixed mesh with higher-degree elements so the spatial error
    does not pollute the temporal convergence rates. Pressure order is not
    checked (norm/order entries are None). Mutates ``sim_kwargs`` in place.
    """
    sim_kwargs["meshcell_size"] = 1/32
    sim_kwargs["taylor_hood_pressure_degree"] = 2
    sim_kwargs["temperature_degree"] = 3
    sapphire.mms.verify_order_of_accuracy(
        discretization_parameter_name = "timestep_size",
        discretization_parameter_values = (1/2, 1/4, 1/8, 1/16),
        Simulation = UnitSquareUniformMeshSimulation,
        sim_kwargs = sim_kwargs,
        strong_residual = strong_residual,
        manufactured_solution = time_verification_solution,
        dirichlet_boundary_conditions = dirichlet_boundary_conditions,
        endtime = 1,
        norms = (None, "L2", "L2"),
        expected_orders = (None, 1, 1),
        decimal_places = 1)
class HeatDrivenCavitySimulation(UnitSquareUniformMeshSimulation):
    """Heat-driven cavity: no-slip walls with hot (+0.5, boundary id 1)
    and cold (-0.5, boundary id 2) side walls."""

    def dirichlet_boundary_conditions(self):
        # Overrides the manufactured-solution BC's with the fixed cavity BC's.
        return [
            fe.DirichletBC(self.solution_subspaces["u"], (0, 0), "on_boundary"),
            fe.DirichletBC(self.solution_subspaces["T"], 0.5, 1),
            fe.DirichletBC(self.solution_subspaces["T"], -0.5, 2)]
def test__steady_state_heat_driven_cavity_benchmark():
    """ Verify against steady state heat-driven cavity benchmark.

    Comparing to data published in @cite{wang2010comprehensive}.
    A single very large timestep to a very large end time is used so the
    unsteady solver effectively produces the steady-state solution.
    """
    endtime = 1.e12
    Ra = 1.e6
    Pr = 0.71
    sim = HeatDrivenCavitySimulation(
        rayleigh_number = Ra,
        prandtl_number = Pr,
        taylor_hood_pressure_degree = 1,
        temperature_degree = 2,
        meshcell_size = 1/40,
        timestep_size = endtime)
    sim.states = sim.run(endtime = endtime)
    # Check coordinates (0.3499, 0.8499) instead of (0.35, 0.85)
    # because the Function evaluation fails at the exact coordinates.
    # See https://github.com/firedrakeproject/firedrake/issues/1340
    # Benchmark values are scaled by sqrt(Ra)/Pr to match the
    # nondimensionalization used by this solver.
    tests.validation.helpers.check_scalar_solution_component(
        solution = sim.solution,
        component = 1,
        subcomponent = 0,
        coordinates = [(0.5, y)
            for y in (0, 0.15, 0.34999, 0.5, 0.65, 0.84999)],
        expected_values = [val*Ra**0.5/Pr
            for val in (0, -0.0649, -0.0194, 0,
                        0.0194, 0.0649)],
        absolute_tolerances = [val*Ra**0.5/Pr
            for val in (1.e-12, 0.001, 0.001, 1.e-12, 0.001, 0.001)])
| 2.53125 | 3 |
fromTxtToVec/to_vector.py | Captain-F/from-txt-to-vector | 3 | 12761144 | from fromTxtToVec.corpus_build import Corpus
from fromTxtToVec.pad import Pad
from fromTxtToVec.BERT_feat import ExtractBertEmb
from fromTxtToVec.train_vector import Embedding
import numpy as np
class To_vec:
    """Convert the text corpus into padded vector representations.

    mode:
        'w2v'  -- return padded token sequences suitable for a trained
                  word2vec embedding layer.
        'bert' -- extract BERT features interactively ('token' level yields
                  one (sent_maxlen, 768) matrix per sentence; 'cls' yields
                  one vector per sentence).
    sent_maxlen: maximum sentence length used for padding/truncation.
    """

    def __init__(self, mode, sent_maxlen):
        self.mode = mode
        self.sent_maxlen = sent_maxlen

    def vector(self):
        """Return (sentences, labels) arrays according to ``self.mode``.

        Raises:
            ValueError: for an unknown mode or extraction granularity.
        """
        sents, labels = Corpus().read_txt()
        pad_sents, pad_labels = Pad(self.sent_maxlen).pad_seq(sents, labels)
        if self.mode == 'w2v':
            sents_, labels_ = pad_sents, pad_labels
        elif self.mode == 'bert':
            path = input('请输入BERT模型的绝对路径or相对路径...')
            extractor = ExtractBertEmb(bert_path=path)
            granu = input('请输入抽取的粒度: token or cls')
            if granu == 'token':
                bert_sents = extractor.extract(sentences=[''.join(i) for i in sents], granularity=granu)
                sents_ = []
                for s in bert_sents:
                    # Truncate or zero-pad each sentence to a rectangular
                    # (sent_maxlen, 768) matrix so the batch can be stacked.
                    if len(s) >= int(self.sent_maxlen):
                        matrix = s[:int(self.sent_maxlen)]
                    else:
                        matrix = np.zeros((int(self.sent_maxlen), 768))
                        for idx, i in enumerate(s):
                            matrix[idx] = i
                    sents_.append(matrix)
            elif granu == 'cls':
                # BUG FIX: this branch previously compared against 'token'
                # again, so the cls-level extraction was unreachable.
                sents_ = extractor.extract(sentences=[''.join(i) for i in sents], granularity=granu)
            else:
                # Previously fell through to an UnboundLocalError on sents_.
                raise ValueError("granularity must be 'token' or 'cls', got %r" % granu)
            labels_ = pad_labels
        else:
            raise ValueError("mode must be 'w2v' or 'bert', got %r" % self.mode)
        return np.array(sents_), labels_

    def w2v_matrix(self, emb_size):
        """Train and return a word2vec embedding matrix of width ``emb_size``."""
        sents, labels = Corpus().read_txt()
        matrix = Embedding(emb_size=emb_size).w2v(corpus=sents)
        return matrix
| 2.53125 | 3 |
sponge.py | leocalm/lyrapy | 1 | 12761145 | MASK = 0xffffffffffffffff
BLOCK_LEN_INT64 = 8
BLOCK_LEN_BYTES = 8 * BLOCK_LEN_INT64
blake2b_IV = [
0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f,
0x1f83d9abfb41bd6b, 0x5be0cd19137e2179
]
def rotr(w, c):
    """Rotate the 64-bit word ``w`` right by ``c`` bits (0 < c < 64).

    The result is now masked to 64 bits inside the function, so rotr is a
    correct uint64 rotation on its own; previously ``w << (64 - c)`` could
    overflow 64 bits and every caller had to remember to apply MASK.
    Callers that still mask are unaffected (masking twice is a no-op).
    """
    return ((w >> c) | (w << (64 - c))) & 0xffffffffffffffff  # = MASK
def G(r, i, a, b, c, d):
    """BLAKE2b G mixing function on four 64-bit words.

    Performs the two add/xor/rotate half-steps (rotations 32,24 then 16,63)
    and returns the updated quadruple. ``r`` and ``i`` are the round and
    position indices; they are unused here but kept for interface parity.
    """
    for rot_d, rot_b in ((32, 24), (16, 63)):
        a = (a + b) & MASK
        d = rotr(d ^ a, rot_d) & MASK
        c = (c + d) & MASK
        b = rotr(b ^ c, rot_b) & MASK
    return a, b, c, d
return a, b, c, d
def round_lyra(r, v):
    """Apply one full BLAKE2b round to the 16-word state ``v`` in place:
    four column mixes followed by four diagonal mixes."""
    columns = ((0, 4, 8, 12), (1, 5, 9, 13), (2, 6, 10, 14), (3, 7, 11, 15))
    diagonals = ((0, 5, 10, 15), (1, 6, 11, 12), (2, 7, 8, 13), (3, 4, 9, 14))
    for i, (ia, ib, ic, id_) in enumerate(columns + diagonals):
        v[ia], v[ib], v[ic], v[id_] = G(r, i, v[ia], v[ib], v[ic], v[id_])
def initState():
    """Return the initial 16-word sponge state: eight zero (rate) words
    followed by the eight BLAKE2b IV (capacity) words."""
    return [0] * 8 + list(blake2b_IV)
def blake2b_lyra(v):
    """Full-strength sponge permutation: 12 BLAKE2b rounds on ``v`` in place."""
    for round_index in range(12):
        round_lyra(round_index, v)
def reduced_blake2b_lyra(v):
    """Reduced-strength sponge permutation: a single BLAKE2b round on ``v``."""
    round_lyra(0, v)
def print_state(state, name='state'):
    """Print the state words as a '|'-separated lowercase-hex dump,
    followed by a horizontal rule."""
    hex_words = ''.join('{:x}|'.format(word) for word in state)
    print(name + ': ' + hex_words)
    print('------------------------------------------')
def copy_block(state, out, start, size=BLOCK_LEN_INT64):
    """Copy the first ``size`` words of ``state`` into ``out`` beginning
    at index ``start`` (element-by-element, so out-of-range indices raise
    IndexError just as before)."""
    for offset in range(size):
        out[start + offset] = state[offset]
def squeeze(state, size):
    """Squeeze ``size`` bytes out of the sponge state.

    Copies BLOCK_LEN_BYTES-sized chunks of the (mutated) state into the
    output, applying the full permutation between full blocks; a trailing
    partial block is copied without a final permutation. Returns a list of
    size/8 64-bit words. NOTE(review): ``size`` is assumed to be a
    multiple of BLOCK_LEN_INT64 bytes -- confirm with callers.
    """
    out = [0] * int(size / BLOCK_LEN_INT64)
    fullBlocks = int(size / BLOCK_LEN_BYTES)
    pos = 0
    for i in range(fullBlocks):
        copy_block(state, out, pos)
        # Permute before squeezing the next full block.
        blake2b_lyra(state)
        pos += BLOCK_LEN_INT64
    # Whatever remains of the request, in whole 64-bit words.
    remaining = int(size % BLOCK_LEN_BYTES / BLOCK_LEN_INT64)
    copy_block(state, out, pos, remaining)
    return out
return out
def pad(text):
    """Pad ``text`` (str) to a whole number of sponge blocks.

    Encodes the text, appends the 0x80 start-of-padding byte, zero-fills
    up to the next block boundary, then XORs the final byte with 0x01
    (multi-rate 10*1 padding). Returns the message as a list of ints.
    """
    block_count = int(len(text) / BLOCK_LEN_BYTES) + 1
    padded = list(text.encode())
    padded.append(0x80)
    padded.extend([0] * (block_count * BLOCK_LEN_BYTES - len(padded)))
    padded[-1] ^= 0x01
    return padded
def build_int64(number_list, pos):
    """Assemble the ``pos``-th little-endian 64-bit word from a list of
    byte values (ints in 0..255)."""
    base = pos * BLOCK_LEN_INT64
    return int.from_bytes(bytes(number_list[base:base + BLOCK_LEN_INT64]),
                          'little')
def absorb_block(state, data, reduced=False):
    """XOR one message block of ``data`` into the rate words of ``state``,
    then apply the sponge permutation.

    ``reduced`` selects the single-round permutation instead of all 12.
    """
    for word_index in range(BLOCK_LEN_INT64):
        state[word_index] ^= build_int64(data, word_index)
    permute = reduced_blake2b_lyra if reduced else blake2b_lyra
    permute(state)
def main():
    """Demo driver: absorb one padded test message and dump the state."""
    state = initState()
    # print_state(state)
    # out = squeeze(state, 96)
    # print_state(state)
    # print_state(out, 'out')
    # print(['{:x}'.format(b) for b in pad('Lyra sponge')])
    absorb_block(state, pad('Lyra sponge'))
    print_state(state)


if __name__ == '__main__':
    main()
| 1.671875 | 2 |
Shared_Funcs/pemfc_dsvdt.py | c-randall/p2d_pemfc | 8 | 12761146 | """ Import needed modules """
"-----------------------------------------------------------------------------"
from scipy.integrate import solve_ivp
from Shared_Funcs.pemfc_transport_funcs import *
import cantera as ct
import numpy as np
import sys
""" Control options for derivative functions """
"-----------------------------------------------------------------------------"
# Toggles to turn on/off in/outer rxns, gas transports, or surface tracking:---
pt_rxn = 1
o2_rxn = 1
gas_tog = 1
gdl_tog = 1
surf_tog = 1
""" Define CL dsvdt for core-shell model """
"-----------------------------------------------------------------------------"
def dsvdt_cl_cs(t, sv, dsvdt, objs, p, iSV, gdl_BC):
    """ODE right-hand side for the core-shell catalyst-layer model.

    Fills and returns ``dsvdt`` for the CL portion of the solution vector
    ``sv``: gas-phase densities, Nafion-shell densities (radial nodes
    stored outermost -> innermost), Pt surface coverages, and the double
    layer potential at each depth node (y runs GDL -> electrolyte).

    NOTE(review): relies on module-level names (cl, gas_ca, naf_b_ca,
    naf_s_ca, pt_s_ca, carb_ca, ver and the toggle flags) presumably
    injected by the star import above -- confirm against
    pemfc_transport_funcs. ``objs`` and ``p`` are accepted but unused.
    """
    """ Set up conditions at GDL/CL BC """
    # Initialize indecies for looping:-----------------------------------------
    cl_ymv = 0  # CL y direction mover (y: GDL -> Elyte)
    # Load in BC state and flux from GDL:--------------------------------------
    TDY1 = gdl_BC['TDY1']
    flux_up = gdl_BC['flux_up']
    i_io_up = 0  # no protons flow into the GDL
    """ Begin loop - with if statements for CL/Elyte BC """
    for i in range(cl['Ny']):
        # Temperature at each Y node:------------------------------------------
        dsvdt[iSV['T_cl'] +cl_ymv] = 0
        # Gas phase species at each Y node:------------------------------------
        if i == cl['Ny'] -1:  # BC for CL and electrolyte interface
            flux_dwn = np.zeros(gas_ca.n_species)
        else:
            rho_gas_k = sv[iSV['rho_gas_k'] +cl_ymv +cl['nxt_y']]
            TDY2 = sv[iSV['T_cl'] +cl_ymv +cl['nxt_y']], sum(rho_gas_k), rho_gas_k
            flux_dwn = fickian_adf(TDY1, TDY2, gas_ca, cl, gas_tog)
        # Set the phases for O2 absorption rxn:
        rho_gas_k = sv[iSV['rho_gas_k'] +cl_ymv]
        rho_naf_k = sv[iSV['rho_naf_k'] +cl_ymv]
        gas_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_gas_k), rho_gas_k
        naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_naf_k), rho_naf_k
        rho_dot_g = naf_s_ca.get_net_production_rates(gas_ca) *cl['SApv_naf']\
                    *cl['1/eps_g'] *gas_ca.molecular_weights *gas_tog
        rho_dot_n = naf_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_naf']\
                    *cl['1/eps_n'] *naf_b_ca.molecular_weights
        # Include rxn and flux in ODE term:
        dsvdt[iSV['rho_gas_k'] +cl_ymv] = (flux_up - flux_dwn)*cl['1/eps_g']*cl['1/dy']\
                                        + o2_rxn *rho_dot_g
        flux_up = flux_dwn
        # NOTE(review): on the first iteration with cl['Ny'] == 1, TDY2 would
        # be undefined here -- confirm Ny > 1 is guaranteed upstream.
        TDY1 = TDY2
        # Nafion densities at each R node:-------------------------------------
        # The Naftion densities change due to reactions at the outter and inner
        # most shells as well as fluxes between adjacent shells. The direction
        # of storage for the radial terms are done from the outermost shell
        # to the innermost one.
        " Start by evaluating the outermost shell "
        # This node contains an O2 absorption rxn with the gas phase as well as
        # a maxx flux with the adjacent inner node.
        rho_k1 = sv[iSV['rho_naf_k'] +cl_ymv]
        rho_k2 = sv[iSV['rho_naf_k'] +cl_ymv +cl['nxt_r']]
        rho_flx_inr = radial_fdiff(rho_k1, rho_k2, cl, 0, ver, 'core_shell')
        # Combine absorption and flux to get overall ODE for Nafion densities:
        dsvdt[iSV['rho_naf_k'] +cl_ymv] = o2_rxn *rho_dot_n *cl['1/Vf_shl'][0]\
                                        - rho_flx_inr *cl['1/r_j'][0]**2 *cl['1/t_shl'][0]
        dsvdt[iSV['rho_naf_k'][cl['iH']] +cl_ymv] = 0  # Ensure constant proton density
        rho_flx_otr = rho_flx_inr
        rho_k1 = rho_k2
        " Evaluate the inner shell nodes "
        for j in range(1, cl['Nr'] -1):
            rho_k2 = sv[iSV['rho_naf_k'] +cl_ymv +(j+1)*cl['nxt_r']]
            rho_flx_inr = radial_fdiff(rho_k1, rho_k2, cl, j, ver, 'core_shell')
            iMid = iSV['rho_naf_k'] +cl_ymv +j*cl['nxt_r']
            dsvdt[iMid] = (rho_flx_otr - rho_flx_inr) *cl['1/r_j'][j]**2 *cl['1/t_shl'][j]
            rho_flx_otr = rho_flx_inr
            rho_k1 = rho_k2
        " Apply the Pt reaction BC at the innermost shell "
        # Set the phases for the ORR at the Pt surface:
        carb_ca.electric_potential = 0
        pt_s_ca.electric_potential = 0
        naf_b_ca.electric_potential = -sv[iSV['phi_dl'] +cl_ymv]
        naf_s_ca.electric_potential = -sv[iSV['phi_dl'] +cl_ymv]
        pt_s_ca.coverages = sv[iSV['theta_pt_k'] +cl_ymv]
        naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_k1), rho_k1
        rho_dot_n = pt_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_pt']\
                    *cl['1/eps_n'] *naf_b_ca.molecular_weights
        # Pt surface coverages:
        dsvdt[iSV['theta_pt_k'] +cl_ymv] = pt_s_ca.get_net_production_rates(pt_s_ca)\
                                         *cl['1/gamma'] *pt_rxn *surf_tog
        # Innermost Nafion node densities:
        iLast = iSV['rho_naf_k'] +cl_ymv +(cl['Nr'] -1)*cl['nxt_r']
        dsvdt[iLast] = pt_rxn *rho_dot_n *cl['1/Vf_shl'][-1] \
                     + rho_flx_otr *cl['1/r_j'][-1]**2 *cl['1/t_shl'][-1]
        # Double layer potential at each Y node:-------------------------------
        # The double layer potential is only stored as a function of CL depth.
        # This means that no local potential gradients are shored in the radial
        # direction throughout the Nafion shells.
        # Find ionic currents and define ODE for phi_dl:
        if i == cl['Ny'] -1:  # BC for CL and electrolyte interface
            i_io_dwn = cl['i_ext']
        else:
            i_io_dwn = (sv[iSV['phi_dl'] +cl_ymv] - sv[iSV['phi_dl'] +cl_ymv +cl['nxt_y']])\
                       *cl['sig_naf_io'] *cl['1/dy']
        i_Far = pt_rxn *pt_s_ca.get_net_production_rates(carb_ca) *ct.faraday
        i_dl = (i_io_up - i_io_dwn)*cl['1/dy'] - i_Far*cl['SApv_pt']
        dsvdt[iSV['phi_dl'] +cl_ymv] = i_dl*cl['1/CA_dl']
        i_io_up = i_io_dwn
        # Update Y direction moving index:-------------------------------------
        cl_ymv = cl_ymv +cl['nxt_y']
    return dsvdt
""" Define CL dsvdt for flooded-agglomerate model """
"-----------------------------------------------------------------------------"
def dsvdt_cl_fa(t, sv, dsvdt, objs, p, iSV, gdl_BC):
    """ODE right-hand side for the flooded-agglomerate catalyst-layer model.

    Fills and returns ``dsvdt`` for the CL portion of ``sv``: gas-phase
    densities, a single Nafion-shell node plus inner-agglomerate radial
    nodes (stored outer -> center), Pt coverages per radial node, and the
    depth-resolved double layer potential.

    NOTE(review): relies on module-level names (cl, gas_ca, naf_b_ca,
    naf_s_ca, pt_s_ca, carb_ca, ver and toggle flags) presumably injected
    by the star import above -- confirm. ``objs`` and ``p`` are unused.
    """
    """ Set up conditions at GDL/CL BC """
    # Initialize indecies for looping:-----------------------------------------
    cl_ymv = 0  # CL y direction mover (y: GDL -> Elyte)
    # Load in BC state and flux from GDL:--------------------------------------
    TDY1 = gdl_BC['TDY1']
    flux_up = gdl_BC['flux_up']
    i_io_up = 0  # no protons flow into the GDL
    """ Begin loop - with if statements for CL/Elyte BC """
    for i in range(cl['Ny']):
        # Temperature at each Y node:------------------------------------------
        dsvdt[iSV['T_cl'] +cl_ymv] = 0
        # Gas phase species at each Y node:------------------------------------
        if i == cl['Ny'] -1:
            flux_dwn = np.zeros(gas_ca.n_species)
        else:
            rho_gas_k = sv[iSV['rho_gas_k'] +cl_ymv +cl['nxt_y']]
            TDY2 = sv[iSV['T_cl'] +cl_ymv +cl['nxt_y']], sum(rho_gas_k), rho_gas_k
            flux_dwn = fickian_adf(TDY1, TDY2, gas_ca, cl, gas_tog)
        # Set the phases for O2 absorption rxn:
        rho_gas_k = sv[iSV['rho_gas_k'] +cl_ymv]
        rho_shl_k = sv[iSV['rho_shl_k'] +cl_ymv]
        gas_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_gas_k), rho_gas_k
        naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_shl_k), rho_shl_k
        rho_dot_g = naf_s_ca.get_net_production_rates(gas_ca) *cl['SApv_naf']\
                    *cl['1/eps_g'] *gas_ca.molecular_weights *gas_tog
        rho_dot_n = naf_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_naf']\
                    *cl['1/eps_n'] *naf_b_ca.molecular_weights
        # Include rxn and flux in ODE term:
        dsvdt[iSV['rho_gas_k'] +cl_ymv] = (flux_up - flux_dwn)*cl['1/eps_g']*cl['1/dy']\
                                        + o2_rxn *rho_dot_g
        flux_up = flux_dwn
        # NOTE(review): with cl['Ny'] == 1, TDY2 would be undefined here --
        # confirm Ny > 1 is guaranteed upstream.
        TDY1 = TDY2
        # Nafion densities at each R node:-------------------------------------
        # The Nafion densities change due to reactions throughout the inner
        # agglomerate as well as fluxes between adjacent radial nodes. The
        # direction of storage for the radial terms starts with a single node
        # for the outer shell, and then continues from the outer agglomerate
        # node into the center.
        " Start by evaluating single-node nafion shell "
        # This node contains an O2 absorption rxn with the gas phase as well as
        # a mass flux with the inner agglomerate.
        rho_k1 = sv[iSV['rho_shl_k'] +cl_ymv]
        rho_k2 = sv[iSV['rho_naf_k'] +cl_ymv]
        rho_flx_inr = radial_fdiff(rho_k1, rho_k2, cl, 0, ver, 'flooded_agg')
        # Combine absorption and flux to get overall ODE:
        dsvdt[iSV['rho_shl_k'] +cl_ymv] = o2_rxn *rho_dot_n - rho_flx_inr
        dsvdt[iSV['rho_shl_k'][cl['iH']] +cl_ymv] = 0  # Ensure constant proton density
        rho_flx_otr = rho_flx_inr
        rho_k1 = rho_k2
        " Evaluate the inner agglomerate nodes "
        # Loop through radial nodes within agglomerate:
        i_Far_r = np.zeros(cl['Nr'])
        # Set the phases for ORR at the Pt surface:
        carb_ca.electric_potential = 0
        pt_s_ca.electric_potential = 0
        naf_b_ca.electric_potential = -sv[iSV['phi_dl'] +cl_ymv]
        naf_s_ca.electric_potential = -sv[iSV['phi_dl'] +cl_ymv]
        for j in range(cl['Nr'] -1):
            rho_k2 = sv[iSV['rho_naf_k'] +cl_ymv +(j+1)*cl['nxt_r']]
            rho_flx_inr = radial_fdiff(rho_k1, rho_k2, cl, j+1, ver, 'flooded_agg')
            pt_s_ca.coverages = sv[iSV['theta_pt_k'] +cl_ymv +j*cl['nxt_r']]
            naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_k1), rho_k1
            rho_dot_n = pt_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_pt']\
                        *cl['1/eps_n'] *naf_b_ca.molecular_weights *cl['Vf_ishl'][j]
            i_Far_r[j] = pt_rxn *pt_s_ca.get_net_production_rates(carb_ca)\
                         *ct.faraday *cl['Vf_ishl'][j]
            # Pt surface coverages:
            iMid = iSV['theta_pt_k'] +cl_ymv +j*cl['nxt_r']
            dsvdt[iMid] = pt_s_ca.get_net_production_rates(pt_s_ca) *cl['1/gamma']\
                          *pt_rxn *surf_tog
            # Combine ORR and flux to get overall ODE for Nafion densities:
            iMid = iSV['rho_naf_k'] +cl_ymv +j*cl['nxt_r']
            dsvdt[iMid] = rho_flx_otr - rho_flx_inr + pt_rxn *rho_dot_n
            dsvdt[iMid[cl['iH']]] = 0  # Ensure constant proton density
            rho_flx_otr = rho_flx_inr
            rho_k1 = rho_k2
        " Apply symmetric flux BC at innermost agglomerate node "
        rho_flx_inr = np.zeros(naf_b_ca.n_species)
        # Set the phases for ORR at the Pt surface:
        pt_s_ca.coverages = sv[iSV['theta_pt_k'] +cl_ymv +(cl['Nr'] -1)*cl['nxt_r']]
        naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_k1), rho_k1
        rho_dot_n = pt_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_pt']\
                    *cl['1/eps_n'] *naf_b_ca.molecular_weights *cl['Vf_ishl'][-1]
        i_Far_r[-1] = pt_rxn *pt_s_ca.get_net_production_rates(carb_ca)\
                      *ct.faraday *cl['Vf_ishl'][-1]
        # Pt surface coverages:
        iLast = iSV['theta_pt_k'] +cl_ymv +(cl['Nr'] -1)*cl['nxt_r']
        dsvdt[iLast] = pt_s_ca.get_net_production_rates(pt_s_ca) *cl['1/gamma']\
                       *pt_rxn *surf_tog
        # Combine ORR and flux to get overall ODE:
        iLast = iSV['rho_naf_k'] +cl_ymv +(cl['Nr'] -1)*cl['nxt_r']
        dsvdt[iLast] = rho_flx_otr - rho_flx_inr + pt_rxn *rho_dot_n
        dsvdt[iLast[cl['iH']]] = 0  # Ensure constant proton density
        # Double layer potential at each Y node:-------------------------------
        # The double layer potential is only stored as a function of CL depth,
        # but is based on the reactions that occur throughout the radial
        # direction of each agglomerate. Looping through the radial nodes of
        # each agglomerate and summing over all faradaic currents is used to
        # evaluate an overall double layer current.
        " Simplify all radial terms into a single y-dependent double layer "
        # Combine the faradaic currents to get overall i_Far:
        i_Far = np.sum(i_Far_r)
        # Find ionic currents and define ODE for phi_dl:
        if i == cl['Ny'] -1:
            i_io_dwn = cl['i_ext']
        else:
            i_io_dwn = (sv[iSV['phi_dl'] +cl_ymv] - sv[iSV['phi_dl'] +cl_ymv +cl['nxt_y']])\
                       *cl['sig_naf_io'] *cl['1/dy']
        i_dl = (i_io_up - i_io_dwn)*cl['1/dy'] - i_Far*cl['SApv_pt']
        dsvdt[iSV['phi_dl'] +cl_ymv] = i_dl*cl['1/CA_dl']
        i_io_up = i_io_dwn
        # Update Y direction moving index:-------------------------------------
        cl_ymv = cl_ymv +cl['nxt_y']
    return dsvdt
""" Define dsvdt for pemfc models - common for GDL and then CLs above """
"-----------------------------------------------------------------------------"
def dsvdt_func(t, sv, objs, p, iSV):
    """Overall ODE right-hand side: GDL gas transport plus the CL model.

    Computes the GDL fluxes down to the GDL/CL interface, then dispatches
    to the core-shell or flooded-agglomerate CL function depending on the
    module-level ``model`` string. Returns the filled ``dsvdt`` vector.
    """
    # Initialize indecies for looping:-----------------------------------------
    gdl_ymv = 0  # GDL y direction mover (y: gas channel -> CL)
    dsvdt = np.zeros_like(sv)
    """ Bondary Condition - GDL and CL gas transport """
    # Densities/Temp of GDL gas species and CL BC (top):-----------------------
    gas_ca.TPY = gdl['TPY_BC']
    TDY_BC = gas_ca.TDY
    # If GDL diffusion is turned on, compare adjacent nodes with ADF flux to
    # determine the BC composition between the GDL and CL.
    rho_gdl_k = sv[iSV['rho_gdl_k']]
    TDY1 = sv[iSV['T_gdl']], sum(rho_gdl_k), rho_gdl_k
    flux_up = fickian_adf(TDY_BC, TDY1, gas_ca, gdl, gdl_tog)
    for k in range(gdl['Ny'] -1):
        rho_gdl_k = sv[iSV['rho_gdl_k'] +gdl_ymv +gdl['nxt_y']]
        TDY2 = sv[iSV['T_gdl'] +gdl_ymv +gdl['nxt_y']], sum(rho_gdl_k), rho_gdl_k
        flux_dwn = fickian_adf(TDY1, TDY2, gas_ca, gdl, gdl_tog)
        dsvdt[iSV['rho_gdl_k'] +gdl_ymv] = (flux_up - flux_dwn)*gdl['1/eps_g']*gdl['1/dy']
        flux_up = flux_dwn
        TDY1 = TDY2
        gdl_ymv = gdl_ymv +gdl['nxt_y']
    # Use the composition and state of the last GDL node to calculate the flux
    # into the first CL node.
    rho_gas_k = sv[iSV['rho_gas_k']]
    TDY2 = sv[iSV['T_cl']], sum(rho_gas_k), rho_gas_k
    flux_dwn = fickian_adf(TDY1, TDY2, gas_ca, gdl_cl, gdl_tog)
    dsvdt[iSV['rho_gdl_k'] +gdl_ymv] = (flux_up - flux_dwn)*gdl['1/eps_g']*gdl['1/dy']
    # NOTE(review): the interface flux is recomputed here with gas_tog
    # (CL toggle) instead of gdl_tog as used just above -- looks deliberate
    # (GDL side vs CL side of the interface), but confirm.
    flux_up = fickian_adf(TDY1, TDY2, gas_ca, gdl_cl, gas_tog)
    TDY1 = TDY2
    # Load BC values to pass into CL functions:
    gdl_BC = {}
    gdl_BC['TDY1'] = TDY1
    gdl_BC['flux_up'] = flux_up
    """ Generic loop for interal CL nodes in y-direction """
    if model == 'core_shell':
        dsvdt = dsvdt_cl_cs(t, sv, dsvdt, objs, p, iSV, gdl_BC)
    elif model == 'flooded_agg':
        dsvdt = dsvdt_cl_fa(t, sv, dsvdt, objs, p, iSV, gdl_BC)
    # print(t)
    # print(dsvdt)
    #
    # user_in = input('"Enter" to continue or "Ctrl+d" to cancel.')
    # if user_in == KeyboardInterrupt:
    #     sys.exit(0)
    return dsvdt
""" Use integrator to call dsvdt and solve to SS """
"-----------------------------------------------------------------------------"
# Create vectors to store outputs:
i_ext = np.hstack([i_OCV, i_ext0, i_ext1, i_ext2])
eta_ss, dphi_ss = np.zeros_like(i_ext), np.zeros_like(i_ext)
sv_save = np.zeros([len(SV_0) +1, len(i_ext)])
# Define common index for last CL node's phi_dl:
iPhi_f = int(iSV['phi_dl'] + (Ny_cl-1)*L_cl/Ny_cl)
# Update and convert i_ext: A/cm^2 -> A/m^2
cl['i_ext'] = i_ext[0] *100**2
sol = solve_ivp(lambda t, sv: dsvdt_func(t, sv, objs, p, iSV), [0, t_sim],
SV_0, method=method, atol=atol, rtol=rtol, max_step=max_t)
# Calculate extra PEM resistance terms to subtract off:
R_naf_vec = i_ext*(pem['R_naf'] + 0.5*cl['dy'] / cl['sig_naf_io'] *100**2)
# Store solution and update initial values:
SV_0, sv_save[:,0] = sol.y[:,-1], np.append(i_ext[0], sol.y[:,-1])
dphi_ss[0] = sol.y[iPhi_f, -1] - dphi_eq_an - R_naf_vec[0]
print('t_f:',sol.t[-1],'i_ext:',round(cl['i_ext']*1e-4,3), 'dPhi:',round(dphi_ss[0],3))
for i in range(len(i_ext) -1):
# Don't run the for loop if i_OCV was not set to 0...
if any([all([i == 0, i_OCV != 0]), polar == 'off']):
break
# Update and convert i_ext: A/cm^2 -> A/m^2
cl['i_ext'] = i_ext[i+1] *100**2
sol = solve_ivp(lambda t, sv: dsvdt_func(t, sv, objs, p, iSV), [0, t_sim],
SV_0, method=method, atol=atol, rtol=rtol, max_step=max_t)
# Store solution and update initial values:
SV_0, sv_save[:,i+1] = sol.y[:,-1], np.append(i_ext[i+1], sol.y[:,-1])
eta_ss[i+1] = dphi_ss[0] - sol.y[iPhi_f,-1]
dphi_ss[i+1] = sol.y[iPhi_f,-1] - dphi_eq_an - R_naf_vec[i+1]
print('t_f:',sol.t[-1], 'i_ext:',round(cl['i_ext']*1e-4,3), 'dPhi:',round(dphi_ss[i+1],3)) | 2.09375 | 2 |
groupdocs_viewer_cloud/models/cad_options.py | groupdocs-viewer-cloud/groupdocs-viewer-cloud-python | 1 | 12761147 | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="CadOptions.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class CadOptions(object):
    """
    Rendering options for CAD file formats. CAD file formats include files with extensions: .dwg, .dxf, .dgn, .ifc, .stl
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Swagger-generated metadata used by to_dict()/serialization.
    swagger_types = {
        'scale_factor': 'float',
        'width': 'int',
        'height': 'int',
        'tiles': 'list[Tile]',
        'render_layouts': 'bool',
        'layout_name': 'str',
        'layers': 'list[str]'
    }

    # Maps python attribute names to their JSON field names.
    attribute_map = {
        'scale_factor': 'ScaleFactor',
        'width': 'Width',
        'height': 'Height',
        'tiles': 'Tiles',
        'render_layouts': 'RenderLayouts',
        'layout_name': 'LayoutName',
        'layers': 'Layers'
    }
def __init__(self, scale_factor=None, width=None, height=None, tiles=None, render_layouts=None, layout_name=None, layers=None, **kwargs): # noqa: E501
"""Initializes new instance of CadOptions""" # noqa: E501
self._scale_factor = None
self._width = None
self._height = None
self._tiles = None
self._render_layouts = None
self._layout_name = None
self._layers = None
if scale_factor is not None:
self.scale_factor = scale_factor
if width is not None:
self.width = width
if height is not None:
self.height = height
if tiles is not None:
self.tiles = tiles
if render_layouts is not None:
self.render_layouts = render_layouts
if layout_name is not None:
self.layout_name = layout_name
if layers is not None:
self.layers = layers
@property
def scale_factor(self):
    """Scale factor for the output document size.

    Values above 1 enlarge the result; values between 0 and 1 shrink it.
    Ignored when either the Height or Width option is set.
    """
    return self._scale_factor

@scale_factor.setter
def scale_factor(self, scale_factor):
    """Set the scale factor; ``None`` is rejected."""
    if scale_factor is None:
        raise ValueError("Invalid value for `scale_factor`, must not be `None`")  # noqa: E501
    self._scale_factor = scale_factor
@property
def width(self):
    """Width of the output result in pixels."""
    return self._width

@width.setter
def width(self, width):
    """Set the output width in pixels; ``None`` is rejected."""
    if width is None:
        raise ValueError("Invalid value for `width`, must not be `None`")  # noqa: E501
    self._width = width
@property
def height(self):
    """Height of the output result in pixels."""
    return self._height

@height.setter
def height(self, height):
    """Set the output height in pixels; ``None`` is rejected."""
    if height is None:
        raise ValueError("Invalid value for `height`, must not be `None`")  # noqa: E501
    self._height = height
@property
def tiles(self):
    """Drawing regions to render (DWG/DWT only).

    RenderLayouts and LayoutName are ignored when rendering by tiles.
    """
    return self._tiles

@tiles.setter
def tiles(self, tiles):
    """Set the drawing regions to render; ``None`` clears them."""
    self._tiles = tiles
@property
def render_layouts(self):
    """Whether layouts from the CAD document should be rendered."""
    return self._render_layouts

@render_layouts.setter
def render_layouts(self, render_layouts):
    """Set the layouts-rendering flag; ``None`` is rejected."""
    if render_layouts is None:
        raise ValueError("Invalid value for `render_layouts`, must not be `None`")  # noqa: E501
    self._render_layouts = render_layouts
@property
def layout_name(self):
    """Name of the specific layout to render (case-sensitive).

    :return: The layout_name.
    :rtype: str
    """
    return self._layout_name

@layout_name.setter
def layout_name(self, layout_name):
    """Set the name of the specific layout to render (case-sensitive).

    :param layout_name: The layout_name.
    :type: str
    """
    self._layout_name = layout_name
@property
def layers(self):
    """CAD drawing layers to render.

    By default all layers are rendered; layer names are case-sensitive.

    :return: The layers.
    :rtype: list[str]
    """
    return self._layers

@layers.setter
def layers(self, layers):
    """Set the CAD drawing layers to render (names are case-sensitive).

    :param layers: The layers.
    :type: list[str]
    """
    self._layers = layers
def to_dict(self):
    """Return the model's properties as a plain dict.

    Nested models (anything exposing ``to_dict``) inside lists and dicts
    are converted recursively.
    """
    def _convert(value):
        # Recursively turn model objects/containers into plain data.
        if isinstance(value, list):
            return list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        return value

    result = {}
    for attr, _ in six.iteritems(self.swagger_types):
        result[attr] = _convert(getattr(self, attr))
    return result
def to_str(self):
    """Return the ``pprint``-formatted string form of the model."""
    data = self.to_dict()
    return pprint.pformat(data)
def __repr__(self):
    """Delegate to :meth:`to_str` for ``print`` and ``pprint``."""
    return self.to_str()
def __eq__(self, other):
    """Two CadOptions instances are equal when all attributes match."""
    if isinstance(other, CadOptions):
        return self.__dict__ == other.__dict__
    return False
def __ne__(self, other):
    """Logical inverse of ``__eq__``."""
    return not (self == other)
| 1.4375 | 1 |
pytorch_seed_rl/functional/loss.py | mjanschek/pytorch_seed_rl | 9 | 12761148 | <gh_stars>1-10
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of loss functions necessary for reinforcement learning objective calculations.
"""
import torch
import torch.nn.functional as F
def entropy(logits: torch.Tensor) -> torch.Tensor:
    """Return the entropy loss, i.e. the negative entropy of the policy.

    Adding this term to an RL objective discourages premature convergence.

    See Also
    --------
    `Entropy Regularization in Reinforcement Learning
    <https://towardsdatascience.com/entropy-regularization-in-reinforcement-learning-a6fa6d7598df>`__

    Parameters
    ----------
    logits: :py:class:`torch.Tensor`
        Logits returned by the models policy network.
    """
    probs = F.softmax(logits, dim=-1)
    log_probs = F.log_softmax(logits, dim=-1)
    # sum_i p_i * log(p_i) == -H(policy)
    return (probs * log_probs).sum()
def policy_gradient(logits: torch.Tensor,
                    actions: torch.Tensor,
                    advantages: torch.Tensor) -> torch.Tensor:
    """Compute the policy gradient loss.

    See Also
    --------
    `https://spinningup.openai.com
    <https://spinningup.openai.com/en/latest/spinningup/rl_intro3.html>`__

    Parameters
    ----------
    logits: :py:class:`torch.Tensor`
        Logits returned by the models policy network.
    actions: :py:class:`torch.Tensor`
        Actions that were selected from :py:attr:`logits`
    advantages: :py:class:`torch.Tensor`
        Advantages that resulted for the related states.
    """
    # Collapse the leading (time, batch) dimensions for nll_loss.
    flat_log_probs = F.log_softmax(logits.flatten(0, 1), dim=-1)
    flat_actions = actions.flatten(0, 1).to(torch.long)
    nll = F.nll_loss(flat_log_probs, target=flat_actions, reduction="none")
    nll = nll.view_as(advantages)
    # Advantages act as fixed weights; gradients flow only through the policy.
    return (nll * advantages.detach()).sum()
| 2.359375 | 2 |
2d/caissonBreakwater/sliding/tank.py | erdc-cm/air-water-vv | 5 | 12761149 | from proteus import Domain, Context
from proteus.mprans import SpatialTools as st
from proteus import Gauges as ga
from proteus import WaveTools as wt
from math import *
import numpy as np
from proteus.mprans import BodyDynamics as bd
# Run-time options for the sliding-caisson breakwater case.
# Each entry is (name, default, help); values are read below as ``opts.<name>``.
opts=Context.Options([
    # predefined test cases
    ("water_level", 0.325, "Height of free surface above bottom"),
    # Geometry
    ('Lgen', 1.0, 'Genaration zone in terms of wave lengths'),
    ('Labs', 1.0, 'Absorption zone in terms of wave lengths'),
    ('Ls', 1.0, 'Length of domain from genZone to the front toe of rubble mound in terms of wave lengths'),
    ('Lend', 1.0, 'Length of domain from absZone to the back toe of rubble mound in terms of wave lengths'),
    # waves
    ('wave', True, 'Enable wave generation'),
    ('waveType', 'Fenton', 'Wavetype for regular waves, Linear or Fenton'),
    ("wave_period", 1.30, "Period of the waves"),
    ("wave_height", 0.167, "Height of the waves"),
    ('wavelength', 2.121, 'Wavelength only if Fenton is activated'),
    ('Ycoeff', [0.21107604, 0.07318902, 0.02782228, 0.01234846, 0.00618291, 0.00346483, 0.00227917, 0.00194241], 'Ycoeff only if Fenton is activated'),
    ('Bcoeff', [0.23112932, 0.03504843, 0.00431442, 0.00036993, 0.00004245, 0.00001877, 0.00000776, 0.00000196], 'Bcoeff only if Fenton is activated'),
    ('Nf', 8 ,'Number of frequency components for fenton waves'),
    ('meanVelocity', [ 0., 0., 0.],'Velocity used for currents'),
    ('phi0', 0.0 ,'Initial phase for waves'),
    ('Uwind', [0.0, 0.0, 0.0], 'Set air velocity'),
    ('fast', True ,'Switches ON fast cosh approximation'),
    # rubble mound
    ('porousMedia', True, 'Enable porus media region'),
    ("hs", 0.175, "Height of the breakwater"),
    ("slope1", 1./3., "Slope1 of the breakwater"),
    ("slope2", 1./2., "Slope2 of the breakwater"),
    ('porosity', 0.4, "Porosity of the medium"),
    ('d50', 0.030, "Mean diameter of the medium"),
    ('d15', None, "15% grading curve diameter of the medium"),
    ('Resistance', 'Shih', 'Ergun or Engelund or Shih'),
    # soil foundation
    ("springs", True, "Switch on/off soil module"),
    ("Kx", 541553.2, "Horizontal stiffness in Pa"),
    ("Ky", 582633.7, "Vertical stiffness in Pa"),
    ("Krot", 16246.6, "Rotational stiffness in N"),
    ("Cx", 1694.2, "Damping factor in Pa s "),
    ("Cy", 1757.32, "Damping factor in Pa s "),
    ("Crot", 69.61, "Rotational damping factor in N s "),
    # caisson
    ("caisson2D", True, "Switch on/off caisson2D"),
    ('dimx', 0.300, 'X-dimension of the caisson2D'),
    ('dimy', 0.385, 'Y-dimension of the caisson2D'),
    ('width', 1.0, 'Z-dimension of the caisson2D'),
    ('mass', 64.8/0.4, 'Mass of the caisson2D [kg]'),
    ('caissonBC', 'FreeSlip', 'caisson2D boundaries: NoSlip or FreeSlip'),
    ("rotation", False, "Initial position for free oscillation"),
    ("friction", True, "Switch on/off friction module for sliding"),
    ("overturning", True, "Switch on/off overturning module"),
    ("m_static", 0.500, "Static friction factor between caisson2D and rubble mound"),
    ("m_dynamic", 0.500, "Dynamic friction factor between caisson2D and rubble mound"),
    ('scheme', 'Runge_Kutta', 'Numerical scheme applied to solve motion calculation (Runge_Kutta or Central_Difference)'),
    # numerical options
    ("GenZone", True, 'Turn on generation zone at left side'),
    ("AbsZone", True, 'Turn on absorption zone at right side'),
    ("refinement_level", 0.0,"he=walength/refinement_level"),
    ("he", 0.05,"he=walength/refinement_level"),
    ("cfl", 0.450 ,"Target cfl"),
    ("duration", 20., "Durarion of the simulation"),
    ("freezeLevelSet", True, "No motion to the levelset"),
    ("useVF", 1.0, "For density and viscosity smoothing"),
    ('movingDomain', True, "Moving domain and mesh option"),
    ('conservativeFlux', True,'Fix post-processing velocity bug for porous interface'),
    ])
# ----- DOMAIN ----- #
domain = Domain.PlanarStraightLineGraphDomain()

# ----- WAVE CONDITIONS ----- #
period=opts.wave_period
omega=2*np.pi/opts.wave_period  # angular wave frequency [rad/s]
waterLevel=opts.water_level
waveDir=np.array([1, 0., 0.])  # waves propagate in +x
mwl=waterLevel #coordinate of the initial mean level of water surface
waveHeight=opts.wave_height
inflowHeightMean=waterLevel
inflowVelocityMean =np.array([0.,0.,0.])
windVelocity = np.array([0.,0.,0.])

# ----- Physical constants (water / air at ~20 C) ----- #
rho_0=998.2    # water density [kg/m3]
nu_0 =1.004e-6 # water kinematic viscosity [m2/s]
rho_1=1.205    # air density [kg/m3]
nu_1 =1.500e-5 # air kinematic viscosity [m2/s]
sigma_01=0.0   # surface tension (disabled)
g =np.array([0.,-9.8,0.])
gAbs=sqrt(sum(g**2))  # |g|
# ----- WAVE input ----- #
# Build the wave generator only when wave generation is enabled.
if opts.wave:
    waveinput = wt.MonochromaticWaves(period=period,
                                      waveHeight=waveHeight,
                                      mwl=mwl,
                                      depth=waterLevel,
                                      g=g,
                                      waveDir=waveDir,
                                      wavelength=opts.wavelength,  # used by fenton waves
                                      waveType=opts.waveType,
                                      Ycoeff=np.array(opts.Ycoeff),  # used by fenton waves
                                      Bcoeff=np.array(opts.Bcoeff),  # used by fenton waves
                                      Nf=opts.Nf,  # used by fenton waves
                                      meanVelocity=np.array(opts.meanVelocity),
                                      phi0=opts.phi0,
                                      fast=opts.fast,
                                      )
    wl = waveinput.wavelength
else:
    # BUG FIX: previously ``wl = waveinput.wavelength`` ran unconditionally,
    # raising NameError whenever opts.wave was False; fall back to the
    # wavelength given in the options so the geometry can still be built.
    wl = opts.wavelength

#--------- Domain dimension
nd = 2

#--------- Mesh size: derived from the wavelength unless given explicitly.
if opts.he == 0.0:
    he = wl/opts.refinement_level
else:
    he = opts.he
################################################################################
# ----- SHAPES: horizontal tank layout ----- #
# x1..x7 are the x-coordinates of the zone boundaries, left to right:
# |genZone|Ls|mound front slope|caisson|mound back slope|Lend|absZone|
################################################################################
if opts.caisson2D:
    L_leftSpo = opts.Lgen*wl   # generation-zone length
    L_rightSpo = opts.Labs*wl  # absorption-zone length
    hs=opts.hs                 # breakwater height
    slope1=opts.slope1
    slope2=opts.slope2

    #-caisson2D
    dimx=opts.dimx
    dimy=opts.dimy
    b=dimx

    #-Tank
    x1=L_leftSpo
    x2=x1+opts.Ls*wl
    x3=x2+(hs/slope1)  # top of front slope
    xc1=x3+0.20        # caisson front face (0.20 m berm)
    xc2=xc1+b          # caisson back face
    yc1=yc2=hs         # caisson sits on top of the mound
    x4=xc2+0.20
    x5=x4+(hs/slope2)  # toe of back slope
    x6=x5+opts.Lend*wl
    x7=x6+L_rightSpo

    tank_dim = [x7, 1.0]
    boundaryOrientations = {'y-': np.array([0., -1.,0.]),
                            'x+': np.array([+1., 0.,0.]),
                            'y+': np.array([0., +1.,0.]),
                            'x-': np.array([-1., 0.,0.]),
                            'sponge': None,
                            'porousLayer': None,
                            'moving_porousLayer': None,
                            }
    boundaryTags = {'y-' : 1,
                    'x+' : 2,
                    'y+' : 3,
                    'x-' : 4,
                    'sponge' : 5,
                    'porousLayer' : 6,
                    'moving_porousLayer' : 7,
                    }
else:
    # No caisson/mound: a plain rectangular tank with sponge layers.
    L_leftSpo = opts.Lgen*wl
    L_rightSpo = opts.Labs*wl

    #-Tank
    x1=L_leftSpo
    x2=x1+opts.Ls*wl
    x3=x2+L_rightSpo

    tank_dim = [x3, 1.0]
    boundaryOrientations = {'y-': np.array([0., -1.,0.]),
                            'x+': np.array([+1., 0.,0.]),
                            'y+': np.array([0., +1.,0.]),
                            'x-': np.array([-1., 0.,0.]),
                            'sponge': None,
                            }
    boundaryTags = {'y-': 1,
                    'x+': 2,
                    'y+': 3,
                    'x-': 4,
                    'sponge': 5,
                    }
################################################################################
# caisson2D: rigid-body setup (mass, constraints, friction, overturning)
################################################################################
if opts.caisson2D:
    dimx=dimx
    dimy=dimy
    dim=(dimx,dimy)
    coords=[xc1+b/2., hs+dimy/2.] # For bodyDimensions and barycenter
    VCG=dim[1]/2.                 # For barycenter
    width=opts.width              # The 3rd dimension
    mass=opts.mass #kg
    volume=float(dimx*dimy*width)
    density=float(mass/volume) #kg/m3
    I=mass*(dimx**2.+dimy**2.)/12.  # moment of inertia of a rectangle
    # It=(dimx**2.+dimy**2.)/12.

    # --- Shape properties setup
    caisson = st.Rectangle(domain, dim=dim, coords=coords)
    # Pin the bottom corners exactly onto the mound crest.
    caisson.vertices[0][0]=xc1
    caisson.vertices[0][1]=yc1
    caisson.vertices[1][0]=xc2
    caisson.vertices[1][1]=yc2

    # --- Body properties setup
    caisson2D = bd.CaissonBody(shape=caisson, substeps=20)
    free_x=(0.0, 0.0, 0.0) # Translational DOFs
    free_r=(0.0, 0.0, 0.0) # Rotational DOFs
    m_static=opts.m_static # Static friction
    m_dynamic=opts.m_dynamic # Dynamic friction
    if opts.movingDomain==True:
        free_x=(1.0, 1.0, 0.0) # Translational DOFs
        if opts.overturning==True:
            free_r=(0.0, 0.0, 1.0) # Rotational DOFs
    caisson2D.setMass(mass)
    caisson2D.setConstraints(free_x=free_x, free_r=free_r)
    caisson2D.setFriction(friction=opts.friction, m_static=m_static, m_dynamic=m_dynamic,
                          tolerance=he/(float(10**6)), grainSize=opts.d50)
    overturning=opts.overturning
    caisson2D.setOverturning(overturning)
    if opts.rotation==True: # Initial position for free oscillation
        # FIXME(review): ``rotation`` is never defined in this module, so this
        # line raises NameError when opts.rotation is True; an initial rotation
        # angle should be defined/derived here -- confirm the intended value.
        caisson2D.rotate(rotation)
    caisson2D.It= I/caisson2D.mass/width
    caisson2D.setNumericalScheme(scheme=opts.scheme)
    caisson2D.setRecordValues(filename='caisson2D', all_values=True)
################################################################################
# Tank: planar straight-line-graph description (vertices, segments, regions)
# Flags map to boundaryTags above: 1 bottom, 2 outflow, 3 atmosphere,
# 4 inflow, 5 sponge, 6 porous layer, 7 moving porous layer.
################################################################################
if opts.caisson2D==False:
    vertices=[[0.0, 0.0],#0
              [x1, 0.0],#1
              [x2, 0.0], #2
              [x3, 0.0 ],#3
              [x3, tank_dim[1] ],#4
              [x2, tank_dim[1] ],#5
              [x1, tank_dim[1] ],#6
              [0.0, tank_dim[1] ],#7
              ]
    vertexFlags=np.array([1, 1, 1, 1,
                          3, 3, 3, 3,
                          ])
    segments=[[0,1],
              [1,2],
              [2,3],
              [3,4],
              [4,5],
              [5,6],
              [6,7],
              [7,0],
              [1,6],
              [2,5],
              ]
    segmentFlags=np.array([1, 1, 1,
                           2, 3, 3, 3, 4,
                           5, 5,
                           ])
    regions = [ [ 0.90*x1 , 0.10*tank_dim[1] ],
                [ 0.90*x2 , 0.90*tank_dim[1] ],
                [ 0.95*x3 , 0.95*tank_dim[1] ] ]
    regionFlags=np.array([1, 2, 3])
else:
    # Layout with rubble mound (vertices 3,4) and caisson corners (12,13).
    vertices=[[0.0, 0.0],#0
              [x1, 0.0],#1
              [x2, 0.0], #2
              [x3, hs ],#3
              [x4, hs ],#4
              [x5, 0.0],#5
              [x6, 0.0],#6
              [x7, 0.0],#7
              [x7, tank_dim[1]],#8
              [x6, tank_dim[1]],#9
              [x1, tank_dim[1]],#10
              [0.0, tank_dim[1]],#11
              [xc1, yc1],#12
              [xc2, yc2],#13
              ]
    vertexFlags=np.array([1, 1, 1,
                          6, 6,
                          1, 1, 1,
                          3, 3, 3, 3,
                          7, 7,
                          ])
    segments=[[0,1],
              [1,2],
              [2,3],
              [4,5],
              [5,6],
              [6,7],
              [7,8],
              [8,9],
              [9,10],
              [10,11],
              [11,0],
              [2,5],
              [1,10],
              [6,9],
              [3,12],
              [13,4],
              ]
    segmentFlags=np.array([1, 1,
                           6, 6,
                           1, 1,
                           2, 3, 3, 3, 4,
                           1,
                           5, 5,
                           7, 7,
                           ])
    regions = [ [ 0.90*x1 , 0.10*tank_dim[1] ],
                [ 0.90*x2 , 0.90*tank_dim[1] ],
                [ xc1 , 0.50*hs ],
                [ 0.95*x7 , 0.95*tank_dim[1] ] ]
    regionFlags=np.array([1, 2, 3, 4])

tank = st.CustomShape(domain, vertices=vertices, vertexFlags=vertexFlags,
                      segments=segments, segmentFlags=segmentFlags,
                      regions=regions, regionFlags=regionFlags,
                      boundaryTags=boundaryTags, boundaryOrientations=boundaryOrientations)
################################################################################
# POROUS MEDIA: drag coefficients for the rubble mound, plus soil springs.
# Alpha/Beta follow the selected resistance law (Shih / Ergun / Engelund).
################################################################################
porosity=opts.porosity
voidFrac=1.0-porosity
d50=opts.d50
if d50==None:
    d15=opts.d15
else:
    d15=d50/1.2  # empirical d15 estimate from the mean diameter

#----- SHIH
if opts.Resistance=='Shih':
    term1=3.12*(10**-3.)
    term2=(gAbs/(nu_0**2.))**(2./3.)
    term3=(d15**2.)
    Alpha1=1684+term1*term2*term3 #Shih
    Alpha=Alpha1*nu_0*(voidFrac**2)/((porosity**3)*(d15**2))

    term1=-5.10*(10**-3.)
    term2=(gAbs/(nu_0**2.))**(1./3.)
    term3=(d15)
    Beta1=1.72+1.57*exp(term1*term2*term3) #Shih
    Beta=Beta1*voidFrac/((porosity**3)*d15)

#----- ERGUN
if opts.Resistance=='Ergun':
    Alpha1=150 #Ergun
    Beta1=1.75 #Ergun
    Alpha=Alpha1*nu_0*(voidFrac**2)/((porosity**3)*(d15**2))
    Beta=Beta1*voidFrac/((porosity**3)*d15)

#----- ENGELUND
if opts.Resistance=='Engelund':
    Alpha1=360 #Ergun
    Beta1=3.6 #Ergun
    Alpha=Alpha1*nu_0*(voidFrac**3)/((porosity**2)*(d15**2))
    Beta=Beta1*voidFrac/((porosity**3)*d15)

#Proteus scale in viscosity, so i need to divide alpha and beta by nu_0
dragAlpha=(porosity**2)*Alpha/nu_0
dragBeta=(porosity**3)*Beta/nu_0

#----- Spring setup (soil-foundation stiffness/damping for the caisson)
springs=opts.springs
Kx = opts.Kx
Ky = opts.Ky
Krot = opts.Krot
Cx = opts.Cx
Cy = opts.Cy
Crot = opts.Crot

if opts.caisson2D:
    caisson2D.setSprings(springs, Kx, Ky, Krot, Cx, Cy, Crot)
################################################################################
# ----- BOUNDARY CONDITIONS ----- #
# Atmosphere on top, wave inlet on the left, free slip on bottom/right.
################################################################################
if opts.caisson2D:
    # Caisson boundaries
    for bc in caisson.BC_list:
        if opts.caissonBC == 'FreeSlip':
            bc.setFreeSlip()
        if opts.caissonBC == 'NoSlip':
            bc.setNoSlip()

# Tank Boundaries
tank.BC['y+'].setAtmosphere()
tank.BC['x-'].setUnsteadyTwoPhaseVelocityInlet(wave=waveinput, vert_axis=1, smoothing=3.0*he)
tank.BC['y-'].setFreeSlip()
tank.BC['x+'].setFreeSlip()
tank.BC['sponge'].setNonMaterial()

if opts.caisson2D:
    # Porous media boundaries (interior interfaces, no special condition)
    tank.BC['porousLayer'].reset()
    tank.BC['moving_porousLayer'].reset()

# Moving Mesh Options: outer boundaries are fixed; only the vertical mesh
# displacement on the moving porous layer is left free to follow the body.
if opts.movingDomain==True:
    for tb in [tank.BC['x+'], tank.BC['x-'], tank.BC['y+'], tank.BC['y-'], tank.BC['sponge'], tank.BC['porousLayer']]:
        tb.hx_dirichlet.uOfXT= lambda x, t: 0.0
        tb.hy_dirichlet.uOfXT= lambda x, t: 0.0
        tb.hz_dirichlet.uOfXT= lambda x, t: 0.0
        tb.u_stress.uOfXT=None
        tb.v_stress.uOfXT=None
        tb.w_stress.uOfXT=None
    ms=tank.BC['moving_porousLayer']
    ms.hx_dirichlet.uOfXT= None
    ms.hy_dirichlet.uOfXT= None
    ms.hz_dirichlet.uOfXT= lambda x, t: 0.0
    ms.u_stress.uOfXT=None
    ms.v_stress.uOfXT=None
    ms.w_stress.uOfXT=None
################################################################################
# ----- GENERATION ZONE & ABSORPTION ZONE ----- #
################################################################################
# Waves and Generation zone
if opts.GenZone and opts.wave:
    tank.setGenerationZones(flags=1, epsFact_solid=float(L_leftSpo/2.),
                            orientation=[1., 0.], center=(float(L_leftSpo/2.), 0., 0.),
                            waves=waveinput, smoothing=3.0*he, dragAlpha=10.*omega/nu_0)
# Only Generation zone (no waves: left zone acts as an absorber)
elif opts.GenZone:
    tank.setAbsorptionZones(flags=1, epsFact_solid=float(L_leftSpo/2.),
                            orientation=[1., 0.], center=(float(L_leftSpo/2.), 0., 0.),
                            dragAlpha=10.*omega/nu_0)

# Porous zone (the rubble mound region)
if opts.porousMedia:
    tank.setPorousZones(flags=3,
                        dragAlpha=dragAlpha, dragBeta=dragBeta,
                        porosity=porosity,)

# Absorption zone (region flag differs with/without the caisson layout)
if opts.AbsZone:
    if opts.caisson2D:
        tank.setAbsorptionZones(flags=4, epsFact_solid=float(L_rightSpo/2.),
                                orientation=[-1., 0.], center=(float(tank_dim[0]-L_rightSpo/2.), 0., 0.),
                                dragAlpha=10.*omega/nu_0)
    else:
        tank.setAbsorptionZones(flags=3, epsFact_solid=float(L_rightSpo/2.),
                                orientation=[-1., 0.], center=(float(tank_dim[0]-L_rightSpo/2.), 0., 0.),
                                dragAlpha=10.*omega/nu_0)
################################################################################
# ----- Output Gauges: pressure/level-set probes along the tank ----- #
################################################################################
T = opts.duration
gauge_dx=0.25
tank_dim_x=int(tank_dim[0])
nprobes=int(tank_dim_x/gauge_dx)+1
probes=np.linspace(0., tank_dim_x, nprobes)
PG=[]
if opts.caisson2D:
    zProbes=hs*0.5
else:
    zProbes=opts.water_level*0.5
for i in probes:
    PG.append((i, zProbes, 0.),)

if opts.caisson2D:
    # Loading gauges along the caisson front (LG1) and back (LG2) faces,
    # nudged off the faces by ``tol`` to avoid floating point error.
    gauge_dy=0.01
    tol=np.array([1*(10**-5),1*(10**-5),0.])
    i_point_f=np.array([caisson.vertices[0][0],caisson.vertices[0][1],0.])
    i_point_f += -tol #to avoid floating point error
    i_point_b=np.array([caisson.vertices[1][0],caisson.vertices[1][1],0.])
    i_point_b += tol #to avoid floating point error
    yProbes = np.linspace(i_point_f[1],i_point_f[1]+dimy, int(dimy/gauge_dy)+1)
    LG1=[]
    LG2=[]
    for j in yProbes:
        LG1.append((i_point_f[0],j,0.),)
        LG2.append((i_point_b[0],j,0.),)

#point_output=ga.PointGauges(gauges=((('p'),PG),
#                                    ),
#                            activeTime = (0., T),
#                            sampleRate=0.,
#                            fileName='point_gauges.csv')

#loadingsGauges=ga.PointGauges(gauges=((('p'),LG1),
#                                      (('p'),LG2),
#                                      ),
#                              activeTime = (0., T),
#                              sampleRate=0.,
#                              fileName='loadingsGauges.csv')

levelset_output=ga.PointGauges(gauges=((('phi',),PG),
                                       ),
                               activeTime = (0., T),
                               sampleRate=0.,
                               fileName='levelset_gauges.csv')
################################################################################
# Numerical Options and other parameters
################################################################################
he = he  # NOTE: no-op kept from original; mesh size was set above
domain.MeshOptions.he = he

from math import *
from proteus import MeshTools, AuxiliaryVariables
import numpy
import proteus.MeshTools
from proteus import Domain
from proteus.Profiling import logEvent
from proteus.default_n import *
from proteus.ctransportCoefficients import smoothedHeaviside
from proteus.ctransportCoefficients import smoothedHeaviside_integral

st.assembleDomain(domain)

#----------------------------------------------------
# Time stepping and velocity
#----------------------------------------------------
weak_bc_penalty_constant = 10.0/nu_0 #100
dt_fixed = 1
dt_init = min(0.1*dt_fixed,0.001)
T = T  # NOTE: no-op; T was set from opts.duration in the gauges section
nDTout= int(round(T/dt_fixed))
runCFL = opts.cfl

#----------------------------------------------------
# Discretization -- input options
#----------------------------------------------------
checkMass=False
applyCorrection=True
applyRedistancing=True
freezeLevelSet=opts.freezeLevelSet
useOnlyVF = False # if TRUE proteus uses only these modules --> twp_navier_stokes_p + twp_navier_stokes_n
                  #                                             vof_p + vof_n
movingDomain=opts.movingDomain
useRANS = 0 # 0 -- None
            # 1 -- K-Epsilon
            # 2 -- K-Omega, 1998
            # 3 -- K-Omega, 1988
genMesh=True

# By DEFAULT on the other files.py --> fullNewtonFlag = True
#                                      multilevelNonlinearSolver & levelNonlinearSolver == NonlinearSolvers.Newton
useOldPETSc=False # if TRUE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.PETSc
                  # if FALSE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.KSP_petsc4py
useSuperlu = False #if TRUE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.LU

spaceOrder = 1
useHex = False # used for discretization, if 1.0 --> CubeGaussQuadrature
               # ELSE --> SimplexGaussQuadrature
useRBLES = 0.0 # multiplied with subGridError
useMetrics = 1.0 # if 1.0 --> use of user's parameters as (ns_shockCapturingFactor, ns_lag_shockCapturing, ecc ...)
useVF = opts.useVF # used in the smoothing functions as (1.0-useVF)*smoothedHeaviside(eps_rho,phi) + useVF*fmin(1.0,fmax(0.0,vf))
# Input checks: abort on unsupported discretization settings.
# BUG FIX: the original messages concatenated str with int/float
# ("INVALID: spaceOrder" + spaceOrder), which raises TypeError at runtime
# instead of printing the diagnostic; format the value explicitly.
if spaceOrder not in [1,2]:
    print("INVALID: spaceOrder {}".format(spaceOrder))
    sys.exit()

if useRBLES not in [0.0, 1.0]:
    print("INVALID: useRBLES {}".format(useRBLES))
    sys.exit()

if useMetrics not in [0.0, 1.0]:
    print("INVALID: useMetrics")
    sys.exit()
# Discretization: choose FE basis and quadrature by space order and cell type.
nd = 2
if spaceOrder == 1:
    hFactor=1.0
    if useHex:
        basis=C0_AffineLinearOnCubeWithNodalBasis
        elementQuadrature = CubeGaussQuadrature(nd,3)
        elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,3)
    else:
        basis=C0_AffineLinearOnSimplexWithNodalBasis
        elementQuadrature = SimplexGaussQuadrature(nd,3)
        elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,3)
        #elementBoundaryQuadrature = SimplexLobattoQuadrature(nd-1,1)
elif spaceOrder == 2:
    hFactor=0.5
    if useHex:
        basis=C0_AffineLagrangeOnCubeWithNodalBasis
        elementQuadrature = CubeGaussQuadrature(nd,4)
        elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,4)
    else:
        basis=C0_AffineQuadraticOnSimplexWithNodalBasis
        elementQuadrature = SimplexGaussQuadrature(nd,4)
        elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,4)
# Numerical parameters: shock-capturing / stabilization settings; the
# ``useMetrics`` branch uses the tuned values, the else branch defaults.
ns_forceStrongDirichlet = False
backgroundDiffusionFactor=0.01
if useMetrics:
    ns_shockCapturingFactor = 0.5 # magnifies numerical viscosity in NS (smoothening velocity fields)
    ns_lag_shockCapturing = True # lagging numerical viscosity speedsup Newton but destabilzes the solution
    ns_lag_subgridError = True # less nonlinear but less stable
    ls_shockCapturingFactor = 0.5 # numerical diffusion of level set (smoothening phi)
    ls_lag_shockCapturing = True # less nonlinear but less stable
    ls_sc_uref = 1.0 # reference gradient in numerical solution (higher=more diffusion)
    ls_sc_beta = 1.5 # 1 is fully nonlinear, 2 is linear
    vof_shockCapturingFactor = 0.5 # numerical diffusion of level set (smoothening volume of fraction)
    vof_lag_shockCapturing = True # less nonlinear but less stable
    vof_sc_uref = 1.0
    vof_sc_beta = 1.5
    rd_shockCapturingFactor = 0.5
    rd_lag_shockCapturing = False
    epsFact_density = 3.0 # control width of water/air transition zone
    epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = ecH = epsFact_density
    epsFact_redistance = 0.33
    epsFact_consrv_diffusion = 1.0 # affects smoothing diffusion in mass conservation
    redist_Newton = True
    kappa_shockCapturingFactor = 0.5
    kappa_lag_shockCapturing = True # False
    kappa_sc_uref = 1.0
    kappa_sc_beta = 1.5
    dissipation_shockCapturingFactor = 0.5
    dissipation_lag_shockCapturing = True # False
    dissipation_sc_uref = 1.0
    dissipation_sc_beta = 1.5
else:
    ns_shockCapturingFactor = 0.9
    ns_lag_shockCapturing = True
    ns_lag_subgridError = True
    ls_shockCapturingFactor = 0.9
    ls_lag_shockCapturing = True
    ls_sc_uref = 1.0
    ls_sc_beta = 1.0
    vof_shockCapturingFactor = 0.9
    vof_lag_shockCapturing = True
    vof_sc_uref = 1.0
    vof_sc_beta = 1.0
    rd_shockCapturingFactor = 0.9
    rd_lag_shockCapturing = False
    epsFact_density = 1.5
    epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
    epsFact_redistance = 0.33
    epsFact_consrv_diffusion = 10.0
    redist_Newton = False
    kappa_shockCapturingFactor = 0.9
    kappa_lag_shockCapturing = True#False
    kappa_sc_uref = 1.0
    kappa_sc_beta = 1.0
    dissipation_shockCapturingFactor = 0.9
    dissipation_lag_shockCapturing = True#False
    dissipation_sc_uref = 1.0
    dissipation_sc_beta = 1.0

# Nonlinear-solver absolute tolerances, scaled by the mesh size ``he``.
ns_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
vof_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
ls_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
mcorr_nl_atol_res = max(1.0e-12,0.0001*domain.MeshOptions.he**2)
rd_nl_atol_res = max(1.0e-12,0.01*domain.MeshOptions.he)
kappa_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
dissipation_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
mesh_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
# Turbulence closure model selection:
# 0 -- none, 1 -- classic Smagorinsky, 2 -- dynamic Smagorinsky,
# 3 -- k-epsilon, 4 -- k-omega
ns_closure = 0
if useRANS == 1:
    ns_closure = 3
elif useRANS >= 2:
    # BUG FIX: this was ``ns_closure == 4`` (a no-op comparison), so the
    # k-omega closure was never actually selected for useRANS >= 2.
    ns_closure = 4
# Initial condition: free surface at ``waterLevel``; ``waterLine_x`` is placed
# beyond the tank so the whole domain starts as a horizontal water column.
waterLine_x = 2*tank_dim[0]
waterLine_z = waterLevel

def waveHeight(x, t):
    """Instantaneous free-surface elevation (mwl + wave eta) at point x, time t.

    NOTE(review): this function shadows the scalar ``waveHeight`` defined
    earlier in the script; the scalar is unreachable after this point.
    """
    waterDepth = waveinput.eta(x, t) + waveinput.mwl
    return waterDepth

def wavePhi(x, t):
    """Signed distance from the wave free surface (positive above it)."""
    # BUG FIX: the original body was ``[nd-1]- waveHeight(x,t)`` (a bare
    # expression with no ``return x``), so the function returned None and
    # broke waveVF below.
    return x[nd-1] - waveHeight(x, t)

def waveVF(x, t):
    """Smoothed Heaviside of the free-surface distance (air volume fraction)."""
    return smoothedHeaviside(epsFact_consrv_heaviside*he, wavePhi(x, t))

def signedDistance(x):
    """Signed distance to the initial free surface, used to seed the level set."""
    phi_x = x[0]-waterLine_x
    phi_z = x[nd-1]-waterLine_z
    if phi_x < 0.0:
        if phi_z < 0.0:
            return max(phi_x, phi_z)
        else:
            return phi_z
    else:
        if phi_z < 0.0:
            return phi_x
        else:
            return sqrt(phi_x**2 + phi_z**2)
| 1.796875 | 2 |
src/wintools/s3.py | yukkun007/wintools | 0 | 12761150 | <filename>src/wintools/s3.py
import os
import sys
import logging
from typing import Tuple
import pyperclip
import shutil
import subprocess
from boto3.session import Session
from botocore.errorfactory import ClientError
from wintools.log import set_logger
# Module-level logger, configured via the project's shared log setup.
logger = logging.getLogger(__name__)
set_logger(logger)
def download_from_s3(save_path: str, s3_uri: str = "", aws_profile: str = "default") -> None:
    """Download an S3 object (or S3 'directory') into *save_path* and open it.

    When *s3_uri* is empty the URI is read from the clipboard. The transfer is
    delegated to the AWS CLI (``aws s3 cp``); on success the downloaded file is
    opened with the Windows ``call`` command.

    :param save_path: local directory to download into (created if missing)
    :param s3_uri: source ``s3://`` URI; taken from the clipboard when empty
    :param aws_profile: AWS CLI profile name to use
    """
    if not os.path.isdir(save_path):
        if os.path.exists(save_path):
            # save_path exists but is a file -> cannot download into it
            logger.error("download path is not directory. path={}".format(save_path))
            sys.exit(1)
        # BUG FIX: os.mkdir failed for nested paths and raced with a separate
        # existence check; makedirs handles intermediate directories.
        os.makedirs(save_path)

    # Fall back to the clipboard when no URI was given.
    if s3_uri == "":
        s3_uri = pyperclip.paste()

    if not __is_s3_key_exists(s3_uri, aws_profile):
        # abort if the S3 key does not exist
        sys.exit(1)

    logger.info("{} -----> {}".format(s3_uri, save_path))

    # NOTE(review): the command is built by string interpolation and run via
    # the shell; paths/URIs containing spaces or shell metacharacters will
    # break or be interpreted by the shell. Consider subprocess.run([...]).
    if s3_uri.endswith("/"):
        cmd = "aws s3 cp {} {} --recursive --profile {}".format(s3_uri, save_path, aws_profile)
    else:
        cmd = "aws s3 cp {} {} --profile {}".format(s3_uri, save_path, aws_profile)
    logger.info(cmd)
    os.system(cmd)

    download_path = os.path.join(save_path, os.path.basename(s3_uri))
    logger.info("download_path: " + download_path)
    if os.path.exists(download_path):
        logger.info("----------------------")
        logger.info("SUCCESS")
        logger.info("----------------------")
        # Open the downloaded file with the Windows shell.
        subprocess.Popen("call " + download_path, shell=True)
    else:
        logger.error("!!!!!!!!!!!!!!!!!!!!!!")
        logger.error("ERROR")
        logger.error("!!!!!!!!!!!!!!!!!!!!!!")
def upload_to_s3(target_path: str, s3_uri: str, aws_profile: str = "default") -> None:
    """Upload a file (or a zipped directory) to S3 via the AWS CLI.

    Directories are zipped first. On success the resulting S3 URI is copied
    to the clipboard.

    :param target_path: local file or directory to upload
    :param s3_uri: destination bucket/prefix (``s3://`` and trailing ``/`` added if missing)
    :param aws_profile: AWS CLI profile name to use
    """
    if not os.path.exists(target_path):
        logger.error("target path is not found. path={}".format(target_path))
        sys.exit(1)

    # Directories are archived before upload.
    if os.path.isdir(target_path):
        logger.info("target is dir. zip start..")
        shutil.make_archive(target_path, "zip", root_dir=target_path)
        target_path = target_path + ".zip"

    # Normalize the destination into a full "s3://.../" prefix.
    if not s3_uri.startswith("s3://"):
        s3_uri = "s3://" + s3_uri
    if not s3_uri.endswith("/"):
        s3_uri = s3_uri + "/"

    filename = os.path.basename(target_path)
    dest_dir = s3_uri
    dest_path = s3_uri + filename
    logger.info("{} -----> {}".format(target_path, dest_path))

    # Upload via the AWS CLI.
    cmd = "aws s3 cp {} {} --profile {}".format(target_path, dest_dir, aws_profile)
    logger.info(cmd)
    os.system(cmd)

    # On success, copy the uploaded object's URI to the clipboard.
    if __is_s3_key_exists(dest_path, aws_profile):
        logger.info("----------------------")
        logger.info("SUCCESS")
        logger.info("----------------------")
        logger.info("{} ( copied to clipboad. )".format(dest_path))
        logger.info("----------------------")
        pyperclip.copy(dest_path)
    else:
        logger.error("!!!!!!!!!!!!!!!!!!!!!!")
        logger.error("ERROR")
        logger.error("!!!!!!!!!!!!!!!!!!!!!!")
def __is_s3_key_exists(s3_uri: str, aws_profile: str = "default") -> bool:
    """Return True when *s3_uri* points to an existing S3 object.

    S3 'directories' must end with ``/`` to be found via ``head_object``.

    :param s3_uri: full S3 URI or ``bucket/key`` string
    :param aws_profile: AWS profile name for the boto3 session
    :return: True if the object exists, False on a ClientError (e.g. 404)
    """
    # BUG FIX: bucket/key were previously assigned inside the try block after
    # calls that can raise, so the except handler could reference unbound
    # names; parse the URI first.
    bucket, key = sepalate_s3_key(s3_uri)
    try:
        session = Session(profile_name=aws_profile)
        s3 = session.client("s3")
        s3.head_object(Bucket=bucket, Key=key)
        logger.info("specified s3 uri is exists. uri=s3://{}/{}".format(bucket, key))
        return True
    except ClientError:
        logger.error("specified s3 uri is not exists! uri=s3://{}/{}".format(bucket, key))
        logger.error("s3 'directory' must end with '/'")
        return False
def sepalate_s3_key(s3_key: str) -> Tuple[str, str]:
    """Split an S3 key into (bucket, path).

    A leading ``s3://`` scheme is stripped first.

    :param s3_key: S3 key, optionally prefixed with ``s3://``
    :return: tuple of (bucket name, object path); the path is empty when the
        key names only a bucket
    """
    s3_key = s3_key.replace("s3://", "")
    # BUG FIX: ``split("/", 1)[1]`` raised IndexError for keys without any
    # "/"; partition() is robust and yields an empty path in that case.
    bucket, _, path = s3_key.partition("/")
    return (bucket, path)
| 2.234375 | 2 |