content stringlengths 22 815k | id int64 0 4.91M |
|---|---|
def fetch_last_posts(conn) -> list:
    """Return the ids of all posts already tooted, read from the database.

    :param conn: an open DB-API connection with a ``posts(postid)`` table
    :return: list of post ids
    """
    cursor = conn.cursor()
    cursor.execute("select postid from posts")
    return [row[0] for row in cursor.fetchall()]
def update_click_map(selectedData, date, hoverData, inputData):
    """
    Click to select an airport to find the detail information.

    Builds a US scatter-geo map of all airports; when an airport is chosen
    (via the text input, a click-selection, or hover) the reachable
    destination airports are highlighted and connected with red lines.

    :param selectedData: click/selection payload from the map (Dash callback)
    :param date: date string; falsy means "overview over all dates"
    :param hoverData: hover payload from the map (Dash callback)
    :param inputData: airport IATA code chosen through the input widget
    :return: a plotly figure for the map
    """
    # 0 acts as a sentinel for "no date selected" in the filters below.
    timestamp = pd.to_datetime(date) if date else 0
    # Base figure: all airports from the module-level airports_info frame.
    fig = px.scatter_geo(
        airports_info,
        scope="usa",
        lat=airports_info["LATITUDE"],
        lon=airports_info["LONGITUDE"],
        hover_name=airports_info["IATA_CODE"],
        color="COLOR_MAP",
        color_discrete_map="identity"
    )
    fig.update_layout(hovermode="closest",
                      margin=dict(l=5, r=0, t=20, b=20),
                      clickmode="event+select",
                      template='ggplot2')
    if inputData:
        # Airport chosen explicitly through the input box.
        origin_lon = location_dic[inputData]['lon']
        origin_lat = location_dic[inputData]['lat']
        airport = inputData
        # Per-day routes when a date is picked, otherwise the overview table.
        infos = airports[(airports["ORIGIN_AIRPORT"] == airport) & (airports["DATE"] == timestamp)] if timestamp != 0 \
            else overview_destination[overview_destination["ORIGIN_AIRPORT"] == airport]
        # First row appears to hold the list of destination codes — TODO confirm schema.
        destinations = infos["DESTINATION_AIRPORT"].tolist()[0] if infos["DESTINATION_AIRPORT"].tolist() else []
        points = airports_info[airports_info["IATA_CODE"].isin(destinations) | (airports_info["IATA_CODE"] == airport)]
        # NOTE(review): assigns into a slice of airports_info (pandas
        # SettingWithCopy warning territory) — confirm this is intentional.
        points["COLOR_MAP"] = "#525252"
        # NOTE(review): the data frame passed is airports_info while
        # lat/lon/hover come from the filtered `points` — verify alignment.
        fig = px.scatter_geo(
            airports_info,
            scope="usa",
            lat=points["LATITUDE"],
            lon=points["LONGITUDE"],
            hover_name=points["IATA_CODE"],
            hover_data=None,
            color=points["COLOR_MAP"],
            color_discrete_map="identity"
        )
        fig.update_layout(clickmode="event+select",
                          margin=dict(l=0, r=0, t=20, b=20),
                          template="ggplot2")
        # Draw a red line from the origin to each reachable destination.
        for des in destinations:
            fig.add_trace(
                go.Scattergeo(
                    lon=[origin_lon, location_dic[des]["lon"]],
                    lat=[origin_lat, location_dic[des]["lat"]],
                    mode="lines",
                    line=dict(width=1, color='#cb181d'),
                    marker=dict(color='#cb181d'),
                    hoverinfo="skip",
                    showlegend=False
                )
            )
        return fig
    # NOTE(review): this branch is unreachable — the block above already
    # returns whenever inputData is truthy; confirm the intended condition
    # (perhaps `selectedData and not inputData`).
    if selectedData and inputData:
        point_dict = selectedData["points"][0]
        origin_lon = point_dict['lon']
        origin_lat = point_dict['lat']
        airport = point_dict['hovertext']
        infos = airports[(airports["ORIGIN_AIRPORT"] == airport) & (airports["DATE"] == timestamp)] if timestamp != 0 \
            else overview_destination[overview_destination["ORIGIN_AIRPORT"] == airport]
        destinations = infos["DESTINATION_AIRPORT"].tolist()[0] if infos["DESTINATION_AIRPORT"].tolist() else []
        points = airports_info[airports_info["IATA_CODE"].isin(destinations) | (airports_info["IATA_CODE"] == airport)]
        points["COLOR_MAP"] = "#525252"
        fig = px.scatter_geo(
            airports_info,
            scope="usa",
            lat=points["LATITUDE"],
            lon=points["LONGITUDE"],
            hover_name=points["IATA_CODE"],
            hover_data=None,
            color=points["COLOR_MAP"],
            color_discrete_map="identity"
        )
        fig.update_layout(clickmode="event+select")
        fig.update_layout(
            margin=dict(l=0, r=0, t=20, b=20),
            template="ggplot2"
        )
        for des in destinations:
            fig.add_trace(
                go.Scattergeo(
                    lon=[origin_lon, location_dic[des]["lon"]],
                    lat=[origin_lat, location_dic[des]["lat"]],
                    mode="lines",
                    line=dict(width=1, color='#cb181d'),
                    marker=dict(color='#cb181d'),
                    hoverinfo="skip",
                    showlegend=False
                )
            )
        return fig
    # On hover, show the airports reachable from the hovered point.
    elif hoverData:
        point_dict = hoverData["points"][0]
        origin_lon = point_dict['lon']
        origin_lat = point_dict['lat']
        airport = point_dict['hovertext']
        infos = airports[(airports["ORIGIN_AIRPORT"] == airport) & (airports["DATE"] == timestamp)] if timestamp != 0 \
            else overview_destination[overview_destination["ORIGIN_AIRPORT"] == airport]
        destinations = infos["DESTINATION_AIRPORT"].tolist()[0] if infos["DESTINATION_AIRPORT"].tolist() else []
        # Add lines on top of the base figure (no re-coloring on hover).
        for des in destinations:
            fig.add_trace(
                go.Scattergeo(
                    lon=[origin_lon, location_dic[des]["lon"]],
                    lat=[origin_lat, location_dic[des]["lat"]],
                    mode="lines",
                    line=dict(width=1, color='#cb181d'),
                    hoverinfo="skip",
                    showlegend=False
                )
            )
        return fig
    else:
        # Nothing selected/hovered: return the plain base map.
        return fig
def configure_blueprints(app: Flask):
    """
    Configure blueprints.

    Registers the project's blueprint on the Flask application.
    NOTE: this is a cookiecutter template file; ``{{cookiecutter.blueprint_name}}``
    is substituted with the real blueprint object name when the project is
    generated, so this file is not valid Python until then.
    """
    app.register_blueprint({{cookiecutter.blueprint_name}})
def EncoderText(model_name, vocab_size, word_dim, embed_size, num_layers, use_bi_gru=False, text_norm=True, dropout=0.0):
    """Factory for text encoders that work with precomputed image features.

    Dispatches on *model_name* (case-insensitive) to the matching encoder
    class and instantiates it with the remaining arguments.

    Raises:
        ValueError: if *model_name* is not a supported model.
    """
    encoders = {
        'scan': EncoderTextRegion,
        'vsepp': EncoderTextGlobal,
        'sgraf': EncoderTextRegion,
        'imram': EncoderTextRegion,
    }
    key = model_name.lower()
    if key not in encoders:
        raise ValueError("Unknown model: {}".format(key))
    return encoders[key](vocab_size, word_dim, embed_size, num_layers, use_bi_gru, text_norm, dropout)
def feat_extract(pretrained=False, **kwargs):
    """Constructs a ResNet-Mini-Imagenet model.

    Keyword Args:
        opts: options object whose ``logger`` attribute is used for logging
            and whose ``dataset.name`` selects the dropblock size.
        structure (str): backbone to build (resnet40/19/12/52/34 or shallow).
        in_c (int): number of input channels for the ResNet variants.

    Returns:
        The constructed model, optionally loaded with pretrained weights.

    Raises:
        NameError: if ``structure`` is not a known backbone.
        KeyError: if ``pretrained`` is requested for a structure that has no
            registered checkpoint URL (most of the custom variants).
    """
    model_urls = {
        'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
        'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
        'resnet52': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
        'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
        'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    }
    logger = kwargs['opts'].logger
    structure = kwargs['structure']
    # resnet"x", x = 1 + sum(layers)x3
    if structure == 'resnet40':
        model = ResNet(Bottleneck, [3, 4, 6], kwargs['in_c'])
    elif structure == 'resnet19':
        model = ResNet(Bottleneck, [2, 2, 2], kwargs['in_c'])
    elif structure == 'resnet12':
        # Larger drop-blocks for the higher-resolution imagenet-style inputs.
        dropblock_size = 5 if 'imagenet' in kwargs['opts'].dataset.name else 2
        model = resnet12(avg_pool=False, drop_rate=0.1, dropblock_size=dropblock_size)
    elif structure == 'resnet52':
        model = ResNet(Bottleneck, [4, 8, 5], kwargs['in_c'])
    elif structure == 'resnet34':
        model = ResNet(Bottleneck, [3, 4, 4], kwargs['in_c'])
    elif structure == 'shallow':
        model = CNNEncoder(kwargs['in_c'])
    else:
        raise NameError('structure not known {} ...'.format(structure))
    if pretrained:
        # Fail with a clear message instead of a bare KeyError: several of
        # the structures above have no entry in model_urls.
        if structure not in model_urls:
            raise KeyError('no pretrained weights registered for structure {:s}'.format(structure))
        # Fixed typo in log message ("webiste" -> "website").
        logger('Using pre-trained model from pytorch official website, {:s}'.format(structure))
        model.load_state_dict(model_zoo.load_url(model_urls[structure]), strict=False)
    return model
def compose_all(
    mirror: Union[str, Path],
    branch_pattern: str = "android-*",
    work_dir: Optional[Path] = None,
    force: bool = False,
) -> Path:
    """Iterate over AOSP branches matching the pattern and build source maps.

    For every branch matching *branch_pattern* this lists the branch, does a
    partial checkout, parses the Soong files and stores the result.

    :param mirror: Path/link to a mirror directory or an URL.
    :param branch_pattern: Optional. Pattern to filter branches.
    :param work_dir: Optional. Work directory (a temp dir is created if None).
    :param force: Optional. Overrides results.
    :return: The path to the work directory.
    """
    matching_branches = fnmatch.filter(get_all_branches(mirror), branch_pattern)
    if work_dir is None:
        work_dir = Path(tempfile.mkdtemp(prefix="bgraph_"))
    logger.info("Found %d branches", len(matching_branches))
    for branch in matching_branches:
        compose_manifest_branch(branch, mirror, work_dir, force)
    logger.info("Finished")
    return work_dir
def rearrange_digits(input_list):
    """
    Rearrange array elements to form two numbers such that their sum is maximum.

    The digits are sorted ascending and dealt alternately to the two numbers,
    so the largest digits land in the highest place values. Uses the stdlib
    ``sorted`` instead of the external ``heap_sort`` helper, and no longer
    mutates the caller's list in place.

    Args:
        input_list(list): Input list of single digits
    Returns:
        (int),(int): Two numbers whose sum is maximal
    """
    digits = sorted(input_list)
    n = len(digits)
    n1 = 0
    place = 1
    for i in range(0, n, 2):
        n1 += digits[i] * place
        place *= 10
    n2 = 0
    place = 1
    for i in range(1, n, 2):
        n2 += digits[i] * place
        place *= 10
    return n1, n2
def _partial_ema_scov_update(s:dict, x:[float], r:float=None, target=None):
    """ Update recency weighted estimate of scov-like matrix by treating quadrants individually

    :param s: state dict holding one sub-state per entry of QUADRANTS, an
        'sma' sub-state, 'n_dim', and derived 'scov'/'mean'/'n_samples' keys.
    :param x: new observation vector of length s['n_dim']
    :param r: recency weight forwarded to the per-quadrant EMA updates
    :param target: center used for the quadrant split; when None, falls back
        to s['target'] and then to the running mean.
    :return: the mutated state dict s
    """
    assert len(x)==s['n_dim']
    # If target is not supplied we maintain a mean that switches from emp to ema
    if target is None:
        target = s['target']
        if target is None:
            target = s['sma']['mean']
    # Update running partial scatter estimates, one quadrant at a time.
    for q,(w,sgn1,sgn2) in QUADRANTS.items():
        # Morally:
        #     x1 = max(0, (x-target)*sgn1) * sgn1
        #     x2 = (np.max(0, (x-target)*sgn2) * sgn2) if sgn1!=sgn2 else x1
        x1 = (x-target)*sgn1
        x2 = (x-target)*sgn2
        x1[x1<0]=0
        x2[x2<0]=0
        x1 = sgn1*x1
        x2 = sgn2*x2
        s[q] = _ema_scov_update(s[q],x=x1,r=r,target=0, y=x2)
    s['mean'] = np.copy( s['sma']['mean'] )
    s['n_samples'] = s['sma']['n_samples']
    if s['n_samples']>=2:
        # Recombine: the total scatter is the sum of the quadrant scatters.
        s['scov'] = np.zeros(shape=((s['n_dim'],s['n_dim'])))
        for q in QUADRANTS:
            try:
                s['scov'] += s[q]['scov']
            # NOTE(review): bare except silently skips quadrants whose
            # sub-state lacks 'scov' — confirm this is intentional.
            except:
                pass
    else:
        # Too few samples for a scatter estimate; fall back to identity.
        s['scov'] = np.eye(s['n_dim'])
    s['sma'] = sma(s=s['sma'], x=x, r=r)
    return s
def test_scalar_zero(py_c_vec: PyCVec):
    """Check zero behaviour with division ops.

    0 / vec (and //, %) must produce the zero vector for every valid vec,
    while vec / 0 (and //, %, divmod, and the in-place forms) must raise
    a zero-division error for both int and float zero.
    """
    # py_c_vec fixture supplies both the Python and C implementations.
    Vec, Angle, Matrix, parse_vec_str = py_c_vec
    for x, y, z in iter_vec(VALID_NUMS):
        vec = Vec(x, y, z)
        assert_vec(0 / vec, 0, 0, 0)
        assert_vec(0 // vec, 0, 0, 0)
        assert_vec(0 % vec, 0, 0, 0)
        assert_vec(0.0 / vec, 0, 0, 0)
        assert_vec(0.0 // vec, 0, 0, 0)
        assert_vec(0.0 % vec, 0, 0, 0)
        # We don't need to check divmod(0, vec) -
        # that always falls back to % and /.
        with raises_zero_div: vec / 0
        with raises_zero_div: vec // 0
        with raises_zero_div: vec % 0
        with raises_zero_div: divmod(vec, 0)
        with raises_zero_div: vec / 0.0
        with raises_zero_div: vec // 0.0
        with raises_zero_div: vec % 0.0
        with raises_zero_div: divmod(vec, 0.0)
        with raises_zero_div: vec /= 0
        with raises_zero_div: vec //= 0
        with raises_zero_div: vec %= 0
        with raises_zero_div: vec /= 0.0
        with raises_zero_div: vec //= 0.0
        with raises_zero_div: vec %= 0.0
def refresh_rates(config, path="rates.json"):
    """Fetch and save the newest rates
    Arguments:
        config {currency.config} -- Config object (provides ``app_id``)
    Keyword Arguments:
        path {str} -- path or filename of Rates JSON to be saved
                      (default: {"rates.json"})
    Returns:
        dict -- fetched dictionary of rates (the "rates" sub-dict, with a
        synthetic "last_update" entry added)
    Raises:
        AppIDError -- Raised when App ID can not be used
        ApiError -- Raised when API is unreachable or return bad response
        UnknownPythonError -- Raised when Python runtime version can not be
                              correctly detected
    """
    # Two parallel code paths: urllib2 on Python 2, urllib.request on Python 3.
    if sys.version_info.major == 2:
        import urllib2
        try:
            response = urllib2.urlopen(RATE_ENDPOINT.format(config.app_id))
        except urllib2.HTTPError as err:
            # The error body is JSON describing the failure.
            response = _byteify(json.load(err, "utf-8"))
            if err.code == 401:
                raise AppIDError(
                    "Invalid App ID: {}".format(config.app_id), response["description"]
                )
            elif err.code == 429:
                raise AppIDError("Access Restricted", response["description"])
            else:
                raise ApiError("Unexpected Error", response["description"])
        rates = _byteify(json.load(response, "utf-8"))
    elif sys.version_info.major == 3:
        from urllib import error, request
        try:
            response = request.urlopen(RATE_ENDPOINT.format(config.app_id))
        except error.HTTPError as err:
            response = json.load(err)
            if err.code == 401:
                raise AppIDError(
                    "Invalid App ID: {}".format(config.app_id), response["description"]
                )
            elif err.code == 429:
                raise AppIDError("Access Restricted", response["description"])
            else:
                raise ApiError("Unexpected Error", response["description"])
        rates = json.load(response)
    else:
        raise UnknownPythonError("Unexpected Python Version", sys.version_info)
    # Persist the raw payload to disk before annotating the returned dict.
    with open(path, "w+") as file:
        json.dump(rates, file)
    rates["rates"]["last_update"] = "Now"
    return rates["rates"]
def add_all_files(root_dir: str = 'data/') -> List[Path]:
    """Recursively iterates over a directory, stores parsed pins in the
    database, and returns all files with paths.

    Args:
        root_dir: the starting directory to search from
    Returns:
        List of all files found, with paths relative to the root_dir
    """
    root_path = Path(root_dir)
    # Collect both supported file types (.csv and .txt) in one list.
    files = list(root_path.glob('**/*.csv')) + list(root_path.glob('**/*.txt'))
    manager = database.DbManager()
    session = manager.open_session()
    total_pins = 0
    print("Parsing files...")
    try:
        for f in tqdm(files):
            pins = file_to_pins(f)
            total_pins += len(pins)
            for p in pins:
                session.add(p)
        print(f"Added {total_pins} pins.")
        print("Commiting to database...")
        session.commit()
    finally:
        # Always release the session, even if parsing or commit fails.
        session.close()
    print("Done!")
    # Bug fix: the function was annotated -> List[Path] but returned None.
    return files
def migrate_component_indicators_data(apps, schema_editor):
    """
    Data migration: copy each Component's single ``indicator`` foreign key
    into the new many-to-many ``indicators`` relation.
    """
    Component = apps.get_model('goals', 'Component')
    components = Component.objects.select_related('indicator').iterator()
    for comp in components:
        comp.indicators.add(comp.indicator)
def _get_chrome_options():
    """
    Build and return the Chrome options used to launch the browser.
    """
    opts = Options()
    # Standard arguments ('--no-sandbox' intentionally not enabled).
    standard_args = (
        "--disable-infobars",
        '--ignore-certificate-errors',
        '--disable-dev-shm-usage',
        "--start-maximized",
        "--auto-select-desktop-capture-source=Entire screen",
    )
    for arg in standard_args:
        opts.add_argument(arg)
    return opts
def add_document(dbname, colname, doc, url=cc.URL_KRB, krbheaders=cc.KRBHEADERS) :
    """Adds document to database collection and returns its ``_id``
    (or None when the response carries no id).
    """
    endpoint = url + dbname + '/' + colname + '/'
    resp = post(endpoint, headers=krbheaders, json=doc)
    logger.debug('add_document: %s\n to %s/%s resp: %s' % (str(doc), dbname, colname, resp.text))
    return resp.json().get('_id', None)
def legendre(n, monic=0):
    """Returns the nth order Legendre polynomial, P_n(x), orthogonal over
    [-1,1] with weight function 1.

    Parameters
    ----------
    n : int
        Order of the polynomial; must be nonnegative.
    monic : int or bool, optional
        If truthy, normalize the leading coefficient to 1.

    Returns
    -------
    orthopoly1d
        The polynomial, with fast evaluation delegated to ``eval_legendre``.
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # For n == 0 compute the roots of P_1 so p_roots succeeds; the roots
    # themselves are discarded below and only mu0 matters.
    if n==0: n1 = n+1
    else: n1 = n
    x,w,mu0 = p_roots(n1,mu=1)
    if n==0: x,w = [],[]
    # hn: squared norm of P_n; kn: leading coefficient.
    hn = 2.0/(2*n+1)
    kn = _gam(2*n+1)/_gam(n+1)**2 / 2.0**n
    p = orthopoly1d(x,w,hn,kn,wfunc=lambda x: 1.0,limits=(-1,1),monic=monic,
                    eval_func=lambda x: eval_legendre(n,x))
    return p
def cal_sort_key(cal):
    """
    Sort key for the list of calendars: primary calendar first,
    then other selected calendars, then unselected calendars.
    (" " sorts before "X", and tuples are compared piecewise)
    """
    primary_key = " " if cal["primary"] else "X"
    selected_key = " " if cal["selected"] else "X"
    return (primary_key, selected_key, cal["summary"])
def test_env_dtm_formats(fmts, inp, exp):
    """
    If $DTM_FORMATS is set, its value should be added at the beginning of the
    default parseable input formats.

    :param fmts: value assigned to the DTM_FORMATS environment variable
    :param inp: input string parsed with dt()
    :param exp: expected result of calling the parsed object
    """
    pytest.dbgfunc()
    # envset temporarily sets DTM_FORMATS for the duration of the block.
    with tbx.envset(DTM_FORMATS=fmts):
        a = dt(inp)
        assert a() == exp
def get_data_loader():
    """Safely downloads data. Returns training/validation set dataloader."""
    mnist_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307, ), (0.3081, )),
    ])
    # Serialize the download across workers with a file lock: several
    # workers may try to fetch the data, and DataLoader is not threadsafe,
    # so concurrent downloads could overwrite each other.
    with FileLock(os.path.expanduser("~/data.lock")):
        train_set = datasets.MNIST(
            "~/data",
            train=True,
            download=True,
            transform=mnist_transforms)
        train_loader = torch.utils.data.DataLoader(
            train_set,
            batch_size=128,
            shuffle=True)
        test_set = datasets.MNIST("~/data", train=False, transform=mnist_transforms)
        test_loader = torch.utils.data.DataLoader(
            test_set,
            batch_size=128,
            shuffle=True)
    return train_loader, test_loader
def preview_game_num(path='./data/stats.csv'):
    """Return the number of the last (highest-numbered) game played.

    Args:
        path: CSV file containing a "Partida" column; defaults to the
            original hard-coded stats file, so existing callers are unchanged.
    Returns:
        The maximum value found in the "Partida" column.
    """
    df = pd.read_csv(path, encoding="utf8")
    # Series.max() is O(n); the original sorted the whole column just to
    # take the first element.
    return df["Partida"].max()
def ensure_is_dir(d, clear_dir=False):
    """Create directory ``d`` if needed, optionally clearing it first.

    Parameters
    ----------
    d: str
        the directory we want to create
    clear_dir: bool (optional)
        if True, remove the directory (and contents) when it already exists,
        then recreate it empty
    """
    already_there = os.path.exists(d)
    if already_there and clear_dir:
        shutil.rmtree(d)
        already_there = False
    if not already_there:
        os.makedirs(d)
def XCO(
    directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
    load_edge_weights = True, auto_enable_tradeoffs = True,
    sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
    cache_sys_var = "GRAPH_CACHE_DIR", version = "4.46", **kwargs
) -> Graph:
    """Retrieve and return the XCO graph (kgobo repository).

    Parameters
    ----------
    directed = False
    preprocess = "auto"
        Preprocess for optimal load time & memory peak.
        Will preprocess in Linux/macOS but not Windows.
    load_nodes = True
        Load node names or use numeric range
    auto_enable_tradeoffs = True
        Enable when graph has < 50M edges
    cache_path = None
        Path to store graphs
        Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
    cache_sys_var = "GRAPH_CACHE_DIR"
    version = "4.46"
        Version to retrieve
        The available versions are:
            - 4.46
    """
    # Build the retriever first, then invoke it to obtain the Graph.
    retriever = AutomaticallyRetrievedGraph(
        "XCO", version, "kgobo", directed, preprocess, load_nodes,
        load_node_types, load_edge_weights, auto_enable_tradeoffs,
        sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs
    )
    return retriever()
async def test_wikidata_search_does_not_rank_aliases_high_enough(best_match_id, mock_aioresponse):
    """
    Matches on aliases are not ranked high enough by the default search profile.

    Searching 'GER' restricted to type Q6256 is still expected to resolve to
    Q183 (presumably Germany via its alias — confirm against the fixture data).
    """
    assert (
        await best_match_id('GER', typ='Q6256') ==
        'Q183')
def test_setURL_valid_URL_but_no_settings(caplog):
    """Test setURL with a valid URL that carries no settings payload.

    Expects the Node to exit with status code 1 (SystemExit), since the URL
    fragment after '#' is empty.
    """
    iface = MagicMock(autospec=SerialInterface)
    # URL ends with '#' — no encoded settings follow.
    url = "https://www.meshtastic.org/d/#"
    with pytest.raises(SystemExit) as pytest_wrapped_e:
        anode = Node(iface, 'bar', noProto=True)
        anode.radioConfig = 'baz'
        anode.setURL(url)
    assert pytest_wrapped_e.type == SystemExit
    assert pytest_wrapped_e.value.code == 1
def rasterize_layer_by_ref_raster(src_vector, ref_raster, use_attribute, all_touched=False, no_data_value=0):
    """Rasterize vector data onto the grid defined by ``ref_raster``.

    Each cell of the reference grid gets the ``use_attribute`` value of the
    polygon overlapping it, and ``no_data_value`` elsewhere.

    Parameters
    ----------
    src_vector: Geopandas.GeoDataFrame
        Which vector data to be rasterize.
    ref_raster: Raster
        Target rasterized image's rows, cols, and geo_transform.
    use_attribute: str
        The column to use as rasterized image value.
    all_touched: bool, optioonal, default: False
        Pixels that touch (not overlap over 50%) the polygon will be assign the use_attribute value of the polygon.
    no_data_value: int or float
        The pixels not covered by any polygon will be filled no_data_value.

    Returns
    -------
    raster: Raster.
        Rasterized result.
    """
    # Validate inputs before delegating to the generic rasterizer.
    assert type(src_vector) is gpd.GeoDataFrame, "src_vector should be GeoDataFrame type."
    assert use_attribute in src_vector.columns, "attribute not exists in src_vector."
    return rasterize_layer(
        src_vector,
        ref_raster.rows,
        ref_raster.cols,
        ref_raster.geo_transform,
        use_attribute=use_attribute,
        all_touched=all_touched,
        no_data_value=no_data_value,
    )
def wraplatex(text, width=WIDTH):
    """Wrap the text at ``width`` columns for LaTeX, joining the wrapped
    lines with math-mode delimiters around each newline."""
    lines = wrap(text, width=width)
    return "$\n$".join(lines)
def test_simple_sql(engine_testaccount):
    """
    Simple SQL by SQLAlchemy

    Smoke test: executes ``show databases`` and materializes the rows.
    NOTE(review): ``len(rows) >= 0`` is always true, so this only verifies
    the statement runs without raising — confirm whether a stricter check
    (e.g. ``> 0``) was intended.
    """
    result = engine_testaccount.execute('show databases')
    rows = [row for row in result]
    assert len(rows) >= 0, 'show database results'
def register(request):
    """
    Render and process a basic registration form.

    GET renders the form; a valid POST creates a global user, authenticates
    and logs them in, then redirects to the control index. Users who are
    already authenticated are redirected immediately (honoring ?next=).
    """
    ctx = {}
    if request.user.is_authenticated():
        # Already logged in: honor an explicit ?next= target when present.
        if "next" in request.GET:
            return redirect(request.GET.get("next", 'control:index'))
        return redirect('control:index')
    if request.method == 'POST':
        form = GlobalRegistrationForm(data=request.POST)
        if form.is_valid():
            user = User.objects.create_global_user(
                form.cleaned_data['email'], form.cleaned_data['password'],
                locale=request.LANGUAGE_CODE,
                timezone=request.timezone if hasattr(request, 'timezone') else settings.TIME_ZONE
            )
            # Re-authenticate so the auth backend attribute is attached
            # before logging the new user in.
            user = authenticate(identifier=user.identifier, password=form.cleaned_data['password'])
            auth_login(request, user)
            return redirect('control:index')
    else:
        form = GlobalRegistrationForm()
    # Invalid POST falls through here so the bound form re-renders with errors.
    ctx['form'] = form
    return render(request, 'pretixcontrol/auth/register.html', ctx)
def green_agg(robots: List[gs.Robot]) -> np.ndarray:
    """
    Dummy aggregator (for demonstration): collect each robot's green
    color channel into a 1-D array.
    """
    values = np.zeros([len(robots)])
    for idx, robot in enumerate(robots):
        # NOTE: reads the private _color attribute; index 1 is the green channel.
        values[idx] = robot._color[1]
    return values
def tcp_port_open_locally(port):
    """
    Return True if the given TCP port is accepting connections on 127.0.0.1.

    Fixes a file-descriptor leak: the socket is now always closed (context
    manager), and a timeout prevents a hung connect attempt.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(5)
        # connect_ex returns 0 on success instead of raising.
        return sock.connect_ex(("127.0.0.1", port)) == 0
def wrap(text, width=80):
    """
    Wrap a string at a fixed width.

    Arguments
    ---------
    text : str
        Text to be wrapped
    width : int
        Line width

    Returns
    -------
    str
        The input split into ``width``-sized chunks joined by newlines
    """
    chunks = (text[start:start + width] for start in range(0, len(text), width))
    return "\n".join(chunks)
def parallel_vector(R, alt, max_alt=1e5):
    """
    Generate viewer and tangent vectors parallel to the surface of a
    sphere of radius R, one column per altitude.
    """
    # Promote a scalar altitude to a length-1 array.
    alt_arr = alt if hasattr(alt, '__len__') else np.array([alt])
    n_alts = len(alt_arr)
    viewer = np.zeros(shape=(3, n_alts))
    tangent = np.zeros_like(viewer)
    # Viewer sits well outside the sphere along -x at radial height R+alt.
    viewer[0] = -(R + max_alt * 2)
    viewer[1] = R + alt_arr
    tangent[1] = R + alt_arr
    return viewer, tangent
def load_datasets(parser, args):
    """Loads the specified MIMII train/validation datasets.

    Args:
        parser: argparse parser (kept for interface compatibility).
        args: the already-parsed commandline arguments to use.

    Returns:
        train_dataset, validation_dataset
    """
    # Bug fix: the passed-in ``args`` was previously discarded by an extra
    # ``parser.parse_args()`` call, which silently re-read sys.argv.
    dataset_kwargs = {
        "root": Path(args.train_dir),
    }
    source_augmentations = Compose(
        [globals()["_augment_" + aug] for aug in args.source_augmentations]
    )
    train_dataset = MIMIIDataset(
        split="0dB",
        subset=train_tracks,
        sources=args.sources,
        targets=args.sources,
        source_augmentations=source_augmentations,
        random_track_mix=True,
        segment=args.seq_dur,
        random_segments=True,
        sample_rate=args.sample_rate,
        samples_per_track=args.samples_per_track,
        **dataset_kwargs,
    )
    # Exclude tracks reserved for validation from the training set.
    train_dataset = filtering_out_valid(train_dataset)
    valid_dataset = MIMIIDataset(
        split="0dB",
        subset=validation_tracks,
        sources=args.sources,
        targets=args.sources,
        segment=None,
        **dataset_kwargs,
    )
    return train_dataset, valid_dataset
def stock_total_deal_money():
    """
    Total market turnover, in units of 100 million.

    :return: combined turnover of the Shenzhen Component Index (sz399001)
        and the Shanghai Composite Index (sh000001)
    """
    spot = stock_zh_index_spot()
    # Keep only the two headline indices.
    mask = spot['代码'].isin(['sz399001', 'sh000001'])
    return spot[mask]['成交额'].sum() / 100000000
def reboot(WorkspaceId):
    """Reboot a specific AWS Workspace instance and log the API response."""
    client = aws()
    console.log(
        Panel('Attemptng reboot of workspaceId: ' + WorkspaceId,
              title='INFO',
              style=info_fmt))
    reboot_requests = [{'WorkspaceId': WorkspaceId}]
    response = client.reboot_workspaces(RebootWorkspaceRequests=reboot_requests)
    console.log(response)
def _ls(dir=None, project=None, all=False, appendType=False, dereference=False, directoryOnly=False):
    """
    Lists file(s) in specified MDSS directory.
    :type dir: :obj:`str`
    :param dir: MDSS directory path for which files are listed.
    :type project: :obj:`str`
    :param project: NCI project identifier string, if :samp:`None`, uses default
       project (as returned from the :func:`getDefaultProject` function).
    :type all: :obj:`bool` or :obj:`str`
    :param all: If :samp:`True` or :samp:`"all"` lists files/directories whose names begin with '.'.
       If :samp:`almost-all` lists files/directories whose names begin with '.' but not
       the :samp:`"."` and :samp:`".."` entries.
    :type appendType: :obj:`bool`
    :param appendType: If :samp:`True` each name in the listing will have a character appended
       which indicates the type of *file*.
    :type dereference: :obj:`bool`
    :param dereference: If :samp:`True` symbolic links are dereferenced in the listing.
    :type directoryOnly: :obj:`bool`
    :param directoryOnly: If :samp:`True` only list directory name and not directory contents.
    :rtype: :obj:`list` of :obj:`str`
    :return: MDSS directory listing.
    """
    args = ["-1"] # Separate listed entries with newline, one entry per line.
    args += _getListDirAllArg(all)
    args += _getListDirDirectoryOnlyArg(directoryOnly)
    args += _getListDirAppendTypeArg(appendType)
    args += _getListDirDereferenceArg(dereference)
    # Bug fix: the previous version reset ``args = []`` when no dir was
    # given, silently discarding every option flag built above.
    if dir is not None:
        args += [dir]
    p = MdssCommand(commandStr="ls", project=project, args=args).execute()
    # Drop the trailing empty string produced by the final newline.
    return p.communicate()[0].split("\n")[0:-1]
def print_subs(valid_subs: list):
    """
    Print the given words grouped by length (each group sorted), e.g.::

        Words of length 2
        ab
        ac
        ca

        Words of length 3
        cab
    """
    longest = max((len(word) for word in valid_subs), default=-1)
    buckets = [[] for _ in range(longest)]
    for word in valid_subs:
        buckets[len(word) - 1].append(word)
    for length, words in enumerate(buckets, start=1):
        words.sort()
        if not words:
            continue
        print(f'Words of length {length}')
        for word in words:
            print(f'{word}')
        print()
def uploadMetadata(doi, current, delta, forceUpload=False, datacenter=None):
    """
    Uploads citation metadata for the resource identified by an existing
    scheme-less DOI identifier (e.g., "10.5060/FOO") to DataCite. This
    same function can be used to overwrite previously-uploaded metadata.
    'current' and 'delta' should be dictionaries mapping metadata
    element names (e.g., "Title") to values. 'current+delta' is
    uploaded, but only if there is at least one DataCite-relevant
    difference between it and 'current' alone (unless 'forceUpload' is
    true). 'datacenter', if specified, should be the identifier's
    datacenter, e.g., "CDL.BUL". There are three possible returns: None
    on success; a string error message if the uploaded DataCite Metadata
    Scheme record was not accepted by DataCite (due to an XML-related
    problem); or a thrown exception on other error. No error checking
    is done on the inputs.

    NOTE: this is Python 2 code (``except X, e:`` syntax, urllib2).
    """
    # Build the "before" record; an AssertionError means the current
    # metadata cannot form a valid record, treated as "no old record".
    try:
        oldRecord = formRecord("doi:" + doi, current)
    except AssertionError:
        oldRecord = None
    m = current.copy()
    m.update(delta)
    try:
        newRecord = formRecord("doi:" + doi, m)
    except AssertionError, e:
        return "DOI metadata requirements not satisfied: " + str(e)
    # Skip the upload when nothing DataCite-relevant changed.
    if newRecord == oldRecord and not forceUpload:
        return None
    if not _enabled:
        return None
    # To hide transient network errors, we make multiple attempts.
    for i in range(_numAttempts):
        o = urllib2.build_opener(_HTTPErrorProcessor)
        r = urllib2.Request(_metadataUrl)
        # We manually supply the HTTP Basic authorization header to avoid
        # the doubling of the number of HTTP transactions caused by the
        # challenge/response model.
        r.add_header("Authorization", _authorization(doi, datacenter))
        r.add_header("Content-Type", "application/xml; charset=UTF-8")
        r.add_data(newRecord.encode("UTF-8"))
        c = None
        try:
            _modifyActiveCount(1)
            c = o.open(r, timeout=_timeout)
            s = c.read()
            assert s.startswith("OK"), (
                "unexpected return from DataCite store metadata operation: " + s
            )
        except urllib2.HTTPError, e:
            message = e.fp.read()
            # 400/422 indicate an XML/schema problem — report, don't retry.
            if e.code in [400, 422]:
                return "element 'datacite': " + message
            # Retry only on 500-class errors until attempts are exhausted.
            if e.code != 500 or i == _numAttempts - 1:
                raise e
        except:
            # Non-HTTP failure (e.g. network): retry unless out of attempts.
            if i == _numAttempts - 1:
                raise
        else:
            return None
        finally:
            _modifyActiveCount(-1)
            if c:
                c.close()
        time.sleep(_reattemptDelay)
def one_away(string_1: str, string_2: str) -> bool:
    """Return True iff the two strings are exactly one edit apart.

    Computes the longest common subsequence (memoized recursion) and checks
    that the longer string exceeds it by exactly one character. Identical
    strings are zero edits apart and therefore return False.
    """
    if string_1 == string_2:
        return False

    @lru_cache(maxsize=1024)
    def lcs(a, b, acc=0):
        """Length of the longest common subsequence of a and b, plus acc."""
        if not a or not b:
            return acc
        if a[0] == b[0]:
            return lcs(a[1:], b[1:], acc + 1)
        return max(lcs(a[1:], b, acc), lcs(a, b[1:], acc))

    longest = max(len(string_1), len(string_2))
    return longest - lcs(string_1, string_2) == 1
async def reset_alarm_panel(hass, cluster, entity_id):
    """Reset the state of the alarm panel.

    Disarms the panel through the Home Assistant service call and verifies
    both the resulting entity state and the ZHA cluster commands issued.
    """
    cluster.client_command.reset_mock()
    await hass.services.async_call(
        ALARM_DOMAIN,
        "alarm_disarm",
        {ATTR_ENTITY_ID: entity_id, "code": "4321"},
        blocking=True,
    )
    await hass.async_block_till_done()
    assert hass.states.get(entity_id).state == STATE_ALARM_DISARMED
    # Two client commands are expected from the disarm flow.
    assert cluster.client_command.call_count == 2
    assert cluster.client_command.await_count == 2
    # The last command must announce a disarmed panel with no alarm.
    assert cluster.client_command.call_args == call(
        4,
        security.IasAce.PanelStatus.Panel_Disarmed,
        0,
        security.IasAce.AudibleNotification.Default_Sound,
        security.IasAce.AlarmStatus.No_Alarm,
    )
    cluster.client_command.reset_mock()
def test(net, loss_normalizer):
    """
    Run the IdProbNet model over the test set.

    Args:
        net -- (IdProbNet instance)
        loss_normalizer -- (Torch.Tensor) value to be divided from the loss
    Returns:
        3-tuple -- (Execution Time, End loss value,
                    Model's prediction after feed forward [Px])
    """
    result = run_model_data_t(net, loss_normalizer, NUM_TEST, 'test')
    return result
def scale_from_matrix(matrix):
    """Return scaling factor, origin and direction from scaling matrix.

    :param matrix: 4x4 homogeneous transformation matrix
    :return: (factor, origin, direction); direction is None for a uniform
        scaling.
    """
    M = jnp.array(matrix, dtype=jnp.float64, copy=False)
    M33 = M[:3, :3]
    # For a scaling by f about a direction, trace(M33) = f + 2 — TODO confirm
    # this matches the inverse of the corresponding scale_matrix constructor.
    factor = jnp.trace(M33) - 2.0
    try:
        # direction: unit eigenvector corresponding to eigenvalue factor
        w, V = jnp.linalg.eig(M33)
        i = jnp.where(abs(jnp.real(w) - factor) < 1e-8)[0][0]
        direction = jnp.real(V[:, i]).squeeze()
        direction /= vector_norm(direction)
    #WARNING(@cpgoodri): I'm not sure if this error-handling approach works with JAX, but it seems to pass tests...
    except IndexError:
        # uniform scaling
        factor = (factor + 2.0) / 3.0
        direction = None
    # origin: any eigenvector corresponding to eigenvalue 1
    w, V = jnp.linalg.eig(M)
    i = jnp.where(abs(jnp.real(w) - 1.0) < 1e-8)[0]
    if not len(i):
        raise ValueError('no eigenvector corresponding to eigenvalue 1')
    origin = jnp.real(V[:, i[-1]]).squeeze()
    # Normalize the homogeneous coordinate.
    origin /= origin[3]
    return factor, origin, direction
def get_fort44_info(NDX, NDY, NATM, NMOL, NION, NSTRA, NCL, NPLS, NSTS, NLIM):
    """Collection of labels and dimensions for all fort.44 variables, as collected in the
    SOLPS-ITER 2020 manual.

    Args:
        NDX, NDY: grid dimensions (poloidal, radial).
        NATM: number of atom species.
        NMOL: number of molecule species.
        NION: number of test ion species.
        NSTRA: number of strata (per-stratum entries run 0..NSTRA).
        NCL: number of resolved surface segments.
        NPLS: number of plasma ion species.
        NSTS: number of non-standard surfaces.
        NLIM: number of limiter surfaces.

    Returns:
        dict mapping each fort.44 variable name to ``[label, shape]`` where
        ``label`` is a plot-ready description and ``shape`` the array shape.
    """
    # 2D distributions on the (NDX, NDY) grid, per species where applicable.
    fort44_info = {
        "dab2": [r"Atom density ($m^{-3}$)", (NDX, NDY, NATM)],
        "tab2": [r"Atom temperature (eV )", (NDX, NDY, NATM)],
        "dmb2": [r"Molecule density ($m^{-3}$)", (NDX, NDY, NMOL)],
        "tmb2": [r"Molecule temperature (eV )", (NDX, NDY, NMOL)],
        "dib2": [r"Test ion density ($m^{-3}$)", (NDX, NDY, NION)],
        "tib2": [r" Test ion temperature (eV)", (NDX, NDY, NION)],
        "rfluxa": [r"Radial flux density of atoms ($m^{-2} s^{-1}$)", (NDX, NDY, NATM)],
        "rfluxm": [
            r"Radial flux density of molecules ($m^{-2} s^{-1}$)",
            (NDX, NDY, NMOL),
        ],
        "pfluxa": [
            r"Poloidal flux density of atoms ($m^{-2} s^{-1}$)",
            (NDX, NDY, NATM),
        ],
        "pfluxm": [
            r"Poloidal flux density of molecules ($m^{-2} s^{-1}$)",
            (NDX, NDY, NMOL),
        ],
        "refluxa": [
            r"Radial energy flux density carried by atoms ($W m^{-2}$)",
            (NDX, NDY, NATM),
        ],
        "refluxm": [
            r"Radial energy flux density carried by molecules ($W m^{-2}$)",
            (NDX, NDY, NMOL),
        ],
        "pefluxa": [
            r"Poloidal energy flux density carried by atoms ($W m^{-2}$)",
            (NDX, NDY, NATM),
        ],
        "pefluxm": [
            r"Poloidal energy flux density carried by molecules ($W m^{-2}$)",
            (NDX, NDY, NMOL),
        ],
        #
        "emiss": [
            r"$H_\alpha$ emissivity due to atoms ($photons m^{-2} s^{-1}$)",
            (NDX, NDY),
        ],
        "emissmol": [
            r"$H_\alpha$ emissivity due to molecules and molecular ions ($photons m^{-2} s^{-1}$)",
            (NDX, NDY),
        ],
        "srcml": [r"Molecule particle source (A)", (NDX, NDY, NMOL)],
        "edissml": [
            r"Energy spent for dissociating hydrogenic molecules (W)",
            (NDX, NDY, NMOL),
        ],
        # Wall/surface quantities, summed over strata.
        "wldnek": [
            r"Heat transferred by neutrals (W), total over strata",
            (NLIM + NSTS,),
        ],
        "wldnep": [
            r"Potential energy released by neutrals (W), total over strata",
            (NLIM + NSTS,),
        ],
        "wldna": [
            r"Flux of atoms impinging on surface (A), total over strata",
            (NLIM + NSTS, NATM),
        ],
        "ewlda": [
            r"Average energy of impinging atoms on surface (eV), total over strata",
            (NLIM + NSTS, NATM),
        ],
        "wldnm": [
            r"Flux of molecules impinging on surface (A), total over strata",
            (NLIM + NSTS, NMOL),
        ],
        "ewldm": [
            r"Average energy of impinging molecules on surface (eV), total over strata",
            (NLIM + NSTS, NMOL),
        ],
        "p1,p2": [
            r"Endpoints of surface (X and Y coordinates, in m), total over strata",
            (NLIM,),
        ],
        "wldra": [
            r"Flux of reflected atoms from surface (A), total over strata",
            (NLIM + NSTS, NATM),
        ],
        "wldrm": [
            r"Flux of reflected molecules from surface (A), total over strata",
            (NLIM + NSTS, NMOL),
        ],
    }
    # Per-stratum versions of the surface quantities above.
    for i in np.arange(NSTRA + 1):  # from 0 to NSTRA, unlike in manual
        fort44_info.update(
            {
                f"wldnek({i})": [r"Heat transferred by neutrals (W)", (NLIM + NSTS,)],
                f"wldnep({i})": [
                    r"Potential energy released by neutrals (W)",
                    (NLIM + NSTS,),
                ],
                f"wldna({i})": [
                    r"Flux of atoms impinging on surface (A)",
                    (NLIM + NSTS, NATM),
                ],
                f"ewlda({i})": [
                    r"Average energy of impinging atoms on surface (eV)",
                    (NLIM + NSTS, NATM),
                ],
                f"wldnm({i})": [
                    r"Flux of molecules impinging on surface (A)",
                    (NLIM + NSTS, NMOL),
                ],
                f"ewldm({i})": [
                    r"Average energy of impinging molecules on surface (eV)",
                    (NLIM + NSTS, NMOL),
                ],
                f"wldra({i})": [
                    r"Flux of reflected atoms from surface (A)",
                    (NLIM + NSTS, NATM),
                ],
                f"wldrm({i})": [
                    r"Flux of reflected molecules from surface (A)",
                    (NLIM + NSTS, NMOL),
                ],
            }
        )
    # Additional wall quantities, summed over strata.
    fort44_info.update(
        {
            "wldpp": [
                r"Flux of plasma ions impinging on surface (A), total over strata",
                (NLIM + NSTS, NPLS),
            ],
            "wldpa": [
                r"Net flux of atoms emitted from surface (A), total over strata",
                (NLIM + NSTS, NATM),
            ],
            "wldpm": [
                r"Net flux of molecules emitted from surface (A), total over strata",
                (NLIM + NSTS, NMOL),
            ],
            "wldpeb": [
                r"Power carried by particles emitted from surface (W), total over strata",
                (NLIM + NSTS,),
            ],
            "wldspt": [
                r"Flux of sputtered wall material (A), total over strata",
                (NLIM + NSTS,),
            ],
            "wldspta": [
                r"Flux of sputtered wall material per atom (A), total over strata",
                (NLIM + NSTS, NATM),
            ],
            "wldsptm": [
                r"Flux of sputtered wall material per molecule (A), total over strata",
                (NLIM + NSTS, NMOL),
            ],
        }
    )
    # Per-stratum versions of the additional wall quantities.
    for i in np.arange(NSTRA + 1):  # from 0 to NSTRA, unlike in manual
        fort44_info.update(
            {
                f"wldpp({i})": [
                    r"Flux of plasma ions impinging on surface (A)",
                    (NLIM + NSTS, NPLS),
                ],
                f"wldpa({i})": [
                    r"Net flux of atoms emitted from surface (A)",
                    (NLIM + NSTS, NATM),
                ],
                f"wldpm({i})": [
                    r"Net flux of molecules emitted from surface (A)",
                    (NLIM + NSTS, NMOL),
                ],
                f"wldpeb({i})": [
                    r"Power carried by particles emitted from surface (W)",
                    (NLIM + NSTS,),
                ],
                f"wldspt({i})": [
                    r"Flux of sputtered wall material (A)",
                    (NLIM + NSTS,),
                ],
                f"wldspta({i})": [
                    r"Flux of sputtered wall material per atom (A)",
                    (NLIM + NSTS, NATM),
                ],
                f"wldsptm({i})": [
                    r"Flux of sputtered wall material per molecule (A)",
                    (NLIM + NSTS, NMOL),
                ],
            }
        )
    # Surface metadata, absorption/pumping rates, radiation rates, resolved
    # surface-segment quantities, and grid integrals.
    fort44_info.update(
        {
            "isrftype": [r"ILIIN surface type variable in Eirene", (NLIM + NSTS,)],
            "wlarea": [r"Surface area (m2)", (NLIM + NSTS,)],
            "wlabsrp(A)": [r"Absorption rate for atoms", (NATM, NLIM + NSTS)],
            "wlabsrp(M)": [r"Absorption rate for molecules", (NMOL, NLIM + NSTS)],
            "wlabsrp(I)": [r"Absorption rate for test ions", (NION, NLIM + NSTS)],
            "wlabsrp(P)": [r"Absorption rate for plasma ions", (NPLS, NLIM + NSTS)],
            "wlpump(A)": [r"Pumped flux per atom (A)", (NATM, NLIM + NSTS)],
            "wlpump(M)": [r"Pumped flux per molecule (A)", (NMOL, NLIM + NSTS)],
            "wlpump(I)": [r"Pumped flux per test ion (A)", (NION, NLIM + NSTS)],
            "wlpump(P)": [r"Pumped flux per plasma ion (A)", (NPLS, NLIM + NSTS)],
            "eneutrad": [r"Radiation rate due to atoms (W)", (NDX, NDY, NATM)],
            "emolrad": [r"Radiation rate due to molecules (W)", (NDX, NDY, NMOL)],
            "eionrad": [r"Radiation rate due to test ions (W)", (NDX, NDY, NION)],
            # eirdiag rather than eirdiag_nds, as in manual...
            "eirdiag": [
                r"Indices for segments on resolved non-standard surfaces",
                (5 * NSTS + 1,),
            ],
            "sarea_res": [r"Surface area of surface segment (m2)", (NCL,)],
            "wldna_res": [
                r"Flux of atoms impinging on surface segment (A)",
                (NATM, NCL),
            ],
            "wldnm_res": [
                r"Flux of molecules impinging on surface segment (A)",
                (NMOL, NCL),
            ],
            "ewlda_res": [
                r"Average energy of impinging atoms on surface segment (eV)",
                (NATM, NCL),
            ],
            "ewldm_res": [
                r"Average energy of impinging molecules on surface segment (eV)",
                (NMOL, NCL),
            ],
            "ewldea_res": [
                r"Energy flux carried by emitted atoms from surface segment (W)",
                (NATM, NCL),
            ],
            "ewldem_res": [
                r"Energy flux carried by emitted molecules from surface segment (W)",
                (NMOL, NCL),
            ],
            "ewldrp_res": [
                r"Total energy flux carried by emitted particles from surface segment (W)",
                (NCL,),
            ],
            "ewldmr_res": [
                r"Flux of emitted molecules from recycling atoms (A)",
                (NMOL, NCL),
            ],
            "wldspt_res": [r"Flux of sputtered wall material (A)", (NCL,)],
            "wldspta_res": [
                r"Flux of sputtered wall material per atom (A)",
                (NCL, NATM),
            ],
            "wldsptm_res": [
                r"Flux of sputtered wall material per molecule (A)",
                (NCL, NMOL),
            ],
            "wlpump_res(A)": [r"Pumped flux per atom (A)", (NCL, NATM)],
            "wlpump_res(M)": [r"Pumped flux per molecule (A)", (NCL, NMOL)],
            "wlpump_res(I)": [r"Pumped flux per test ion (A)", (NCL, NION)],
            "wlpump_res(P)": [r"Pumped flux per plasma ion (A)", (NCL, NPLS)],
            "ewldt_res": [r"Total wall power loading from Eirene particles", (NCL,)],
            "pdena_int": [
                r"Integral number of atoms over the entire Eirene computational grid",
                (NATM, NSTRA + 1),
            ],
            "pdenm_int": [
                r"Integral number of molecules over the entire Eirene computational grid",
                (NMOL, NSTRA + 1),
            ],
            "pdeni_int": [
                r"Integral number of test ions over the entire Eirene computational grid",
                (NION, NSTRA + 1),
            ],
            "pdena_int_b2": [
                r"Integral number of atoms over the B2.5 computational grid",
                (NATM, NSTRA + 1),
            ],
            "pdenm_int_b2": [
                r"Integral number of molecules over the B2.5 computational grid",
                (NMOL, NSTRA + 1),
            ],
            "pdeni_int_b2": [
                r"Integral number of test ions over the B2.5 computational grid",
                (NION, NSTRA + 1),
            ],
            "edena_int": [
                r"Integral energy carried by atoms over the entire Eirene computational grid (J)",
                (NATM, NSTRA + 1),
            ],
            "edenm_int": [
                r"Integral energy carried by molecules over the entire Eirene computational grid (J)",
                (NMOL, NSTRA + 1),
            ],
            "edeni_int": [
                r"Integral energy carried by test ions over the entire Eirene computational grid (J)",
                (NION, NSTRA + 1),
            ],
            "edena_int_b2": [
                r"Integral energy carried by atoms over the B2.5 computational grid (J)",
                (NATM, NSTRA + 1),
            ],
            "edenm_int_b2": [
                r"Integral energy carried by molecules over the B2.5 computational grid (J)",
                (NMOL, NSTRA + 1),
            ],
            "edeni_int_b2": [
                r"Integral energy carried by test ions over the B2.5 computational grid (J)",
                (NION, NSTRA + 1),
            ],
        }
    )
    # extra, undocumented
    fort44_info.update({"wall_geometry": [r"Wall geometry points", (4 * NLIM,)]})
    return fort44_info
def lens2memnamegen_first50(nmems):
    """Generate the member names for the first 50 LENS2 simulations.

    Members 0-9 use start years 1001, 1021, ..., 1181 with suffixes 001-010;
    each following decade of members reuses a single start year
    (1231, 1251, 1281, 1301) with suffixes 001-010.

    Input:
        nmems = number of members (values beyond 50 are ignored, as before)
    Output:
        memstr = a list of member-name strings '<start year>.<NNN>'
    """
    memstr = []
    # Start years shared by members 10-19, 20-29, 30-39, 40-49 respectively.
    decade_years = (1231, 1251, 1281, 1301)
    for imem in range(nmems):
        if imem >= 50:
            # The original silently produced nothing past member 49.
            break
        if imem < 10:
            # First ten members step the start year by 20 per member.
            prefix = str(1001 + imem * 20)
        else:
            prefix = str(decade_years[imem // 10 - 1])
        suffix = str(imem % 10 + 1).zfill(3)
        memstr.append(prefix + '.' + suffix)
    return memstr
def initialize_settings(tool_name, source_path, dest_file_name=None):
    """ Creates settings directory and copies or merges the source to there.
    In case source already exists, merge is done; a corrupted existing
    configuration is replaced with the defaults from source_path.
    Destination file name is the source_path's file name unless dest_file_name
    is given.

    Returns the absolute path of the settings file.
    """
    settings_dir = os.path.join(SETTINGS_DIRECTORY, tool_name)
    if not os.path.exists(settings_dir):
        os.mkdir(settings_dir)
    if not dest_file_name:
        dest_file_name = os.path.basename(source_path)
    settings_path = os.path.join(settings_dir, dest_file_name)
    if not os.path.exists(settings_path):
        shutil.copy(source_path, settings_path)
    else:
        try:
            SettingsMigrator(source_path, settings_path).migrate()
        # Fixed Python 2 `except X, e` syntax and print statements, which are
        # syntax errors on Python 3.
        except ConfigObjError as parsing_error:
            print('WARNING! corrupted configuration file replaced with defaults')
            print(parsing_error)
            shutil.copy(source_path, settings_path)
    return os.path.abspath(settings_path)
def preprocess_output(fp_hdf_out, raw_timestamps, output, output_timestamps, average_window=1000, dataset_name='aligned'):
    """
    Base file for preprocessing outputs (handles M-D case as of March2020).
    For more complex cases use specialized functions (see for example preprocess_output in util.tetrode module)

    Parameters
    ----------
    fp_hdf_out : str
        File path to HDF5 file
    raw_timestamps : (N,1) array_like
        Timestamps for each sample in continous
    output : array_like
        M dimensional output which will be aligned with continous
    output_timestamps : (N,1) array_like
        Timestamps for output
    average_window : int, optional
        Downsampling factor for raw data and output, by default 1000
    dataset_name : str, optional
        Field name for output stored in HDF5 file
    """
    hdf5_file = h5py.File(fp_hdf_out, mode='a')
    # Get size of wavelets; the aligned output is truncated to this length.
    input_length = hdf5_file['inputs/wavelets'].shape[0]
    # Materialize the timestamp dataset into memory.
    raw_timestamps = raw_timestamps[()]  # Slightly faster than np.array
    # Promote 1-D output to a single-column 2-D array so the per-column
    # interpolation below works uniformly.
    if output.ndim == 1:
        output = output[..., np.newaxis]
    # Interpolate each output dimension onto the downsampled raw timestamps
    # (every average_window-th sample), then transpose to (samples, dims).
    output_aligned = np.array([np.interp(raw_timestamps[np.arange(0, raw_timestamps.shape[0],
                                                                  average_window)], output_timestamps, output[:, i]) for i in range(output.shape[1])]).transpose()
    # Create and save datasets in HDF5 File
    hdf5.create_or_update(hdf5_file, dataset_name="outputs/{}".format(dataset_name),
                          dataset_shape=[input_length, output_aligned.shape[1]], dataset_type=np.float16, dataset_value=output_aligned[0: input_length, ...])
    hdf5_file.flush()
    hdf5_file.close()
    print('Successfully written Dataset="{}" to {}'.format(dataset_name, fp_hdf_out))
def landing_pg():
    """Landing page: route to the home or interactive view from the sidebar."""
    choice = st.sidebar.selectbox("Welcome", ["Home", "Interactive"])
    if choice == "Home":
        landing_src()
        return
    interactive()
def test_list_boolean_length_nistxml_sv_iv_list_boolean_length_1_4(mode, save_output, output_format):
    """
    Type list/boolean is restricted by facet length with value 5.
    """
    base = "nistData/list/boolean/Schema+Instance/"
    assert_bindings(
        schema=base + "NISTSchema-SV-IV-list-boolean-length-1.xsd",
        instance=base + "NISTXML-SV-IV-list-boolean-length-1-4.xml",
        class_name="NistschemaSvIvListBooleanLength1",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
def get_stopword_list(filename=stopword_filepath):
    """ Get a list of stopwords from a file, one word per line.

    Args:
        filename: path of the stopword file; defaults to the module-level
            ``stopword_filepath``.

    Returns:
        list of str: the file's lines with line endings stripped.
    """
    # splitlines() already returns a list; the wrapping identity
    # comprehension was redundant.
    with open(filename, 'r', encoding=encoding) as f:
        return f.read().splitlines()
def annotate(d, text='', filename='notes.txt'):
    """Create a file FILENAME in the directory D with contents TEXT.

    Args:
        d: directory in which to create the file.
        text: contents to write (default: empty string).
        filename: name of the file to create (default: 'notes.txt').
    """
    # Context manager guarantees the handle is closed even if write() raises
    # (the original leaked the file object on error).
    with open(os.path.join(d, filename), 'w') as f:
        f.write(text)
def test_create_file_obj_deleted(new_dataset):
    """Test for the case where this file only exists in ancestor commits"""
    # Remember the commit that still contains the file, then delete it.
    head_commit = new_dataset.repo.repo.head.commit
    new_dataset.remove('dataset_description.json')
    # Look the file up in the ancestor commit's tree.
    ancestor_tree = new_dataset.repo.repo.commit(head_commit).tree
    result = create_file_obj(
        new_dataset, ancestor_tree, ('dataset_description.json', None))
    assert result == expected_file_object
def append_composite_tensor(target, to_append):
    """Helper function to append composite tensors to each other in the 0 axis.

    In order to support batching within a fit/evaluate/predict call, we need
    to be able to aggregate within a CompositeTensor. Unfortunately, the CT
    API currently does not make this easy - especially in V1 mode, where we're
    working with CompositeTensor Value objects that have no connection with the
    CompositeTensors that created them.

    Arguments:
        target: CompositeTensor or CompositeTensor value object that will be
            appended to.
        to_append: CompositeTensor or CompositeTensor value object to append
            to 'target'.

    Returns:
        A CompositeTensor or CompositeTensor value object.

    Raises:
        RuntimeError: if concatenation is not possible.
    """
    # Both arguments must be the exact same class; mixing a CompositeTensor
    # with its Value counterpart (or sparse with ragged) is not supported.
    if type(target) is not type(to_append):
        raise RuntimeError('Unable to concatenate %s and %s' %
                           (type(target), type(to_append)))
    # Perform type-specific concatenation.
    # TODO(b/125094323): This should be replaced by a simple call to
    # target.append() that should work on all of the below classes.
    # If we're seeing a CompositeTensor here, we know it's because we're in
    # Eager mode (or else we'd have evaluated the CT to a CT Value object
    # already). Therefore, it's safe to call concat() on it without evaluating
    # the result any further. If not - that is, if we're seeing a
    # SparseTensorValue or a RaggedTensorValue - we need to hand-update it
    # since we're outside of the graph anyways.
    if isinstance(target, sparse_tensor.SparseTensor):
        # We need to invoke the sparse version of concatenate here - tf.concat
        # won't work.
        return sparse_ops.sparse_concat(sp_inputs=[target, to_append], axis=0)
    elif isinstance(target, ragged_tensor.RaggedTensor):
        return ragged_concat_ops.concat([target, to_append], axis=0)
    elif isinstance(target, sparse_tensor.SparseTensorValue):
        return _append_sparse_tensor_value(target, to_append)
    elif isinstance(target, ragged_tensor_value.RaggedTensorValue):
        return _append_ragged_tensor_value(target, to_append)
    else:
        raise RuntimeError('Attempted to concatenate unsupported object %s.' %
                           type(target))
def check_index(ind, dimension):
    """Check validity of index for a given dimension

    Accepts integers, slices, and integer/boolean array-likes; raises
    ``IndexError`` when the index cannot be in bounds for ``dimension``.

    Examples
    --------
    >>> check_index(3, 5)
    >>> check_index(5, 5)
    Traceback (most recent call last):
    ...
    IndexError: Index is not smaller than dimension 5 >= 5
    >>> check_index(-1, 5)
    >>> check_index(-6, 5)
    Traceback (most recent call last):
    ...
    IndexError: Negative index is not greater than negative dimension -6 <= -5
    >>> check_index([1, 2], 5)
    >>> check_index([6, 3], 5)
    Traceback (most recent call last):
    ...
    IndexError: Index out of bounds for dimension 5
    >>> check_index(slice(0, 3), 5)
    """
    if isinstance(ind, Iterable):
        arr = np.asanyarray(ind)
        if np.issubdtype(arr.dtype, np.integer):
            # Integer fancy index: every element must fit in [-dim, dim).
            if ((arr >= dimension) | (arr < -dimension)).any():
                raise IndexError(
                    "Index out of bounds for dimension {:d}".format(dimension)
                )
        elif arr.dtype == bool and len(arr) != dimension:
            # Boolean masks must match the dimension exactly.
            raise IndexError(
                "boolean index did not match indexed array; dimension is {:d} "
                "but corresponding boolean dimension is {:d}".format(dimension, len(arr))
            )
        # Other dtypes: unknown dimension, assumed to be in bounds.
        return
    if isinstance(ind, slice):
        return
    if not isinstance(ind, Integral):
        raise IndexError(
            "only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and "
            "integer or boolean arrays are valid indices"
        )
    # Scalar integer: check both ends of the valid range.
    if ind >= dimension:
        raise IndexError(
            "Index is not smaller than dimension {:d} >= {:d}".format(ind, dimension)
        )
    if ind < -dimension:
        raise IndexError(
            "Negative index is not greater than negative dimension "
            "{:d} <= -{:d}".format(ind, dimension)
        )
def to_text(value):
    """Convert an opcode to text.

    *value*, an ``int`` the opcode value,

    Raises ``dns.opcode.UnknownOpcode`` if the opcode is unknown.

    Returns a ``str``.
    """
    # Delegate to the Opcode enum's converter.
    text = Opcode.to_text(value)
    return text
def prompt_merge(target_path,
                 additional_uris,
                 additional_specs,
                 path_change_message=None,
                 merge_strategy='KillAppend',
                 confirmed=False,
                 confirm=False,
                 show_advanced=True,
                 show_verbosity=True,
                 config_filename=None,
                 config=None,
                 allow_other_element=True):
    """
    Prompts the user for the resolution of a merge. Without
    further options, will prompt only if elements change. New
    elements are just added without prompt.

    :param target_path: Location of the config workspace
    :param additional_uris: uris from which to load more elements
    :param additional_specs: path specs for additional elements
    :param path_change_message: Something to tell the user about elements order
    :param merge_strategy: See Config.insert_element
    :param confirmed: Never ask
    :param confirm: Always ask, supersedes confirmed
    :param config: None or a Config object for target path if available
    :param show_advanced: if true allow to change merge strategy
    :param show_verbosity: if true allows to change verbosity
    :param allow_other_element: if False merge fails when it could cause other elements
    :returns: tuple (Config or None if no change, bool path_changed)
    """
    if config is None:
        config = multiproject_cmd.get_config(
            target_path,
            additional_uris=[],
            config_filename=config_filename)
    elif config.get_base_path() != target_path:
        msg = "Config path does not match %s %s " % (config.get_base_path(),
                                                     target_path)
        raise MultiProjectException(msg)
    local_names_old = [x.get_local_name() for x in config.get_config_elements()]
    extra_verbose = confirmed or confirm
    abort = False
    last_merge_strategy = None
    while not abort:
        # Recompute the merged config whenever the strategy changed.
        if (last_merge_strategy is None
                or last_merge_strategy != merge_strategy):
            if not config_filename:
                # should never happen right now with rosinstall/rosws/wstool
                # TODO Need a better way to work with clones of original config
                raise ValueError('Cannot merge when no config filename is set')
            newconfig = multiproject_cmd.get_config(
                target_path,
                additional_uris=[],
                config_filename=config_filename)
            config_actions = multiproject_cmd.add_uris(
                config=newconfig,
                additional_uris=additional_uris,
                config_filename=None,
                merge_strategy=merge_strategy,
                allow_other_element=allow_other_element)
            for path_spec in additional_specs:
                action = newconfig.add_path_spec(path_spec, merge_strategy)
                config_actions[path_spec.get_local_name()] = (action, path_spec)
            last_merge_strategy = merge_strategy
        local_names_new = [x.get_local_name() for x in newconfig.get_config_elements()]
        path_changed = False
        ask_user = False
        output = ""
        new_elements = []
        changed_elements = []
        discard_elements = []
        # Classify each merge action to build the user-facing summary.
        for localname, (action, new_path_spec) in list(config_actions.items()):
            index = -1
            if localname in local_names_old:
                index = local_names_old.index(localname)
            if action == 'KillAppend':
                ask_user = True
                if (index > -1 and local_names_old[:index + 1] == local_names_new[:index + 1]):
                    action = 'MergeReplace'
                else:
                    changed_elements.append(_get_element_diff(new_path_spec, config, extra_verbose))
                    path_changed = True
            if action == 'Append':
                path_changed = True
                new_elements.append(_get_element_diff(new_path_spec,
                                                      config,
                                                      extra_verbose))
            elif action == 'MergeReplace':
                changed_elements.append(_get_element_diff(new_path_spec,
                                                          config,
                                                          extra_verbose))
                ask_user = True
            elif action == 'MergeKeep':
                discard_elements.append(_get_element_diff(new_path_spec,
                                                          config,
                                                          extra_verbose))
                ask_user = True
        if len(changed_elements) > 0:
            output += "\n Change details of element (Use --merge-keep or --merge-replace to change):\n"
            if extra_verbose:
                output += " %s\n" % ("\n".join(sorted(changed_elements)))
            else:
                output += " %s\n" % (", ".join(sorted(changed_elements)))
        if len(new_elements) > 0:
            output += "\n Add new elements:\n"
            if extra_verbose:
                output += " %s\n" % ("\n".join(sorted(new_elements)))
            else:
                output += " %s\n" % (", ".join(sorted(new_elements)))
        if local_names_old != local_names_new[:len(local_names_old)]:
            old_order = ' '.join(reversed(local_names_old))
            new_order = ' '.join(reversed(local_names_new))
            # Bug fix: '%' binds tighter than 'or', so the fallback label was
            # unreachable and a None message rendered literally as "None ".
            output += "\n %s " % (path_change_message or "Element order change")
            output += "(Use --merge-keep or --merge-replace to prevent) "
            output += "from\n %s\n to\n %s\n\n" % (old_order, new_order)
            ask_user = True
        if output == "":
            return (None, False)
        if not confirm and (confirmed or not ask_user):
            print(" Performing actions: ")
            print(output)
            return (newconfig, path_changed)
        else:
            print(output)
            showhelp = True
            while showhelp:
                showhelp = False
                prompt = "Continue: (y)es, (n)o"
                if show_verbosity:
                    prompt += ", (v)erbosity"
                if show_advanced:
                    prompt += ", (a)dvanced options"
                prompt += ": "
                mode_input = Ui.get_ui().get_input(prompt)
                if mode_input == 'y':
                    return (newconfig, path_changed)
                elif mode_input == 'n':
                    abort = True
                elif show_advanced and mode_input == 'a':
                    strategies = {'MergeKeep': "(k)eep",
                                  'MergeReplace': "(s)witch in",
                                  'KillAppend': "(a)ppending"}
                    unselected = [v for k, v in
                                  list(strategies.items())
                                  if k != merge_strategy]
                    print("""New entries will just be appended to the config and
appear at the beginning of your ROS_PACKAGE_PATH. The merge strategy
decides how to deal with entries having a duplicate localname or path.
"(k)eep" means the existing entry will stay as it is, the new one will
be discarded. Useful for getting additional elements from other
workspaces without affecting your setup.
"(s)witch in" means that the new entry will replace the old in the
same position. Useful for upgrading/downgrading.
"switch (a)ppend" means that the existing entry will be removed, and
the new entry appended to the end of the list. This maintains order
of elements in the order they were given.
Switch append is the default.
""")
                    prompt = "Change Strategy %s: " % (", ".join(unselected))
                    mode_input = Ui.get_ui().get_input(prompt)
                    if mode_input == 's':
                        merge_strategy = 'MergeReplace'
                    elif mode_input == 'k':
                        merge_strategy = 'MergeKeep'
                    elif mode_input == 'a':
                        merge_strategy = 'KillAppend'
                elif show_verbosity and mode_input == 'v':
                    extra_verbose = not extra_verbose
    if abort:
        print("No changes made.")
    print('==========================================')
    return (None, False)
def _demo_mm_inputs(input_shape, num_classes):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
segs = rng.randint(
low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False,
} for _ in range(N)]
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_semantic_seg': torch.LongTensor(segs)
}
return mm_inputs | 30,854 |
def clean_vm(root):
    """Remove the vagrant VM rooted at *root* and its Vagrantfile.

    Destruction is best-effort: errors are printed and the Vagrantfile
    removal is still attempted; a missing Vagrantfile is ignored.
    """
    v = vagrant.Vagrant(root=root)
    print(" - Cleaning VM ", root)  # fixed typo: "Cleanig" -> "Cleaning"
    try:
        v.destroy()
    except Exception as err:
        # Deliberate best-effort: report the failure and continue so the
        # Vagrantfile still gets removed.
        print(err)
    try:
        # os.path.join is portable, unlike '+ "/Vagrantfile"' concatenation.
        os.remove(os.path.join(root, "Vagrantfile"))
    except FileNotFoundError:
        pass
def get_ephemeral_port(sock_family=socket.AF_INET, sock_type=socket.SOCK_STREAM):
    """Return an ostensibly available ephemeral port number.

    Binds a throwaway socket to port 0 so the OS picks a free port, then
    closes it. We expect that the operating system is polite enough to not
    hand out the same ephemeral port before we can explicitly bind it a
    second time.
    """
    # Context manager guarantees the socket is closed even if bind() raises
    # (the original leaked the descriptor on error).
    with socket.socket(sock_family, sock_type) as s:
        s.bind(('', 0))
        return s.getsockname()[1]
def iterMergesort(list_of_lists, key=None):
    # Based on http://code.activestate.com/recipes/511509-n-way-merge-sort/
    # from Mike Klaas
    """ Perform an N-way merge operation on sorted lists.

    @param list_of_lists: (really iterable of iterable) of sorted elements
    (either by naturally or by C{key})
    @param key: specify sort key function (like C{sort()}, C{sorted()})

    Yields the merged items one at a time.

    This is a stable merge; complexity O(N lg N)

    Examples::
        print list(mergesort([[1,2,3,4],
                              [2,3.5,3.7,4.5,6,7],
                              [2.6,3.6,6.6,9]]))
        [1, 2, 2, 2.6, 3, 3.5, 3.6, 3.7, 4, 4.5, 6, 6.6, 7, 9]
    """
    heap = []
    # Prime the heap with the first item of each non-empty iterable.
    for i, itr in enumerate(iter(pl) for pl in list_of_lists):
        try:
            # Fixed: Python 2 `itr.next()` raises AttributeError on Python 3;
            # the built-in next() works on both.
            item = next(itr)
            # The source index i breaks ties, which keeps the merge stable.
            toadd = (key(item), i, item, itr) if key else (item, i, itr)
            heap.append(toadd)
        except StopIteration:
            pass
    heapq.heapify(heap)
    if key:
        while heap:
            _, idx, item, itr = heap[0]
            yield item
            try:
                item = next(itr)
                heapq.heapreplace(heap, (key(item), idx, item, itr))
            except StopIteration:
                heapq.heappop(heap)
    else:
        while heap:
            item, idx, itr = heap[0]
            yield item
            try:
                heapq.heapreplace(heap, (next(itr), idx, itr))
            except StopIteration:
                heapq.heappop(heap)
def main():
    """
    Let this thing fly: connect to vCenter, locate the template VM
    (optionally inside a specific folder), and clone it.
    """
    args = get_args()
    # connect this thing
    si = vmware_lib.connect(args.host, args.user, args.password, args.port, args.insecure)
    content = si.RetrieveContent()
    vm_object = None
    if args.template_folder:
        # Restrict the search to the given folder's direct children.
        folder = vmware_lib.get_obj(content, [vmware_lib.vim.Folder], args.template_folder)
        for vm in folder.childEntity:
            if vm.name == args.template:
                vm_object = vm
    else:
        vm_object = vmware_lib.get_obj(content, [vmware_lib.vim.VirtualMachine], args.template)
    if vm_object:
        clone_vm(
            content, vm_object, args.vm_name, si,
            args.datacenter_name, args.vm_folder,
            args.datastore_name, args.cluster_name,
            args.resource_pool, args.power_on,
            args.cpus, args.memory, args.linked_clone)
    else:
        # Fixed Python 2 print statement (syntax error on Python 3).
        print("template not found")
def tear_down():
    """Tear down the test environment: drop the test DB, then restore Django."""
    # destroy test database
    from django.db import connection

    connection.creation.destroy_test_db("not_needed")

    # teardown environment
    from django.test.utils import teardown_test_environment

    teardown_test_environment()
def IR_guess_model(spectrum: ConvSpectrum, peak_args: Optional[dict] = None) -> tuple[Model, Parameters]:
    """
    Guess a fit for the IR spectrum based on its peaks.

    One Gaussian component is added per detected peak; peak prominence
    defaults to 10% of the spectrum's intensity range.

    :param spectrum: the ConvSpectrum to be fit
    :param peak_args: arguments for finding peaks (merged over the defaults)
    :return: Model (None if no peaks were found), parameters
    """
    min_intensity, max_intensity = spectrum.range
    range_intensities = max_intensity - min_intensity
    # Default: only consider peaks at least 10% of the intensity range tall.
    IR_peak_defaults = {
        "prominence": 0.1 * range_intensities,
    }
    peak_args = IR_peak_defaults if peak_args is None else {**IR_peak_defaults, **peak_args}
    peak_indices, peak_properties = spectrum.peaks(**peak_args, indices=True)
    params = Parameters()
    composite_model = None
    # Fit the peaks
    for i, peak_idx in enumerate(peak_indices):
        prefix = f"a{i}_"
        model = models.GaussianModel(prefix=prefix)
        center = spectrum.energies[peak_idx]
        height = spectrum.intensities[peak_idx]
        # Constrain each Gaussian near its detected peak position/height.
        model.set_param_hint("amplitude", min=0.05 * height)
        model.set_param_hint("center", min=center - 10, max=center + 10)
        model.set_param_hint("sigma", min=0.1, max=100)
        peak_params = {
            f"{prefix}amplitude": height * 0.8,
            f"{prefix}center": center,
            f"{prefix}sigma": 10,
        }
        # NOTE(review): if Parameters.update follows dict semantics and
        # returns None, this assignment would make `params` None after the
        # first peak — confirm against the lmfit version in use.
        params = params.update(model.make_params(**peak_params))
        composite_model = model if composite_model is None else composite_model + model
    return composite_model, params
def virus_monte_carlo(initial_infected, population, k):
    """ Generates a list of points to which some is infected
    at a given value k starting with initial_infected infected.
    There is no mechanism to stop the infection from reaching
    the entire population.

    :param initial_infected: The amount of people whom are infected at the
        start.
    :type initial_infected: int
    :param population: The total population sample.
    :type population: int
    :param k: The rate of infection.
    :type k: float
    :return: An array of the amount of people per time infected.
    :rtype: tuple(time, infected)
    """
    susceptible = np.arange(1, population + 1, dtype=int)
    infected_now = initial_infected
    infected_counts = np.array([infected_now])
    times = np.array([0])
    step = 0
    # At most one Monte Carlo draw per member of the population.
    for _ in range(population):
        probability = (k) * infected_now / population
        draws = np.random.uniform(0, 1, size=susceptible.size)
        caught_it = np.where(draws <= probability, True, False)
        # Keep only the people whose draw did not infect them.
        susceptible = susceptible[caught_it != True]
        if susceptible.size != population:
            infected_now = (population - susceptible.size)
            infected_counts = np.append(infected_counts, infected_now)
        step += 1
        times = np.append(times, step)
        if infected_counts.size == population:
            break
    return (times, infected_counts)
def threadsafe_generator(f):
    """
    A decorator that takes a generator function and makes it thread-safe
    by routing its output through ``threadsafe_iter``.
    """
    import functools

    # wraps() preserves the decorated function's name/docstring, which the
    # original wrapper lost.
    @functools.wraps(f)
    def g(*a, **kw):
        return threadsafe_iter(f(*a, **kw))
    return g
def test_heirarchical_data(heirarchical_data_structure, settings_number_of_columns_5):
    """Testing that heirarchical data can be handled by VMC (even though this is NOT recommended"""
    instance = EvenVMCView()
    rows = instance.process_entries(heirarchical_data_structure)
    # Every full row should contain five dict cells.
    # Bug fix: the original asserted `row[j]` where `row` was the int loop
    # counter, which would raise TypeError instead of checking the cells.
    for row_content in rows[:-1]:
        for j in range(5):
            assert isinstance(row_content[j], dict)
    # The final, partial row has two dict cells padded with empty strings.
    last_row = rows[-1]
    for i in range(2):
        assert isinstance(last_row[i], dict)
    for i in range(2, 5):
        assert last_row[i] == ""
def lastmsg(self):
    """
    Return the last logged message if the **_last_message** attribute is set.

    Returns:
        last message or empty str
    """
    return getattr(self, '_last_message', '')
def _callback_on_all_dict_keys(dt, callback_fn):
"""
Callback callback_fn on all dictionary keys recursively
"""
result = {}
for (key, val) in dt.items():
if type(val) == dict:
val = _callback_on_all_dict_keys(val, callback_fn)
result[callback_fn(key)] = val
return result | 30,865 |
def ovb_partial_r2_bound(model=None, treatment=None, r2dxj_x=None, r2yxj_dx=None,
                         benchmark_covariates=None, kd=1, ky=None):
    """
    Provide a Pandas DataFrame with the bounds on the strength of the unobserved confounder.
    Adjusted estimates, standard errors and t-values (among other quantities) need to be computed
    manually by the user using those bounds with the functions adjusted_estimate, adjusted_se and adjusted_t.
    :Required parameters: (model and treatment) or (r2dxj_x and r2yxj_dx).
    Parameters
    ----------
    model : statsmodels OLSResults object
        a fitted statsmodels OLSResults object for the restricted regression model you have provided.
    treatment : string
        a string with the name of the "treatment" variable, e.g. the independent variable of interest.
    r2dxj_x : float
        float with the partial R2 of covariate Xj with the treatment D (after partialling out the effect of the remaining covariates X, excluding Xj).
    r2yxj_dx : float
        float with the partial R2 of covariate Xj with the outcome Y (after partialling out the effect of the remaining covariates X, excluding Xj).
    benchmark_covariates : string or list of strings
        a string or list of strings with names of the variables to use for benchmark bounding.
    kd : float or list of floats
        a float or list of floats with each being a multiple of the strength of association between a
        benchmark variable and the treatment variable to test with benchmark bounding (Default value = 1).
    ky : float or list of floats
        same as kd except measured in terms of strength of association with the outcome variable (Default value = None).
    Returns
    -------
    Pandas DataFrame
        A Pandas DataFrame containing the following variables:
        **bound_label** : a string created by label_maker to serve as a label for the bound for printing & plotting purposes.
        **r2dz_x** : a float or list of floats with the partial R^2 of a putative unobserved confounder "z"
        with the treatment variable "d", with observed covariates "x" partialed out, as implied by z being kd-times
        as strong as the benchmark_covariates.
        **r2yz_dx** : a float or list of floats with the partial R^2 of a putative unobserved confounder "z"
        with the outcome variable "y", with observed covariates "x" and the treatment variable "d" partialed out,
        as implied by z being ky-times as strong as the benchmark_covariates.
    Examples
    ---------
    Let's construct bounds from summary statistics only. Suppose you didn't have access to the data, but only to the treatment and outcome regression tables.
    You can still compute the bounds.
    >>> # First import the necessary libraries.
    >>> import sensemakr as smkr
    >>> # Use the t statistic of female in the outcome regression to compute the partial R2 of female with the outcome.
    >>> r2yxj_dx = smkr.partial_r2(t_statistic = -9.789, dof = 783)
    >>> # Use the t-value of female in the *treatment* regression to compute the partial R2 of female with the treatment.
    >>> r2dxj_x = smkr.partial_r2(t_statistic = -2.680, dof = 783)
    >>> # Compute manually bounds on the strength of confounders 1, 2, or 3 times as strong as female.
    >>> bounds = smkr.ovb_partial_r2_bound(r2dxj_x = r2dxj_x, r2yxj_dx = r2yxj_dx,kd = [1, 2, 3], ky = [1, 2, 3])
    >>> # Compute manually adjusted estimates.
    >>> bound_values = smkr.adjusted_estimate(estimate = 0.0973, se = 0.0232, dof = 783, r2dz_x = bounds['r2dz_x'], r2yz_dx = bounds['r2yz_dx'])
    >>> # Plot contours and bounds.
    >>> smkr.ovb_contour_plot(estimate = 0.0973, se = 0.0232, dof = 783)
    >>> smkr.add_bound_to_contour(bounds=bounds, bound_value = bound_values)
    """
    # --- input validation -------------------------------------------------
    if (model is None or treatment is None) and (r2dxj_x is None or r2yxj_dx is None):
        sys.exit('Error: ovb_partial_r2_bound requires either a statsmodels OLSResults object and a treatment name'
                 'or the partial R^2 values with the benchmark covariate, r2dxj_x and r2yxj_dx.')
    if (treatment is not None and type(treatment) is not str):
        sys.exit('Error: treatment must be a single string.')
    if ((benchmark_covariates is None) and (r2dxj_x is not None)):
        # Manual (summary-statistics) mode gets a placeholder benchmark name.
        benchmark_covariates = ['manual']
    elif (benchmark_covariates is None):
        return None
    elif type(benchmark_covariates) is str:
        benchmark_covariates = [benchmark_covariates]
    else:
        if ((type(benchmark_covariates) is not list) and (type(benchmark_covariates) is not dict)):
            sys.exit('Benchmark covariates must be a string, list of strings, 2d list containing only strings or dict containing only strings and list of strings.')
        if (type(benchmark_covariates) is list):
            for i in benchmark_covariates:
                if type(i) is not str and (type(i) is not list or any(type(j) is not str for j in i)):
                    sys.exit('Benchmark covariates must be a string, list of strings, 2d list containing only strings or dict containing only strings and list of strings.')
        else:  # benchmark_covariates is a dict
            for i in benchmark_covariates:
                if (type(benchmark_covariates[i]) is not str and (type(benchmark_covariates[i]) is not list or any(type(j) is not str for j in benchmark_covariates[i]))):
                    sys.exit('Benchmark covariates must be a string, list of strings, 2d list containing only strings or dict containing only strings and list of strings.')
    # --- compute benchmark partial R^2s ----------------------------------
    if model is not None:
        m = pd.DataFrame(model.model.exog, columns=model.model.exog_names)
        d = np.array(m[treatment])
        non_treatment = m.drop(columns=treatment)  # all columns except treatment
        non_treatment.insert(0, 0, 1)  # add constant term for regression
        treatment_model = sm.OLS(d, non_treatment)
        treatment_results = treatment_model.fit()
        if type(benchmark_covariates) is str:
            # r2yxj_dx = partial R^2 with outcome; r2dxj_x = partial R^2 with treatment
            r2yxj_dx = [sensitivity_statistics.partial_r2(model, covariates=benchmark_covariates)]
            r2dxj_x = [sensitivity_statistics.partial_r2(treatment_results, covariates=benchmark_covariates)]
        elif (type(benchmark_covariates) is list):
            r2yxj_dx, r2dxj_x = [], []
            for b in benchmark_covariates:
                r2yxj_dx.append(sensitivity_statistics.group_partial_r2(model, covariates=b))
                r2dxj_x.append(sensitivity_statistics.group_partial_r2(treatment_results, covariates=b))
        # Group Benchmark
        elif (type(benchmark_covariates) is dict):
            r2yxj_dx, r2dxj_x = [], []
            for b in benchmark_covariates:
                r2yxj_dx.append(sensitivity_statistics.group_partial_r2(model, benchmark_covariates[b]))
                r2dxj_x.append(sensitivity_statistics.group_partial_r2(treatment_results, benchmark_covariates[b]))
    elif r2dxj_x is not None:
        if np.isscalar(r2dxj_x):
            r2dxj_x = [r2dxj_x]
        if np.isscalar(r2yxj_dx):
            r2yxj_dx = [r2yxj_dx]
    # --- derive bounds ----------------------------------------------------
    # kd/ky normalization is loop-invariant, so hoist it out of the loop.
    if type(kd) is list:
        kd = np.array(kd)
    if ky is None:
        ky = kd
    # For a dict this yields its keys; for a list it is a positional copy.
    label_names = list(benchmark_covariates)
    bound_rows = []
    for i in range(len(benchmark_covariates)):
        r2dxj_x[i], r2yxj_dx[i] = sensitivity_statistics.check_r2(r2dxj_x[i], r2yxj_dx[i])
        r2dz_x = kd * (r2dxj_x[i] / (1 - r2dxj_x[i]))
        if (np.isscalar(r2dz_x) and r2dz_x >= 1) or (not np.isscalar(r2dz_x) and any(v >= 1 for v in r2dz_x)):
            sys.exit("Implied bound on r2dz.x >= 1. Impossible kd value. Try a lower kd.")
        r2zxj_xd = kd * (r2dxj_x[i] ** 2) / ((1 - kd * r2dxj_x[i]) * (1 - r2dxj_x[i]))
        if (np.isscalar(r2zxj_xd) and r2zxj_xd >= 1) or (not np.isscalar(r2zxj_xd) and any(v >= 1 for v in r2zxj_xd)):
            sys.exit("Impossible kd value. Try a lower kd.")
        r2yz_dx = ((np.sqrt(ky) + np.sqrt(r2zxj_xd)) / np.sqrt(1 - r2zxj_xd)) ** 2 * (r2yxj_dx[i] / (1 - r2yxj_dx[i]))
        # Clamp implied bounds above 1. BUG FIX: the original always used
        # boolean indexing, which raises TypeError when r2yz_dx is a scalar.
        if np.isscalar(r2yz_dx):
            if r2yz_dx > 1:
                print('Warning: Implied bound on r2yz.dx greater than 1, try lower kd and/or ky. Setting r2yz.dx to 1.')
                r2yz_dx = 1
        elif any(v > 1 for v in r2yz_dx):
            print('Warning: Implied bound on r2yz.dx greater than 1, try lower kd and/or ky. Setting r2yz.dx to 1.')
            r2yz_dx[r2yz_dx > 1] = 1
        if np.isscalar(kd):
            bound_rows.append({'bound_label': label_maker(benchmark_covariate=label_names[i], kd=kd, ky=ky),
                               'r2dz_x': r2dz_x, 'r2yz_dx': r2yz_dx})
        else:
            for j in range(len(kd)):
                bound_rows.append({'bound_label': label_maker(benchmark_covariate=label_names[i], kd=kd[j], ky=ky[j]),
                                   'r2dz_x': r2dz_x[j], 'r2yz_dx': r2yz_dx[j]})
    # DataFrame.append was removed in pandas 2.0; build the frame in one shot.
    return pd.DataFrame(bound_rows, columns=['bound_label', 'r2dz_x', 'r2yz_dx'])
def top_ngrams(df, n=2, ngrams=10):
    """
    * Not generalizable in this form *
    * This works well, but is very inefficient and should be optimized or rewritten *

    Build one large token list from the preprocessed, tokenized `lemma`
    column and return the most frequent ngrams.

    Arguments:
        df = name of DataFrame with no_hashtags column (this will be generalizable in a future commit)
        n = number of words per grouping eg. 1, 2 or 3
        ngrams = Number of ngrams to return
    """
    lemma_text = ''.join(str(df['lemma'].tolist()))
    tokens = preprocess(lemma_text)
    frequency = pd.Series(nltk.ngrams(tokens, n)).value_counts()
    return frequency[:ngrams]
def install_openvpn(instance, arg, verbose=True):
    """Install and configure an OpenVPN server on *instance*.

    Writes the server configuration locally under the instance's simulation
    directory, uploads it to the instance and restarts the service.

    Args:
        instance: workstation object; ``instance.name`` selects the local directory.
        arg (dict): configuration values — certificate names, ``max_client``,
            ``user`` and ``group`` (empty string means "nobody").
        verbose (bool): forwarded to the install step.

    Returns:
        1 on upload or restart failure, None otherwise (matches the
        convention of the helper functions it calls).
    """
    install(instance, {"module": "openvpn"}, verbose=True)
    generate_dh_key(instance, {"dh_name": "openvpn", "key_size": "2048"})
    cert = arg["private_key_certificate_name"]
    middle = arg["private_key_middle_certificate_name"]
    user = arg["user"] if arg["user"] != "" else "nobody"
    group = arg["group"] if arg["group"] != "" else "nobody"
    config_lines = [
        "port 1197\n",
        "proto udp\n",
        "dev tun\n",
        "ca /certs/" + cert + "/" + middle + ".cert\n",
        "cert /certs/" + cert + "/" + cert + ".cert\n",
        "key /certs/" + cert + "/" + cert + ".key\n",
        "dh /certs/dh/openvpn-2048.key\n",
        "server 10.122.0.0 255.255.255.0 \n",
        "push \"10.122.1.0 255.255.255.0\"\n",
        "keepalive \n",
        "cipher AES-128-CBC \n",
        "comp-lzo \n",
        "max-clients " + arg["max_client"] + "\n",
        "user " + user + "\n",
        "group " + group + "\n",
        "persist-key\n",
        "persist-tun\n",
        "status openvpn-status.log\n",
        "log openvpn.log\n",
        "verb 9\n",
    ]
    conf_path = "simulation/workstations/" + instance.name + "/server_openvpn.conf"
    # BUG FIX: the original left the file handle open; `with` guarantees the
    # file is flushed and closed even if a write fails.
    with open(conf_path, "w") as server_conf:
        server_conf.writelines(config_lines)
    if upload_file(instance, {"instance_path": "/etc/openvpn/server.conf",
                              "host_manager_path": conf_path}, verbose=False) == 1:
        return 1
    if restart_service(instance, {"service": "openvpn"}) == 1:
        return 1
def filter_params(module: Module, train_bn: bool = True) -> Generator:
    """Yield the trainable parameters of *module*.

    Args:
        module: module to inspect recursively.
        train_bn: if True, BatchNorm layers are left in training mode and
            their parameters are not yielded here.

    Returns:
        Generator over parameters whose ``requires_grad`` flag is set.
    """
    children = list(module.children())
    if children:
        # Composite module: recurse into each child.
        for child in children:
            yield from filter_params(module=child, train_bn=train_bn)
    elif not (isinstance(module, BN_TYPES) and train_bn):
        # Leaf module (and not a skipped BatchNorm): emit trainable params.
        yield from (param for param in module.parameters() if param.requires_grad)
def rossoporn_parse(driver: webdriver.Firefox) -> tuple[list[str], int, str]:
    """Read the html for rossoporn.com"""
    # Parse the page currently loaded in the driver.
    soup = soupify(driver)
    raw_title = soup.find("div", class_="content_right").find("h1").text
    dir_name = clean_dir_name(raw_title)
    images = []
    for wrapper in soup.find_all("div", class_="wrapper_g"):
        for img in wrapper.find_all("img"):
            # Dropping the "tn_" thumbnail prefix yields the full-size URL.
            images.append("".join([PROTOCOL, img.get("src").replace("tn_", "")]))
    driver.quit()
    return (images, len(images), dir_name)
def pid_from_context(_, context, **kwargs):
    """Get PID from marshmallow context."""
    ctx = context or {}
    pid = ctx.get('pid')
    if pid:
        return pid.pid_value
    # marshmallow's `missing` sentinel tells the schema to omit the field.
    return missing
def score_text(text, tokenizer, preset_model, finetuned_model):
    """ Uses rule-based rankings. Higher is better, but different features have different scales.
    Args:
        text (str/ List[str]): one story to rank.
        tokenizer (Pytroch tokenizer): GPT2 Byte Tokenizer.
        preset_model (Pytorch model): preset GPT2 model of the same/ different size of the finetuned model.
        finetuned_model (Pytorch model): fine-tuned GPT2 model.
    Returns a scores np.array of corresponding to text.
    """
    assert isinstance(
        text, (str, list)), f"score_text accepts type(text) = str/list, but got {type(text)}"
    if isinstance(text, list):
        text = ' '.join(text)
    # Keep same order as in constants.FEATURES
    scores = [0 for _ in range(len(constants.FEATURES))]
    texts_sentences = split_to_sentences(text)
    # Coherency scoring is deliberately disabled below, so scores[0] stays 0.
    # scores[0] = _coherency(texts_sentences, lsa_embedder)
    scores[1] = _readabilty(text, texts_sentences)
    # Set of text words without punctuation and stop words.
    filtered_words = list(filter(
        lambda word: word not in constants.STOP_WORDS, split_words(text.lower().strip())))
    filtered_words_set = set(filtered_words)
    # Sentiment.
    scores[2] = _sentiment_polarity(filtered_words)
    # Set based measures.
    scores[3], scores[4] = _simplicity(filtered_words_set), _diversity(
        filtered_words, filtered_words_set)
    # The bigger differene, the more tale-like, similar to the fine-tuned model, the text is.
    scores[5] = KLDIV_error_per_text(
        tokenizer, preset_model, finetuned_model, text)
    # print(" | ".join(f'{key}: {score:.2f}' for key,
    #                  score in zip(constants.FEATURES, scores)))
    return np.array(scores)
def get_bot_list(swarming_server, dimensions, dead_only):
    """Returns a list of swarming bots.

    Args:
        swarming_server: URL of the swarming server to query.
        dimensions: dict mapping dimension name to value, used to filter bots.
        dead_only: if True, only dead bots are reported.

    Returns:
        List of output lines (one bot per line) from the swarming.py tool.
    """
    cmd = [
        sys.executable, 'swarming.py', 'bots',
        '--swarming', swarming_server,
        '--bare',
    ]
    # BUG FIX: dict.iteritems() is Python-2-only; items() works on 2 and 3.
    for k, v in sorted(dimensions.items()):
        cmd.extend(('--dimension', k, v))
    if dead_only:
        cmd.append('--dead-only')
    return subprocess.check_output(cmd, cwd=ROOT_DIR).splitlines()
def running_on_kaggle() -> bool:
    """Detect if the current environment is running on Kaggle.

    Returns:
        bool:
            True when the ``KAGGLE_KERNEL_RUN_TYPE`` environment variable is
            set to ``"Interactive"``, False otherwise.
    """
    run_type = os.environ.get("KAGGLE_KERNEL_RUN_TYPE")
    return run_type == "Interactive"
def _get_picture_from_attachments(path):
    """Download a picture from the Telegram file server and return its bytes.

    Args:
        path: file path on the Telegram server (as returned by getFile).

    Returns:
        bytes: content of the downloaded image.

    Raises:
        InvalidAttachments: when the curl download fails.
    """
    url = 'https://api.telegram.org/file/bot' + API_TOKEN + '/' + path
    pic_path = './photos/pic.jpg'
    # SECURITY FIX: the original built a shell string with `shell=True`, so a
    # crafted `path` could inject shell commands. Passing an argument list
    # with the default shell=False avoids that; `-o` replaces the `>` redirect.
    data = subprocess.run(['curl', url, '-o', pic_path])
    if data.returncode != 0:
        raise InvalidAttachments('Something evil has happened :c')
    with open(pic_path, 'rb') as f:
        data = f.read()
    return data
def parse_cards(account_page_content):
    """
    Parse card metadata and product balances from /ClipperCard/dashboard.jsf
    """
    # Restrict parsing to the card section delimited by the HTML comments.
    start = account_page_content.index(b'<!--YOUR CLIPPER CARDS-->')
    stop = account_page_content.index(b'<!--END YOUR CLIPPER CARDS-->')
    card_soup = bs4.BeautifulSoup(account_page_content[start:stop], "html.parser")
    # One entry per card for each labelled field, in page order.
    fields = zip(
        find_values(card_soup, 'Serial Number:', get_next_sibling_text),
        find_values(card_soup, 'Card Nickname:', get_inner_display_text),
        find_values(card_soup, 'Type:', get_next_sibling_text),
        find_values(card_soup, 'Status:', get_next_sibling_text),
        parse_card_products(card_soup),
    )
    return [
        Card(serial_number=sn, nickname=nn, type=tp, status=st, products=pd)
        for sn, nn, tp, st, pd in fields
    ]
def __setitem__(x, key, value, /):
    """
    Set ``x[key] = value``.

    Note: __setitem__ is a method of the array object.
    """
    # Stub: the concrete behavior is supplied by the backing array type.
    pass
def interaction_time_data_string(logs, title):
    """Render a grouped bar chart of interaction counts by time of day, with
    one bar per interaction medium in each group, and return the figure as a
    base64-encoded image string (via pyplot_to_base64).

    Args:
        logs: iterable of LogEntry model instances to aggregate.
        title: title drawn on the chart.

    Returns:
        str: base64 encoding of the rendered matplotlib figure.
    """
    # NOTE(review): `contexts`, `contexts_map` and `reacc_map` are computed
    # but never used below — they look like leftovers from the earlier
    # social-context version of this chart (preserved in git history);
    # confirm before removing.
    contexts = utils.valid_values_for_enum((models.LogEntry.SOCIAL_CHOICES))
    contexts_map = dict(models.LogEntry.SOCIAL_CHOICES)
    reacc_map = dict(models.LogEntry.REACTION_CHOICES)
    interaction_map = dict(models.LogEntry.MEDIUM_CHOICES)
    time_map = dict(models.LogEntry.TIME_CHOICES)
    # Bucket log entries by their human-readable interaction medium.
    first_agg = recommender.group_list_by_sel(logs, lambda l: interaction_map[l.interaction_medium])
    plt.clf()
    keys = sorted(first_agg.keys())
    sub_keys = sorted(list(time_map.keys()))
    # Spread group centers out (factor 2) so each medium's bar fits alongside.
    xs = np.arange(len(sub_keys)) * 2
    width = .35
    colors = np.array([
        [205,224,241],
        [190,26,9],
        [0,105,253],
        [255,114,0],
    ]) / 255.0
    for i, reacc in enumerate( keys ):
        sub_logs = first_agg[reacc]
        counts = _counts_by_getter(sub_logs, lambda l: l.time_of_day)
        ys = [counts.get(cont, 0) for cont in sub_keys]
        plt.bar(xs + i * width, ys, width, label=reacc, color=colors[i])
    ax = plt.gca()
    # Center the tick labels under each group of bars.
    ax.set_xticks(xs + width * (len(keys) // 2))
    ax.set_xticklabels([time_map[k] for k in sub_keys])
    plt.title(title)
    # NOTE(review): the x axis shows time-of-day buckets, but the label says
    # "Social Context" — probably stale; confirm the intended label.
    plt.xlabel("Social Context")
    plt.ylabel('Num Interactions')
    plt.legend()
    ax.xaxis.grid(False)
    plt.tight_layout()
    return pyplot_to_base64()
def main() -> None:
    """
    Load the data from a csv file and insert it into a MongoDB database.
    """
    import json
    # The CSV filename is configured externally in script_constants.json.
    with open("script_constants.json", "r") as constants_file:
        constants = json.load(constants_file)
    pokemon = extract_data_from_csv(constants["Pokemon_data_filename"])
    insert_data_into_db(pokemon)
def _label_boost(boost_form, label):
"""Returns the label boost.
Args:
boost_form: Either NDCG or PRECISION.
label: The example label.
Returns:
A list of per list weight.
"""
boost = {
'NDCG': math.pow(2.0, label) - 1.0,
'PRECISION': 1.0 if label >= 1.0 else 0.0,
}
return boost[boost_form] | 30,880 |
def iterate_orthologous_lexical_matches(prefix) -> Iterable[MappingTuple]:
    """Generate orthologous relations between lexical matches from different species.

    Args:
        prefix: resource prefix whose identifiers/names are compared pairwise.

    Yields:
        MappingTuple rows linking pairs of exactly-matching term names that
        belong to different species, with this script's URL as provenance.
    """
    names = pyobo.get_id_name_mapping(prefix)
    species = pyobo.get_id_species_mapping(prefix)
    provenance = get_script_url(__file__)
    count = 0
    # All unordered pairs of terms — quadratic, hence the progress bar.
    it = itt.combinations(names.items(), r=2)
    it = tqdm(it, total=len(names) * (len(names) - 1) / 2, unit_scale=True)
    for (source_id, source_name), (target_id, target_name) in it:
        # Orthologs must be distinct terms belonging to distinct species.
        if source_id == target_id or species[source_id] == species[target_id]:
            continue
        if _lexical_exact_match(source_name, target_name):
            count += 1
            yield MappingTuple(
                prefix, source_id, source_name,
                'orthologous',
                prefix, target_id, target_name,
                'lexical',
                provenance,
            )
    print(f'Identified {count} orthologs in {prefix}')
def has_matching_ts_templates(reactant, bond_rearr):
    """
    Determine whether any saved TS template is suitable for generating a
    transition-state guess for this reactant/bond-rearrangement pair.

    Arguments:
        reactant (autode.complex.ReactantComplex):
        bond_rearr (autode.bond_rearrangement.BondRearrangement):

    Returns:
        bool:
    """
    truncated_graph = get_truncated_active_mol_graph(graph=reactant.graph,
                                                     active_bonds=bond_rearr.all)
    # any() short-circuits on the first matching template, as the original
    # explicit loop did.
    return any(template_matches(reactant=reactant,
                                ts_template=template,
                                truncated_graph=truncated_graph)
               for template in get_ts_templates())
def get_commands(xml: objectify.ObjectifiedElement):
    """
    Return the ``action`` attribute of the message body.

    NOTE(review): the original docstring claimed this returns "an action and
    the room", but only the action is returned — confirm the intended
    contract with callers.

    :param xml: parsed message element with a ``body`` child.
    :return: value of the body's ``action`` attribute.
    """
    return xml.body.attrib["action"]
def compression_point(w_db, slope = 1, compression = 1,
                      extrapolation_point = None, axis = -1):
    """Return input referred compression point"""
    # Distance between the ideal (extrapolated) line and the measured curve,
    # then the input level where that distance crosses the threshold.
    ideal_line = calc_extrapolation_line(w_db, slope, extrapolation_point, axis)
    deviation = ideal_line - w_db
    return cross(deviation, compression)
def main():
    """
    calculates a hilbert pseudocurve of p iterations.
    Scale the pseudocurve to match the desired image.
    Move along the curve and sample image and to get average color at each segment
    Output gcode corresponding the coordinates and scaled Z levels.
    If no image is given, a flat hilbert curve of the desired size is produced.
    """
    parser = argparse.ArgumentParser(description='Generate a hilbert curve')
    parser.add_argument('-p', '--iterations', type=int, default=5,
                        help='hilbert curve is fractal, number of iterative levels')
    parser.add_argument('-s', '--size', type=int, default=100,
                        help='size in millimeters')
    parser.add_argument('-zmin', type=float, default=0,
                        help='The minimum z value which approximates black')
    parser.add_argument('-zmax', type=float, default=255,
                        help='The maximum z value which approximates white')
    parser.add_argument('--laser', action='store_true',
                        help='Z values will be output as spindle power (S words)')
    parser.add_argument('-F','--FEED', type=int, default=4000,
                        help='Feed rate in mm/min, default = 4000')
    parser.add_argument('-i','--infile',
                        help='image filename to process')
    parser.add_argument('-o', '--outfile',
                        help='gcode filename to write')
    args = parser.parse_args()
    N=2 # Number of dimensions. For images, this is always 2
    # Number of curve points for p iterations in N dimensions (2^(p*N) - 1).
    MAX = 2**(args.iterations*N)-1
    # Millimeters per curve grid unit.
    scalefactor = args.size/(math.sqrt(MAX+1))
    imagefile = None
    if args.infile is not None:
        imagefile = Image.open(args.infile)
        if imagefile.size[0] != imagefile.size[1]:
            print('the image is not square. It will be cropped')
            cropsize = min(imagefile.size)
            imagefile = imagefile.crop((0, 0, cropsize, cropsize))
        # Image pixels per curve grid unit.
        imagefactor = imagefile.size[0]/(math.sqrt(MAX+1))
    hilbert_curve = HilbertCurve(args.iterations, N)
    with open(args.outfile, "w") as outfile:
        outfile.write(PREAMBLE)
        for ii in range(MAX):
            coords = hilbert_curve.coordinates_from_distance(ii)
            if imagefile is not None:
                zval = get_average_color(x=int(coords[0]*imagefactor), y=int(coords[1]*imagefactor), n=5, img=imagefile)
            else:
                zval = 1
            # Linearly map the 0..255 sample onto [zmin, zmax].
            # NOTE(review): the stray "-0" looks like a leftover offset term.
            scalez = (args.zmax-args.zmin)/255*zval-0+args.zmin
            if args.laser:
                #outfile.write(f'M3 S{scalez}\n')
                outfile.write(f'G1 X{coords[0]*scalefactor} Y{coords[1]*scalefactor} F{scalez}\n')
            else:
                outfile.write(f'G1 X{coords[0]*scalefactor} Y{coords[1]*scalefactor} Z{scalez} F{args.FEED}\n')
        outfile.write(POSTAMBLE)
def pattern_remove_incomplete_region_or_spatial_path(
    perception_graph: PerceptionGraphPattern
) -> PerceptionGraphPattern:
    """
    Helper function to return a `PerceptionGraphPattern` verifying
    that region and spatial path perceptions contain a reference object.

    Region/path nodes without a reference-object edge are removed; if the
    removal disconnects the graph, only the largest weakly-connected
    component is kept.
    """
    graph = perception_graph.copy_as_digraph()
    # Region and spatial-path nodes are the only ones that require a
    # reference object to be meaningful.
    region_and_path_nodes: ImmutableSet[NodePredicate] = immutableset(
        node
        for node in graph.nodes
        if isinstance(node, IsPathPredicate) or isinstance(node, RegionPredicate)
    )
    nodes_without_reference: List[NodePredicate] = []
    for node in region_and_path_nodes:
        has_reference_edge: bool = False
        for successor in graph.successors(node):
            predicate = graph.edges[node, successor]["predicate"]
            if isinstance(predicate, RelationTypeIsPredicate):
                if predicate.relation_type in [
                    REFERENCE_OBJECT_LABEL,
                    REFERENCE_OBJECT_DESTINATION_LABEL,
                    REFERENCE_OBJECT_SOURCE_LABEL,
                ]:
                    has_reference_edge = True
                    break
        if not has_reference_edge:
            nodes_without_reference.append(node)
    logging.info(
        f"Removing incomplete regions and paths. "
        f"Removing nodes: {nodes_without_reference}"
    )
    graph.remove_nodes_from(nodes_without_reference)
    def sort_by_num_nodes(g: DiGraph) -> int:
        return len(g.nodes)
    # We should maybe consider doing this a different way
    # As this approach just brute force solves the problem rather than being methodical about it
    if number_weakly_connected_components(graph) > 1:
        # Keep only the largest weakly-connected component; drop the rest.
        components = [
            component
            for component in [
                subgraph(graph, comp) for comp in weakly_connected_components(graph)
            ]
        ]
        components.sort(key=sort_by_num_nodes, reverse=True)
        computed_graph = subgraph(graph, components[0].nodes)
        removed_nodes: List[NodePredicate] = []
        for i in range(1, len(components)):
            removed_nodes.extend(components[i].nodes)
        logging.info(f"Cleanup disconnected elements. Removing: {removed_nodes}")
    else:
        computed_graph = graph
    return PerceptionGraphPattern(computed_graph, dynamic=perception_graph.dynamic)
def test_multiplication():
    """Check value and derivative propagation for products (product rule)."""
    # f*g, both operand orders.
    assert f_5.get()[0] == f.get()[0] * g.get()[0]
    assert f_5.get()[1] == f.get()[0] * g.get()[1] + f.get()[1] * g.get()[0]
    assert f_6.get()[0] == g.get()[0] * f.get()[0]
    assert f_6.get()[1] == g.get()[0] * f.get()[1] + g.get()[1] * f.get()[0]
    # Multiplying by a constant scales both the value and the derivative,
    # regardless of operand order.
    for scaled in (f * 3, 3 * f):
        assert scaled.get()[0] == 3 * (f.get()[0])
        assert scaled.get()[1] == 3 * (f.get()[1])
def other_shifted_bottleneck_distance(A, B, fudge=default_fudge, analysis=False):
    """Compute the shifted bottleneck distance between two diagrams, A and B (multisets).

    Args:
        A, B: multisets of diagram points (wrapped into pu.SaneCounter).
        fudge: callable applied to each radius update (numerical slack).
        analysis: if True, print event-loop statistics when done.

    Returns:
        The final radius (0 if both diagrams are empty).
    """
    A = pu.SaneCounter(A)
    B = pu.SaneCounter(B)
    if not A and not B:
        return 0
    # Start from an upper bound and shrink the radius as events are processed.
    radius = fudge(upper_bound_on_radius(A, B))
    events = event_queue.EventQueue(A, B)
    matching = GeometricBipartiteMatching(A, B)
    # these counters are for performance monitoring only - they don't affect the logic
    ctr, R_ctr, L_ctr, fail_ctr, win_ctr = 0, 0, 0, 0, 0
    while events and radius > epsilon:
        ctr += 1
        event = events.next_event(radius)
        if isinstance(event, event_queue.ExitEvent):
            R_ctr += 1
            matching.remove_all(event.edge)
        else:
            L_ctr += 1
            if birth(event.edge, radius) >= death(event.edge, radius):
                win_ctr += 1
                continue  # relies on ties being broken with the highest-radius edge
            # assert not matching.diagonal_perfect()
            if matching.diagonal_perfect():
                fail_ctr += 1
                # Tighten the radius and requeue this event for a later pass.
                radius = fudge(max(
                    events.next_diagonal_height(),
                    radius - (events.next_exit_shift(radius)
                        - birth(event.edge, radius)) / 2))
                events.push(event)
                continue
            matching.maximize_matching(
                shift=event.shift_to_check,
                radius=radius)
            if matching.diagonal_perfect():
                # radius = fudge(matching.value())
                events.push(event)
    if analysis:
        print("other:", len(A) + len(B), "total", ctr, "R", R_ctr, "L", L_ctr, "fail", fail_ctr, "win", win_ctr)
    return radius
def getCountdown(c):
    """
    Decompose a timedelta into (days, hours, minutes, seconds) for a
    human-readable countdown display.

    Args:
        c: datetime.timedelta remaining.

    Returns:
        Tuple (days, hours, minutes, seconds), each component reduced modulo
        the next larger unit so they can be printed side by side.
    """
    days = c.days
    # BUG FIX: the original computed hours from total_seconds() without
    # removing whole days, double-counting them (1d 2h reported 26 hours).
    remainder = round(c.total_seconds()) - days * 86400
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)
    return days, hours, minutes, seconds
def from_aiida_type(x):
    """Turn Aiida types into their corresponding native Python types."""
    # Fallback for unrecognised types: always raise with the offending type.
    message = f"Do not know how to convert {type(x)} to native Python type"
    raise TypeError(message)
def fromPSK(valstr):
    """A special version of fromStr that assumes the user is trying to set a PSK.
    In that case we also allow "none", "default" or "random" (to have python generate one), or simpleN
    """
    if valstr == "random":
        return genPSK256()
    if valstr == "none":
        # The 'no encryption' PSK.
        return bytes([0])
    if valstr == "default":
        # The default channel PSK.
        return bytes([1])
    if valstr.startswith("simple"):
        # One of the single-byte encodings: "simpleN" maps to byte N+1.
        index = int(valstr[6:]) + 1
        return bytes([index])
    return fromStr(valstr)
def init_app(_):
    """Initializes backend.

    No-op placeholder: accepts (and ignores) the application object so it
    can be wired into an app-factory startup sequence.
    """
def get_subpackages(name):
    """Return subpackages of package *name*"""
    # A directory counts as a (sub)package when it contains an __init__.py;
    # directory paths are converted to dotted module paths.
    return [
        ".".join(dirpath.split(os.sep))
        for dirpath, _dirnames, _filenames in os.walk(name)
        if osp.isfile(osp.join(dirpath, "__init__.py"))
    ]
def list_hierarchy(class_name, bases):
    """
    Creates a list of the class hierarchy

    Args:
    -----
        class_name: name of the current class
        bases: list/tuple of bases for the current class
    """
    uris = [Uri(class_name)]
    uris += [Uri(base.__name__) for base in bases
             if base.__name__ not in IGNORE_CLASSES]
    # Deduplicate via a set; iteration order is unspecified, exactly as in
    # the original implementation.
    return list(set(uris))
def output_mesh(mesh, config):
    """Outputs mesh in formats specified in config.

    :param mesh MeshInfo: tetrahedral mesh.
    :param config Config: configuration for mesh build.
    """
    formats = config.output_format
    if not formats:
        return
    logger.info('Outputting mesh in formats: {}'.format(', '.join(formats)))
    prefix = config.output_prefix
    # Writers are imported lazily so unused backends are never loaded.
    if 'msh' in formats:
        from mesh_sphere_packing.tetmesh import write_msh
        write_msh('%s.msh' % prefix, mesh)
    if 'multiflow' in formats:
        from mesh_sphere_packing.tetmesh import write_multiflow
        write_multiflow('%s.h5' % prefix, mesh)
    if 'ply' in formats:
        from mesh_sphere_packing.tetmesh import write_ply
        write_ply('%s.ply' % prefix, mesh)
    if 'poly' in formats:
        from mesh_sphere_packing.tetmesh import write_poly
        write_poly('%s.poly' % prefix, mesh)
    if 'vtk' in formats:
        mesh.write_vtk('%s.vtk' % prefix)
def move_mouse_to_specific_location(x_coordinate, y_coordinate):
    """Moves the mouse to a specific point"""
    position = (x_coordinate, y_coordinate)
    LOGGER.debug("Moving mouse to (%d,%d)", x_coordinate, y_coordinate)
    pyautogui.moveTo(*position)
    # Resolve with the target coordinates so callers can chain on them.
    return Promise.resolve(position)
def rss():
    """Return ps -o rss (resident) memory in kB."""
    # NOTE(review): mem("rss") appears to already be in kB, so dividing by
    # 1024 yields MB — the docstring's unit may be stale; confirm.
    kilobytes = float(mem("rss"))
    return kilobytes / 1024
def load_pickle_file(filename):
    """Lazily yield every object stored back-to-back in a pickle file."""
    with open(filename, "rb") as handle:
        # A pickle file may hold several consecutive objects; EOFError
        # signals that the last one has been read.
        while True:
            try:
                obj = pickle.load(handle)
            except EOFError:
                return
            yield obj
def compare_words(
        word1_features,
        word2_features,
        count=10,
        exclude=frozenset(),
        similarity_degree=0.5,
        separate=False,
        min_feature_value=0.3
):
    """
    Compare two words based on their feature vectors (e.g. lists of similar words).

    Returns three ranked lists: features characteristic of the first word,
    features characteristic of the second word, and features common to both.
    (Docstring translated from the original Russian.)

    :param dict[int, float] word1_features: features of the first word, {feature: value}
    :param dict[int, float] word2_features: features of the second word, {feature: value}
    :param int count: number of items in each result list
    :param exclude: collection of features to skip entirely
    :param float similarity_degree: 0..1; 1 favors fully separating the words,
        0 maximizes the search for commonality
    :param bool separate: "strict separation" -- a feature may only land in the
        column(s) where its score is maximal
    :param float min_feature_value: features below this value in both words are ignored
    :return: tuple of three lists of (feature, score) pairs, each sorted by descending score
    """
    # Mutable default `exclude=set()` replaced by an immutable frozenset
    # (same semantics; avoids the shared-mutable-default pitfall).
    diff1, diff2, common = {}, {}, {}
    features = set(word1_features).union(word2_features)
    for feature in features:
        if feature in exclude:
            continue
        value1 = word1_features.get(feature, 0)
        value2 = word2_features.get(feature, 0)
        if value1 < min_feature_value and value2 < min_feature_value:
            continue
        # "Distinctive" scores favor a high value in one word and a low value
        # in the other; the common score rewards both being high.
        diff1_value = value1 * (1 - value2)
        diff2_value = value2 * (1 - value1)
        common_value = (value1 * value2) ** similarity_degree
        max_value = max(diff1_value, diff2_value, common_value)
        if diff1_value == max_value or not separate:
            diff1[feature] = diff1_value
        if diff2_value == max_value or not separate:
            diff2[feature] = diff2_value
        if common_value == max_value or not separate:
            common[feature] = common_value
    return (
        sorted(diff1.items(), key=itemgetter(1), reverse=True)[:count],
        sorted(diff2.items(), key=itemgetter(1), reverse=True)[:count],
        sorted(common.items(), key=itemgetter(1), reverse=True)[:count],
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.